From 1ce326486a70686a1857a302d321faae9b1aa49e Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Tue, 6 Jan 2026 13:53:49 +0200 Subject: [PATCH 01/17] evm zk proof verification --- .gitignore | 6 + cli/ubft/cmd/root_node.go | 27 ++ internal/testutils/partition/network.go | 2 +- .../block_certification_request.go | 4 + .../consensus/zkverifier/FFI_INTEGRATION.md | 448 ++++++++++++++++++ .../consensus/zkverifier/config.example.yaml | 134 ++++++ .../zkverifier/sp1-verifier-ffi/Cargo.toml | 22 + .../zkverifier/sp1-verifier-ffi/README.md | 259 ++++++++++ .../zkverifier/sp1-verifier-ffi/build.sh | 85 ++++ .../sp1-verifier-ffi/sp1_verifier.h | 83 ++++ .../zkverifier/sp1-verifier-ffi/src/lib.rs | 249 ++++++++++ .../consensus/zkverifier/sp1_verifier.go | 94 ++++ .../consensus/zkverifier/sp1_verifier_ffi.go | 149 ++++++ rootchain/consensus/zkverifier/verifier.go | 110 +++++ .../consensus/zkverifier/verifier_test.go | 188 ++++++++ rootchain/node.go | 73 +++ rootchain/node_test.go | 58 +-- 17 files changed, 1961 insertions(+), 30 deletions(-) create mode 100644 rootchain/consensus/zkverifier/FFI_INTEGRATION.md create mode 100644 rootchain/consensus/zkverifier/config.example.yaml create mode 100644 rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml create mode 100644 rootchain/consensus/zkverifier/sp1-verifier-ffi/README.md create mode 100755 rootchain/consensus/zkverifier/sp1-verifier-ffi/build.sh create mode 100644 rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h create mode 100644 rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs create mode 100644 rootchain/consensus/zkverifier/sp1_verifier.go create mode 100644 rootchain/consensus/zkverifier/sp1_verifier_ffi.go create mode 100644 rootchain/consensus/zkverifier/verifier.go create mode 100644 rootchain/consensus/zkverifier/verifier_test.go diff --git a/.gitignore b/.gitignore index 0849678a..753014f5 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,12 @@ .DS_Store build/ +# rust +target/ 
+Cargo.lock +*.pdb +config.toml + # Test artifacts test-coverage.out test-coverage-cobertura.xml diff --git a/cli/ubft/cmd/root_node.go b/cli/ubft/cmd/root_node.go index db110d00..342aa1af 100644 --- a/cli/ubft/cmd/root_node.go +++ b/cli/ubft/cmd/root_node.go @@ -28,6 +28,7 @@ import ( "github.com/unicitynetwork/bft-core/rootchain/consensus" "github.com/unicitynetwork/bft-core/rootchain/consensus/storage" "github.com/unicitynetwork/bft-core/rootchain/consensus/trustbase" + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" "github.com/unicitynetwork/bft-core/rootchain/partitions" ) @@ -53,6 +54,11 @@ type ( BlockRate uint32 MaxRequests uint // validator partition certification request channel capacity RPCServerAddress string // address on which http server is exposed with metrics endpoint + + // ZK verification configuration + ZKVerificationEnabled bool + ZKProofType string + ZKVerificationKeyPath string } ) @@ -111,6 +117,14 @@ func rootNodeRunCmd(baseFlags *baseFlags) *cobra.Command { cmd.Flags().Uint32Var(&flags.BlockRate, "block-rate", consensus.BlockRate, "block rate (consensus parameter)") + // ZK verification flags + cmd.Flags().BoolVar(&flags.ZKVerificationEnabled, "zk-verification-enabled", false, + "Enable ZK proof verification for L2 state transitions") + cmd.Flags().StringVar(&flags.ZKProofType, "zk-proof-type", "sp1", + "ZK proof type (sp1, risc0, exec, none)") + cmd.Flags().StringVar(&flags.ZKVerificationKeyPath, "zk-vkey-path", "", + "Path to ZK verification key file (.vkey)") + hideFlags(cmd, "block-rate") return cmd } @@ -227,11 +241,24 @@ func rootNodeRun(ctx context.Context, flags *rootNodeRunFlags) error { if err = host.BootstrapConnect(ctx, log); err != nil { return err } + + // Initialize ZK verifier + zkVerifierCfg := &zkverifier.Config{ + Enabled: flags.ZKVerificationEnabled, + ProofType: zkverifier.ProofType(flags.ZKProofType), + VerificationKeyPath: flags.ZKVerificationKeyPath, + } + zkVerifier, err := 
zkverifier.NewVerifier(zkVerifierCfg) + if err != nil { + return fmt.Errorf("failed to initialize ZK verifier: %w", err) + } + node, err := rootchain.New( host, partitionNet, cm, obs, + zkVerifier, ) if err != nil { return fmt.Errorf("failed initiate root node: %w", err) diff --git a/internal/testutils/partition/network.go b/internal/testutils/partition/network.go index f974cad9..495ac7cd 100644 --- a/internal/testutils/partition/network.go +++ b/internal/testutils/partition/network.go @@ -356,7 +356,7 @@ func (r *RootChain) start(t *testing.T, ctx context.Context) error { if err != nil { return fmt.Errorf("consensus manager initialization failed, %w", err) } - node, err := rootchain.New(rootPeer, rootNet, cm, obs) + node, err := rootchain.New(rootPeer, rootNet, cm, obs, nil) if err != nil { return fmt.Errorf("failed to create root node, %w", err) } diff --git a/network/protocol/certification/block_certification_request.go b/network/protocol/certification/block_certification_request.go index 107bc16a..97d901c4 100644 --- a/network/protocol/certification/block_certification_request.go +++ b/network/protocol/certification/block_certification_request.go @@ -22,6 +22,7 @@ type BlockCertificationRequest struct { ShardID types.ShardID `json:"shardId"` NodeID string `json:"nodeId"` InputRecord *types.InputRecord `json:"inputRecord"` + ZkProof []byte `json:"zkProof"` // ZK proof for state transition validation BlockSize uint64 `json:"blockSize"` StateSize uint64 `json:"stateSize"` Signature hex.Bytes `json:"signature"` @@ -84,6 +85,9 @@ func (x *BlockCertificationRequest) Sign(signer crypto.Signer) error { } func (x BlockCertificationRequest) Bytes() ([]byte, error) { + // Exclude signature and ZK proof from signed data + // ZK proof is validated separately by the verifier x.Signature = nil + x.ZkProof = nil return types.Cbor.Marshal(x) } diff --git a/rootchain/consensus/zkverifier/FFI_INTEGRATION.md b/rootchain/consensus/zkverifier/FFI_INTEGRATION.md new file mode 100644 
index 00000000..52d65bb0 --- /dev/null +++ b/rootchain/consensus/zkverifier/FFI_INTEGRATION.md @@ -0,0 +1,448 @@ +# SP1 FFI Integration Guide + +Complete guide for integrating SP1 proof verification via FFI (Foreign Function Interface). + +## Overview + +Since there's no native Go library for SP1 STARK proof verification, we use FFI to call the Rust SP1 SDK from Go. + +**Architecture:** +``` +Go (BFT Core) → CGO → C Header → Rust FFI → SP1 SDK +``` + +--- + +## Quick Start + +### 1. Build the FFI Library + +```bash +cd rootchain/consensus/zkverifier/sp1-verifier-ffi +./build.sh +``` + +This will: +- Compile the Rust library +- Run tests +- Create `libsp1_verifier_ffi.{so,dylib,a}` + +### 2. Test the Integration + +```bash +cd .. +go test -v ./... +``` + +The Go code automatically links to the Rust library via CGO directives. + +### 3. Run BFT Core with FFI Verification + +```bash +ubft root-node run \ + --zk-verification-enabled=true \ + --zk-proof-type=sp1 \ + --zk-vkey-path=/etc/bft-core/sp1.vkey +``` + +If the FFI library is built, you'll see: +``` +INFO Using SP1 FFI verifier path=/etc/bft-core/sp1.vkey version=0.1.0 +``` + +If FFI is not available: +``` +ERROR FFI verifier not available, error=... 
+``` + +--- + +## Detailed Setup + +### Prerequisites + +**System Requirements:** +- Rust 1.70+ (install from https://rustup.rs/) +- GCC/Clang (for CGO) +- Go 1.21+ + +**Library Dependencies:** +- SP1 SDK (automatically fetched by Cargo) +- System libraries: `libdl`, `libm` + +### Build Process + +#### Step 1: Configure Rust Environment + +```bash +# Install Rust (if not already installed) +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + +# Verify installation +rustc --version +cargo --version +``` + +#### Step 2: Build FFI Library + +```bash +cd rootchain/consensus/zkverifier/sp1-verifier-ffi + +# Development build (faster, larger) +cargo build + +# Production build (optimized) +cargo build --release +``` + +**Build artifacts:** +- Linux: `target/release/libsp1_verifier_ffi.so` +- macOS: `target/release/libsp1_verifier_ffi.dylib` +- Windows: `target/release/sp1_verifier_ffi.dll` + +#### Step 3: Verify CGO Linkage + +```bash +cd .. +go build ./... +``` + +If you see errors like "library not found": +```bash +export CGO_LDFLAGS="-L$(pwd)/sp1-verifier-ffi/target/release" +export LD_LIBRARY_PATH="$(pwd)/sp1-verifier-ffi/target/release" # Linux +export DYLD_LIBRARY_PATH="$(pwd)/sp1-verifier-ffi/target/release" # macOS +``` + +--- + +## How It Works + +### Data Flow + +``` +1. Go calls NewSP1Verifier(vkeyPath) + ↓ +2. Attempts to create SP1VerifierFFI + ↓ +3. Loads C library via CGO + ↓ +4. Calls sp1_verify_proof() in Rust + ↓ +5. Rust deserializes proof and vkey + ↓ +6. SP1 SDK verifies cryptographically + ↓ +7. 
Result returned to Go as error/nil +``` + +### Memory Management + +**Go → C → Rust:** +- Go passes pointers to byte slices (no copy) +- Rust reads via `std::slice::from_raw_parts` (unsafe) +- No ownership transfer (Go still owns memory) + +**Rust → C → Go:** +- Rust allocates error strings with `CString::into_raw()` +- Go receives pointer and reads with `C.GoString()` +- Go calls `sp1_free_string()` to deallocate + +**Safety guarantees:** +- All unsafe blocks have safety comments +- Pointer null checks before dereferencing +- Proper cleanup in all error paths + +### Proof Format + +**Expected format:** +```rust +SP1ProofWithPublicValues { + proof: , + public_values: [ + prev_state_root[0..32], // 32 bytes + new_state_root[32..64], // 32 bytes + // ... additional public values + // TOOO: at least block hash must be checked as well, think about others + ] +} +``` + +**Serialization:** Bincode (Rust standard) + + + +## Deployment + +### Option 1: Static Linking (Recommended) + +Build with static library for easier deployment: + +```bash +cd sp1-verifier-ffi +cargo build --release + +# Copy static library +sudo cp target/release/libsp1_verifier_ffi.a /usr/local/lib/ + +# Build Go with static linking +cd .. +CGO_ENABLED=1 CGO_LDFLAGS="-static" go build ./... +``` + +**Pros:** +- Single binary deployment +- No runtime dependencies + +**Cons:** +- Larger binary size +- Longer build time + +### Option 2: Dynamic Linking + +```bash +# Install shared library +sudo cp target/release/libsp1_verifier_ffi.so /usr/local/lib/ +sudo ldconfig # Linux only + +# Build Go normally +go build ./... +``` + +**Pros:** +- Smaller binary +- Faster builds + +**Cons:** +- Must deploy library separately +- Runtime library path issues + +### Option 3: Bundled Distribution + +```bash +# Build everything +cd sp1-verifier-ffi && ./build.sh && cd .. 
+ +# Package for distribution +mkdir -p dist/lib +cp sp1-verifier-ffi/target/release/libsp1_verifier_ffi.* dist/lib/ + +# Set library path in startup script +cat > dist/run.sh << 'EOF' +#!/bin/bash +export LD_LIBRARY_PATH="$(dirname $0)/lib:$LD_LIBRARY_PATH" +exec ./ubft "$@" +EOF +chmod +x dist/run.sh +``` + +--- + +## Testing + +### Unit Tests (Rust) + +```bash +cd sp1-verifier-ffi +cargo test +``` + +Tests verify: +- ✅ FFI safety (null pointers, bounds) +- ✅ Memory management +- ✅ Error code mapping + +### Integration Tests (Go) + +```bash +cd .. +go test -v ./... +``` + +Tests verify: +- ✅ CGO linkage works +- ✅ Version retrieval +- ✅ Error propagation +- ⚠️ Proof verification (requires real proof) + +### E2E Test with Real Proof + +```go +func TestSP1Verifier_RealProof(t *testing.T) { + verifier, err := NewSP1Verifier("testdata/sp1.vkey") + require.NoError(t, err) + + // Load real proof from Uni-EVM + proof, err := os.ReadFile("testdata/proof.bin") + require.NoError(t, err) + + prevRoot := hexDecode("...") + newRoot := hexDecode("...") + + err = verifier.VerifyProof(proof, prevRoot, newRoot) + require.NoError(t, err) +} +``` + +--- + +## Performance + +### Benchmarks + +```bash +cd sp1-verifier-ffi +cargo bench + +cd .. +go test -bench=. -benchmem +``` + +**Typical performance:** +- Verification: 10-100ms (depends on proof complexity) +- Memory: 50-200MB peak during verification +- CGO overhead: <1ms + +### Optimization + +**Rust side:** +```toml +[profile.release] +opt-level = 3 # Maximum optimization +lto = true # Link-time optimization +codegen-units = 1 # Better optimization +``` + +**Go side:** +- Reuse verifier instances (verification key loaded once) +- Avoid copying proof data (pass slices directly) + +--- + +## Troubleshooting + +### Build Errors + +**"cannot find -lsp1_verifier_ffi"** +```bash +# Library not built +cd sp1-verifier-ffi && cargo build --release && cd .. 
+ +# Or set library path +export CGO_LDFLAGS="-L$(pwd)/sp1-verifier-ffi/target/release" +``` + +**"undefined reference to `sp1_verify_proof`"** +```bash +# Header/library mismatch - rebuild both +cd sp1-verifier-ffi +cargo clean +cargo build --release +cd .. && go build ./... +``` + +### Runtime Errors + +**"error while loading shared libraries"** +```bash +# Linux +export LD_LIBRARY_PATH="/path/to/lib:$LD_LIBRARY_PATH" +sudo ldconfig + +# macOS +export DYLD_LIBRARY_PATH="/path/to/lib:$DYLD_LIBRARY_PATH" +``` + +**"FFI verifier not available"** +- Check library is built: `ls sp1-verifier-ffi/target/release/libsp1_verifier_ffi.*` +- Check CGO is enabled: `go env CGO_ENABLED` (should be `1`) +- Check architecture match: `file libsp1_verifier_ffi.so` vs `go version` + +### Verification Errors + +**"Invalid proof format"** +- Proof must be serialized `SP1ProofWithPublicValues` +- Use bincode serialization +- Check proof is not corrupted + +**"State root mismatch"** +- Public values first 64 bytes must match expected roots +- Verify prover outputs correct public values +- Check byte ordering (big-endian vs little-endian) + +--- + +## Security Considerations + +### Memory Safety + +**Unsafe Rust blocks:** +- All marked with safety comments +- Reviewed for correctness +- Null pointer checks before dereferencing +- No use-after-free (Go owns memory) + +**FFI boundary:** +- All pointers validated +- Length parameters checked +- No buffer overflows + +### Cryptographic Security + +**Verification key:** +- Must be from trusted source +- Loaded once, reused for all proofs +- No modification after loading + +**Proof validation:** +- Full cryptographic verification via SP1 SDK +- Public inputs always validated (TODO: check more values) +- No trust in prover claims + +--- + +## Advanced Topics + +### Custom Public Values + +If your proofs have additional public values beyond state roots: + +```rust +// In lib.rs +fn verify_proof_internal(...) -> anyhow::Result<()> { + // ... 
existing code ... + + // Access additional public values + if public_values.len() > 64 { + let custom_data = &public_values[64..]; + // Process custom data + } + + Ok(()) +} +``` + +### Multiple Proof Types + +To support both SP1 and RISC0: + +```rust +#[no_mangle] +pub extern "C" fn risc0_verify_proof(...) -> SP1VerifyResult { + // RISC0 verification logic +} +``` + +```go +// In Go +type RISC0VerifierFFI struct { ... } +``` + +--- + +## References + +- [SP1 Documentation](https://docs.succinct.xyz/) +- [Rust FFI Nomicon](https://doc.rust-lang.org/nomicon/ffi.html) +- [CGO Documentation](https://pkg.go.dev/cmd/cgo) +- [sp1-verifier-ffi README](./sp1-verifier-ffi/README.md) diff --git a/rootchain/consensus/zkverifier/config.example.yaml b/rootchain/consensus/zkverifier/config.example.yaml new file mode 100644 index 00000000..66193fd9 --- /dev/null +++ b/rootchain/consensus/zkverifier/config.example.yaml @@ -0,0 +1,134 @@ +# BFT Core ZK Verification Configuration Example +# +# This file demonstrates how to configure ZK proof verification for L2 state transitions. +# Copy this file and customize it for your deployment. 
+ +# ============================================================================ +# DEVELOPMENT / TESTING CONFIGURATION (ZK verification disabled) +# ============================================================================ + +# When testing without real ZK proofs: +# ubft root-node run \ +# --zk-verification-enabled=false + +# ============================================================================ +# PRODUCTION CONFIGURATION (SP1 verification enabled) +# ============================================================================ + +# Generate verification key from your SP1 prover: +# cd uni-evm +# cargo run --release --bin generate-vkey -- --output sp1.vkey +# cp sp1.vkey /etc/bft-core/ + +# Start root node with ZK verification: +# ubft root-node run \ +# --zk-verification-enabled=true \ +# --zk-proof-type=sp1 \ +# --zk-vkey-path=/etc/bft-core/sp1.vkey + +# ============================================================================ +# CONFIGURATION OPTIONS +# ============================================================================ + +# --zk-verification-enabled (boolean) +# Enable/disable ZK proof verification +# Default: false +# Production: true +# +# --zk-proof-type (string) +# Type of ZK proof system +# Options: "sp1", "risc0", "exec", "none" +# Default: "sp1" +# Production: "sp1" +# +# --zk-vkey-path (string) +# Path to verification key file +# Required when: zk-verification-enabled=true AND zk-proof-type=sp1 +# Example: /etc/bft-core/sp1.vkey + +# ============================================================================ +# ENVIRONMENT VARIABLES +# ============================================================================ + +# export UBFT_ZK_VERIFICATION_ENABLED=true +# export UBFT_ZK_PROOF_TYPE=sp1 +# export UBFT_ZK_VKEY_PATH=/etc/bft-core/sp1.vkey + +# ============================================================================ +# VERIFICATION KEY MANAGEMENT +# ============================================================================ 
+ +# The verification key must match the prover program ID. +# +# 1. Generate vkey from Uni-EVM prover: +# cd uni-evm +# cargo run --release --bin generate-vkey -- --output sp1.vkey +# +# 2. Deploy vkey to all root chain nodes: +# scp sp1.vkey root-node-1:/etc/bft-core/ +# scp sp1.vkey root-node-2:/etc/bft-core/ +# scp sp1.vkey root-node-3:/etc/bft-core/ +# +# 3. Verify file permissions: +# chmod 644 /etc/bft-core/sp1.vkey +# +# 4. Verify checksum across all nodes: +# sha256sum /etc/bft-core/sp1.vkey + +# ============================================================================ +# MONITORING +# ============================================================================ + +# Metrics to monitor: +# - bft_zk_verification_total{result="success"} +# - bft_zk_verification_total{result="failure"} +# - bft_zk_verification_duration_seconds +# +# Log messages: +# - INFO: "ZK proof verification enabled (proof type: sp1)" +# - WARN: "ZK proof verification disabled - accepting all proofs" +# - WARN: "ZK proof verification failed" +# - DEBUG: "Verifying ZK proof" + +# ============================================================================ +# TROUBLESHOOTING +# ============================================================================ + +# Error: "failed to read verification key" +# Solution: Verify --zk-vkey-path points to valid file with correct permissions +# +# Error: "verification key is empty" +# Solution: Regenerate verification key from prover +# +# Error: "proof verification failed" +# Solution: Ensure verification key matches prover program ID +# Check SP1 library version compatibility +# Verify proof format is SP1 compressed + +# ============================================================================ +# SECURITY WARNINGS +# ============================================================================ + +# ⚠️ CRITICAL: The current SP1 verifier is a PLACEHOLDER implementation +# It accepts all well-formed proofs without cryptographic verification +# +# 
Before production deployment:
+# 1. Integrate actual SP1 verification library (see FFI_INTEGRATION.md)
+# 2. Test with real SP1 proofs from Uni-EVM
+# 3. Test rejection of invalid/malicious proofs
+# 4. Security audit of verification implementation
+# 5. Monitor verification latency and failure rates
+
+# ============================================================================
+# MIGRATION PLAN
+# ============================================================================
+
+# Phase 1: Deploy with verification disabled
+# --zk-verification-enabled=false
+#
+# Phase 2: Deploy with verification enabled but non-blocking (future feature)
+# Log failures but don't reject requests
+#
+# Phase 3: Deploy with verification enabled and blocking
+# --zk-verification-enabled=true
+# Reject requests with invalid proofs
diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml
new file mode 100644
index 00000000..74ab13c0
--- /dev/null
+++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "sp1-verifier-ffi"
+version = "0.1.0"
+edition = "2021"
+
+# Make this package independent of parent workspace
+[workspace]
+
+[lib]
+crate-type = ["cdylib", "staticlib"]
+
+[dependencies]
+sp1-sdk = "3.0.0"
+anyhow = "1.0"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+bincode = "1.3"
+
+[profile.release]
+opt-level = 3
+lto = true
+codegen-units = 1
diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/README.md b/rootchain/consensus/zkverifier/sp1-verifier-ffi/README.md
new file mode 100644
index 00000000..e9ee7603
--- /dev/null
+++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/README.md
@@ -0,0 +1,259 @@
+# SP1 Verifier FFI Library
+
+Foreign Function Interface (FFI) library for verifying SP1 ZK proofs from Go.
+ +## Overview + +This Rust library provides C-compatible functions for verifying SP1 (Succinct Processor 1) zero-knowledge proofs. It wraps the SP1 SDK and exposes a simple interface that can be called from Go using CGO. + +## Architecture + +``` +┌─────────────────┐ +│ Go (BFT Core) │ +│ zkverifier │ +└────────┬────────┘ + │ CGO + ▼ +┌─────────────────┐ +│ C Header │ +│ sp1_verifier.h │ +└────────┬────────┘ + │ FFI + ▼ +┌─────────────────┐ +│ Rust Library │ +│ sp1-verifier │ +│ -ffi │ +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ +│ SP1 SDK │ +│ (Rust) │ +└─────────────────┘ +``` + +## Building + +### Prerequisites + +- Rust toolchain (1.70+): https://rustup.rs/ +- Cargo + +### Build Commands + +```bash +# Build release version +./build.sh + +# Or manually: +cargo build --release + +# Run tests +cargo test + +# Clean build +cargo clean +``` + +### Build Artifacts + +After building, you'll find: +- `target/release/libsp1_verifier_ffi.so` (Linux) +- `target/release/libsp1_verifier_ffi.dylib` (macOS) +- `target/release/libsp1_verifier_ffi.a` (static library) + +## API + +### C Interface + +```c +/** + * Verify an SP1 compressed proof + * + * Returns: SP1VerifyResult status code + */ +SP1VerifyResult sp1_verify_proof( + const uint8_t* vkey_bytes, + size_t vkey_len, + const uint8_t* proof_bytes, + size_t proof_len, + const uint8_t* prev_state_root, // 32 bytes + const uint8_t* new_state_root, // 32 bytes + char** error_out // Must free with sp1_free_string +); + +/** + * Free error string + */ +void sp1_free_string(char* s); + +/** + * Get library version + */ +const char* sp1_ffi_version(void); +``` + +### Result Codes + +| Code | Meaning | +|------|---------| +| `SP1_VERIFY_SUCCESS` (0) | Proof verified successfully | +| `SP1_VERIFY_INVALID_PROOF` (1) | Proof data is malformed | +| `SP1_VERIFY_INVALID_VKEY` (2) | Verification key is invalid | +| `SP1_VERIFY_INVALID_PUBLIC_INPUTS` (3) | Public inputs don't match | +| `SP1_VERIFY_VERIFICATION_FAILED` (4) | 
Cryptographic verification failed | +| `SP1_VERIFY_INTERNAL_ERROR` (5) | Internal error | + +## Usage from Go + +### Setup + +1. Build the Rust library: + ```bash + cd sp1-verifier-ffi + ./build.sh + ``` + +2. The Go code will automatically link to the library using CGO directives in `sp1_verifier_ffi.go`: + ```go + // #cgo LDFLAGS: -L${SRCDIR}/sp1-verifier-ffi/target/release -lsp1_verifier_ffi + // #include "sp1-verifier-ffi/sp1_verifier.h" + import "C" + ``` + +### Example + +```go +package main + +import ( + "fmt" + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" +) + +func main() { + // Create verifier + verifier, err := zkverifier.NewSP1Verifier("/path/to/verification.vkey") + if err != nil { + panic(err) + } + + // Verify proof + proof := loadProofBytes() + prevRoot := make([]byte, 32) // Previous state root + newRoot := make([]byte, 32) // New state root + + err = verifier.VerifyProof(proof, prevRoot, newRoot) + if err != nil { + fmt.Printf("Verification failed: %v\n", err) + } else { + fmt.Println("Proof verified successfully!") + } +} +``` + +## Proof Format + +The library expects SP1 compressed proofs in the following format: + +1. **Verification Key**: Serialized SP1 verification key (bincode format) +2. **Proof**: Serialized `SP1ProofWithPublicValues` (bincode format) +3. **Public Values**: First 64 bytes must be: + - Bytes 0-31: Previous state root + - Bytes 32-63: New state root + +## Development + +### Project Structure + +``` +sp1-verifier-ffi/ +├── Cargo.toml # Rust package configuration +├── build.sh # Build script +├── src/ +│ └── lib.rs # FFI implementation +├── sp1_verifier.h # C header file +└── README.md # This file +``` + +### Adding New Functions + +1. Add Rust function with `#[no_mangle]` and `extern "C"`: + ```rust + #[no_mangle] + pub extern "C" fn new_function() -> i32 { + // Implementation + } + ``` + +2. Add declaration to `sp1_verifier.h`: + ```c + int32_t new_function(void); + ``` + +3. 
Update Go bindings in `../sp1_verifier_ffi.go` + +### Testing + +```bash +# Rust tests +cargo test + +# Go integration tests (from parent directory) +cd .. +go test -v ./... +``` + +## Troubleshooting + +### "library not found" error + +Make sure the library is built and CGO can find it: +```bash +export CGO_LDFLAGS="-L$(pwd)/target/release" +export LD_LIBRARY_PATH="$(pwd)/target/release:$LD_LIBRARY_PATH" # Linux +export DYLD_LIBRARY_PATH="$(pwd)/target/release:$DYLD_LIBRARY_PATH" # macOS +``` + +### "undefined symbol" error + +The library may not be linked correctly. Check: +1. Library was built with same architecture as Go binary +2. CGO flags are correct +3. Header file matches library exports + +### SP1 SDK errors + +Make sure you're using a compatible SP1 SDK version: +```bash +cargo update +cargo build --release +``` + +## Performance + +Typical verification times on modern hardware: +- Compressed proof verification: 10-100ms +- Memory usage: ~50-200MB during verification + +## Security + +⚠️ **Important Security Notes:** + +1. **Verification Key**: Must be generated from trusted source +2. **Public Inputs**: Always validated against expected values +3. **Memory Safety**: FFI uses unsafe Rust - reviewed for safety +4. **Error Handling**: All errors propagated to Go caller + +## License + +Same license as BFT Core parent project. 
+ +## References + +- [SP1 Documentation](https://docs.succinct.xyz/) +- [SP1 GitHub](https://github.com/succinctlabs/sp1) +- [Rust FFI Guide](https://doc.rust-lang.org/nomicon/ffi.html) +- [CGO Documentation](https://pkg.go.dev/cmd/cgo) diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/build.sh b/rootchain/consensus/zkverifier/sp1-verifier-ffi/build.sh new file mode 100755 index 00000000..d7e1550d --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/build.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# +# Build script for SP1 Verifier FFI library +# + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}Building SP1 Verifier FFI Library${NC}" +echo "=====================================" + +# Check if Rust is installed +if ! command -v cargo &> /dev/null; then + echo -e "${RED}Error: Rust/Cargo not found${NC}" + echo "Please install Rust from https://rustup.rs/" + exit 1 +fi + +# Check Rust version +RUST_VERSION=$(cargo --version | cut -d' ' -f2) +echo -e "${GREEN}Rust version: ${RUST_VERSION}${NC}" + +# Build the library +echo -e "\n${YELLOW}Building Rust library...${NC}" +cargo build --release + +if [ $? 
-eq 0 ]; then + echo -e "${GREEN}✓ Build successful${NC}" +else + echo -e "${RED}✗ Build failed${NC}" + exit 1 +fi + +# Check build artifacts +LIB_PATH="target/release" +if [[ "$OSTYPE" == "darwin"* ]]; then + LIB_FILE="libsp1_verifier_ffi.dylib" + STATIC_LIB="libsp1_verifier_ffi.a" +elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + LIB_FILE="libsp1_verifier_ffi.so" + STATIC_LIB="libsp1_verifier_ffi.a" +else + echo -e "${YELLOW}Warning: Unknown OS type, library names may differ${NC}" + LIB_FILE="libsp1_verifier_ffi.*" + STATIC_LIB="libsp1_verifier_ffi.a" +fi + +echo -e "\n${YELLOW}Build artifacts:${NC}" +if [ -f "${LIB_PATH}/${LIB_FILE}" ]; then + ls -lh "${LIB_PATH}/${LIB_FILE}" + echo -e "${GREEN}✓ Dynamic library created${NC}" +else + echo -e "${RED}✗ Dynamic library not found${NC}" +fi + +if [ -f "${LIB_PATH}/${STATIC_LIB}" ]; then + ls -lh "${LIB_PATH}/${STATIC_LIB}" + echo -e "${GREEN}✓ Static library created${NC}" +else + echo -e "${YELLOW}⚠ Static library not found (optional)${NC}" +fi + +# Run tests +echo -e "\n${YELLOW}Running Rust tests...${NC}" +cargo test + +if [ $? -eq 0 ]; then + echo -e "${GREEN}✓ All tests passed${NC}" +else + echo -e "${RED}✗ Some tests failed${NC}" + exit 1 +fi + +echo -e "\n${GREEN}Build complete!${NC}" +echo -e "\nTo use this library with Go:" +echo -e " 1. Set CGO_LDFLAGS to point to ${LIB_PATH}" +echo -e " 2. Run: go test ./... in the parent directory" +echo -e "\nExample:" +echo -e " export CGO_LDFLAGS=\"-L$(pwd)/${LIB_PATH}\"" +echo -e " cd .. 
&& go test -v"
diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h
new file mode 100644
index 00000000..3df15d70
--- /dev/null
+++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h
@@ -0,0 +1,83 @@
+/**
+ * SP1 Proof Verifier FFI
+ *
+ * C header for FFI interface to SP1 proof verification
+ */
+
+#ifndef SP1_VERIFIER_H
+#define SP1_VERIFIER_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Result codes for SP1 verification
+ */
+typedef enum {
+    SP1_VERIFY_SUCCESS = 0,
+    SP1_VERIFY_INVALID_PROOF = 1,
+    SP1_VERIFY_INVALID_VKEY = 2,
+    SP1_VERIFY_INVALID_PUBLIC_INPUTS = 3,
+    SP1_VERIFY_VERIFICATION_FAILED = 4,
+    SP1_VERIFY_INTERNAL_ERROR = 5,
+} SP1VerifyResult;
+
+/**
+ * Verify an SP1 compressed proof
+ *
+ * @param vkey_bytes Pointer to verification key bytes
+ * @param vkey_len Length of verification key in bytes
+ * @param proof_bytes Pointer to proof bytes
+ * @param proof_len Length of proof in bytes
+ * @param prev_state_root Pointer to 32-byte previous state root
+ * @param new_state_root Pointer to 32-byte new state root
+ * @param error_out Output pointer for error message (must be freed with sp1_free_string)
+ * @return SP1VerifyResult status code
+ */
+SP1VerifyResult sp1_verify_proof(
+    const uint8_t* vkey_bytes,
+    size_t vkey_len,
+    const uint8_t* proof_bytes,
+    size_t proof_len,
+    const uint8_t* prev_state_root,
+    const uint8_t* new_state_root,
+    char** error_out
+);
+
+/**
+ * Free a string allocated by sp1_verify_proof
+ *
+ * @param s Pointer to string to free
+ */
+void sp1_free_string(char* s);
+
+/**
+ * Get the version of the FFI library
+ *
+ * @return Version string (do not free)
+ */
+const char* sp1_ffi_version(void);
+
+/**
+ * Validate a verification key
+ *
+ * @param vkey_bytes Pointer to verification key bytes
+ * @param vkey_len Length of verification key in bytes
+ * @param error_out Output pointer for
error message (must be freed with sp1_free_string) + * @return SP1VerifyResult status code (SUCCESS or INVALID_VKEY) + */ +SP1VerifyResult sp1_validate_vkey( + const uint8_t* vkey_bytes, + size_t vkey_len, + char** error_out +); + +#ifdef __cplusplus +} +#endif + +#endif /* SP1_VERIFIER_H */ diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs new file mode 100644 index 00000000..137297c6 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs @@ -0,0 +1,249 @@ +use std::ffi::{CStr, CString}; +use std::os::raw::c_char; +use std::ptr; +use sp1_sdk::{ProverClient, SP1Stdin, SP1ProofWithPublicValues}; + +/// Error codes for FFI interface +#[repr(C)] +pub enum SP1VerifyResult { + Success = 0, + InvalidProof = 1, + InvalidVKey = 2, + InvalidPublicInputs = 3, + VerificationFailed = 4, + InternalError = 5, +} + +/// Verify an SP1 compressed proof +/// +/// # Arguments +/// * `vkey_bytes` - Pointer to verification key bytes +/// * `vkey_len` - Length of verification key +/// * `proof_bytes` - Pointer to proof bytes +/// * `proof_len` - Length of proof +/// * `prev_state_root` - Pointer to 32-byte previous state root +/// * `new_state_root` - Pointer to 32-byte new state root +/// * `error_out` - Output pointer for error message (caller must free with sp1_free_string) +/// +/// # Returns +/// SP1VerifyResult code +#[no_mangle] +pub extern "C" fn sp1_verify_proof( + vkey_bytes: *const u8, + vkey_len: usize, + proof_bytes: *const u8, + proof_len: usize, + prev_state_root: *const u8, + new_state_root: *const u8, + error_out: *mut *mut c_char, +) -> SP1VerifyResult { + // Safety checks + if vkey_bytes.is_null() || proof_bytes.is_null() { + set_error(error_out, "null pointer passed to sp1_verify_proof"); + return SP1VerifyResult::InternalError; + } + + if prev_state_root.is_null() || new_state_root.is_null() { + set_error(error_out, "null state root pointer"); + return 
SP1VerifyResult::InvalidPublicInputs; + } + + // Convert C pointers to Rust slices + let vkey_data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) }; + let proof_data = unsafe { std::slice::from_raw_parts(proof_bytes, proof_len) }; + let prev_root = unsafe { std::slice::from_raw_parts(prev_state_root, 32) }; + let new_root = unsafe { std::slice::from_raw_parts(new_state_root, 32) }; + + // Perform verification + match verify_proof_internal(vkey_data, proof_data, prev_root, new_root) { + Ok(()) => SP1VerifyResult::Success, + Err(e) => { + set_error(error_out, &e.to_string()); + match classify_error(&e) { + ErrorType::InvalidVKey => SP1VerifyResult::InvalidVKey, + ErrorType::InvalidProof => SP1VerifyResult::InvalidProof, + ErrorType::InvalidPublicInputs => SP1VerifyResult::InvalidPublicInputs, + ErrorType::VerificationFailed => SP1VerifyResult::VerificationFailed, + ErrorType::Internal => SP1VerifyResult::InternalError, + } + } + } +} + +/// Internal verification logic +fn verify_proof_internal( + vkey_data: &[u8], + proof_data: &[u8], + prev_state_root: &[u8], + new_state_root: &[u8], +) -> anyhow::Result<()> { + // Deserialize verification key + let vkey: sp1_sdk::SP1VerifyingKey = bincode::deserialize(vkey_data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize verification key: {}", e))?; + + // Deserialize proof + let proof: SP1ProofWithPublicValues = bincode::deserialize(proof_data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize proof: {}", e))?; + + // Create prover client (used for verification) + let client = ProverClient::new(); + + // Verify the proof + client.verify(&proof, &vkey) + .map_err(|e| anyhow::anyhow!("Proof verification failed: {}", e))?; + + // Extract public values from proof + let public_values = proof.public_values.as_slice(); + + // Validate that public values contain expected state roots + // Expected format: [prev_state_root (32 bytes), new_state_root (32 bytes)] + if public_values.len() < 64 { + return 
Err(anyhow::anyhow!( + "Public values too short: expected at least 64 bytes, got {}", + public_values.len() + )); + } + + // Check previous state root matches + if &public_values[0..32] != prev_state_root { + return Err(anyhow::anyhow!( + "Previous state root mismatch: expected {:?}, got {:?}", + prev_state_root, + &public_values[0..32] + )); + } + + // Check new state root matches + if &public_values[32..64] != new_state_root { + return Err(anyhow::anyhow!( + "New state root mismatch: expected {:?}, got {:?}", + new_state_root, + &public_values[32..64] + )); + } + + Ok(()) +} + +/// Free a string allocated by this library +#[no_mangle] +pub extern "C" fn sp1_free_string(s: *mut c_char) { + if !s.is_null() { + unsafe { + let _ = CString::from_raw(s); + } + } +} + +/// Get the version of this FFI library +#[no_mangle] +pub extern "C" fn sp1_ffi_version() -> *const c_char { + const VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + VERSION.as_ptr() as *const c_char +} + +/// Validate a verification key +/// +/// # Arguments +/// * `vkey_bytes` - Pointer to verification key bytes +/// * `vkey_len` - Length of verification key +/// * `error_out` - Output pointer for error message (caller must free with sp1_free_string) +/// +/// # Returns +/// SP1VerifyResult code (Success or InvalidVKey) +#[no_mangle] +pub extern "C" fn sp1_validate_vkey( + vkey_bytes: *const u8, + vkey_len: usize, + error_out: *mut *mut c_char, +) -> SP1VerifyResult { + // Safety checks + if vkey_bytes.is_null() { + set_error(error_out, "null pointer passed to sp1_validate_vkey"); + return SP1VerifyResult::InternalError; + } + + if vkey_len == 0 { + set_error(error_out, "verification key is empty"); + return SP1VerifyResult::InvalidVKey; + } + + // Convert C pointer to Rust slice + let vkey_data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) }; + + // Try to deserialize verification key + match bincode::deserialize::(vkey_data) { + Ok(_) => SP1VerifyResult::Success, + Err(e) => 
{ + set_error(error_out, &format!("Failed to deserialize verification key: {}", e)); + SP1VerifyResult::InvalidVKey + } + } +} + +// Helper functions + +enum ErrorType { + InvalidVKey, + InvalidProof, + InvalidPublicInputs, + VerificationFailed, + Internal, +} + +fn classify_error(err: &anyhow::Error) -> ErrorType { + let msg = err.to_string().to_lowercase(); + if msg.contains("verification key") || msg.contains("vkey") { + ErrorType::InvalidVKey + } else if msg.contains("deserialize proof") { + ErrorType::InvalidProof + } else if msg.contains("state root") || msg.contains("public values") { + ErrorType::InvalidPublicInputs + } else if msg.contains("verification failed") { + ErrorType::VerificationFailed + } else { + ErrorType::Internal + } +} + +fn set_error(error_out: *mut *mut c_char, message: &str) { + if !error_out.is_null() { + if let Ok(c_string) = CString::new(message) { + unsafe { + *error_out = c_string.into_raw(); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_null_pointers() { + let mut error: *mut c_char = ptr::null_mut(); + let result = sp1_verify_proof( + ptr::null(), + 0, + ptr::null(), + 0, + ptr::null(), + ptr::null(), + &mut error, + ); + assert_eq!(result as i32, SP1VerifyResult::InternalError as i32); + + if !error.is_null() { + sp1_free_string(error); + } + } + + #[test] + fn test_version() { + let version = sp1_ffi_version(); + assert!(!version.is_null()); + let version_str = unsafe { CStr::from_ptr(version) }; + assert!(version_str.to_str().unwrap().starts_with("0.1.0")); + } +} diff --git a/rootchain/consensus/zkverifier/sp1_verifier.go b/rootchain/consensus/zkverifier/sp1_verifier.go new file mode 100644 index 00000000..7bcdc535 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1_verifier.go @@ -0,0 +1,94 @@ +package zkverifier + +import ( + "encoding/hex" + "fmt" + "log/slog" + "os" + "path/filepath" +) + +// SP1Verifier verifies SP1 zkVM proofs +type SP1Verifier struct { + vkey []byte + enabled bool 
+ ffiVerifier *SP1VerifierFFI +} + +// NewSP1Verifier creates a new SP1 verifier +// vkeyPath: path to the SP1 verification key file (.vkey) +func NewSP1Verifier(vkeyPath string) (*SP1Verifier, error) { + if vkeyPath == "" { + return nil, fmt.Errorf("verification key path is required for SP1 verifier") + } + + // Try to create FFI verifier first + if ffiVerifier, err := NewSP1VerifierFFI(vkeyPath); err == nil { + slog.Info("Using SP1 FFI verifier", "path", vkeyPath, "version", GetFFIVersion()) + return &SP1Verifier{ + vkey: ffiVerifier.vkey, + enabled: true, + ffiVerifier: ffiVerifier, + }, nil + } else { + return nil, fmt.Errorf("SP1 FFI verifier not available: %w", err) + } + +} + +// readFile reads a file and returns its contents +func readFile(path string) ([]byte, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("failed to resolve path: %w", err) + } + + data, err := os.ReadFile(absPath) + if err != nil { + return nil, fmt.Errorf("failed to read file %s: %w", absPath, err) + } + + return data, nil +} + +// VerifyProof verifies an SP1 compressed proof +// +// The proof should be a compressed SP1 proof generated by the prover. +// The proof includes: +// - Public inputs: previousStateRoot, newStateRoot +// - Proof data: SP1 compressed proof bytes +// +// This function verifies that executing the program with previousStateRoot +// as input produces newStateRoot as output. 
+func (v *SP1Verifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error { + if !v.enabled { + return ErrVerifierNotConfigured + } + + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(previousStateRoot)) + } + + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(newStateRoot)) + } + + slog.Debug("Verifying SP1 proof", + "proof_size", len(proof), + "prev_root", hex.EncodeToString(previousStateRoot[:8]), + "new_root", hex.EncodeToString(newStateRoot[:8])) + + return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot) +} + +func (v *SP1Verifier) ProofType() ProofType { + return ProofTypeSP1 +} + +func (v *SP1Verifier) IsEnabled() bool { + return v.enabled +} diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go new file mode 100644 index 00000000..9fec99e0 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go @@ -0,0 +1,149 @@ +package zkverifier + +// #cgo LDFLAGS: -L${SRCDIR}/sp1-verifier-ffi/target/release -lsp1_verifier_ffi -ldl -lm +// #include "sp1-verifier-ffi/sp1_verifier.h" +// #include +import "C" +import ( + "fmt" + "unsafe" +) + +// SP1VerifierFFI wraps the Rust FFI library for SP1 proof verification +type SP1VerifierFFI struct { + vkey []byte +} + +// NewSP1VerifierFFI creates a new FFI-based SP1 verifier +func NewSP1VerifierFFI(vkeyPath string) (*SP1VerifierFFI, error) { + // Load verification key + vkey, err := loadVerificationKey(vkeyPath) + if err != nil { + return nil, fmt.Errorf("failed to load verification key: %w", err) + } + + // Verify FFI library is available + version := C.sp1_ffi_version() + if version == nil { + return nil, fmt.Errorf("FFI library not available") + } + + // 
Validate verification key + if len(vkey) == 0 { + return nil, fmt.Errorf("verification key is empty") + } + + var errorOut *C.char + defer func() { + if errorOut != nil { + C.sp1_free_string(errorOut) + } + }() + + result := C.sp1_validate_vkey( + (*C.uint8_t)(unsafe.Pointer(&vkey[0])), + C.size_t(len(vkey)), + &errorOut, + ) + + if result != C.SP1_VERIFY_SUCCESS { + if errorOut != nil { + return nil, fmt.Errorf("invalid verification key: %s", C.GoString(errorOut)) + } + return nil, fmt.Errorf("invalid verification key") + } + + return &SP1VerifierFFI{ + vkey: vkey, + }, nil +} + +// VerifyProof verifies an SP1 proof using the Rust FFI library +func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error { + // Validate inputs + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + + // Prepare C pointers + var errorOut *C.char + defer func() { + if errorOut != nil { + C.sp1_free_string(errorOut) + } + }() + + // Call FFI verification function + result := C.sp1_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&v.vkey[0])), + C.size_t(len(v.vkey)), + (*C.uint8_t)(unsafe.Pointer(&proof[0])), + C.size_t(len(proof)), + (*C.uint8_t)(unsafe.Pointer(&previousStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), + &errorOut, + ) + + // Check result + switch result { + case C.SP1_VERIFY_SUCCESS: + return nil + case C.SP1_VERIFY_INVALID_PROOF: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrInvalidProofFormat, C.GoString(errorOut)) + } + return ErrInvalidProofFormat + case C.SP1_VERIFY_INVALID_VKEY: + if errorOut != nil { + return fmt.Errorf("invalid verification key: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid verification key") 
+ case C.SP1_VERIFY_INVALID_PUBLIC_INPUTS: + if errorOut != nil { + return fmt.Errorf("invalid public inputs: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid public inputs") + case C.SP1_VERIFY_VERIFICATION_FAILED: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrProofVerificationFailed, C.GoString(errorOut)) + } + return ErrProofVerificationFailed + default: + if errorOut != nil { + return fmt.Errorf("internal error: %s", C.GoString(errorOut)) + } + return fmt.Errorf("internal error") + } +} + +// ProofType returns the proof type +func (v *SP1VerifierFFI) ProofType() ProofType { + return ProofTypeSP1 +} + +// IsEnabled returns true if the verifier is enabled +func (v *SP1VerifierFFI) IsEnabled() bool { + return len(v.vkey) > 0 +} + +// GetFFIVersion returns the version of the FFI library +func GetFFIVersion() string { + version := C.sp1_ffi_version() + if version == nil { + return "unknown" + } + return C.GoString(version) +} + +// Helper function to load verification key from file +func loadVerificationKey(path string) ([]byte, error) { + // This is implemented in sp1_verifier.go as readFile + // We reuse that function + return readFile(path) +} diff --git a/rootchain/consensus/zkverifier/verifier.go b/rootchain/consensus/zkverifier/verifier.go new file mode 100644 index 00000000..b70fae4a --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier.go @@ -0,0 +1,110 @@ +package zkverifier + +import ( + "errors" + "fmt" +) + +var ( + // ErrProofVerificationFailed is returned when proof verification fails + ErrProofVerificationFailed = errors.New("proof verification failed") + // ErrInvalidProofFormat is returned when proof data is malformed + ErrInvalidProofFormat = errors.New("invalid proof format") + // ErrVerifierNotConfigured is returned when no verifier is configured + ErrVerifierNotConfigured = errors.New("zk verifier not configured") +) + +// ProofType identifies the proving system used +type ProofType string + +const ( + // ProofTypeSP1 
indicates SP1 zkVM proof + ProofTypeSP1 ProofType = "sp1" + // ProofTypeRISC0 indicates RISC0 zkVM proof + ProofTypeRISC0 ProofType = "risc0" + // ProofTypeExec indicates execution without proving (testing only) + ProofTypeExec ProofType = "exec" + // ProofTypeNone indicates no proof verification (disabled) + ProofTypeNone ProofType = "none" +) + +// ZKVerifier validates zero-knowledge proofs of state transitions +type ZKVerifier interface { + // VerifyProof verifies a ZK proof of state transition + // proof: The ZK proof bytes + // previousStateRoot: Hash of the previous state + // newStateRoot: Hash of the new state (claimed) + // Returns nil if proof is valid, error otherwise + VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error + + // ProofType returns the type of proofs this verifier handles + ProofType() ProofType + + // IsEnabled returns true if verification is enabled + IsEnabled() bool +} + +// Config holds ZK verifier configuration +type Config struct { + // Enabled controls whether ZK verification is performed + Enabled bool + + // ProofType specifies which proof system to use + ProofType ProofType + + // VerificationKeyPath is the path to the verification key file + // For SP1: path to the .vkey file + // For RISC0: path to the verification key + VerificationKeyPath string + + // AdditionalConfig holds prover-specific configuration + AdditionalConfig map[string]interface{} +} + +// DefaultConfig returns a default configuration with verification disabled +func DefaultConfig() *Config { + return &Config{ + Enabled: false, + ProofType: ProofTypeNone, + VerificationKeyPath: "", + AdditionalConfig: make(map[string]interface{}), + } +} + +// NewVerifier creates a new ZK verifier based on configuration +func NewVerifier(cfg *Config) (ZKVerifier, error) { + if cfg == nil { + cfg = DefaultConfig() + } + + if !cfg.Enabled { + return &NoOpVerifier{}, nil + } + + switch cfg.ProofType { + case ProofTypeSP1: + return 
NewSP1Verifier(cfg.VerificationKeyPath) + case ProofTypeRISC0: + return nil, fmt.Errorf("RISC0 verifier not implemented") + case ProofTypeExec, ProofTypeNone: + return &NoOpVerifier{}, nil + default: + return nil, fmt.Errorf("unknown proof type: %s", cfg.ProofType) + } +} + +// NoOpVerifier is a verifier that always returns success (for testing/disabled mode) +type NoOpVerifier struct{} + +func (v *NoOpVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error { + // No verification performed + return nil +} + +func (v *NoOpVerifier) ProofType() ProofType { + return ProofTypeNone +} + +func (v *NoOpVerifier) IsEnabled() bool { + return false +} diff --git a/rootchain/consensus/zkverifier/verifier_test.go b/rootchain/consensus/zkverifier/verifier_test.go new file mode 100644 index 00000000..488063fb --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier_test.go @@ -0,0 +1,188 @@ +package zkverifier + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + require.False(t, cfg.Enabled) + require.Equal(t, ProofTypeNone, cfg.ProofType) + require.Empty(t, cfg.VerificationKeyPath) +} + +func TestNewVerifier_Disabled(t *testing.T) { + cfg := &Config{ + Enabled: false, + ProofType: ProofTypeSP1, + } + + verifier, err := NewVerifier(cfg) + require.NoError(t, err) + require.NotNil(t, verifier) + require.False(t, verifier.IsEnabled()) + require.Equal(t, ProofTypeNone, verifier.ProofType()) +} + +func TestNewVerifier_NoOpForExec(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeExec, + } + + verifier, err := NewVerifier(cfg) + require.NoError(t, err) + require.NotNil(t, verifier) + require.False(t, verifier.IsEnabled()) + + // Should accept any proof + err = verifier.VerifyProof([]byte("not a real proof"), make([]byte, 32), make([]byte, 32)) + require.NoError(t, err) +} + +func TestNewVerifier_SP1(t *testing.T) { + 
// Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) + require.NoError(t, err) + + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: vkeyPath, + } + + verifier, err := NewVerifier(cfg) + require.NoError(t, err) + require.NotNil(t, verifier) + require.True(t, verifier.IsEnabled()) + require.Equal(t, ProofTypeSP1, verifier.ProofType()) +} + +func TestNewVerifier_SP1_MissingVKey(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: "/nonexistent/path/test.vkey", + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "failed to read verification key") +} + +func TestNewVerifier_UnknownProofType(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: "unknown", + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "unknown proof type") +} + +func TestNoOpVerifier(t *testing.T) { + v := &NoOpVerifier{} + + require.False(t, v.IsEnabled()) + require.Equal(t, ProofTypeNone, v.ProofType()) + + // Should accept any input + err := v.VerifyProof(nil, nil, nil) + require.NoError(t, err) + + err = v.VerifyProof([]byte("test"), []byte("prev"), []byte("new")) + require.NoError(t, err) +} + +func TestSP1Verifier_InvalidInputs(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath) + require.NoError(t, err) + + testCases := []struct { + name string + proof []byte + previousStateRoot []byte + newStateRoot []byte + wantErr bool + errContains string + }{ + { + name: "empty proof", + proof: []byte{}, 
+ previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 32), + wantErr: true, + errContains: "proof is empty", + }, + { + name: "invalid previous state root length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 16), + newStateRoot: make([]byte, 32), + wantErr: true, + errContains: "previousStateRoot must be 32 bytes", + }, + { + name: "invalid new state root length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 16), + wantErr: true, + errContains: "newStateRoot must be 32 bytes", + }, + { + name: "proof too small", + proof: make([]byte, 32), // Less than 64 bytes + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 32), + wantErr: true, + errContains: "SP1 proof too small", + }, + { + name: "valid format (placeholder accepts)", + proof: make([]byte, 128), + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 32), + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := verifier.VerifyProof(tc.proof, tc.previousStateRoot, tc.newStateRoot) + if tc.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errContains) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestSP1Verifier_EmptyVKey(t *testing.T) { + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "empty.vkey") + err := os.WriteFile(vkeyPath, []byte{}, 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "verification key is empty") +} diff --git a/rootchain/node.go b/rootchain/node.go index 8c80c4b1..49fd3792 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -21,6 +21,7 @@ import ( "github.com/unicitynetwork/bft-core/observability" "github.com/unicitynetwork/bft-core/rootchain/consensus" "github.com/unicitynetwork/bft-core/rootchain/consensus/storage" + 
"github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" abcrypto "github.com/unicitynetwork/bft-go-base/crypto" "github.com/unicitynetwork/bft-go-base/types" ) @@ -53,6 +54,7 @@ type ( subscription *Subscriptions net PartitionNet consensusManager ConsensusManager + zkVerifier zkverifier.ZKVerifier log *slog.Logger tracer trace.Tracer @@ -67,6 +69,7 @@ func New( pNet PartitionNet, cm ConsensusManager, observe Observability, + zkVerifier zkverifier.ZKVerifier, ) (*Node, error) { if peer == nil { return nil, fmt.Errorf("partition listener is nil") @@ -74,6 +77,11 @@ func New( if pNet == nil { return nil, fmt.Errorf("network is nil") } + if zkVerifier == nil { + // Default to NoOp verifier if none provided + zkVerifier = &zkverifier.NoOpVerifier{} + observe.Logger().Warn("No ZK verifier provided, using NoOp verifier (accepts all proofs)") + } meter := observe.Meter("rootchain.node") reqBuf, err := NewCertificationRequestBuffer(meter) @@ -90,12 +98,21 @@ func New( subscription: subs, net: pNet, consensusManager: cm, + zkVerifier: zkVerifier, log: observe.Logger(), tracer: observe.Tracer("rootchain.node"), } if err := node.initMetrics(meter); err != nil { return nil, fmt.Errorf("initializing metrics: %w", err) } + + // Log verifier configuration + if zkVerifier.IsEnabled() { + observe.Logger().Info(fmt.Sprintf("ZK proof verification enabled (proof type: %s)", zkVerifier.ProofType())) + } else { + observe.Logger().Warn("ZK proof verification disabled - accepting all proofs") + } + return node, nil } @@ -237,6 +254,14 @@ func (v *Node) onBlockCertificationRequest(ctx context.Context, req *certificati return err } + // Verify ZK proof (if verifier is enabled) + if err := v.verifyZKProof(ctx, req, si); err != nil { + v.log.WarnContext(ctx, "ZK proof verification failed", + logger.Error(err), + logger.Shard(req.PartitionID, req.ShardID)) + return fmt.Errorf("ZK proof verification failed: %w", err) + } + if err := v.subscription.Subscribe(req.PartitionID, 
req.ShardID, req.NodeID); err != nil { return fmt.Errorf("subscribing the sender: %w", err) } @@ -292,3 +317,51 @@ func (v *Node) handleConsensus(ctx context.Context) error { } } } + +// verifyZKProof verifies the ZK proof in the block certification request +func (v *Node) verifyZKProof(ctx context.Context, req *certification.BlockCertificationRequest, si *storage.ShardInfo) error { + if !v.zkVerifier.IsEnabled() { + // Verification disabled - accept all + return nil + } + + ir := req.InputRecord + if ir == nil { + return fmt.Errorf("input record is nil") + } + + // Get state roots from InputRecord + previousStateRoot := ir.PreviousHash + newStateRoot := ir.Hash + + // Skip verification for sync UCs and genesis blocks: + // 1. Sync UCs: both hashes are null/empty (handshake/subscription requests) + // 2. Genesis block: previousHash is null/empty (first block with no parent) + if len(previousStateRoot) == 0 && len(newStateRoot) == 0 { + v.log.DebugContext(ctx, "Skipping ZK proof verification for sync UC", + logger.Shard(req.PartitionID, req.ShardID)) + return nil + } + if len(previousStateRoot) == 0 { + v.log.InfoContext(ctx, "Skipping ZK proof verification for genesis block", + logger.Shard(req.PartitionID, req.ShardID)) + return nil + } + + v.log.DebugContext(ctx, "Verifying ZK proof", + logger.Shard(req.PartitionID, req.ShardID), + logger.Data(slog.Int("proof_size", len(req.ZkProof))), + logger.Data(slog.String("proof_type", string(v.zkVerifier.ProofType()))), + logger.Data(slog.Uint64("round", ir.RoundNumber))) + + // Verify proof: previousStateRoot -> newStateRoot transition + if err := v.zkVerifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot); err != nil { + return fmt.Errorf("ZK proof verification failed: %w", err) + } + + v.log.InfoContext(ctx, "ZK proof verified successfully", + logger.Shard(req.PartitionID, req.ShardID), + logger.Data(slog.Uint64("round", ir.RoundNumber))) + + return nil +} diff --git a/rootchain/node_test.go 
b/rootchain/node_test.go index eabcae53..f3ee1551 100644 --- a/rootchain/node_test.go +++ b/rootchain/node_test.go @@ -33,15 +33,15 @@ func Test_rootNode(t *testing.T) { cm := mockConsensusManager{} partNet := mockPartitionNet{} - node, err := New(nil, partNet, cm, nopObs) + node, err := New(nil, partNet, cm, nopObs, nil) require.Nil(t, node) require.EqualError(t, err, `partition listener is nil`) - node, err = New(&nwPeer, nil, cm, nopObs) + node, err = New(&nwPeer, nil, cm, nopObs, nil) require.Nil(t, node) require.EqualError(t, err, `network is nil`) - node, err = New(&nwPeer, partNet, cm, nopObs) + node, err = New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) require.NotNil(t, node) require.Equal(t, &nwPeer, node.GetPeer()) @@ -59,7 +59,7 @@ func Test_rootNode(t *testing.T) { partMsg := make(chan any) partNet := mockPartitionNet{recCh: func() <-chan any { return partMsg }} - node, err := New(nwPeer, partNet, cm, nopObs) + node, err := New(nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) require.NotNil(t, node) require.Equal(t, nwPeer, node.GetPeer()) @@ -99,7 +99,7 @@ func Test_rootNode(t *testing.T) { partNet, _ := newMockPartitionNet() - node, err := New(nwPeer, partNet, cm, nopObs) + node, err := New(nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) require.NotNil(t, node) require.Equal(t, nwPeer, node.GetPeer()) @@ -134,7 +134,7 @@ func Test_sendResponse(t *testing.T) { certResp := validCertificationResponse(t) t.Run("invalid peer ID", func(t *testing.T) { - node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs) + node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs, nil) require.NoError(t, err) err = node.sendResponse(t.Context(), "", &certResp) @@ -147,7 +147,7 @@ func Test_sendResponse(t *testing.T) { t.Run("invalid CertificationResponse", func(t *testing.T) { // CertResp is coming from ConsensusManager so this should be impossible? // just send it out and shard nodes should be able to ignore invalid CRsp? 
- node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs) + node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs, nil) require.NoError(t, err) cr := certResp @@ -163,7 +163,7 @@ func Test_sendResponse(t *testing.T) { return expErr }, } - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) err = node.sendResponse(t.Context(), nodeID, &certResp) require.ErrorIs(t, err, expErr) @@ -178,7 +178,7 @@ func Test_sendResponse(t *testing.T) { return nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) require.NoError(t, node.sendResponse(t.Context(), nodeID, &certResp)) }) @@ -202,7 +202,7 @@ func Test_onHandshake(t *testing.T) { t.Run("invalid handshake msg", func(t *testing.T) { partNet := mockPartitionNet{} cm := mockConsensusManager{} - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) msg := handshake.Handshake{ PartitionID: 0, // invalid partition ID @@ -228,7 +228,7 @@ func Test_onHandshake(t *testing.T) { return nil, expErr }, } - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) err = node.onHandshake(t.Context(), &msg) require.EqualError(t, err, fmt.Errorf(`reading partition %s certificate: %w`, msg.PartitionID, expErr).Error()) @@ -245,7 +245,7 @@ func Test_onHandshake(t *testing.T) { return newMockShardInfo(t, nodeID.String(), publicKey, certResp), nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) msg := handshake.Handshake{ @@ -271,7 +271,7 @@ func Test_onHandshake(t *testing.T) { return newMockShardInfo(t, nodeID.String(), publicKey, certResp), nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) 
require.NoError(t, err) msg := handshake.Handshake{ @@ -298,7 +298,7 @@ func Test_handlePartitionMsg(t *testing.T) { t.Run("unsupported message", func(t *testing.T) { partNet := mockPartitionNet{} cm := mockConsensusManager{} - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) err = node.handlePartitionMsg(t.Context(), 555) require.EqualError(t, err, `unknown message type int`) @@ -320,7 +320,7 @@ func Test_handlePartitionMsg(t *testing.T) { return newMockShardInfo(t, nodeID.String(), publicKey, certResp), nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) msg := handshake.Handshake{ @@ -343,7 +343,7 @@ func Test_handlePartitionMsg(t *testing.T) { return nil, expErr }, } - node, err := New(&nwPeer, partNet, cm, nopObs) + node, err := New(&nwPeer, partNet, cm, nopObs, nil) require.NoError(t, err) msg := certification.BlockCertificationRequest{ @@ -363,7 +363,7 @@ func Test_partitionMsgLoop(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) ctx, cancel := context.WithCancel(t.Context()) @@ -385,7 +385,7 @@ func Test_partitionMsgLoop(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) go func() { @@ -410,7 +410,7 @@ func Test_partitionMsgLoop(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) ctx, cancel := context.WithCancel(t.Context()) @@ 
-521,7 +521,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) err = node.onBlockCertificationRequest(t.Context(), &validCertRequest) require.EqualError(t, err, `acquiring shard 00000001 - info: no SI`) @@ -534,7 +534,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) cr := validCertRequest cr.NodeID = "not valid ID" @@ -563,7 +563,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { return &storage.ShardInfo{LastCR: &certResp}, nil }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) err = node.onBlockCertificationRequest(t.Context(), &validCertRequest) @@ -590,7 +590,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { return si, nil }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) err = node.onBlockCertificationRequest(t.Context(), &validCertRequest) @@ -613,7 +613,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { } key := partitionShard{validCertRequest.PartitionID, validCertRequest.ShardID.Key()} - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) require.NotContains(t, node.incomingRequests.store, key) @@ -657,7 +657,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { } key := partitionShard{validCertRequest.PartitionID, validCertRequest.ShardID.Key()} - node, err := New(&nwPeer, partNet, cm, 
testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) require.NotContains(t, node.incomingRequests.store, key) @@ -714,7 +714,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { } key := partitionShard{validCertRequest.PartitionID, validCertRequest.ShardID.Key()} - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) require.NotContains(t, node.incomingRequests.store, key) @@ -754,7 +754,7 @@ func Test_handleConsensus(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) ctx, cancel := context.WithCancel(t.Context()) @@ -775,7 +775,7 @@ func Test_handleConsensus(t *testing.T) { cm := mockConsensusManager{certificationResult: make(chan *certification.CertificationResponse)} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) go func() { @@ -806,7 +806,7 @@ func Test_handleConsensus(t *testing.T) { }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) require.NoError(t, err) go func() { From 25d4a0a2278be54ec1c604b3b6d4f553ef3ecddf Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Thu, 8 Jan 2026 16:43:41 +0200 Subject: [PATCH 02/17] with proof checking --- go.sum | 2 -- .../consensus/zkverifier/sp1-verifier-ffi/Cargo.toml | 2 +- .../consensus/zkverifier/sp1-verifier-ffi/src/lib.rs | 5 ++--- rootchain/consensus/zkverifier/sp1_verifier.go | 2 +- rootchain/node.go | 8 +++++++- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/go.sum 
b/go.sum index b6985325..b6a37583 100644 --- a/go.sum +++ b/go.sum @@ -505,8 +505,6 @@ github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZ github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= -github.com/unicitynetwork/bft-go-base v1.0.3-0.20251230081246-e5204716ebf2 h1:loUqSmmtlkjpIYpfHF+OzHon6EU8fyszOMXj4jfOPBg= -github.com/unicitynetwork/bft-go-base v1.0.3-0.20251230081246-e5204716ebf2/go.mod h1:hBnOG52VRy/vpgIBUulTgk7PBTwODZ2xkVjCEu5yRcQ= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml index 74ab13c0..57c6d667 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" crate-type = ["cdylib", "staticlib"] [dependencies] -sp1-sdk = "3.0.0" +sp1-sdk = "5.0.8" anyhow = "1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs index 137297c6..8b04137f 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs @@ -1,7 +1,6 @@ use std::ffi::{CStr, CString}; use std::os::raw::c_char; -use std::ptr; -use sp1_sdk::{ProverClient, SP1Stdin, SP1ProofWithPublicValues}; +use sp1_sdk::{ProverClient, SP1ProofWithPublicValues}; /// Error codes for FFI interface 
#[repr(C)] @@ -86,7 +85,7 @@ fn verify_proof_internal( .map_err(|e| anyhow::anyhow!("Failed to deserialize proof: {}", e))?; // Create prover client (used for verification) - let client = ProverClient::new(); + let client = ProverClient::from_env(); // Verify the proof client.verify(&proof, &vkey) diff --git a/rootchain/consensus/zkverifier/sp1_verifier.go b/rootchain/consensus/zkverifier/sp1_verifier.go index 7bcdc535..fc60e196 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier.go +++ b/rootchain/consensus/zkverifier/sp1_verifier.go @@ -31,7 +31,7 @@ func NewSP1Verifier(vkeyPath string) (*SP1Verifier, error) { ffiVerifier: ffiVerifier, }, nil } else { - return nil, fmt.Errorf("SP1 FFI verifier not available: %w", err) + return nil, fmt.Errorf("SP1 FFI verifier not available: %w, vkeyPath: %s", err, vkeyPath) } } diff --git a/rootchain/node.go b/rootchain/node.go index 49fd3792..ab8788ac 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -256,9 +256,15 @@ func (v *Node) onBlockCertificationRequest(ctx context.Context, req *certificati // Verify ZK proof (if verifier is enabled) if err := v.verifyZKProof(ctx, req, si); err != nil { - v.log.WarnContext(ctx, "ZK proof verification failed", + v.log.WarnContext(ctx, "ZK proof verification failed - sending last valid UC", logger.Error(err), logger.Shard(req.PartitionID, req.ShardID)) + + // Send last valid UC immediately when proof verification fails + // This allows the partition to sync back to the last certified state + if se := v.sendResponse(ctx, req.NodeID, si.LastCR); se != nil { + err = errors.Join(err, fmt.Errorf("failed to send last valid UC: %w", se)) + } return fmt.Errorf("ZK proof verification failed: %w", err) } From 1b5b892517252054be9bd34382e7ca7862de2594 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Tue, 13 Jan 2026 15:12:26 +0200 Subject: [PATCH 03/17] light client proof mode --- go.mod | 2 + rootchain/consensus/consensus_manager.go | 2 +- .../light-client-verifier-ffi/Cargo.toml | 24 
++ .../light-client-verifier-ffi/README.md | 264 ++++++++++++++ .../light-client-verifier-ffi/build.sh | 16 + .../light_client_verifier.h | 87 +++++ .../light-client-verifier-ffi/src/lib.rs | 332 ++++++++++++++++++ .../zkverifier/light_client_verifier.go | 89 +++++ .../zkverifier/light_client_verifier_ffi.go | 113 ++++++ .../sp1-verifier-ffi/sp1_verifier.h | 2 + .../zkverifier/sp1-verifier-ffi/src/lib.rs | 40 ++- .../consensus/zkverifier/sp1_verifier.go | 17 +- .../consensus/zkverifier/sp1_verifier_ffi.go | 6 +- rootchain/consensus/zkverifier/verifier.go | 9 +- .../consensus/zkverifier/verifier_test.go | 23 +- rootchain/node.go | 5 +- 16 files changed, 1008 insertions(+), 23 deletions(-) create mode 100644 rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml create mode 100644 rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md create mode 100755 rootchain/consensus/zkverifier/light-client-verifier-ffi/build.sh create mode 100644 rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h create mode 100644 rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs create mode 100644 rootchain/consensus/zkverifier/light_client_verifier.go create mode 100644 rootchain/consensus/zkverifier/light_client_verifier_ffi.go diff --git a/go.mod b/go.mod index e1824c5d..56638e3c 100644 --- a/go.mod +++ b/go.mod @@ -39,6 +39,8 @@ require ( gopkg.in/yaml.v3 v3.0.1 ) +replace github.com/unicitynetwork/bft-go-base => ../bft-go-base + require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock v1.3.5 // indirect diff --git a/rootchain/consensus/consensus_manager.go b/rootchain/consensus/consensus_manager.go index 97e91a96..fc303cf2 100644 --- a/rootchain/consensus/consensus_manager.go +++ b/rootchain/consensus/consensus_manager.go @@ -929,7 +929,7 @@ func (x *ConsensusManager) processNewRoundEvent(ctx context.Context) { } x.leaderCnt.Add(ctx, 1) - x.log.InfoContext(ctx, "new round start, node 
is leader") + // x.log.InfoContext(ctx, "new round start, node is leader") // find shards with T2 timeouts timedOutShards, err := x.t2Timeouts.GetT2Timeouts(round) diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml new file mode 100644 index 00000000..8cf3f5ea --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "light-client-verifier-ffi" +version = "0.1.0" +edition = "2021" + +# Make this package independent of parent workspace +[workspace] + +[lib] +crate-type = ["cdylib", "staticlib"] + +[dependencies] +# Core dependencies for witness deserialization and validation +rkyv = { version = "0.8.10", features = ["std", "unaligned"] } +anyhow = "1.0" + +# ethrex dependencies for validation logic (use local paths to uni-evm's ethrex submodule) +ethrex-core = { path = "../../../../../ethrex/crates/common", package = "ethrex-common" } +guest_program = { path = "../../../../../ethrex/crates/l2/prover/src/guest_program" } + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md new file mode 100644 index 00000000..61d792c1 --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md @@ -0,0 +1,264 @@ +# Light Client Verifier FFI + +Rust FFI library for verifying uni-evm light client proofs in BFT Core. + +## Overview + +This library provides a Foreign Function Interface (FFI) for BFT Core (written in Go) to verify light client proofs from uni-evm. In light client mode, instead of generating zero-knowledge proofs (which take 5+ minutes), uni-evm sends the full witness data to BFT Core, which executes the validation logic directly. 
+ +**Performance**: Light client mode is ~300x faster than SP1 mode for development: +- SP1 mode: 5+ minutes per block +- Light client mode: ~5 seconds per block + +## How It Works + +### Light Client Proof Format + +``` +┌─────────────┬──────────────────────────────────────┐ +│ Magic (8B) │ Serialized ProgramInput (varies) │ +│ "LCPROOF\0" │ (witness + blocks + config) │ +└─────────────┴──────────────────────────────────────┘ +``` + +### Verification Process + +1. **Magic Header Check**: Validates the first 8 bytes are `LCPROOF\0` +2. **Deserialization**: Deserializes `ProgramInput` from the payload (rkyv format) +3. **Execution**: Calls `guest_program::execution::stateless_validation_l1()` +4. **State Root Validation**: Verifies `prev_state_root` and `new_state_root` match + +## Building + +### Prerequisites + +- Rust nightly (ethrex uses unstable features) +- Access to uni-evm's ethrex submodule + +### Build Steps + +```bash +# From this directory +./build.sh + +# Or manually +cargo build --release +``` + +Output: +- `target/release/liblight_client_verifier_ffi.a` - Static library +- `target/release/liblight_client_verifier_ffi.dylib` - Dynamic library (macOS) +- `target/release/liblight_client_verifier_ffi.so` - Dynamic library (Linux) + +## Usage from Go + +### Include in BFT Core + +The library is automatically linked when building BFT Core's zkverifier package: + +```go +// In bft-core/rootchain/consensus/zkverifier/verifier.go +cfg := &zkverifier.Config{ + Enabled: true, + ProofType: zkverifier.ProofTypeLightClient, +} + +verifier, err := zkverifier.NewVerifier(cfg) +// verifier will use light client FFI automatically +``` + +### Direct FFI Usage (Advanced) + +```go +import "C" +// #cgo LDFLAGS: -L${SRCDIR}/light-client-verifier-ffi/target/release -llight_client_verifier_ffi +// #include "light-client-verifier-ffi/light_client_verifier.h" + +// Verify a light client proof +result := C.light_client_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&payload[0])), + 
C.size_t(len(payload)), + (*C.uint8_t)(unsafe.Pointer(&prevStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), + &errorOut, +) +``` + +## API Reference + +### C Functions + +#### `light_client_verify_proof` + +Verifies a light client proof payload. + +```c +LightClientVerifyResult light_client_verify_proof( + const uint8_t* payload_bytes, + size_t payload_len, + const uint8_t* prev_state_root, // 32 bytes + const uint8_t* new_state_root, // 32 bytes + char** error_out +); +``` + +**Returns**: +- `LIGHT_CLIENT_VERIFY_SUCCESS` (0) - Proof is valid +- `LIGHT_CLIENT_VERIFY_INVALID_PROOF` (1) - Proof data is malformed +- `LIGHT_CLIENT_VERIFY_INVALID_MAGIC_HEADER` (2) - Magic header mismatch +- `LIGHT_CLIENT_VERIFY_INVALID_PUBLIC_INPUTS` (3) - State roots don't match +- `LIGHT_CLIENT_VERIFY_VERIFICATION_FAILED` (4) - Validation logic failed +- `LIGHT_CLIENT_VERIFY_INTERNAL_ERROR` (5) - Internal error + +#### `light_client_validate_payload` + +Validates payload format without executing validation logic. + +```c +LightClientVerifyResult light_client_validate_payload( + const uint8_t* payload_bytes, + size_t payload_len, + char** error_out +); +``` + +#### `light_client_ffi_version` + +Returns the FFI library version string. + +```c +const char* light_client_ffi_version(void); +``` + +#### `light_client_free_string` + +Frees a string allocated by the library. + +```c +void light_client_free_string(char* s); +``` + +## Testing + +### Unit Tests + +```bash +cargo test +``` + +### Integration Tests + +See `bft-core/rootchain/consensus/zkverifier/verifier_test.go` for Go integration tests. + +## Architecture + +### Dependencies + +- **rkyv** - Zero-copy deserialization (matches uni-evm's serialization format) +- **ethrex-common** - Core types (Block, H256, etc.) 
+- **guest_program** - Validation logic (`stateless_validation_l1`) + +### File Structure + +``` +light-client-verifier-ffi/ +├── src/ +│ └── lib.rs # FFI implementation +├── light_client_verifier.h # C header +├── Cargo.toml # Dependencies +├── build.sh # Build script +└── README.md # This file +``` + +## Configuration + +### Chain ID + +Currently hardcoded to `1` (matching uni-evm default). TODO: Make configurable via BFT Core config. + +```rust +// In lib.rs +let chain_id = 1; // TODO: Get from BFT Core configuration +``` + +## Troubleshooting + +### Build Errors + +**Error**: `failed to load manifest for dependency 'ethrex-common'` + +**Solution**: Ensure you're building from within the uni-evm repository structure, where the ethrex submodule is available at `../../../../../ethrex/`. + +**Error**: `undefined reference to 'light_client_verify_proof'` + +**Solution**: Ensure the Rust library is built before building Go code: +```bash +cd light-client-verifier-ffi +cargo build --release +cd ../.. +go build +``` + +### Runtime Errors + +**Error**: "invalid magic header" + +**Cause**: Payload doesn't start with `LCPROOF\0` or is from SP1 mode. + +**Solution**: Ensure uni-evm is configured with `prover_type = "light_client"`. + +**Error**: "Failed to deserialize ProgramInput" + +**Cause**: Payload format mismatch between uni-evm and BFT Core versions. + +**Solution**: Ensure both uni-evm and BFT Core are using compatible ethrex versions. + +## Performance + +### Proof Sizes + +| Block Type | Payload Size | +|------------|-------------| +| Empty block | ~1.4 KB | +| 3 transactions | ~2 KB | +| 10 transactions | ~5-10 KB | +| 100 transactions | ~50-100 KB | + +Compare to: +- Exec mode: 4 bytes (dummy) +- SP1 mode: ~50 KB (compressed STARK) + +### Verification Time + +| Mode | Time | +|------|------| +| Light client | ~100-200ms | +| SP1 | ~10ms (proof verification only) | + +Light client is slower to verify but much faster to generate (no proving overhead). 
+ +## Development Workflow + +### Modify Validation Logic + +1. Edit `guest_program` crate in ethrex +2. Rebuild this FFI library: `cargo build --release` +3. Rebuild BFT Core: `cd ../.. && go build` + +### Add New Exports + +1. Add Rust function with `#[no_mangle]` and `extern "C"` +2. Add declaration to `light_client_verifier.h` +3. Add Go wrapper in `light_client_verifier_ffi.go` + +## Security Considerations + +- **Not succinct**: Full witness data is transmitted (1-5MB vs 50KB for SP1) +- **Development only**: Recommended for local development and testing +- **Production use**: Switch to SP1 mode for production deployments + +## See Also + +- [LIGHT_CLIENT_MODE.md](../../../../../LIGHT_CLIENT_MODE.md) - User documentation +- [LIGHT_CLIENT_MODE_PLAN.md](../../../../../LIGHT_CLIENT_MODE_PLAN.md) - Implementation plan +- [SP1 Verifier FFI](../sp1-verifier-ffi/README.md) - Similar FFI for SP1 proofs diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/build.sh b/rootchain/consensus/zkverifier/light-client-verifier-ffi/build.sh new file mode 100755 index 00000000..807a342b --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/build.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Build script for light-client-verifier-ffi +# This builds the Rust FFI library that BFT Core uses to verify light client proofs + +set -e + +echo "Building light-client-verifier-ffi..." + +# Build in release mode for optimal performance +cargo build --release + +echo "Build complete!" 
+echo "Library: target/release/liblight_client_verifier_ffi.a" +echo " target/release/liblight_client_verifier_ffi.so (Linux)" +echo " target/release/liblight_client_verifier_ffi.dylib (macOS)" diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h b/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h new file mode 100644 index 00000000..f1c35829 --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h @@ -0,0 +1,87 @@ +/** + * Light Client Verifier FFI + * + * C header for FFI interface to light client proof verification + */ + +#ifndef LIGHT_CLIENT_VERIFIER_H +#define LIGHT_CLIENT_VERIFIER_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Result codes for light client verification + */ +typedef enum { + LIGHT_CLIENT_VERIFY_SUCCESS = 0, + LIGHT_CLIENT_VERIFY_INVALID_PROOF = 1, + LIGHT_CLIENT_VERIFY_INVALID_MAGIC_HEADER = 2, + LIGHT_CLIENT_VERIFY_INVALID_PUBLIC_INPUTS = 3, + LIGHT_CLIENT_VERIFY_VERIFICATION_FAILED = 4, + LIGHT_CLIENT_VERIFY_INTERNAL_ERROR = 5, +} LightClientVerifyResult; + +/** + * Verify a light client proof payload + * + * The payload should contain: + * - Magic header: "LCPROOF\0" (8 bytes) + * - Serialized ProgramInput (rkyv format) + * + * @param payload_bytes Pointer to payload bytes + * @param payload_len Length of payload in bytes + * @param prev_state_root Pointer to 32-byte previous state root + * @param new_state_root Pointer to 32-byte new state root + * @param block_hash Pointer to 32-byte block hash + * @param error_out Output pointer for error message (must be freed with light_client_free_string) + * @return LightClientVerifyResult status code + */ +LightClientVerifyResult light_client_verify_proof( + const uint8_t* payload_bytes, + size_t payload_len, + const uint8_t* prev_state_root, + const uint8_t* new_state_root, + const uint8_t* block_hash, + char** error_out +); + +/** + * Free a string 
allocated by light_client_verify_proof + * + * @param s Pointer to string to free + */ +void light_client_free_string(char* s); + +/** + * Get the version of the FFI library + * + * @return Version string (do not free) + */ +const char* light_client_ffi_version(void); + +/** + * Validate a light client payload format + * + * Checks magic header and ProgramInput deserialization without executing validation. + * + * @param payload_bytes Pointer to payload bytes + * @param payload_len Length of payload in bytes + * @param error_out Output pointer for error message (must be freed with light_client_free_string) + * @return LightClientVerifyResult status code (SUCCESS or error) + */ +LightClientVerifyResult light_client_validate_payload( + const uint8_t* payload_bytes, + size_t payload_len, + char** error_out +); + +#ifdef __cplusplus +} +#endif + +#endif /* LIGHT_CLIENT_VERIFIER_H */ diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs new file mode 100644 index 00000000..108cf72d --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs @@ -0,0 +1,332 @@ +use std::ffi::CString; +use std::os::raw::c_char; + +// Magic header for light client proofs: "LCPROOF\0" +const LIGHT_CLIENT_MAGIC: &[u8; 8] = b"LCPROOF\0"; + +/// Error codes for FFI interface +#[repr(C)] +pub enum LightClientVerifyResult { + Success = 0, + InvalidProof = 1, + InvalidMagicHeader = 2, + InvalidPublicInputs = 3, + VerificationFailed = 4, + InternalError = 5, +} + +/// Verify a light client proof payload +/// +/// # Arguments +/// * `payload_bytes` - Pointer to payload bytes (magic header + serialized ProgramInput) +/// * `payload_len` - Length of payload +/// * `prev_state_root` - Pointer to 32-byte previous state root +/// * `new_state_root` - Pointer to 32-byte new state root +/// * `block_hash` - Pointer to 32-byte block hash +/// * `error_out` - Output pointer for error 
message (caller must free with light_client_free_string) +/// +/// # Returns +/// LightClientVerifyResult code +#[no_mangle] +pub extern "C" fn light_client_verify_proof( + payload_bytes: *const u8, + payload_len: usize, + prev_state_root: *const u8, + new_state_root: *const u8, + block_hash: *const u8, + error_out: *mut *mut c_char, +) -> LightClientVerifyResult { + // Safety checks + if payload_bytes.is_null() { + set_error(error_out, "null pointer passed to light_client_verify_proof"); + return LightClientVerifyResult::InternalError; + } + + if prev_state_root.is_null() || new_state_root.is_null() || block_hash.is_null() { + set_error(error_out, "null state root or block hash pointer"); + return LightClientVerifyResult::InvalidPublicInputs; + } + + // Convert C pointers to Rust slices + let payload_data = unsafe { std::slice::from_raw_parts(payload_bytes, payload_len) }; + let prev_root = unsafe { std::slice::from_raw_parts(prev_state_root, 32) }; + let new_root = unsafe { std::slice::from_raw_parts(new_state_root, 32) }; + let blk_hash = unsafe { std::slice::from_raw_parts(block_hash, 32) }; + + // Perform verification + match verify_light_client_proof_internal(payload_data, prev_root, new_root, blk_hash) { + Ok(()) => LightClientVerifyResult::Success, + Err(e) => { + set_error(error_out, &e.to_string()); + match classify_error(&e) { + ErrorType::InvalidMagicHeader => LightClientVerifyResult::InvalidMagicHeader, + ErrorType::InvalidProof => LightClientVerifyResult::InvalidProof, + ErrorType::InvalidPublicInputs => LightClientVerifyResult::InvalidPublicInputs, + ErrorType::VerificationFailed => LightClientVerifyResult::VerificationFailed, + ErrorType::Internal => LightClientVerifyResult::InternalError, + } + } + } +} + +/// Internal verification logic +fn verify_light_client_proof_internal( + payload_data: &[u8], + prev_state_root: &[u8], + new_state_root: &[u8], + block_hash: &[u8], +) -> anyhow::Result<()> { + // 1. 
Check magic header + if payload_data.len() < 8 { + return Err(anyhow::anyhow!( + "Payload too short: expected at least 8 bytes for magic header, got {}", + payload_data.len() + )); + } + + if &payload_data[0..8] != LIGHT_CLIENT_MAGIC.as_slice() { + return Err(anyhow::anyhow!( + "Invalid magic header: expected {:?}, got {:?}", + LIGHT_CLIENT_MAGIC, + &payload_data[0..8] + )); + } + + // 2. Deserialize ProgramInput (skip 8-byte magic header) + let input_bytes = &payload_data[8..]; + let program_input = rkyv::from_bytes::(input_bytes) + .map_err(|e| anyhow::anyhow!("Failed to deserialize ProgramInput: {}", e))?; + + // 3. Validate that we have blocks + if program_input.blocks.is_empty() { + return Err(anyhow::anyhow!("No blocks in ProgramInput")); + } + + // 4. Use chain_id from blocks[0].header (assuming it's stored in number for now) + // TODO: Get chain_id from BFT Core configuration instead of hardcoding + // For now, use the default chain_id from uni-evm config (1) + let chain_id = 1; + + // 5. Execute stateless validation + let output = guest_program::execution::stateless_validation_l1( + program_input.blocks, + program_input.execution_witness, + program_input.elasticity_multiplier, + chain_id, + ) + .map_err(|e| anyhow::anyhow!("Stateless validation failed: {}", e))?; + + // 6. Convert public inputs to H256 + let prev_root_h256 = ethrex_core::H256::from_slice(prev_state_root); + let new_root_h256 = ethrex_core::H256::from_slice(new_state_root); + let block_hash_h256 = ethrex_core::H256::from_slice(block_hash); + + // 7. Verify state roots match + if output.initial_state_hash != prev_root_h256 { + return Err(anyhow::anyhow!( + "Previous state root mismatch: expected {:?}, got {:?}", + prev_root_h256, + output.initial_state_hash + )); + } + + if output.final_state_hash != new_root_h256 { + return Err(anyhow::anyhow!( + "New state root mismatch: expected {:?}, got {:?}", + new_root_h256, + output.final_state_hash + )); + } + + // 8. 
Verify block hash matches + if output.last_block_hash != block_hash_h256 { + return Err(anyhow::anyhow!( + "Block hash mismatch: expected {:?}, got {:?}", + block_hash_h256, + output.last_block_hash + )); + } + + Ok(()) +} + +/// Free a string allocated by this library +#[no_mangle] +pub extern "C" fn light_client_free_string(s: *mut c_char) { + if !s.is_null() { + unsafe { + let _ = CString::from_raw(s); + } + } +} + +/// Get the version of this FFI library +#[no_mangle] +pub extern "C" fn light_client_ffi_version() -> *const c_char { + const VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + VERSION.as_ptr() as *const c_char +} + +/// Validate a light client payload format +/// +/// # Arguments +/// * `payload_bytes` - Pointer to payload bytes +/// * `payload_len` - Length of payload +/// * `error_out` - Output pointer for error message (caller must free with light_client_free_string) +/// +/// # Returns +/// LightClientVerifyResult code (Success or InvalidProof) +#[no_mangle] +pub extern "C" fn light_client_validate_payload( + payload_bytes: *const u8, + payload_len: usize, + error_out: *mut *mut c_char, +) -> LightClientVerifyResult { + // Safety checks + if payload_bytes.is_null() { + set_error(error_out, "null pointer passed to light_client_validate_payload"); + return LightClientVerifyResult::InternalError; + } + + if payload_len < 8 { + set_error(error_out, "payload too short (need at least 8 bytes for magic)"); + return LightClientVerifyResult::InvalidProof; + } + + // Convert C pointer to Rust slice + let payload_data = unsafe { std::slice::from_raw_parts(payload_bytes, payload_len) }; + + // Check magic header + if &payload_data[0..8] != LIGHT_CLIENT_MAGIC.as_slice() { + set_error(error_out, "invalid magic header"); + return LightClientVerifyResult::InvalidMagicHeader; + } + + // Try to deserialize ProgramInput + let input_bytes = &payload_data[8..]; + match rkyv::from_bytes::(input_bytes) { + Ok(_) => LightClientVerifyResult::Success, + Err(e) 
=> { + set_error(error_out, &format!("Failed to deserialize ProgramInput: {}", e)); + LightClientVerifyResult::InvalidProof + } + } +} + +// Helper functions + +enum ErrorType { + InvalidMagicHeader, + InvalidProof, + InvalidPublicInputs, + VerificationFailed, + Internal, +} + +fn classify_error(err: &anyhow::Error) -> ErrorType { + let msg = err.to_string().to_lowercase(); + if msg.contains("magic header") { + ErrorType::InvalidMagicHeader + } else if msg.contains("deserialize") { + ErrorType::InvalidProof + } else if msg.contains("state root mismatch") || msg.contains("public values") { + ErrorType::InvalidPublicInputs + } else if msg.contains("validation failed") { + ErrorType::VerificationFailed + } else { + ErrorType::Internal + } +} + +fn set_error(error_out: *mut *mut c_char, message: &str) { + if !error_out.is_null() { + if let Ok(c_string) = CString::new(message) { + unsafe { + *error_out = c_string.into_raw(); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ffi::CStr; + use std::ptr; + + #[test] + fn test_null_pointers() { + let mut error: *mut c_char = ptr::null_mut(); + let result = light_client_verify_proof( + ptr::null(), + 0, + ptr::null(), + ptr::null(), + ptr::null(), + &mut error, + ); + assert_eq!(result as i32, LightClientVerifyResult::InternalError as i32); + + if !error.is_null() { + light_client_free_string(error); + } + } + + #[test] + fn test_version() { + let version = light_client_ffi_version(); + assert!(!version.is_null()); + let version_str = unsafe { CStr::from_ptr(version) }; + assert!(version_str.to_str().unwrap().starts_with("0.1.0")); + } + + #[test] + fn test_invalid_magic_header() { + let payload = vec![0u8; 100]; // Invalid magic + let prev_root = [0u8; 32]; + let new_root = [0u8; 32]; + let block_hash = [0u8; 32]; + let mut error: *mut c_char = ptr::null_mut(); + + let result = light_client_verify_proof( + payload.as_ptr(), + payload.len(), + prev_root.as_ptr(), + new_root.as_ptr(), + 
block_hash.as_ptr(), + &mut error, + ); + + assert_eq!(result as i32, LightClientVerifyResult::InvalidMagicHeader as i32); + + if !error.is_null() { + light_client_free_string(error); + } + } + + #[test] + fn test_payload_too_short() { + let payload = vec![0u8; 5]; // Too short for magic + let prev_root = [0u8; 32]; + let new_root = [0u8; 32]; + let block_hash = [0u8; 32]; + let mut error: *mut c_char = ptr::null_mut(); + + let result = light_client_verify_proof( + payload.as_ptr(), + payload.len(), + prev_root.as_ptr(), + new_root.as_ptr(), + block_hash.as_ptr(), + &mut error, + ); + + // Payload too short should return InvalidMagicHeader (checked first) or InvalidProof + // The actual error is InvalidMagicHeader (2) because we check magic header first + assert_eq!(result as i32, LightClientVerifyResult::InvalidMagicHeader as i32); + + if !error.is_null() { + light_client_free_string(error); + } + } +} diff --git a/rootchain/consensus/zkverifier/light_client_verifier.go b/rootchain/consensus/zkverifier/light_client_verifier.go new file mode 100644 index 00000000..3bba62f0 --- /dev/null +++ b/rootchain/consensus/zkverifier/light_client_verifier.go @@ -0,0 +1,89 @@ +package zkverifier + +import ( + "encoding/hex" + "fmt" + "log/slog" +) + +// LightClientVerifier verifies light client proofs by executing validation logic directly +type LightClientVerifier struct { + enabled bool + ffiVerifier *LightClientVerifierFFI +} + +// NewLightClientVerifier creates a new light client verifier +func NewLightClientVerifier() (*LightClientVerifier, error) { + // Try to create FFI verifier + if ffiVerifier, err := NewLightClientVerifierFFI(); err == nil { + slog.Info("Using Light Client FFI verifier", "version", GetLightClientFFIVersion()) + return &LightClientVerifier{ + enabled: true, + ffiVerifier: ffiVerifier, + }, nil + } else { + return nil, fmt.Errorf("Light Client FFI verifier not available: %w", err) + } +} + +// VerifyProof verifies a light client proof payload +// +// 
The proof payload should contain: +// - Magic header: "LCPROOF\0" (8 bytes) +// - Serialized ProgramInput (rkyv format) +// +// This function: +// 1. Validates the magic header +// 2. Deserializes the ProgramInput +// 3. Executes stateless_validation_l1() +// 4. Verifies the state roots and block hash match +func (v *LightClientVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + if !v.enabled { + return ErrVerifierNotConfigured + } + + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(previousStateRoot)) + } + + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(newStateRoot)) + } + + if len(blockHash) != 32 { + return fmt.Errorf("%w: blockHash must be 32 bytes, got %d", ErrInvalidProofFormat, len(blockHash)) + } + + // Check magic header + if len(proof) < 8 { + return fmt.Errorf("%w: payload too short for magic header", ErrInvalidProofFormat) + } + + magic := proof[0:8] + expectedMagic := []byte("LCPROOF\x00") + if string(magic) != string(expectedMagic) { + return fmt.Errorf("%w: invalid magic header: expected %v, got %v", + ErrInvalidProofFormat, expectedMagic, magic) + } + + slog.Debug("Verifying light client proof", + "payload_size", len(proof), + "witness_size", len(proof)-8, + "prev_root", hex.EncodeToString(previousStateRoot[:8]), + "new_root", hex.EncodeToString(newStateRoot[:8]), + "block_hash", hex.EncodeToString(blockHash[:8])) + + return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot, blockHash) +} + +func (v *LightClientVerifier) ProofType() ProofType { + return ProofTypeLightClient +} + +func (v *LightClientVerifier) IsEnabled() bool { + return v.enabled +} diff --git a/rootchain/consensus/zkverifier/light_client_verifier_ffi.go 
b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go new file mode 100644 index 00000000..763cdf72 --- /dev/null +++ b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go @@ -0,0 +1,113 @@ +package zkverifier + +// #cgo LDFLAGS: -L${SRCDIR}/light-client-verifier-ffi/target/release -llight_client_verifier_ffi -ldl -lm +// #include "light-client-verifier-ffi/light_client_verifier.h" +// #include +import "C" +import ( + "fmt" + "unsafe" +) + +// LightClientVerifierFFI wraps the Rust FFI library for light client proof verification +type LightClientVerifierFFI struct { + enabled bool +} + +// NewLightClientVerifierFFI creates a new FFI-based light client verifier +func NewLightClientVerifierFFI() (*LightClientVerifierFFI, error) { + // Verify FFI library is available + version := C.light_client_ffi_version() + if version == nil { + return nil, fmt.Errorf("FFI library not available") + } + + return &LightClientVerifierFFI{ + enabled: true, + }, nil +} + +// VerifyProof verifies a light client proof using the Rust FFI library +func (v *LightClientVerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + // Validate inputs + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(blockHash) != 32 { + return fmt.Errorf("%w: blockHash must be 32 bytes", ErrInvalidProofFormat) + } + + // Prepare C pointers + var errorOut *C.char + defer func() { + if errorOut != nil { + C.light_client_free_string(errorOut) + } + }() + + // Call FFI verification function + result := C.light_client_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&proof[0])), + C.size_t(len(proof)), + (*C.uint8_t)(unsafe.Pointer(&previousStateRoot[0])), + 
(*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&blockHash[0])), + &errorOut, + ) + + // Check result + switch result { + case C.LIGHT_CLIENT_VERIFY_SUCCESS: + return nil + case C.LIGHT_CLIENT_VERIFY_INVALID_PROOF: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrInvalidProofFormat, C.GoString(errorOut)) + } + return ErrInvalidProofFormat + case C.LIGHT_CLIENT_VERIFY_INVALID_MAGIC_HEADER: + if errorOut != nil { + return fmt.Errorf("invalid magic header: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid magic header") + case C.LIGHT_CLIENT_VERIFY_INVALID_PUBLIC_INPUTS: + if errorOut != nil { + return fmt.Errorf("invalid public inputs: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid public inputs") + case C.LIGHT_CLIENT_VERIFY_VERIFICATION_FAILED: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrProofVerificationFailed, C.GoString(errorOut)) + } + return ErrProofVerificationFailed + default: + if errorOut != nil { + return fmt.Errorf("internal error: %s", C.GoString(errorOut)) + } + return fmt.Errorf("internal error") + } +} + +// ProofType returns the proof type +func (v *LightClientVerifierFFI) ProofType() ProofType { + return ProofTypeLightClient +} + +// IsEnabled returns true if the verifier is enabled +func (v *LightClientVerifierFFI) IsEnabled() bool { + return v.enabled +} + +// GetLightClientFFIVersion returns the version of the FFI library +func GetLightClientFFIVersion() string { + version := C.light_client_ffi_version() + if version == nil { + return "unknown" + } + return C.GoString(version) +} diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h index 3df15d70..2e47fd2c 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h @@ -35,6 +35,7 @@ typedef enum { * @param proof_len Length of proof in bytes * @param 
prev_state_root Pointer to 32-byte previous state root * @param new_state_root Pointer to 32-byte new state root + * @param block_hash Pointer to 32-byte block hash * @param error_out Output pointer for error message (must be freed with sp1_free_string) * @return SP1VerifyResult status code */ @@ -45,6 +46,7 @@ SP1VerifyResult sp1_verify_proof( size_t proof_len, const uint8_t* prev_state_root, const uint8_t* new_state_root, + const uint8_t* block_hash, char** error_out ); diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs index 8b04137f..58c8a370 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs @@ -22,6 +22,7 @@ pub enum SP1VerifyResult { /// * `proof_len` - Length of proof /// * `prev_state_root` - Pointer to 32-byte previous state root /// * `new_state_root` - Pointer to 32-byte new state root +/// * `block_hash` - Pointer to 32-byte block hash /// * `error_out` - Output pointer for error message (caller must free with sp1_free_string) /// /// # Returns @@ -34,6 +35,7 @@ pub extern "C" fn sp1_verify_proof( proof_len: usize, prev_state_root: *const u8, new_state_root: *const u8, + block_hash: *const u8, error_out: *mut *mut c_char, ) -> SP1VerifyResult { // Safety checks @@ -42,8 +44,8 @@ pub extern "C" fn sp1_verify_proof( return SP1VerifyResult::InternalError; } - if prev_state_root.is_null() || new_state_root.is_null() { - set_error(error_out, "null state root pointer"); + if prev_state_root.is_null() || new_state_root.is_null() || block_hash.is_null() { + set_error(error_out, "null state root or block hash pointer"); return SP1VerifyResult::InvalidPublicInputs; } @@ -52,9 +54,10 @@ pub extern "C" fn sp1_verify_proof( let proof_data = unsafe { std::slice::from_raw_parts(proof_bytes, proof_len) }; let prev_root = unsafe { std::slice::from_raw_parts(prev_state_root, 32) }; let new_root = unsafe { 
std::slice::from_raw_parts(new_state_root, 32) }; + let blk_hash = unsafe { std::slice::from_raw_parts(block_hash, 32) }; // Perform verification - match verify_proof_internal(vkey_data, proof_data, prev_root, new_root) { + match verify_proof_internal(vkey_data, proof_data, prev_root, new_root, blk_hash) { Ok(()) => SP1VerifyResult::Success, Err(e) => { set_error(error_out, &e.to_string()); @@ -75,6 +78,7 @@ fn verify_proof_internal( proof_data: &[u8], prev_state_root: &[u8], new_state_root: &[u8], + block_hash: &[u8], ) -> anyhow::Result<()> { // Deserialize verification key let vkey: sp1_sdk::SP1VerifyingKey = bincode::deserialize(vkey_data) @@ -94,11 +98,21 @@ fn verify_proof_internal( // Extract public values from proof let public_values = proof.public_values.as_slice(); - // Validate that public values contain expected state roots - // Expected format: [prev_state_root (32 bytes), new_state_root (32 bytes)] - if public_values.len() < 64 { + // Validate that public values contain expected data + // Expected format (from ProgramOutput::encode() with l2 feature): + // - 0-31: initial_state_hash (prev_state_root) + // - 32-63: final_state_hash (new_state_root) + // - 64-95: l1_out_messages_merkle_root (L2 feature) + // - 96-127: l1_in_messages_rolling_hash (L2 feature) + // - 128-159: blob_versioned_hash (L2 feature) + // - 160-191: last_block_hash (block_hash) + // - 192+: chain_id, non_privileged_count, etc. + // + // Note: ethrex's guest program has the 'l2' feature enabled by default, + // which adds 3 H256 fields (96 bytes) before the block hash. 
+ if public_values.len() < 192 { return Err(anyhow::anyhow!( - "Public values too short: expected at least 64 bytes, got {}", + "Public values too short: expected at least 192 bytes for ethrex l2 format, got {}", public_values.len() )); } @@ -121,6 +135,15 @@ fn verify_proof_internal( )); } + // Check block hash matches (at offset 160 due to l2 feature fields) + if &public_values[160..192] != block_hash { + return Err(anyhow::anyhow!( + "Block hash mismatch: expected {:?}, got {:?}", + block_hash, + &public_values[160..192] + )); + } + Ok(()) } @@ -218,6 +241,8 @@ fn set_error(error_out: *mut *mut c_char, message: &str) { #[cfg(test)] mod tests { use super::*; + use std::ffi::CStr; + use std::ptr; #[test] fn test_null_pointers() { @@ -229,6 +254,7 @@ mod tests { 0, ptr::null(), ptr::null(), + ptr::null(), &mut error, ); assert_eq!(result as i32, SP1VerifyResult::InternalError as i32); diff --git a/rootchain/consensus/zkverifier/sp1_verifier.go b/rootchain/consensus/zkverifier/sp1_verifier.go index fc60e196..6a72b40c 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier.go +++ b/rootchain/consensus/zkverifier/sp1_verifier.go @@ -55,12 +55,12 @@ func readFile(path string) ([]byte, error) { // // The proof should be a compressed SP1 proof generated by the prover. // The proof includes: -// - Public inputs: previousStateRoot, newStateRoot +// - Public inputs: previousStateRoot, newStateRoot, blockHash // - Proof data: SP1 compressed proof bytes // -// This function verifies that executing the program with previousStateRoot -// as input produces newStateRoot as output. -func (v *SP1Verifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error { +// This function verifies that executing the program produces the expected +// state roots and block hash as public outputs. 
+func (v *SP1Verifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { if !v.enabled { return ErrVerifierNotConfigured } @@ -77,12 +77,17 @@ func (v *SP1Verifier) VerifyProof(proof []byte, previousStateRoot []byte, newSta return fmt.Errorf("%w: newStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(newStateRoot)) } + if len(blockHash) != 32 { + return fmt.Errorf("%w: blockHash must be 32 bytes, got %d", ErrInvalidProofFormat, len(blockHash)) + } + slog.Debug("Verifying SP1 proof", "proof_size", len(proof), "prev_root", hex.EncodeToString(previousStateRoot[:8]), - "new_root", hex.EncodeToString(newStateRoot[:8])) + "new_root", hex.EncodeToString(newStateRoot[:8]), + "block_hash", hex.EncodeToString(blockHash[:8])) - return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot) + return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot, blockHash) } func (v *SP1Verifier) ProofType() ProofType { diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go index 9fec99e0..63fc1f96 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go @@ -59,7 +59,7 @@ func NewSP1VerifierFFI(vkeyPath string) (*SP1VerifierFFI, error) { } // VerifyProof verifies an SP1 proof using the Rust FFI library -func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error { +func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { // Validate inputs if len(proof) == 0 { return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) @@ -70,6 +70,9 @@ func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, new if len(newStateRoot) != 32 { return fmt.Errorf("%w: newStateRoot must be 32 bytes", ErrInvalidProofFormat) } + if len(blockHash) != 32 { + return 
fmt.Errorf("%w: blockHash must be 32 bytes", ErrInvalidProofFormat) + } // Prepare C pointers var errorOut *C.char @@ -87,6 +90,7 @@ func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, new C.size_t(len(proof)), (*C.uint8_t)(unsafe.Pointer(&previousStateRoot[0])), (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&blockHash[0])), &errorOut, ) diff --git a/rootchain/consensus/zkverifier/verifier.go b/rootchain/consensus/zkverifier/verifier.go index b70fae4a..1d037e1f 100644 --- a/rootchain/consensus/zkverifier/verifier.go +++ b/rootchain/consensus/zkverifier/verifier.go @@ -24,6 +24,8 @@ const ( ProofTypeRISC0 ProofType = "risc0" // ProofTypeExec indicates execution without proving (testing only) ProofTypeExec ProofType = "exec" + // ProofTypeLightClient indicates light client mode (full witness validation) + ProofTypeLightClient ProofType = "light_client" // ProofTypeNone indicates no proof verification (disabled) ProofTypeNone ProofType = "none" ) @@ -34,8 +36,9 @@ type ZKVerifier interface { // proof: The ZK proof bytes // previousStateRoot: Hash of the previous state // newStateRoot: Hash of the new state (claimed) + // blockHash: Hash of the block header (for light client mode) // Returns nil if proof is valid, error otherwise - VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error + VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error // ProofType returns the type of proofs this verifier handles ProofType() ProofType @@ -84,6 +87,8 @@ func NewVerifier(cfg *Config) (ZKVerifier, error) { switch cfg.ProofType { case ProofTypeSP1: return NewSP1Verifier(cfg.VerificationKeyPath) + case ProofTypeLightClient: + return NewLightClientVerifier() case ProofTypeRISC0: return nil, fmt.Errorf("RISC0 verifier not implemented") case ProofTypeExec, ProofTypeNone: @@ -96,7 +101,7 @@ func NewVerifier(cfg *Config) (ZKVerifier, error) { // NoOpVerifier is a 
verifier that always returns success (for testing/disabled mode) type NoOpVerifier struct{} -func (v *NoOpVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte) error { +func (v *NoOpVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { // No verification performed return nil } diff --git a/rootchain/consensus/zkverifier/verifier_test.go b/rootchain/consensus/zkverifier/verifier_test.go index 488063fb..cb86dab7 100644 --- a/rootchain/consensus/zkverifier/verifier_test.go +++ b/rootchain/consensus/zkverifier/verifier_test.go @@ -40,7 +40,7 @@ func TestNewVerifier_NoOpForExec(t *testing.T) { require.False(t, verifier.IsEnabled()) // Should accept any proof - err = verifier.VerifyProof([]byte("not a real proof"), make([]byte, 32), make([]byte, 32)) + err = verifier.VerifyProof([]byte("not a real proof"), make([]byte, 32), make([]byte, 32), make([]byte, 32)) require.NoError(t, err) } @@ -96,10 +96,10 @@ func TestNoOpVerifier(t *testing.T) { require.Equal(t, ProofTypeNone, v.ProofType()) // Should accept any input - err := v.VerifyProof(nil, nil, nil) + err := v.VerifyProof(nil, nil, nil, nil) require.NoError(t, err) - err = v.VerifyProof([]byte("test"), []byte("prev"), []byte("new")) + err = v.VerifyProof([]byte("test"), []byte("prev"), []byte("new"), []byte("block")) require.NoError(t, err) } @@ -118,6 +118,7 @@ func TestSP1Verifier_InvalidInputs(t *testing.T) { proof []byte previousStateRoot []byte newStateRoot []byte + blockHash []byte wantErr bool errContains string }{ @@ -126,6 +127,7 @@ func TestSP1Verifier_InvalidInputs(t *testing.T) { proof: []byte{}, previousStateRoot: make([]byte, 32), newStateRoot: make([]byte, 32), + blockHash: make([]byte, 32), wantErr: true, errContains: "proof is empty", }, @@ -134,6 +136,7 @@ func TestSP1Verifier_InvalidInputs(t *testing.T) { proof: make([]byte, 100), previousStateRoot: make([]byte, 16), newStateRoot: make([]byte, 32), + blockHash: 
make([]byte, 32), wantErr: true, errContains: "previousStateRoot must be 32 bytes", }, @@ -142,14 +145,25 @@ func TestSP1Verifier_InvalidInputs(t *testing.T) { proof: make([]byte, 100), previousStateRoot: make([]byte, 32), newStateRoot: make([]byte, 16), + blockHash: make([]byte, 32), wantErr: true, errContains: "newStateRoot must be 32 bytes", }, + { + name: "invalid block hash length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 32), + blockHash: make([]byte, 16), + wantErr: true, + errContains: "blockHash must be 32 bytes", + }, { name: "proof too small", proof: make([]byte, 32), // Less than 64 bytes previousStateRoot: make([]byte, 32), newStateRoot: make([]byte, 32), + blockHash: make([]byte, 32), wantErr: true, errContains: "SP1 proof too small", }, @@ -158,13 +172,14 @@ func TestSP1Verifier_InvalidInputs(t *testing.T) { proof: make([]byte, 128), previousStateRoot: make([]byte, 32), newStateRoot: make([]byte, 32), + blockHash: make([]byte, 32), wantErr: false, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - err := verifier.VerifyProof(tc.proof, tc.previousStateRoot, tc.newStateRoot) + err := verifier.VerifyProof(tc.proof, tc.previousStateRoot, tc.newStateRoot, tc.blockHash) if tc.wantErr { require.Error(t, err) require.Contains(t, err.Error(), tc.errContains) diff --git a/rootchain/node.go b/rootchain/node.go index ab8788ac..1d9de187 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -360,8 +360,9 @@ func (v *Node) verifyZKProof(ctx context.Context, req *certification.BlockCertif logger.Data(slog.String("proof_type", string(v.zkVerifier.ProofType()))), logger.Data(slog.Uint64("round", ir.RoundNumber))) - // Verify proof: previousStateRoot -> newStateRoot transition - if err := v.zkVerifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot); err != nil { + // Verify proof: previousStateRoot -> newStateRoot transition with block hash + blockHash := ir.BlockHash + if err 
:= v.zkVerifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot, blockHash); err != nil { return fmt.Errorf("ZK proof verification failed: %w", err) } From c5abe08aff2d25fe4396a860362a9a564fba38e9 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Tue, 13 Jan 2026 16:25:27 +0200 Subject: [PATCH 04/17] deps --- go.mod | 4 +--- go.sum | 2 ++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 56638e3c..b3e4adae 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,6 @@ require ( github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 github.com/tetratelabs/wazero v1.8.1 - github.com/unicitynetwork/bft-go-base v1.0.3-0.20251230081246-e5204716ebf2 go.etcd.io/bbolt v1.4.0 go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 @@ -39,8 +38,6 @@ require ( gopkg.in/yaml.v3 v3.0.1 ) -replace github.com/unicitynetwork/bft-go-base => ../bft-go-base - require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock v1.3.5 // indirect @@ -161,6 +158,7 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.9.0 // indirect + github.com/unicitynetwork/bft-go-base v1.0.3-0.20260113141611-ef8e60451f16 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/go.sum b/go.sum index b6a37583..cd37dbb5 100644 --- a/go.sum +++ b/go.sum @@ -505,6 +505,8 @@ github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZ github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= +github.com/unicitynetwork/bft-go-base 
v1.0.3-0.20260113141611-ef8e60451f16 h1:yixbhxRwxq4s/vMUvzkCnk/mI4+uM8CRzaOlw6/LQZ8= +github.com/unicitynetwork/bft-go-base v1.0.3-0.20260113141611-ef8e60451f16/go.mod h1:hBnOG52VRy/vpgIBUulTgk7PBTwODZ2xkVjCEu5yRcQ= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= From 14aba1b9e3b269ceb16919c7272cf35bc8edf200 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Tue, 13 Jan 2026 17:40:48 +0200 Subject: [PATCH 05/17] gosec warning --- .../zkverifier/light_client_verifier_ffi.go | 2 ++ rootchain/consensus/zkverifier/sp1_verifier.go | 17 ++++++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/rootchain/consensus/zkverifier/light_client_verifier_ffi.go b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go index 763cdf72..4393575e 100644 --- a/rootchain/consensus/zkverifier/light_client_verifier_ffi.go +++ b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go @@ -1,3 +1,5 @@ +//go:build zkverifier_ffi + package zkverifier // #cgo LDFLAGS: -L${SRCDIR}/light-client-verifier-ffi/target/release -llight_client_verifier_ffi -ldl -lm diff --git a/rootchain/consensus/zkverifier/sp1_verifier.go b/rootchain/consensus/zkverifier/sp1_verifier.go index 6a72b40c..80d6808d 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier.go +++ b/rootchain/consensus/zkverifier/sp1_verifier.go @@ -37,15 +37,26 @@ func NewSP1Verifier(vkeyPath string) (*SP1Verifier, error) { } // readFile reads a file and returns its contents +// Sanitizes path to prevent directory traversal attacks (CWE-22) func readFile(path string) ([]byte, error) { - absPath, err := filepath.Abs(path) + // Clean and normalize the path + cleanPath := filepath.Clean(path) + + // Resolve to absolute path + absPath, err := filepath.Abs(cleanPath) if err != nil { return nil, 
fmt.Errorf("failed to resolve path: %w", err) } - data, err := os.ReadFile(absPath) + // Resolve any symlinks to prevent traversal via symlinks + realPath, err := filepath.EvalSymlinks(absPath) + if err != nil { + return nil, fmt.Errorf("failed to resolve symlinks: %w", err) + } + + data, err := os.ReadFile(realPath) if err != nil { - return nil, fmt.Errorf("failed to read file %s: %w", absPath, err) + return nil, fmt.Errorf("failed to read file %s: %w", realPath, err) } return data, nil From 886e67909a6bf732f19653c17850528a49848156 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Tue, 13 Jan 2026 18:19:39 +0200 Subject: [PATCH 06/17] build system with optional ffi components --- .github/workflows/ci.yml | 59 ++++- Makefile | 67 +++++- README.md | 18 +- rootchain/consensus/zkverifier/README.md | 203 ++++++++++++++++++ .../light-client-verifier-ffi/Cargo.toml | 6 +- .../light-client-verifier-ffi/README.md | 16 +- .../light_client_verifier_ffi_stub.go | 33 +++ .../consensus/zkverifier/sp1_verifier_ffi.go | 2 + .../zkverifier/sp1_verifier_ffi_stub.go | 35 +++ 9 files changed, 426 insertions(+), 13 deletions(-) create mode 100644 rootchain/consensus/zkverifier/README.md create mode 100644 rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go create mode 100644 rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7f6fb5fe..94963f4a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,6 +4,7 @@ on: [push] env: GO_VERSION: 1.24 + RUST_VERSION: stable RETENTION-DAYS: 1 jobs: @@ -15,8 +16,29 @@ jobs: uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - - name: build + - name: build (without FFI) run: make build + + build-with-ffi: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Setup Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Rust ${{ env.RUST_VERSION }} + 
uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + - name: Cache Rust dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: | + rootchain/consensus/zkverifier/sp1-verifier-ffi + rootchain/consensus/zkverifier/light-client-verifier-ffi + - name: build (with FFI) + run: make build-with-ffi test: runs-on: ubuntu-latest steps: @@ -25,9 +47,9 @@ jobs: uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - - name: vet + - name: vet (without FFI) run: go vet ./... - - name: test + - name: test (without FFI) run: make test - name: upload test coverage uses: actions/upload-artifact@v4 @@ -36,6 +58,37 @@ jobs: path: test-coverage.out retention-days: ${{ env.RETENTION-DAYS }} + test-with-ffi: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Setup Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Rust ${{ env.RUST_VERSION }} + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + - name: Cache Rust dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: | + rootchain/consensus/zkverifier/sp1-verifier-ffi + rootchain/consensus/zkverifier/light-client-verifier-ffi + - name: Build Rust FFI libraries + run: make build-rust-ffi + - name: vet (with FFI) + run: go vet -tags zkverifier_ffi ./... 
+ - name: test (with FFI) + run: make test ZKVERIFIER_FFI=1 + - name: upload test coverage (with FFI) + uses: actions/upload-artifact@v4 + with: + name: test-coverage-ffi + path: test-coverage.out + retention-days: ${{ env.RETENTION-DAYS }} + analyze: runs-on: ubuntu-latest continue-on-error: true diff --git a/Makefile b/Makefile index 26d8db79..8761750b 100644 --- a/Makefile +++ b/Makefile @@ -5,19 +5,76 @@ ifdef DOCKER_GO_DEPENDENCY DOCKER_ARGUMENTS += --build-context go-dependency=${DOCKER_GO_DEPENDENCY} --build-arg DOCKER_GO_DEPENDENCY=${DOCKER_GO_DEPENDENCY} endif +# ZK Verifier FFI configuration +# Set ZKVERIFIER_FFI=1 to enable Rust FFI components (SP1 and light-client verifiers) +# Default: disabled (builds without Rust dependencies) +ZKVERIFIER_FFI ?= 0 + +# Go build tags based on FFI configuration +ifeq ($(ZKVERIFIER_FFI),1) + GO_BUILD_TAGS = -tags zkverifier_ffi + GO_TEST_TAGS = -tags zkverifier_ffi +else + GO_BUILD_TAGS = + GO_TEST_TAGS = +endif + +# FFI library paths +SP1_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/sp1-verifier-ffi +LIGHT_CLIENT_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/light-client-verifier-ffi + all: clean tools test build gosec clean: rm -rf build/ rm -rf test-nodes/ +clean-ffi: + @if [ -d "$(SP1_VERIFIER_FFI_DIR)" ]; then \ + cd $(SP1_VERIFIER_FFI_DIR) && cargo clean; \ + fi + @if [ -d "$(LIGHT_CLIENT_VERIFIER_FFI_DIR)" ]; then \ + cd $(LIGHT_CLIENT_VERIFIER_FFI_DIR) && cargo clean; \ + fi + test: - go test ./... -coverpkg=./... -count=1 -coverprofile test-coverage.out + go test $(GO_TEST_TAGS) ./... -coverpkg=./... 
-count=1 -coverprofile test-coverage.out build: # cd to directory where main.go exits, hack fix for go bug to embed version control data # https://github.com/golang/go/issues/51279 - cd ./cli/ubft && go build -o ../../build/ubft + cd ./cli/ubft && go build $(GO_BUILD_TAGS) -o ../../build/ubft + +# Build with ZK verifier FFI support (requires Rust toolchain) +build-with-ffi: build-rust-ffi + $(MAKE) build ZKVERIFIER_FFI=1 + +# Build Rust FFI libraries +build-rust-ffi: check-rust build-sp1-ffi build-light-client-ffi + +build-sp1-ffi: + @echo "Building SP1 verifier FFI..." + @if [ -d "$(SP1_VERIFIER_FFI_DIR)" ]; then \ + cd $(SP1_VERIFIER_FFI_DIR) && cargo build --release; \ + else \ + echo "Warning: $(SP1_VERIFIER_FFI_DIR) not found"; \ + fi + +build-light-client-ffi: + @echo "Building Light Client verifier FFI..." + @if [ -d "$(LIGHT_CLIENT_VERIFIER_FFI_DIR)" ]; then \ + cd $(LIGHT_CLIENT_VERIFIER_FFI_DIR) && cargo build --release; \ + else \ + echo "Warning: $(LIGHT_CLIENT_VERIFIER_FFI_DIR) not found"; \ + fi + +# Check if Rust toolchain is available +check-rust: + @command -v cargo >/dev/null 2>&1 || { \ + echo "Error: Rust toolchain not found. Install from https://rustup.rs"; \ + exit 1; \ + } + @echo "Rust toolchain found: $$(rustc --version)" build-docker: docker build ${DOCKER_ARGUMENTS} --file scripts/Dockerfile --tag unicity-bft:local . @@ -31,8 +88,14 @@ tools: .PHONY: \ all \ clean \ + clean-ffi \ tools \ test \ build \ + build-with-ffi \ + build-rust-ffi \ + build-sp1-ffi \ + build-light-client-ffi \ + check-rust \ build-docker \ gosec diff --git a/README.md b/README.md index 8725fb24..0cbe4010 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,24 @@ # Build -Run `make build` to build the application. Executable will be built to `build/ubft`. +Run `make build` to build the application. Executable will be built to `build/ubft`. ### Build dependencies * [`Go`](https://go.dev/doc/install) version 1.24. 
* `C` compiler, recent versions of [GCC](https://gcc.gnu.org/) are recommended. In Debian and Ubuntu repositories, GCC is part of the build-essential package. On macOS, GCC can be installed with [Homebrew](https://formulae.brew.sh/formula/gcc). +### Build targets + +| Target | Description | +|--------|-------------| +| `make build` | Build without FFI (default, no Rust required) | +| `make build-with-ffi` | Build with FFI support | +| `make build-rust-ffi` | Build Rust FFI libraries only | +| `make build-sp1-ffi` | Build SP1 verifier FFI | +| `make build-light-client-ffi` | Build Light Client verifier FFI | +| `make clean-ffi` | Clean Rust build artifacts | +| `make check-rust` | Verify Rust toolchain | + # Money Partition 1. Run script `./setup-nodes.sh -m 3 -t 0` to generate configuration for a root chain and 3 money partition nodes. @@ -15,8 +27,8 @@ Run `make build` to build the application. Executable will be built to `build/ub * Initial bill owner predicate can be specified with flag `-i predicate-in-hex`. 2. Run script `./start.sh -r -p money` to start root chain and 3 money partition nodes 3. Run script `./stop.sh -a` to stop the root chain and partition nodes. - - Alternatively, use `stop.sh` to stop any partition or root and `start.sh` to resume. See command help for more details. + + Alternatively, use `stop.sh` to stop any partition or root and `start.sh` to resume. See command help for more details. # User Token Partition diff --git a/rootchain/consensus/zkverifier/README.md b/rootchain/consensus/zkverifier/README.md new file mode 100644 index 00000000..e84bff03 --- /dev/null +++ b/rootchain/consensus/zkverifier/README.md @@ -0,0 +1,203 @@ +# ZK Verifier Build System + +This directory contains optional Rust FFI components for ZK proof verification. The build system is configurable and supports building with or without these Rust dependencies. 
+ +## Architecture + +The ZK verifier supports multiple proof types through a common interface: + +- **SP1 Verifier**: Verifies SP1 zkVM proofs using Rust FFI (optional) +- **Light Client Verifier**: Executes full witness validation using Rust FFI (optional) +- **No-Op Verifier**: Disabled verification for testing (always available) + +## Build Configurations + +### Default Build (No FFI) + +Build without Rust dependencies (default behavior): + +```bash +make build +# or +go build ./... +``` + +This uses Go build tag stubs that return errors when FFI verifiers are requested. The system will still build and run, but cannot verify ZK proofs. + +### Build with FFI + +Build with Rust FFI support for full ZK verification: + +```bash +make build-with-ffi +``` + +This will: +1. Check for Rust toolchain +2. Build SP1 verifier FFI library +3. Build Light Client verifier FFI library +4. Build Go binary with `-tags zkverifier_ffi` + +**Requirements:** +- Rust toolchain (install from https://rustup.rs) +- C compiler (GCC/Clang) +- Internet connection (to fetch ethrex dependencies from GitHub) + +### Manual FFI Build + +Build individual FFI components: + +```bash +# Build SP1 verifier only +make build-sp1-ffi + +# Build Light Client verifier only +make build-light-client-ffi + +# Build both +make build-rust-ffi +``` + +Then build Go with FFI tags: + +```bash +cd cli/ubft && go build -tags zkverifier_ffi -o ../../build/ubft +``` + +## Testing + +### Test without FFI + +```bash +make test +# or +go test ./... +``` + +### Test with FFI + +```bash +make test ZKVERIFIER_FFI=1 +# or +go test -tags zkverifier_ffi ./... +``` + +## CI/CD + +The CI pipeline (`.github/workflows/ci.yml`) runs both configurations: + +1. **build** job: Builds without FFI (fast, no Rust required) +2. **build-with-ffi** job: Builds with FFI (requires Rust setup) +3. **test** job: Tests without FFI +4. **test-with-ffi** job: Tests with FFI + +This ensures the codebase works in both configurations. 
+ +## How It Works + +### Build Tags + +- **FFI files** (`*_ffi.go`): Tagged with `//go:build zkverifier_ffi` + - Only compiled when `-tags zkverifier_ffi` is used + - Contains cgo directives to link Rust libraries + +- **Stub files** (`*_ffi_stub.go`): Tagged with `//go:build !zkverifier_ffi` + - Compiled by default (without tags) + - Provides stub implementations that return errors + +### FFI Libraries + +Located in: +- `sp1-verifier-ffi/`: SP1 proof verification +- `light-client-verifier-ffi/`: Light client witness validation + +Built as static libraries (`.a` files) and linked via cgo: +```c +#cgo LDFLAGS: -L${SRCDIR}/sp1-verifier-ffi/target/release -lsp1_verifier_ffi -ldl -lm +``` + +**Dependencies:** +- `sp1-verifier-ffi`: Uses sp1-sdk from crates.io +- `light-client-verifier-ffi`: Uses ethrex fork from GitHub (https://github.com/ristik/ethrex branch uni-evm) + - Dependencies are fetched automatically during Rust build + - No local submodules or path dependencies required + +### Configuration + +The verifier factory (`NewVerifier()`) checks if FFI is available at runtime: + +```go +cfg := &zkverifier.Config{ + Enabled: true, + ProofType: zkverifier.ProofTypeSP1, + VerificationKeyPath: "/path/to/vkey", +} +verifier, err := zkverifier.NewVerifier(cfg) +``` + +Without FFI, this returns an error indicating FFI is not available. With FFI, it initializes the Rust verifier. 
+ +## Makefile Targets + +| Target | Description | +|--------|-------------| +| `make build` | Build without FFI (default) | +| `make build-with-ffi` | Build with FFI support | +| `make build-rust-ffi` | Build Rust FFI libraries only | +| `make build-sp1-ffi` | Build SP1 verifier FFI | +| `make build-light-client-ffi` | Build Light Client verifier FFI | +| `make test` | Run tests without FFI | +| `make test ZKVERIFIER_FFI=1` | Run tests with FFI | +| `make clean` | Clean Go build artifacts | +| `make clean-ffi` | Clean Rust build artifacts | +| `make check-rust` | Verify Rust toolchain is available | + +## Environment Variables + +- `ZKVERIFIER_FFI=1`: Enable FFI build (used internally by Makefile) +- `CGO_ENABLED=1`: Required for cgo (usually set by default) + +## Troubleshooting + +### "FFI verifier not available" error + +This means the binary was built without FFI support. Rebuild with: +```bash +make build-with-ffi +``` + +### Rust toolchain not found + +Install Rust: +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +### ethrex dependencies not found + +The Rust FFI uses ethrex dependencies from GitHub (https://github.com/ristik/ethrex branch uni-evm). If you see errors fetching these, ensure you have: +- Internet connectivity +- Git configured with GitHub access + +### Duplicate library warnings + +When building with FFI, you may see: +``` +ld: warning: ignoring duplicate libraries: '-ldl', '-lm' +``` + +This is harmless - both FFI libraries link these system libraries. + +## Production Deployment + +For production deployments that need ZK verification: + +1. Ensure Rust toolchain is available in build environment +2. Use `make build-with-ffi` in CI/CD +3. Distribute the binary with embedded FFI libraries +4. Provide appropriate verification keys at runtime + +For deployments that don't need ZK verification (e.g., testing environments): + +1. Use `make build` (no Rust required) +2. 
Configure verifier with `Enabled: false` or `ProofType: ProofTypeNone` diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml index 8cf3f5ea..61d6b5a8 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml @@ -14,9 +14,9 @@ crate-type = ["cdylib", "staticlib"] rkyv = { version = "0.8.10", features = ["std", "unaligned"] } anyhow = "1.0" -# ethrex dependencies for validation logic (use local paths to uni-evm's ethrex submodule) -ethrex-core = { path = "../../../../../ethrex/crates/common", package = "ethrex-common" } -guest_program = { path = "../../../../../ethrex/crates/l2/prover/src/guest_program" } +# ethrex dependencies for validation logic (use GitHub fork) +ethrex-core = { git = "https://github.com/ristik/ethrex", branch = "uni-evm", package = "ethrex-common" } +guest_program = { git = "https://github.com/ristik/ethrex", branch = "uni-evm" } [profile.release] opt-level = 3 diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md index 61d792c1..9d4cdea4 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md @@ -32,8 +32,20 @@ This library provides a Foreign Function Interface (FFI) for BFT Core (written i ### Prerequisites -- Rust nightly (ethrex uses unstable features) -- Access to uni-evm's ethrex submodule +- Rust stable or nightly toolchain +- Internet connection (to fetch ethrex dependencies from GitHub) +- C compiler (GCC or Clang) + +### Dependencies + +This library depends on ethrex components from the uni-evm fork: +- **Repository**: https://github.com/ristik/ethrex +- **Branch**: `uni-evm` +- **Components used**: + - `ethrex-common` (core types) + - `guest_program` 
(validation logic) + +Dependencies are fetched automatically via Cargo from GitHub - no local submodules required. ### Build Steps diff --git a/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go b/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go new file mode 100644 index 00000000..1d7a0f50 --- /dev/null +++ b/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go @@ -0,0 +1,33 @@ +//go:build !zkverifier_ffi + +package zkverifier + +import "fmt" + +// LightClientVerifierFFI is a stub when FFI is not available +type LightClientVerifierFFI struct{} + +// NewLightClientVerifierFFI returns an error when FFI is not available +func NewLightClientVerifierFFI() (*LightClientVerifierFFI, error) { + return nil, fmt.Errorf("Light Client FFI verifier not available: build with -tags zkverifier_ffi to enable") +} + +// VerifyProof returns an error when FFI is not available +func (v *LightClientVerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + return fmt.Errorf("Light Client FFI verifier not available") +} + +// ProofType returns the proof type +func (v *LightClientVerifierFFI) ProofType() ProofType { + return ProofTypeLightClient +} + +// IsEnabled returns false when FFI is not available +func (v *LightClientVerifierFFI) IsEnabled() bool { + return false +} + +// GetLightClientFFIVersion returns "unavailable" when FFI is not built +func GetLightClientFFIVersion() string { + return "unavailable" +} diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go index 63fc1f96..716d1d67 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go @@ -1,3 +1,5 @@ +//go:build zkverifier_ffi + package zkverifier // #cgo LDFLAGS: -L${SRCDIR}/sp1-verifier-ffi/target/release -lsp1_verifier_ffi -ldl -lm diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go 
b/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go new file mode 100644 index 00000000..7b8d0018 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go @@ -0,0 +1,35 @@ +//go:build !zkverifier_ffi + +package zkverifier + +import "fmt" + +// SP1VerifierFFI is a stub when FFI is not available +type SP1VerifierFFI struct { + vkey []byte +} + +// NewSP1VerifierFFI returns an error when FFI is not available +func NewSP1VerifierFFI(vkeyPath string) (*SP1VerifierFFI, error) { + return nil, fmt.Errorf("SP1 FFI verifier not available: build with -tags zkverifier_ffi to enable") +} + +// VerifyProof returns an error when FFI is not available +func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + return fmt.Errorf("SP1 FFI verifier not available") +} + +// ProofType returns the proof type +func (v *SP1VerifierFFI) ProofType() ProofType { + return ProofTypeSP1 +} + +// IsEnabled returns false when FFI is not available +func (v *SP1VerifierFFI) IsEnabled() bool { + return false +} + +// GetFFIVersion returns "unavailable" when FFI is not built +func GetFFIVersion() string { + return "unavailable" +} From 3c36cbb198c3c7ee9c4d1459a90945bf300c3d1a Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Wed, 14 Jan 2026 22:41:47 +0200 Subject: [PATCH 07/17] fix tests --- .../block_certification_request.go | 40 ++++++ .../storage/testdata/rootchain_v0.db | Bin 524288 -> 1048576 bytes .../light-client-verifier-ffi/Cargo.toml | 2 +- .../zkverifier/sp1-verifier-ffi/Cargo.toml | 2 +- .../consensus/zkverifier/verifier_ffi_test.go | 128 +++++++++++++++++ .../zkverifier/verifier_stub_test.go | 68 +++++++++ .../consensus/zkverifier/verifier_test.go | 134 ------------------ 7 files changed, 238 insertions(+), 136 deletions(-) create mode 100644 rootchain/consensus/zkverifier/verifier_ffi_test.go create mode 100644 rootchain/consensus/zkverifier/verifier_stub_test.go diff --git 
a/network/protocol/certification/block_certification_request.go b/network/protocol/certification/block_certification_request.go index 97d901c4..3218d4e0 100644 --- a/network/protocol/certification/block_certification_request.go +++ b/network/protocol/certification/block_certification_request.go @@ -91,3 +91,43 @@ func (x BlockCertificationRequest) Bytes() ([]byte, error) { x.ZkProof = nil return types.Cbor.Marshal(x) } + +// UnmarshalCBOR provides backward compatibility for old database format (before ZkProof field was added) +func (x *BlockCertificationRequest) UnmarshalCBOR(data []byte) error { + // Try new format first (8 elements with ZkProof) + type newFormat BlockCertificationRequest + var nf newFormat + if err := types.Cbor.Unmarshal(data, &nf); err == nil { + *x = BlockCertificationRequest(nf) + return nil + } + + // Try old format (7 elements without ZkProof) + type oldFormat struct { + _ struct{} `cbor:",toarray"` + PartitionID types.PartitionID `json:"partitionId"` + ShardID types.ShardID `json:"shardId"` + NodeID string `json:"nodeId"` + InputRecord *types.InputRecord `json:"inputRecord"` + BlockSize uint64 `json:"blockSize"` + StateSize uint64 `json:"stateSize"` + Signature hex.Bytes `json:"signature"` + } + var of oldFormat + if err := types.Cbor.Unmarshal(data, &of); err != nil { + return err // Return error from old format attempt + } + + // Convert old format to new format + *x = BlockCertificationRequest{ + PartitionID: of.PartitionID, + ShardID: of.ShardID, + NodeID: of.NodeID, + InputRecord: of.InputRecord, + ZkProof: nil, // Old format didn't have ZkProof + BlockSize: of.BlockSize, + StateSize: of.StateSize, + Signature: of.Signature, + } + return nil +} diff --git a/rootchain/consensus/storage/testdata/rootchain_v0.db b/rootchain/consensus/storage/testdata/rootchain_v0.db index 21f952106b7c82bb34800410e2fe1848baaf6fc4..eebf76f9da02a45044fa320da9a0a4e8e9cee4d7 100644 GIT binary patch delta 2808 
zcmb_eU1(fI6rTIH*=%+;H`)B`rfv47=C4(gYwJ&BZF132gn-1qrCPB|numxbX;GF? z!CXOt=0UgFuqz50M36#g(=O!}Z6$9-sgehg;DatAt&|X|*3^gu&)j?F?j&9G#euzN zzL_)M%sF$;oV9JkwykXZA&a=J%M<>$8+&_D$|yhDKew{d`sypriEP#4z^on*THcJ3 z`!&%VQ&COg>Ic4#twz?)qPh{3bk@B+Flz=S8M{_U0s11ffA9U&&Q|GE!__i@@f@`i z@<`O_x1}@pIB8j%r{~gdYBV4!6{w*YN}Ld9bwN59FX)%GnqqP>8D|RysaYi|W8VN7 zf#0>LH=s6nxDRNGnJ;feJ(i)8$HKNTiP@6tQim?_5n3y% zGI#Y9hpabx3roGS#m=efrY@>r-vG&NVngQvFzh<%>cqzp*GW#&lFD9sJY?1}U1+Y! zUe|ZmV52uM0+LfXndhXZ8Vk#{QjL?mDVMq4Q}Tus>IBDaW2tm&PNrJHYjcu@Qf66K zPnp5)N{8vgyG!BSF}s}1!^*)XJj}2_^NF0dq&x3_`%&K6!8l$jAH`Nq?Qk9KhPR3K zS9H#HF84fFKk&JRvZ`p7iN~+VwLLCfuIbb2$p_D+Q`Pz75(bH`V&LU%Db#+atIx+g z-!Fq?7q(gF^bc#_?B&TgT4jRQ+*ZlO^BARHfMm;GfP8bpuW4r}j~(?$)Q`i-R>`9c*-YQj4^-VBDO$!-EsL{-wtQL+PD#Ww!T=h!2``7Oi1|)C5%U_>f|!@7 z7-BpKX+q2^NdsbDKq%(Q@sVQ_M_)eTY6qUZEuiYPE4jXX`&9oHl-hz0iRdj&L^CG@)RmtX(A^EyxnukLLgk@cTknr7Q{qXf ns~{Otr_{;*{;dqe`Cm=lO+GILnO77x|Lz1^WdBd!L#clNt)#=u delta 7736 zcmds52{@E%-+yMG!Ptf@Wyw~SknLc?knAR-lqA`=L|R1U7)wZLkvvW*OQnRekE9Hu zREQ*w%2J55SzDCvd1jvJc+dA;?|HxLd#~?&*LPpn%>Vw~&wW4legC)Le}>7VF`4ow zrHH}+r-_1}>aY@$2e=5BFEK~KUFSW6dM}o4`%ua#5lW*;{esN4MyBzE4btH)|As7= zYqWWF@Ni@Vy!^B_jcAAlFuXN@Hq=8yA8#UeC;^gKs2hss;t@dr;Dfq2=)+2N|;2$o0iYXmDIc!-N* z02O3l0GXhQ;Fk#2K=5+}!&ida?+6y-LZAo1S_tk&unvN|5Uh{jP7aO%07GQpF*3mx z!EFdONAM#AyCC=hf-MnzAHj44HzSye;JXNBAh-d>(Efl4GQdVAcp&%=f`brTi(oGV z*C5yz!PN*3MQ{~@!x3BoVc0?758S*%!hArzIFh0W-endBJGOJiN$;D%h&WQHP0G_n zqe6mf7a@_`PZlAHzS|-s#&BGOq%4|2<9L#)i^-xvr5Lk+K${n%Wb9&;vTG4qk&(R! 
zsf!W8=6I4Jdaq9@!%ZL^#ixh)84z$dz|LtX#kmyV4@nMQ!o^vTqrf;_kfq7S9M=Lr zO5*t|saQ~u^-%+phDt{ZpwN|M)Ra~R8}5Q+Hd>Pv`x}ZX1-^8|^RuXI)EEVxn}NX z4tj1QO{e2wJM;1Y&(4~No!t>TdsDc3*Fp(|>jx}N8tS?RVmu~fN7#66qzoDXx)6&@ zLr-a?6FFTb)XiXX`)s(+BoT3FC~zQ$z)We8c{z$R1g(U}czFsLW5Q#+T*MjUI#8+$ z_NN0eu{;MN^Rg6gCPtH`mHx*}HkP^sg!F1j=A|#*MA#?0c@rzCxSwz2r8Q(C6C;Nm zfmFg$62oOJ_y0^}hHFQXyu>j6m^&j>-U`N|;BCE(Wxu+}gMb3*WYVu&#` z=sDc}CU%Me5PJHX*a>0;$tChn^tGGwJ^pJHcK8YQ&M$|DUiD*&MnM+&D48 zBC~N*TH3st;lGL#cO-EgfFv9zd{FXdL5vd&g(?(oEE6W4Uj&X5IzP%9M6pRKC>l%^ zF+3XJ(FKp^d0AMLa|{4L-Vgkn=|U7h_~IYwg2t<-;9B+1N&C;_{AY%QpR<%U;(ihg z6BlMcrXv^OXzvV}AcqZviOA*@zD58}?~snz_#kmuM>%9|#^TvXUmlXMZq<)1>o&6^^=CLcUEc4^c1^|CV{$$-!#tk7; z7i1LH4$n|LN-IN7H-Mb(2Xs2PJy+wNHiulPiEmRwu1v${lHn^`5$L=E01YXe_fi_P z5WXT$_%Vp+m!qye-* zxQ}z#zEEFjK>s{6EJh~ts#lJ2Kr&n=L_(g&(mhG|X+<{SpeTG)ESBuW+3aU@(1;Li z@-&A;3=(2hmy|;ao#LVYhng-D|Ti$N>U2@-y7G(Z( zLC#AEU@?voH{eh>&|{RSfSLsv4^g^c&>YE0;B^q?95!{JquON4eGDgzqP4vPFUi6Q<$vK zCMgf!8`)0QJ2%a}UpsE&vTWkMpLM6%hZGg%kQbs))?__ji{ETA7dx5;qPkF;7+p;Qc+wSZ zwi4=aPC}23O_20AXQF50dof=VQPO~}CZ78Q?qWgGdXj=hFJ>&6Qy96*rzD(}+tE>_ z+(B*HB+#uV;3yW=m5`FqB(QO%>UvjaUC7&{QB+#ThELe5K1c1>W!#(lTxmx@5N&Fx%Ta)Olas?_Nk_9d9&Q&{C%BDu)`8-Ov_MK+*7EtI)5#ALbYSF zkf*+9VoV;k4P!G@!irGYq%)XX{Jl&c%}jJ(*`%;xQt+o^ zpgA3~(T>7zUgb{Vhs3qTt(Hv=-It!Eio3^?M)!<P}Nzw}S zLQW7Z4lq~wxb+@o)mw#5ueh8-7Vecozhqlld!NbKf6;eJwV8cD6^=ed&{-NXmKw{( zQ@IP>HVGT1S_xNbS z>nDnDleW>GicmmQ2js-X$Lj!A3VKHBtcKh~h+d<*4+)>PCUO64=<3(Uj?6!_R2Y_+ z^Y66$QquDLx0ELjhodmL(;=d^GfSnMg!q&VxB+E_T1ua%1=p&sKQh-=L>5{we5}2X z)p+i4{GRB%$#DxcJgjy{mbkY0bI zssC#4cq?i*CO{_$F-Lm3TGZE?&W3ckkNpK#9@Z-BEPcD8ekP}LQtGYRJ=Ln#l+a=quMcOl zC-MxReOax4`J+lMH=vp_YgSo+bq$n{s+|KZ=Z19hI^-`=TH@TbD-_GgL$SLTvi3Tz zS-3dtXG40jl+nch^}zD}5UVsM|Fpl?VY_s0Ks`uFD4a^V^k`uFiG5PWo*CsE==G^5 zYvg)Hu56tCa@Y01k^SC2{Z}+^eo2WuwZHLQ!9u>|FVD_>8xMGC1CXx)oPeS&|ME&) z#cuLj_tqGH&jc^~Pl>mp&+WO#ToGR|)p}y5x1vJ1b2-Yut#Zf9T+xe3syWVM7=uRI zvQO_bL#o8+XfQVwr>pl@+_<P&bwT^MlzSPD4%BC0%RKto_=mFcY= 
z*VE6mZt-{E65x8PPrVAlI*RkxB(pu-cRA3X)~Ri1X?(w?M$L)7R_e!Yzl#&|#kVVO z*gOY+>_eSgX@{P_9oUg&`k*9zRf3fXI7or0D?0DPmLCavQO}0dX!p(pQ0)4zGTn|@ z9oUEo?Z~Iynp&PMPWwHyl14^n&y2y{{kM3ehW#MG78l#A1CS3K(JS6X{;-{Jg-7Ek8yy8HjatKl-3q}TwNqD`HZ?e>cDxg`HCR_tRD9#OpBH=i)6BOMIE1O0xZ%(~2V7qCUKf zGSEpqd|0vOCjknN!G$6<1R_A;QO-Ksf4Xo{6{%e%{l2MZnW)~}3JXeohc|U)>C!WT z_=n~Vlf*m?-)W?Zw7BE+oB6Np4;6;u9ZGgZw<~Othz63*tacWq>Nk6*6?)b@+KoGC zyV}f{QoE4yY}|GEaNG{^&=OM_+V}Ti_ZqoXWO=HEPI!%hsD5*|oACGCL9N0QpuyAK$T^%- zV(N@w@2JV-_iiVv7VoFWoCvLZ_`hCW${{_o`i0cAzB%vE`paTjzc?lft z$98myd|=|x!atNRw(G>bpzkuFYE7Azw^TV$ZD{AZZq5b=hvfNygJV#MuC=jD!ds&6 zs3C5=+hcNQBza8r^}+T%lExZCMi(Varmf|U7hHBtc_%Sj`p&nY;(NmJuX|^rMse~( z_xgohHEG-!Qrar<*1+f0OumO;Df!9S*XpYI(H6UAT5M-%%EVoH(QYlVx*?;#JC1d` z7;kgjk?Qr@=X>l5nb`5Tg7ZaEMk!p*OPd$4YcFcCh#mSppe`?lrnadYH7I#x`0@JH zTl52{3e>?1$NQcimXs>42twU0xO$34wf8#ZXdD}J_G$GM)h8fB7ODkgZMS{Za(6Yg ziR^syqk2S~>>}4H-9+qBelBEx+%zE}z*@b}IRLK>2HkjnEKSyMv$6I}te@(O*dF6h zrLUiRI59-qv1Y@n_E5owx6caqVtxtbuh7`?NI=21B7S{++o1tdabkFVU8BpIHpMiV zm!77Va_+1-9q`3v`Sxs&;GB{b$@MdI0tHw3w+Y9#8`U*F vpou5%WCzw|DN)jN$~L_1FZ!xdl(!HS0srz;{NwKfC|5xL{g{<{zSjB=SI|b% diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml index 61d6b5a8..0adb26d5 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [workspace] [lib] -crate-type = ["cdylib", "staticlib"] +crate-type = ["staticlib"] [dependencies] # Core dependencies for witness deserialization and validation diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml index 57c6d667..d1fdae3e 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [workspace] [lib] -crate-type = 
["cdylib", "staticlib"] +crate-type = ["staticlib"] [dependencies] sp1-sdk = "5.0.8" diff --git a/rootchain/consensus/zkverifier/verifier_ffi_test.go b/rootchain/consensus/zkverifier/verifier_ffi_test.go new file mode 100644 index 00000000..02b0a2dc --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier_ffi_test.go @@ -0,0 +1,128 @@ +//go:build zkverifier_ffi + +package zkverifier + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewVerifier_SP1_WithFFI(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + + // Create a fake but valid-sized vkey file + err := os.WriteFile(vkeyPath, make([]byte, 64), 0644) + require.NoError(t, err) + + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: vkeyPath, + } + + verifier, err := NewVerifier(cfg) + require.NoError(t, err) + require.NotNil(t, verifier) + require.True(t, verifier.IsEnabled()) + require.Equal(t, ProofTypeSP1, verifier.ProofType()) +} + +func TestNewVerifier_SP1_MissingVKey_WithFFI(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: "/nonexistent/path/test.vkey", + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "failed to") +} + +func TestSP1Verifier_InvalidInputs_WithFFI(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, make([]byte, 64), 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath) + require.NoError(t, err) + + testCases := []struct { + name string + proof []byte + previousStateRoot []byte + newStateRoot []byte + blockHash []byte + wantErr bool + errContains string + }{ + { + name: "empty proof", + proof: []byte{}, + previousStateRoot: make([]byte, 32), + newStateRoot: 
make([]byte, 32), + blockHash: make([]byte, 32), + wantErr: true, + errContains: "proof is empty", + }, + { + name: "invalid previous state root length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 16), + newStateRoot: make([]byte, 32), + blockHash: make([]byte, 32), + wantErr: true, + errContains: "previousStateRoot must be 32 bytes", + }, + { + name: "invalid new state root length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 16), + blockHash: make([]byte, 32), + wantErr: true, + errContains: "newStateRoot must be 32 bytes", + }, + { + name: "invalid block hash length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 32), + blockHash: make([]byte, 16), + wantErr: true, + errContains: "blockHash must be 32 bytes", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := verifier.VerifyProof(tc.proof, tc.previousStateRoot, tc.newStateRoot, tc.blockHash) + if tc.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errContains) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestSP1Verifier_EmptyVKey_WithFFI(t *testing.T) { + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "empty.vkey") + err := os.WriteFile(vkeyPath, []byte{}, 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath) + require.Error(t, err) + require.Nil(t, verifier) + // FFI will detect empty vkey +} diff --git a/rootchain/consensus/zkverifier/verifier_stub_test.go b/rootchain/consensus/zkverifier/verifier_stub_test.go new file mode 100644 index 00000000..413532b8 --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier_stub_test.go @@ -0,0 +1,68 @@ +//go:build !zkverifier_ffi + +package zkverifier + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewVerifier_SP1_WithoutFFI(t *testing.T) { + // Create temporary verification key 
file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) + require.NoError(t, err) + + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: vkeyPath, + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} + +func TestNewVerifier_SP1_MissingVKey_WithoutFFI(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: "/nonexistent/path/test.vkey", + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} + +func TestSP1Verifier_InvalidInputs_WithoutFFI(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} + +func TestSP1Verifier_EmptyVKey_WithoutFFI(t *testing.T) { + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "empty.vkey") + err := os.WriteFile(vkeyPath, []byte{}, 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} diff --git a/rootchain/consensus/zkverifier/verifier_test.go b/rootchain/consensus/zkverifier/verifier_test.go index cb86dab7..fb9ef9d3 100644 --- a/rootchain/consensus/zkverifier/verifier_test.go +++ b/rootchain/consensus/zkverifier/verifier_test.go @@ -1,8 +1,6 @@ package zkverifier import ( - "os" - "path/filepath" "testing" "github.com/stretchr/testify/require" @@ 
-44,39 +42,6 @@ func TestNewVerifier_NoOpForExec(t *testing.T) { require.NoError(t, err) } -func TestNewVerifier_SP1(t *testing.T) { - // Create temporary verification key file - tmpDir := t.TempDir() - vkeyPath := filepath.Join(tmpDir, "test.vkey") - err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) - require.NoError(t, err) - - cfg := &Config{ - Enabled: true, - ProofType: ProofTypeSP1, - VerificationKeyPath: vkeyPath, - } - - verifier, err := NewVerifier(cfg) - require.NoError(t, err) - require.NotNil(t, verifier) - require.True(t, verifier.IsEnabled()) - require.Equal(t, ProofTypeSP1, verifier.ProofType()) -} - -func TestNewVerifier_SP1_MissingVKey(t *testing.T) { - cfg := &Config{ - Enabled: true, - ProofType: ProofTypeSP1, - VerificationKeyPath: "/nonexistent/path/test.vkey", - } - - verifier, err := NewVerifier(cfg) - require.Error(t, err) - require.Nil(t, verifier) - require.Contains(t, err.Error(), "failed to read verification key") -} - func TestNewVerifier_UnknownProofType(t *testing.T) { cfg := &Config{ Enabled: true, @@ -102,102 +67,3 @@ func TestNoOpVerifier(t *testing.T) { err = v.VerifyProof([]byte("test"), []byte("prev"), []byte("new"), []byte("block")) require.NoError(t, err) } - -func TestSP1Verifier_InvalidInputs(t *testing.T) { - // Create temporary verification key file - tmpDir := t.TempDir() - vkeyPath := filepath.Join(tmpDir, "test.vkey") - err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) - require.NoError(t, err) - - verifier, err := NewSP1Verifier(vkeyPath) - require.NoError(t, err) - - testCases := []struct { - name string - proof []byte - previousStateRoot []byte - newStateRoot []byte - blockHash []byte - wantErr bool - errContains string - }{ - { - name: "empty proof", - proof: []byte{}, - previousStateRoot: make([]byte, 32), - newStateRoot: make([]byte, 32), - blockHash: make([]byte, 32), - wantErr: true, - errContains: "proof is empty", - }, - { - name: "invalid previous state root 
length", - proof: make([]byte, 100), - previousStateRoot: make([]byte, 16), - newStateRoot: make([]byte, 32), - blockHash: make([]byte, 32), - wantErr: true, - errContains: "previousStateRoot must be 32 bytes", - }, - { - name: "invalid new state root length", - proof: make([]byte, 100), - previousStateRoot: make([]byte, 32), - newStateRoot: make([]byte, 16), - blockHash: make([]byte, 32), - wantErr: true, - errContains: "newStateRoot must be 32 bytes", - }, - { - name: "invalid block hash length", - proof: make([]byte, 100), - previousStateRoot: make([]byte, 32), - newStateRoot: make([]byte, 32), - blockHash: make([]byte, 16), - wantErr: true, - errContains: "blockHash must be 32 bytes", - }, - { - name: "proof too small", - proof: make([]byte, 32), // Less than 64 bytes - previousStateRoot: make([]byte, 32), - newStateRoot: make([]byte, 32), - blockHash: make([]byte, 32), - wantErr: true, - errContains: "SP1 proof too small", - }, - { - name: "valid format (placeholder accepts)", - proof: make([]byte, 128), - previousStateRoot: make([]byte, 32), - newStateRoot: make([]byte, 32), - blockHash: make([]byte, 32), - wantErr: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - err := verifier.VerifyProof(tc.proof, tc.previousStateRoot, tc.newStateRoot, tc.blockHash) - if tc.wantErr { - require.Error(t, err) - require.Contains(t, err.Error(), tc.errContains) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestSP1Verifier_EmptyVKey(t *testing.T) { - tmpDir := t.TempDir() - vkeyPath := filepath.Join(tmpDir, "empty.vkey") - err := os.WriteFile(vkeyPath, []byte{}, 0644) - require.NoError(t, err) - - verifier, err := NewSP1Verifier(vkeyPath) - require.Error(t, err) - require.Nil(t, verifier) - require.Contains(t, err.Error(), "verification key is empty") -} From 2daed08a56530b88896eff26e33efaee7e8f90ba Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Wed, 14 Jan 2026 23:32:14 +0200 Subject: [PATCH 08/17] no FFI CI for now 
--- .github/workflows/ci.yml | 2 ++ .../consensus/zkverifier/light-client-verifier-ffi/Cargo.toml | 2 +- rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 94963f4a..048ca3af 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,6 +20,7 @@ jobs: run: make build build-with-ffi: + if: false runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -59,6 +60,7 @@ jobs: retention-days: ${{ env.RETENTION-DAYS }} test-with-ffi: + if: false runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml index 0adb26d5..61d6b5a8 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [workspace] [lib] -crate-type = ["staticlib"] +crate-type = ["cdylib", "staticlib"] [dependencies] # Core dependencies for witness deserialization and validation diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml index d1fdae3e..57c6d667 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [workspace] [lib] -crate-type = ["staticlib"] +crate-type = ["cdylib", "staticlib"] [dependencies] sp1-sdk = "5.0.8" From ad82292289e6ba368c39fd8c2870c4d00cd18735 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Fri, 16 Jan 2026 18:01:02 +0200 Subject: [PATCH 09/17] per-partition validation config --- CLAUDE.md | 296 ++++++++++++++++++ cli/ubft/cmd/partition_params.go | 61 ++++ cli/ubft/cmd/root_node.go | 26 -- internal/testutils/partition/network.go | 2 +- 
rootchain/consensus/storage/sharding.go | 142 ++++++++- .../consensus/zkverifier/capabilities_ffi.go | 25 ++ .../consensus/zkverifier/capabilities_stub.go | 25 ++ .../consensus/zkverifier/capabilities_test.go | 117 +++++++ .../consensus/zkverifier/config.example.yaml | 188 ++++++----- .../consensus/zkverifier/partition_config.go | 36 +++ rootchain/consensus/zkverifier/registry.go | 119 +++++++ .../consensus/zkverifier/registry_test.go | 132 ++++++++ rootchain/node.go | 37 +-- rootchain/node_test.go | 58 ++-- rootchain/partitions/orchestration.go | 36 ++- 15 files changed, 1125 insertions(+), 175 deletions(-) create mode 100644 CLAUDE.md create mode 100644 rootchain/consensus/zkverifier/capabilities_ffi.go create mode 100644 rootchain/consensus/zkverifier/capabilities_stub.go create mode 100644 rootchain/consensus/zkverifier/capabilities_test.go create mode 100644 rootchain/consensus/zkverifier/partition_config.go create mode 100644 rootchain/consensus/zkverifier/registry.go create mode 100644 rootchain/consensus/zkverifier/registry_test.go diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..2b4b70b4 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,296 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Overview + +Unicity BFT Core is a Byzantine Fault Tolerant (BFT) consensus system implementing a two-layer architecture: a root chain for consensus coordination and partitions for transaction processing. This is the reference implementation in Go. 
+ +## Build and Development + +### Prerequisites + +- Go 1.24 or higher +- C compiler (GCC recommended - part of build-essential on Debian/Ubuntu, available via Homebrew on macOS) +- For ZK proof verification: SP1 verifier dependencies + +### Essential Commands + +```bash +# Build the ubft binary +make build # Outputs to build/ubft + +# Run tests with coverage +make test # Uses -count=1 to disable caching + +# Run single test +go test ./path/to/package -run TestName + +# Run security analysis +make gosec + +# Clean build artifacts and test nodes +make clean + +# Full build pipeline +make all # clean + tools + test + build + gosec +``` + +### Running Nodes + +The CLI binary `ubft` provides commands for different node types: + +```bash +# Root node (consensus coordinator) +./build/ubft root-node run --home path/to/node-dir + +# Shard/partition node (transaction processor) +./build/ubft shard-node run --home path/to/node-dir + +# View available commands +./build/ubft -h +``` + +### Test Environment Setup + +```bash +# Set up root chain + 3 money partition nodes +./setup-nodes.sh -m 3 -t 0 + +# Set up root + money + token partitions +./setup-nodes.sh -m 3 -t 3 + +# Start nodes +./start.sh -r -p money # root + money partitions +./start.sh -r -p money -p tokens # root + money + tokens + +# Stop all +./stop.sh -a +``` + +Generated node configurations are in `test-nodes/` directory. 
+ +## Architecture + +### Two-Layer BFT System + +**Root Chain** (`rootchain/`): +- Coordinates consensus across all partitions +- Maintains trust bases and validator sets +- Processes block certification requests from partitions +- Returns Unicity Certificates (UCs) to certify partition state +- Single root chain can coordinate multiple partitions + +**Partitions/Shards** (`partition/`): +- Process transactions independently +- Submit block certification requests to root chain +- Receive UCs to finalize blocks +- Types: Money partition, Token partition, Orchestration partition, Custom partitions + +### Key Components + +**Consensus** (`rootchain/consensus/`): +- Byzantine consensus algorithm for root chain +- Processes proposals, votes, quorum certificates (QCs) +- Leader election and rotation +- State machine: new round → propose → vote → commit + +**Networking** (`network/`): +- libp2p-based P2P networking +- Protocol definitions in `network/protocol/`: + - `certification/`: Block certification request/response + - `handshake/`: UC feed subscription + - `abdrc/`: Consensus messages (proposals, votes, recovery) + - `blockproposal/`: Block proposals + - `replication/`: Ledger replication + +**State Management** (`state/`): +- Partition state trees +- Merkle tree implementations for state commitments +- State replication and recovery + +**Transaction Systems** (`txsystem/`): +- Pluggable transaction processing +- Money partition: transfers, splits, swaps, fee credits +- Token partition: NFTs, fungible tokens +- Predicates: WASM-based smart contract execution + +**Storage** (`keyvaluedb/`): +- Abstraction over storage backends +- BoltDB implementation for production +- MemoryDB for testing +- Transaction-based read/write operations + +### Critical Data Structures + +**Unicity Certificate (UC)**: Proof that root chain reached consensus on a partition's state +- Contains `InputRecord` (partition's proposed state) +- Contains `UnicitySeal` (root chain's certification 
with signatures)
+- Contains `TechnicalRecord` (synchronization data: next round, epoch, leader)
+
+**InputRecord (IR)**: Partition's state transition proposal
+- Round number, epoch, timestamp
+- Previous state hash, new state hash
+- Block hash
+- Validation rules (bft-go-base/types/input_record.go:75):
+  - If state hash unchanged: block hash must be nil (no transactions)
+  - If state hash changed: block hash must be non-nil (has transactions)
+
+**BlockCertificationRequest**: Partition sends to root chain
+- InputRecord with proposed state
+- ZK proof (optional, separate from IR)
+- Signature from partition validator
+- Uses CBOR serialization with tuple/array format
+
+**TechnicalRecord**: Root chain provides to partition for synchronization
+- Next round number (partition must use this for next request)
+- Current epoch
+- Current leader
+- Ensures partition stays synchronized with root chain rounds
+
+### CBOR Serialization
+
+All network messages use CBOR (Concise Binary Object Representation) with `toarray` format (array/tuple serialization, not maps).
+
+**Important**: Go structs use `cbor:",toarray"` tags. When implementing clients in other languages:
+- Use array serialization (not map/object)
+- Nil values serialize as CBOR null (0xf6), not empty byte strings (0x40)
+- Byte slices use CBOR byte string type (major type 2)
+
+Example from certification protocol:
+```
+[partition_id, shard_id, node_id, input_record, zk_proof, block_size, state_size, signature]
+```
+
+### Partition Integration Pattern
+
+When building a new partition/blockchain that integrates with BFT Core:
+
+1. **Initialization**:
+   - Subscribe to UC feed via Handshake message
+   - Receive initial sync UC (may have null hashes for pre-state)
+   - Store sync UC for timestamp/epoch but don't finalize blocks
+
+2. 
**Block Production**: + - Use `next_round` from last UC's TechnicalRecord + - Use `timestamp` from last UC's UnicitySeal + - Use `epoch` from last UC's InputRecord + - Previous hash = last certified state hash (from UC.InputRecord.Hash) + - For first block: previous_hash = None (let BFT Core use genesis) + +3. **Certification**: + - Build InputRecord with round from TechnicalRecord + - Set block_hash = actual block header hash (not state root!) + - Set hash = new state root + - Sign entire BlockCertificationRequest (with signature set to nil) + - Send via `/ab/block-certification/0.0.1` protocol + +4. **UC Validation**: + - Check UC.InputRecord.Hash matches proposed state + - Sync UCs (both hashes null): update round state, don't finalize + - Repeat UCs (same IR, higher root round): timeout, resync + - Valid UCs: finalize block, store as last UC + +5. **State Continuity**: + - Each block's previous_hash must equal last certified UC's hash + - Maintains chain of certified states + - Root chain validates this continuity + +## Configuration + +Configuration sources (in precedence order): +1. Command line flags: `--flag=value` +2. Environment variables: `UBFT_FLAG=value` +3. Config file: `$UBFT_HOME/config.props` +4. Default values + +Default `$UBFT_HOME` is `$HOME/.ubft` + +### Logging + +Logger config file: `$UBFT_HOME/logger-config.yaml` (see `cli/ubft/config/logger-config.yaml` for example) + +Log format options: text, json, console, ecs +Log level options: DEBUG, INFO, WARN, ERROR + +### Tracing + +Enable distributed tracing: +```bash +UBFT_TRACING=otlptracehttp ./build/ubft root-node run ... +``` + +Exporter options: stdout, otlptracehttp, zipkin + +For tests: +```bash +UBFT_TEST_TRACER=otlptracehttp go test ./... 
+``` + +## Testing + +### Test Structure + +- Unit tests alongside production code (`*_test.go`) +- Test utilities in `internal/testutils/` +- Integration tests use real network components with mock partitions + +### Test Helpers + +- `internal/testutils/eventually.go`: Async condition checking +- `internal/testutils/logger/`: Test logger setup +- `internal/testutils/network/`: Mock network implementations +- `internal/testutils/trustbase/`: Test trust base generation +- `internal/testutils/txsystem/`: Counter-based test transaction system + +### Running Tests + +```bash +# All tests with coverage +make test + +# Specific package +go test ./rootchain/consensus + +# Specific test +go test ./partition -run TestNode_StartAndStop + +# With race detector +go test -race ./... + +# Generate tests for Rust SDK +UBFT_RUST_SDK_ROOT="/path/to/rust-sdk" go test ./... +``` + +## Docker + +```bash +# Build Docker image +make build-docker + +# With local go dependencies +DOCKER_GO_DEPENDENCY=../bft-go-base make build-docker +``` + +## Common Pitfalls + +1. **Round Synchronization**: Partitions must use `TechnicalRecord.Round` for next certification request, not block number or self-incremented counter + +2. **CBOR Serialization**: Use `cbor:",toarray"` for struct tags and ensure nil values serialize as CBOR null (0xf6), not empty byte strings + +3. **InputRecord Validation**: State hash changes require non-nil block hash; unchanged state requires nil block hash + +4. **UC Types**: Distinguish between sync UCs (null hashes), repeat UCs (timeout), and valid UCs (certified blocks) + +5. **Timestamp Source**: Use UnicitySeal.timestamp from last UC, not system time + +6. **Previous Hash**: For certification requests, use UC.InputRecord.Hash (the certified state), not block.parent_state_root + +7. **First Block**: Send previous_hash=nil to let BFT Core use genesis state + +8. 
**Database Cleanup**: When testing, clean both partition AND root chain databases for fresh state + +## Related Repositories + +- `bft-go-base`: Shared types and utilities (InputRecord, UnicityCertificate, validation rules) +- Integration clients should implement CBOR serialization matching Go's `toarray` format diff --git a/cli/ubft/cmd/partition_params.go b/cli/ubft/cmd/partition_params.go index 8a6f028a..5bbad053 100644 --- a/cli/ubft/cmd/partition_params.go +++ b/cli/ubft/cmd/partition_params.go @@ -4,6 +4,7 @@ import ( "fmt" "strconv" + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" "github.com/unicitynetwork/bft-core/txsystem" "github.com/unicitynetwork/bft-go-base/types" "github.com/unicitynetwork/bft-go-base/types/hex" @@ -115,3 +116,63 @@ func parseUint64(key, value string) (uint64, error) { } return ret, nil } + +// ProofPartitionParams holds parsed proof configuration from partition params. +type ProofPartitionParams struct { + // ProofType specifies the proof type for the partition. + // Empty/none means m-of-n signature verification only. + ProofType zkverifier.ProofType + + // VerificationKeyPath is the path to the verification key file. + // Required for SP1 proof type. + VerificationKeyPath string +} + +// ParseProofPartitionParams extracts proof configuration from partition params. +// Returns error if the configuration is invalid. 
+func ParseProofPartitionParams(params map[string]string) (*ProofPartitionParams, error) { + result := &ProofPartitionParams{ + ProofType: zkverifier.ParseProofTypeFromParams(params), + VerificationKeyPath: zkverifier.ParseVKeyPathFromParams(params), + } + + // Validate the configuration + if result.ProofType != zkverifier.ProofTypeNone && result.ProofType != "" { + if !zkverifier.IsProofTypeAvailable(result.ProofType) { + return nil, fmt.Errorf("proof type %q not available (build with -tags zkverifier_ffi to enable)", result.ProofType) + } + + if result.ProofType == zkverifier.ProofTypeSP1 && result.VerificationKeyPath == "" { + return nil, fmt.Errorf("vkey_path required for SP1 proof type") + } + } + + return result, nil +} + +// IsEnabled returns true if ZK proof verification is enabled for this configuration. +func (p *ProofPartitionParams) IsEnabled() bool { + switch p.ProofType { + case zkverifier.ProofTypeNone, zkverifier.ProofTypeExec, "": + return false + default: + return true + } +} + +// ToPartitionParams converts the proof configuration to a partition params map. 
+func (p *ProofPartitionParams) ToPartitionParams() map[string]string { + if p.ProofType == zkverifier.ProofTypeNone || p.ProofType == "" { + return nil + } + + params := map[string]string{ + zkverifier.ParamProofType: string(p.ProofType), + } + + if p.VerificationKeyPath != "" { + params[zkverifier.ParamVerificationKeyPath] = p.VerificationKeyPath + } + + return params +} diff --git a/cli/ubft/cmd/root_node.go b/cli/ubft/cmd/root_node.go index 342aa1af..422f9639 100644 --- a/cli/ubft/cmd/root_node.go +++ b/cli/ubft/cmd/root_node.go @@ -28,7 +28,6 @@ import ( "github.com/unicitynetwork/bft-core/rootchain/consensus" "github.com/unicitynetwork/bft-core/rootchain/consensus/storage" "github.com/unicitynetwork/bft-core/rootchain/consensus/trustbase" - "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" "github.com/unicitynetwork/bft-core/rootchain/partitions" ) @@ -54,11 +53,6 @@ type ( BlockRate uint32 MaxRequests uint // validator partition certification request channel capacity RPCServerAddress string // address on which http server is exposed with metrics endpoint - - // ZK verification configuration - ZKVerificationEnabled bool - ZKProofType string - ZKVerificationKeyPath string } ) @@ -117,14 +111,6 @@ func rootNodeRunCmd(baseFlags *baseFlags) *cobra.Command { cmd.Flags().Uint32Var(&flags.BlockRate, "block-rate", consensus.BlockRate, "block rate (consensus parameter)") - // ZK verification flags - cmd.Flags().BoolVar(&flags.ZKVerificationEnabled, "zk-verification-enabled", false, - "Enable ZK proof verification for L2 state transitions") - cmd.Flags().StringVar(&flags.ZKProofType, "zk-proof-type", "sp1", - "ZK proof type (sp1, risc0, exec, none)") - cmd.Flags().StringVar(&flags.ZKVerificationKeyPath, "zk-vkey-path", "", - "Path to ZK verification key file (.vkey)") - hideFlags(cmd, "block-rate") return cmd } @@ -242,23 +228,11 @@ func rootNodeRun(ctx context.Context, flags *rootNodeRunFlags) error { return err } - // Initialize ZK verifier - 
zkVerifierCfg := &zkverifier.Config{ - Enabled: flags.ZKVerificationEnabled, - ProofType: zkverifier.ProofType(flags.ZKProofType), - VerificationKeyPath: flags.ZKVerificationKeyPath, - } - zkVerifier, err := zkverifier.NewVerifier(zkVerifierCfg) - if err != nil { - return fmt.Errorf("failed to initialize ZK verifier: %w", err) - } - node, err := rootchain.New( host, partitionNet, cm, obs, - zkVerifier, ) if err != nil { return fmt.Errorf("failed initiate root node: %w", err) diff --git a/internal/testutils/partition/network.go b/internal/testutils/partition/network.go index 495ac7cd..f974cad9 100644 --- a/internal/testutils/partition/network.go +++ b/internal/testutils/partition/network.go @@ -356,7 +356,7 @@ func (r *RootChain) start(t *testing.T, ctx context.Context) error { if err != nil { return fmt.Errorf("consensus manager initialization failed, %w", err) } - node, err := rootchain.New(rootPeer, rootNet, cm, obs, nil) + node, err := rootchain.New(rootPeer, rootNet, cm, obs) if err != nil { return fmt.Errorf("failed to create root node, %w", err) } diff --git a/rootchain/consensus/storage/sharding.go b/rootchain/consensus/storage/sharding.go index ce0cd10f..c5b7fc71 100644 --- a/rootchain/consensus/storage/sharding.go +++ b/rootchain/consensus/storage/sharding.go @@ -250,14 +250,15 @@ func NewShardInfo(shardConf *types.PartitionDescriptionRecord, hashAlg crypto.Ha return nil, fmt.Errorf("failed to calculate shard conf hash: %w", err) } si := &ShardInfo{ - PartitionID: shardConf.PartitionID, - ShardID: shardConf.ShardID, - T2Timeout: shardConf.T2Timeout, - ShardConfHash: shardConfHash, - RootHash: nil, - PrevEpochFees: types.RawCBOR{0xA0}, // CBOR map(0) - LastCR: nil, - IR: &types.InputRecord{Version: 1}, + PartitionID: shardConf.PartitionID, + ShardID: shardConf.ShardID, + T2Timeout: shardConf.T2Timeout, + ShardConfHash: shardConfHash, + RootHash: nil, + PrevEpochFees: types.RawCBOR{0xA0}, // CBOR map(0) + LastCR: nil, + IR: &types.InputRecord{Version: 1}, + 
PartitionParams: maps.Clone(shardConf.PartitionParams), } if si.PrevEpochStat, err = types.Cbor.Marshal(si.Stat); err != nil { @@ -334,10 +335,118 @@ type ShardInfo struct { IR *types.InputRecord TR certification.TechnicalRecord + // PartitionParams contains proof configuration from PartitionDescriptionRecord. + // Used for per-partition ZK proof verification settings. + // NOTE: This field MUST remain at the end of the exported fields for backward + // compatibility with CBOR deserialization of older data. + PartitionParams map[string]string + nodeIDs []string // sorted list of partition node IDs trustBase map[string]abcrypto.Verifier } +// shardInfoV1 is used for CBOR serialization/deserialization with toarray format. +// This is the format without PartitionParams (pre-v2 format). +type shardInfoV1 struct { + _ struct{} `cbor:",toarray"` + PartitionID types.PartitionID + ShardID types.ShardID + T2Timeout time.Duration + ShardConfHash []byte + RootHash []byte + PrevEpochStat types.RawCBOR + Stat certification.StatisticalRecord + PrevEpochFees types.RawCBOR + Fees map[string]uint64 + LastCR *certification.CertificationResponse + IR *types.InputRecord + TR certification.TechnicalRecord +} + +// shardInfoV2 is used for CBOR serialization/deserialization with toarray format. +// This is the format with PartitionParams (v2 format). +type shardInfoV2 struct { + _ struct{} `cbor:",toarray"` + PartitionID types.PartitionID + ShardID types.ShardID + T2Timeout time.Duration + ShardConfHash []byte + RootHash []byte + PrevEpochStat types.RawCBOR + Stat certification.StatisticalRecord + PrevEpochFees types.RawCBOR + Fees map[string]uint64 + LastCR *certification.CertificationResponse + IR *types.InputRecord + TR certification.TechnicalRecord + PartitionParams map[string]string +} + +// MarshalCBOR implements cbor.Marshaler for ShardInfo. +// Always uses the v2 format (with PartitionParams). 
+func (si ShardInfo) MarshalCBOR() ([]byte, error) { + v2 := shardInfoV2{ + PartitionID: si.PartitionID, + ShardID: si.ShardID, + T2Timeout: si.T2Timeout, + ShardConfHash: si.ShardConfHash, + RootHash: si.RootHash, + PrevEpochStat: si.PrevEpochStat, + Stat: si.Stat, + PrevEpochFees: si.PrevEpochFees, + Fees: si.Fees, + LastCR: si.LastCR, + IR: si.IR, + TR: si.TR, + PartitionParams: si.PartitionParams, + } + return types.Cbor.Marshal(v2) +} + +// UnmarshalCBOR implements cbor.Unmarshaler for ShardInfo. +// Supports both v1 (without PartitionParams) and v2 (with PartitionParams) formats. +func (si *ShardInfo) UnmarshalCBOR(data []byte) error { + // Try v2 format first (with PartitionParams) + var v2 shardInfoV2 + if err := types.Cbor.Unmarshal(data, &v2); err == nil { + si.PartitionID = v2.PartitionID + si.ShardID = v2.ShardID + si.T2Timeout = v2.T2Timeout + si.ShardConfHash = v2.ShardConfHash + si.RootHash = v2.RootHash + si.PrevEpochStat = v2.PrevEpochStat + si.Stat = v2.Stat + si.PrevEpochFees = v2.PrevEpochFees + si.Fees = v2.Fees + si.LastCR = v2.LastCR + si.IR = v2.IR + si.TR = v2.TR + si.PartitionParams = v2.PartitionParams + return nil + } + + // Fall back to v1 format (without PartitionParams) + var v1 shardInfoV1 + if err := types.Cbor.Unmarshal(data, &v1); err != nil { + return fmt.Errorf("decoding ShardInfo: %w", err) + } + + si.PartitionID = v1.PartitionID + si.ShardID = v1.ShardID + si.T2Timeout = v1.T2Timeout + si.ShardConfHash = v1.ShardConfHash + si.RootHash = v1.RootHash + si.PrevEpochStat = v1.PrevEpochStat + si.Stat = v1.Stat + si.PrevEpochFees = v1.PrevEpochFees + si.Fees = v1.Fees + si.LastCR = v1.LastCR + si.IR = v1.IR + si.TR = v1.TR + si.PartitionParams = nil // v1 format doesn't have this field + return nil +} + func (si *ShardInfo) resetFeeList(shardConf *types.PartitionDescriptionRecord) { fees := make(map[string]uint64) for _, n := range shardConf.Validators { @@ -383,14 +492,15 @@ func (si *ShardInfo) nextEpoch(shardConf 
*types.PartitionDescriptionRecord, hash return nil, fmt.Errorf("failed to calculate shard conf hash: %w", err) } nextSI := &ShardInfo{ - PartitionID: shardConf.PartitionID, - ShardID: shardConf.ShardID, - T2Timeout: shardConf.T2Timeout, - ShardConfHash: shardConfHash, - RootHash: si.RootHash, - LastCR: si.LastCR, - IR: si.IR, - TR: si.TR, + PartitionID: shardConf.PartitionID, + ShardID: shardConf.ShardID, + T2Timeout: shardConf.T2Timeout, + ShardConfHash: shardConfHash, + RootHash: si.RootHash, + LastCR: si.LastCR, + IR: si.IR, + TR: si.TR, + PartitionParams: maps.Clone(shardConf.PartitionParams), } if nextSI.PrevEpochFees, err = types.Cbor.Marshal(si.Fees); err != nil { diff --git a/rootchain/consensus/zkverifier/capabilities_ffi.go b/rootchain/consensus/zkverifier/capabilities_ffi.go new file mode 100644 index 00000000..f2c9db1e --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_ffi.go @@ -0,0 +1,25 @@ +//go:build zkverifier_ffi + +package zkverifier + +// IsProofTypeAvailable returns whether the given proof type is available +// in the current build. With FFI, SP1 and LightClient are available. +func IsProofTypeAvailable(pt ProofType) bool { + switch pt { + case ProofTypeSP1, ProofTypeLightClient, ProofTypeExec, ProofTypeNone, "": + return true + default: + return false + } +} + +// AvailableProofTypes returns the list of proof types available in the current build. +// With FFI, SP1 and LightClient are available (besides m-of-n signature mode). +func AvailableProofTypes() []ProofType { + return []ProofType{ProofTypeSP1, ProofTypeLightClient, ProofTypeExec} +} + +// IsFFIAvailable returns whether FFI support is built in. 
+func IsFFIAvailable() bool { + return true +} diff --git a/rootchain/consensus/zkverifier/capabilities_stub.go b/rootchain/consensus/zkverifier/capabilities_stub.go new file mode 100644 index 00000000..1837b25e --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_stub.go @@ -0,0 +1,25 @@ +//go:build !zkverifier_ffi + +package zkverifier + +// IsProofTypeAvailable returns whether the given proof type is available +// in the current build. Without FFI, only Exec (no-op) is available. +func IsProofTypeAvailable(pt ProofType) bool { + switch pt { + case ProofTypeExec, ProofTypeNone, "": + return true + default: + return false + } +} + +// AvailableProofTypes returns the list of proof types available in the current build. +// Without FFI, only Exec is available (besides m-of-n signature mode). +func AvailableProofTypes() []ProofType { + return []ProofType{ProofTypeExec} +} + +// IsFFIAvailable returns whether FFI support is built in. +func IsFFIAvailable() bool { + return false +} diff --git a/rootchain/consensus/zkverifier/capabilities_test.go b/rootchain/consensus/zkverifier/capabilities_test.go new file mode 100644 index 00000000..025100a4 --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_test.go @@ -0,0 +1,117 @@ +package zkverifier + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsProofTypeAvailable(t *testing.T) { + // These types should always be available + require.True(t, IsProofTypeAvailable(ProofTypeExec)) + require.True(t, IsProofTypeAvailable(ProofTypeNone)) + require.True(t, IsProofTypeAvailable("")) + + // Unknown types should not be available + require.False(t, IsProofTypeAvailable(ProofType("unknown"))) + + // SP1 and LightClient availability depends on build tags + // The stub version returns false for these + if !IsFFIAvailable() { + require.False(t, IsProofTypeAvailable(ProofTypeSP1)) + require.False(t, IsProofTypeAvailable(ProofTypeLightClient)) + } +} + +func TestAvailableProofTypes(t 
*testing.T) { + types := AvailableProofTypes() + require.NotEmpty(t, types) + require.Contains(t, types, ProofTypeExec) +} + +func TestParseProofTypeFromParams(t *testing.T) { + testCases := []struct { + name string + params map[string]string + expected ProofType + }{ + { + name: "nil params", + params: nil, + expected: ProofTypeNone, + }, + { + name: "empty params", + params: map[string]string{}, + expected: ProofTypeNone, + }, + { + name: "empty proof_type", + params: map[string]string{ParamProofType: ""}, + expected: ProofTypeNone, + }, + { + name: "sp1", + params: map[string]string{ParamProofType: "sp1"}, + expected: ProofTypeSP1, + }, + { + name: "light_client", + params: map[string]string{ParamProofType: "light_client"}, + expected: ProofTypeLightClient, + }, + { + name: "exec", + params: map[string]string{ParamProofType: "exec"}, + expected: ProofTypeExec, + }, + { + name: "none", + params: map[string]string{ParamProofType: "none"}, + expected: ProofTypeNone, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := ParseProofTypeFromParams(tc.params) + require.Equal(t, tc.expected, result) + }) + } +} + +func TestParseVKeyPathFromParams(t *testing.T) { + testCases := []struct { + name string + params map[string]string + expected string + }{ + { + name: "nil params", + params: nil, + expected: "", + }, + { + name: "empty params", + params: map[string]string{}, + expected: "", + }, + { + name: "no vkey_path", + params: map[string]string{ParamProofType: "sp1"}, + expected: "", + }, + { + name: "with vkey_path", + params: map[string]string{ParamProofType: "sp1", ParamVerificationKeyPath: "/path/to/vkey"}, + expected: "/path/to/vkey", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := ParseVKeyPathFromParams(tc.params) + require.Equal(t, tc.expected, result) + }) + } +} diff --git a/rootchain/consensus/zkverifier/config.example.yaml b/rootchain/consensus/zkverifier/config.example.yaml 
index 66193fd9..c50f09f5 100644 --- a/rootchain/consensus/zkverifier/config.example.yaml +++ b/rootchain/consensus/zkverifier/config.example.yaml @@ -1,134 +1,162 @@ # BFT Core ZK Verification Configuration Example # -# This file demonstrates how to configure ZK proof verification for L2 state transitions. -# Copy this file and customize it for your deployment. +# ZK proof verification is configured PER-PARTITION via PartitionDescriptionRecord.PartitionParams. +# This allows different partitions to use different proof types or no ZK proofs at all. +# +# There are NO global CLI flags for ZK verification - all configuration is per-partition. # ============================================================================ -# DEVELOPMENT / TESTING CONFIGURATION (ZK verification disabled) +# PER-PARTITION CONFIGURATION # ============================================================================ -# When testing without real ZK proofs: -# ubft root-node run \ -# --zk-verification-enabled=false +# Proof configuration is specified in the PartitionDescriptionRecord.PartitionParams map. +# This is set when creating or updating a partition configuration. 
+# +# Supported partition params: +# proof_type - Type of ZK proof (sp1, light_client, exec, or empty for m-of-n only) +# vkey_path - Path to verification key file (required for SP1) # ============================================================================ -# PRODUCTION CONFIGURATION (SP1 verification enabled) +# PARTITION CONFIGURATION EXAMPLES # ============================================================================ -# Generate verification key from your SP1 prover: -# cd uni-evm -# cargo run --release --bin generate-vkey -- --output sp1.vkey -# cp sp1.vkey /etc/bft-core/ +# Example 1: Partition with m-of-n signature verification only (no ZK proofs) +# PartitionDescriptionRecord: +# partition_id: 1 +# partition_params: {} +# # or omit partition_params entirely -# Start root node with ZK verification: -# ubft root-node run \ -# --zk-verification-enabled=true \ -# --zk-proof-type=sp1 \ -# --zk-vkey-path=/etc/bft-core/sp1.vkey +# Example 2: Partition with SP1 ZK proof verification +# PartitionDescriptionRecord: +# partition_id: 2 +# partition_params: +# proof_type: "sp1" +# vkey_path: "/etc/bft-core/partition-2.vkey" + +# Example 3: Partition with Light Client proof verification +# PartitionDescriptionRecord: +# partition_id: 3 +# partition_params: +# proof_type: "light_client" + +# Example 4: Partition with exec (development/testing - accepts all proofs) +# PartitionDescriptionRecord: +# partition_id: 4 +# partition_params: +# proof_type: "exec" # ============================================================================ -# CONFIGURATION OPTIONS +# PROOF TYPE OPTIONS # ============================================================================ -# --zk-verification-enabled (boolean) -# Enable/disable ZK proof verification -# Default: false -# Production: true +# proof_type values: +# +# (empty) - M-of-n validator signature verification only (default) +# No ZK proof required in BlockCertificationRequest +# +# "sp1" - SP1 ZK proof verification +# 
Requires: vkey_path pointing to SP1 verification key +# Requires: Build with -tags zkverifier_ffi # -# --zk-proof-type (string) -# Type of ZK proof system -# Options: "sp1", "risc0", "exec", "none" -# Default: "sp1" -# Production: "sp1" +# "light_client" - Light client proof verification +# Requires: Build with -tags zkverifier_ffi # -# --zk-vkey-path (string) -# Path to verification key file -# Required when: zk-verification-enabled=true AND zk-proof-type=sp1 -# Example: /etc/bft-core/sp1.vkey +# "exec" - Development/testing mode +# Accepts all well-formed proofs without verification # ============================================================================ -# ENVIRONMENT VARIABLES +# BUILD TAGS # ============================================================================ -# export UBFT_ZK_VERIFICATION_ENABLED=true -# export UBFT_ZK_PROOF_TYPE=sp1 -# export UBFT_ZK_VKEY_PATH=/etc/bft-core/sp1.vkey +# SP1 and Light Client verification require FFI support: +# +# make build-ffi # Build with FFI support +# # or +# go build -tags zkverifier_ffi ./... +# +# Without FFI support, only "exec" proof type is available. +# Attempting to configure a partition with "sp1" or "light_client" will fail +# at startup if FFI is not built in. # ============================================================================ # VERIFICATION KEY MANAGEMENT # ============================================================================ -# The verification key must match the prover program ID. +# For SP1 proof type, each partition needs its own verification key. # -# 1. Generate vkey from Uni-EVM prover: -# cd uni-evm -# cargo run --release --bin generate-vkey -- --output sp1.vkey +# 1. Generate vkey from the partition's prover: +# cd partition-prover +# cargo run --release --bin generate-vkey -- --output partition.vkey # # 2. 
Deploy vkey to all root chain nodes: -# scp sp1.vkey root-node-1:/etc/bft-core/ -# scp sp1.vkey root-node-2:/etc/bft-core/ -# scp sp1.vkey root-node-3:/etc/bft-core/ +# scp partition.vkey root-node-1:/etc/bft-core/partition-X.vkey +# scp partition.vkey root-node-2:/etc/bft-core/partition-X.vkey +# scp partition.vkey root-node-3:/etc/bft-core/partition-X.vkey # # 3. Verify file permissions: -# chmod 644 /etc/bft-core/sp1.vkey +# chmod 644 /etc/bft-core/partition-X.vkey # # 4. Verify checksum across all nodes: -# sha256sum /etc/bft-core/sp1.vkey +# sha256sum /etc/bft-core/partition-X.vkey +# +# 5. Configure partition with vkey_path in PartitionParams + +# ============================================================================ +# FAIL-HARD BEHAVIOR +# ============================================================================ + +# If a partition is configured with a proof type that's unavailable: +# +# - At config load time: Partition configuration is REJECTED +# - At runtime: This cannot happen (caught at config load) +# +# There is NO fallback to m-of-n if proof verification is configured. +# If you want m-of-n only, don't set proof_type or set it to empty string. 
# ============================================================================ # MONITORING # ============================================================================ # Metrics to monitor: -# - bft_zk_verification_total{result="success"} -# - bft_zk_verification_total{result="failure"} -# - bft_zk_verification_duration_seconds +# - bft_zk_verification_total{partition="X", result="success"} +# - bft_zk_verification_total{partition="X", result="failure"} +# - bft_zk_verification_duration_seconds{partition="X"} # # Log messages: -# - INFO: "ZK proof verification enabled (proof type: sp1)" -# - WARN: "ZK proof verification disabled - accepting all proofs" -# - WARN: "ZK proof verification failed" -# - DEBUG: "Verifying ZK proof" +# - INFO: "Root node initialized with per-partition ZK proof verification" +# - INFO: "ZK proof verified successfully" (partition=X, round=Y) +# - WARN: "ZK proof verification failed" (partition=X) +# - DEBUG: "Verifying ZK proof" (partition=X, proof_size=N) # ============================================================================ # TROUBLESHOOTING # ============================================================================ -# Error: "failed to read verification key" -# Solution: Verify --zk-vkey-path points to valid file with correct permissions +# Error: "proof type \"sp1\" not available (build with -tags zkverifier_ffi to enable)" +# Solution: Rebuild with FFI support: make build-ffi # -# Error: "verification key is empty" -# Solution: Regenerate verification key from prover +# Error: "vkey_path required for SP1 proof type" +# Solution: Add vkey_path to partition_params in PartitionDescriptionRecord # -# Error: "proof verification failed" -# Solution: Ensure verification key matches prover program ID -# Check SP1 library version compatibility -# Verify proof format is SP1 compressed - -# ============================================================================ -# SECURITY WARNINGS -# 
============================================================================ - -# ⚠️ CRITICAL: The current SP1 verifier is a PLACEHOLDER implementation -# It accepts all well-formed proofs without cryptographic verification +# Error: "failed to read verification key" +# Solution: Verify vkey_path points to valid file with correct permissions # -# Before production deployment: -# 1. Integrate actual SP1 verification library (see INTEGRATION_GUIDE.md) -# 2. Test with real SP1 proofs from Uni-EVM -# 3. Test rejection of invalid/malicious proofs -# 4. Security audit of verification implementation -# 5. Monitor verification latency and failure rates +# Error: "getting verifier for partition X: ..." +# Solution: Check partition configuration in orchestration database # ============================================================================ -# MIGRATION PLAN +# MIGRATION FROM GLOBAL CONFIG # ============================================================================ -# Phase 1: Deploy with verification disabled -# --zk-verification-enabled=false -# -# Phase 2: Deploy with verification enabled but non-blocking (future feature) -# Log failures but don't reject requests -# -# Phase 3: Deploy with verification enabled and blocking +# If you were using the old global CLI flags: # --zk-verification-enabled=true -# Reject requests with invalid proofs +# --zk-proof-type=sp1 +# --zk-vkey-path=/etc/bft-core/sp1.vkey +# +# Migration steps: +# 1. Remove the CLI flags (they no longer exist) +# 2. For each partition that needs ZK verification: +# - Update PartitionDescriptionRecord.PartitionParams: +# proof_type: "sp1" +# vkey_path: "/etc/bft-core/partition-X.vkey" +# 3. 
Partitions without ZK verification need no changes (m-of-n by default) diff --git a/rootchain/consensus/zkverifier/partition_config.go b/rootchain/consensus/zkverifier/partition_config.go new file mode 100644 index 00000000..5f8213d2 --- /dev/null +++ b/rootchain/consensus/zkverifier/partition_config.go @@ -0,0 +1,36 @@ +package zkverifier + +// Partition params keys for proof verification configuration. +// These are stored in PartitionDescriptionRecord.PartitionParams. +const ( + // ParamProofType specifies the proof type for the partition. + // Valid values: "sp1", "light_client", "exec" + // If empty or not set, m-of-n signature verification only (no ZK proof required). + ParamProofType = "proof_type" + + // ParamVerificationKeyPath specifies the path to the verification key file. + // Required for SP1 proof type. + ParamVerificationKeyPath = "vkey_path" +) + +// ParseProofTypeFromParams extracts the ProofType from partition params. +// Returns ProofTypeNone if proof_type is not set or empty. +func ParseProofTypeFromParams(params map[string]string) ProofType { + if params == nil { + return ProofTypeNone + } + pt, ok := params[ParamProofType] + if !ok || pt == "" { + return ProofTypeNone + } + return ProofType(pt) +} + +// ParseVKeyPathFromParams extracts the verification key path from partition params. +// Returns empty string if not set. +func ParseVKeyPathFromParams(params map[string]string) string { + if params == nil { + return "" + } + return params[ParamVerificationKeyPath] +} diff --git a/rootchain/consensus/zkverifier/registry.go b/rootchain/consensus/zkverifier/registry.go new file mode 100644 index 00000000..e8d92e05 --- /dev/null +++ b/rootchain/consensus/zkverifier/registry.go @@ -0,0 +1,119 @@ +package zkverifier + +import ( + "fmt" + "sync" + + "github.com/unicitynetwork/bft-go-base/types" +) + +// registryCacheKey uniquely identifies a verifier configuration for a partition/shard/epoch. 
+type registryCacheKey struct { + PartitionID types.PartitionID + ShardID string // ShardID.Key() + Epoch uint64 +} + +// Registry manages ZK verifiers for partitions, caching them by partition+shard+epoch. +type Registry struct { + cache map[registryCacheKey]ZKVerifier + mu sync.RWMutex +} + +// NewRegistry creates a new ZK verifier registry. +func NewRegistry() *Registry { + return &Registry{ + cache: make(map[registryCacheKey]ZKVerifier), + } +} + +// GetVerifier returns a ZK verifier for the given partition configuration. +// It caches verifiers by partition+shard+epoch to avoid recreating them. +// +// Returns: +// - NoOpVerifier when proof_type is empty, "none", or "exec" (m-of-n mode) +// - The appropriate verifier for sp1/light_client +// - Error if the requested proof type is unavailable (FFI not built) or misconfigured +func (r *Registry) GetVerifier(partitionID types.PartitionID, shardID types.ShardID, epoch uint64, params map[string]string) (ZKVerifier, error) { + key := registryCacheKey{ + PartitionID: partitionID, + ShardID: shardID.Key(), + Epoch: epoch, + } + + // Check cache first + r.mu.RLock() + if v, ok := r.cache[key]; ok { + r.mu.RUnlock() + return v, nil + } + r.mu.RUnlock() + + // Create new verifier + verifier, err := r.createVerifier(params) + if err != nil { + return nil, err + } + + // Cache the verifier + r.mu.Lock() + // Double-check in case another goroutine created it + if v, ok := r.cache[key]; ok { + r.mu.Unlock() + return v, nil + } + r.cache[key] = verifier + r.mu.Unlock() + + return verifier, nil +} + +// createVerifier creates a new verifier based on partition params. 
+func (r *Registry) createVerifier(params map[string]string) (ZKVerifier, error) { + proofType := ParseProofTypeFromParams(params) + + // Check availability before attempting to create + if !IsProofTypeAvailable(proofType) { + return nil, fmt.Errorf("proof type %q not available (build with -tags zkverifier_ffi to enable)", proofType) + } + + switch proofType { + case ProofTypeNone, ProofTypeExec, "": + // m-of-n mode - no ZK proof verification + return &NoOpVerifier{}, nil + + case ProofTypeSP1: + vkeyPath := ParseVKeyPathFromParams(params) + if vkeyPath == "" { + return nil, fmt.Errorf("vkey_path required for SP1 proof type") + } + return NewSP1Verifier(vkeyPath) + + case ProofTypeLightClient: + return NewLightClientVerifier() + + default: + return nil, fmt.Errorf("unknown proof type: %s", proofType) + } +} + +// InvalidateCache removes the cached verifier for the given partition+shard+epoch. +// Call this when partition configuration changes. +func (r *Registry) InvalidateCache(partitionID types.PartitionID, shardID types.ShardID, epoch uint64) { + key := registryCacheKey{ + PartitionID: partitionID, + ShardID: shardID.Key(), + Epoch: epoch, + } + + r.mu.Lock() + delete(r.cache, key) + r.mu.Unlock() +} + +// ClearCache removes all cached verifiers. 
+func (r *Registry) ClearCache() { + r.mu.Lock() + r.cache = make(map[registryCacheKey]ZKVerifier) + r.mu.Unlock() +} diff --git a/rootchain/consensus/zkverifier/registry_test.go b/rootchain/consensus/zkverifier/registry_test.go new file mode 100644 index 00000000..798b840a --- /dev/null +++ b/rootchain/consensus/zkverifier/registry_test.go @@ -0,0 +1,132 @@ +package zkverifier + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/unicitynetwork/bft-go-base/types" +) + +func TestNewRegistry(t *testing.T) { + r := NewRegistry() + require.NotNil(t, r) + require.NotNil(t, r.cache) + require.Empty(t, r.cache) +} + +func TestRegistry_GetVerifier_NoProofType(t *testing.T) { + r := NewRegistry() + + // Empty params should return NoOpVerifier + v, err := r.GetVerifier(1, types.ShardID{}, 0, nil) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) + require.False(t, v.IsEnabled()) + + // Empty proof_type should return NoOpVerifier + v, err = r.GetVerifier(1, types.ShardID{}, 0, map[string]string{}) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) + + // proof_type = "none" should return NoOpVerifier + v, err = r.GetVerifier(1, types.ShardID{}, 0, map[string]string{ParamProofType: string(ProofTypeNone)}) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) + + // proof_type = "exec" should return NoOpVerifier + v, err = r.GetVerifier(1, types.ShardID{}, 0, map[string]string{ParamProofType: string(ProofTypeExec)}) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) +} + +func TestRegistry_GetVerifier_Caching(t *testing.T) { + r := NewRegistry() + + // Get verifier twice - should be cached + params := map[string]string{ParamProofType: string(ProofTypeExec)} + v1, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + + v2, err := r.GetVerifier(1, types.ShardID{}, 0, params) + 
require.NoError(t, err) + + // Same verifier should be returned from cache (type check is sufficient for NoOpVerifier) + require.IsType(t, v1, v2) + + // Check that cache has entry + require.Len(t, r.cache, 1) + + // Different epoch should create new cache entry + _, err = r.GetVerifier(1, types.ShardID{}, 1, params) + require.NoError(t, err) + require.Len(t, r.cache, 2) + + // Different partition should create new cache entry + _, err = r.GetVerifier(2, types.ShardID{}, 0, params) + require.NoError(t, err) + require.Len(t, r.cache, 3) +} + +func TestRegistry_InvalidateCache(t *testing.T) { + r := NewRegistry() + + params := map[string]string{ParamProofType: string(ProofTypeExec)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + require.Len(t, r.cache, 1) + + // Invalidate cache + r.InvalidateCache(1, types.ShardID{}, 0) + require.Len(t, r.cache, 0) + + // Getting verifier again should recreate cache entry + _, err = r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + require.Len(t, r.cache, 1) +} + +func TestRegistry_ClearCache(t *testing.T) { + r := NewRegistry() + + params := map[string]string{ParamProofType: string(ProofTypeExec)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + _, err = r.GetVerifier(2, types.ShardID{}, 0, params) + require.NoError(t, err) + + require.Len(t, r.cache, 2) + + r.ClearCache() + require.Empty(t, r.cache) +} + +func TestRegistry_GetVerifier_SP1MissingVKey(t *testing.T) { + r := NewRegistry() + + // SP1 without vkey_path should fail + params := map[string]string{ParamProofType: string(ProofTypeSP1)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + + // In stub mode (without FFI), error is "not available" + // In FFI mode, error is "vkey_path required" + if IsFFIAvailable() { + require.Contains(t, err.Error(), "vkey_path required") + } else { + require.Contains(t, err.Error(), "not available") + } +} + +func 
TestRegistry_GetVerifier_UnavailableProofType(t *testing.T) { + r := NewRegistry() + + // Test with an unknown proof type + params := map[string]string{ParamProofType: "unknown_type"} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + require.Contains(t, err.Error(), "not available") +} diff --git a/rootchain/node.go b/rootchain/node.go index 1d9de187..d07fb890 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -54,7 +54,7 @@ type ( subscription *Subscriptions net PartitionNet consensusManager ConsensusManager - zkVerifier zkverifier.ZKVerifier + zkRegistry *zkverifier.Registry log *slog.Logger tracer trace.Tracer @@ -69,7 +69,6 @@ func New( pNet PartitionNet, cm ConsensusManager, observe Observability, - zkVerifier zkverifier.ZKVerifier, ) (*Node, error) { if peer == nil { return nil, fmt.Errorf("partition listener is nil") @@ -77,11 +76,6 @@ func New( if pNet == nil { return nil, fmt.Errorf("network is nil") } - if zkVerifier == nil { - // Default to NoOp verifier if none provided - zkVerifier = &zkverifier.NoOpVerifier{} - observe.Logger().Warn("No ZK verifier provided, using NoOp verifier (accepts all proofs)") - } meter := observe.Meter("rootchain.node") reqBuf, err := NewCertificationRequestBuffer(meter) @@ -98,7 +92,7 @@ func New( subscription: subs, net: pNet, consensusManager: cm, - zkVerifier: zkVerifier, + zkRegistry: zkverifier.NewRegistry(), log: observe.Logger(), tracer: observe.Tracer("rootchain.node"), } @@ -106,12 +100,7 @@ func New( return nil, fmt.Errorf("initializing metrics: %w", err) } - // Log verifier configuration - if zkVerifier.IsEnabled() { - observe.Logger().Info(fmt.Sprintf("ZK proof verification enabled (proof type: %s)", zkVerifier.ProofType())) - } else { - observe.Logger().Warn("ZK proof verification disabled - accepting all proofs") - } + observe.Logger().Info("Root node initialized with per-partition ZK proof verification") return node, nil } @@ -326,16 +315,22 @@ func (v *Node) 
handleConsensus(ctx context.Context) error { // verifyZKProof verifies the ZK proof in the block certification request func (v *Node) verifyZKProof(ctx context.Context, req *certification.BlockCertificationRequest, si *storage.ShardInfo) error { - if !v.zkVerifier.IsEnabled() { - // Verification disabled - accept all - return nil - } - ir := req.InputRecord if ir == nil { return fmt.Errorf("input record is nil") } + // Get verifier for this partition's configuration + verifier, err := v.zkRegistry.GetVerifier(si.PartitionID, si.ShardID, si.IR.Epoch, si.PartitionParams) + if err != nil { + return fmt.Errorf("getting verifier for partition %s: %w", si.PartitionID, err) + } + + if !verifier.IsEnabled() { + // m-of-n mode - no ZK proof verification + return nil + } + // Get state roots from InputRecord previousStateRoot := ir.PreviousHash newStateRoot := ir.Hash @@ -357,12 +352,12 @@ func (v *Node) verifyZKProof(ctx context.Context, req *certification.BlockCertif v.log.DebugContext(ctx, "Verifying ZK proof", logger.Shard(req.PartitionID, req.ShardID), logger.Data(slog.Int("proof_size", len(req.ZkProof))), - logger.Data(slog.String("proof_type", string(v.zkVerifier.ProofType()))), + logger.Data(slog.String("proof_type", string(verifier.ProofType()))), logger.Data(slog.Uint64("round", ir.RoundNumber))) // Verify proof: previousStateRoot -> newStateRoot transition with block hash blockHash := ir.BlockHash - if err := v.zkVerifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot, blockHash); err != nil { + if err := verifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot, blockHash); err != nil { return fmt.Errorf("ZK proof verification failed: %w", err) } diff --git a/rootchain/node_test.go b/rootchain/node_test.go index f3ee1551..eabcae53 100644 --- a/rootchain/node_test.go +++ b/rootchain/node_test.go @@ -33,15 +33,15 @@ func Test_rootNode(t *testing.T) { cm := mockConsensusManager{} partNet := mockPartitionNet{} - node, err := New(nil, partNet, cm, 
nopObs, nil) + node, err := New(nil, partNet, cm, nopObs) require.Nil(t, node) require.EqualError(t, err, `partition listener is nil`) - node, err = New(&nwPeer, nil, cm, nopObs, nil) + node, err = New(&nwPeer, nil, cm, nopObs) require.Nil(t, node) require.EqualError(t, err, `network is nil`) - node, err = New(&nwPeer, partNet, cm, nopObs, nil) + node, err = New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) require.NotNil(t, node) require.Equal(t, &nwPeer, node.GetPeer()) @@ -59,7 +59,7 @@ func Test_rootNode(t *testing.T) { partMsg := make(chan any) partNet := mockPartitionNet{recCh: func() <-chan any { return partMsg }} - node, err := New(nwPeer, partNet, cm, nopObs, nil) + node, err := New(nwPeer, partNet, cm, nopObs) require.NoError(t, err) require.NotNil(t, node) require.Equal(t, nwPeer, node.GetPeer()) @@ -99,7 +99,7 @@ func Test_rootNode(t *testing.T) { partNet, _ := newMockPartitionNet() - node, err := New(nwPeer, partNet, cm, nopObs, nil) + node, err := New(nwPeer, partNet, cm, nopObs) require.NoError(t, err) require.NotNil(t, node) require.Equal(t, nwPeer, node.GetPeer()) @@ -134,7 +134,7 @@ func Test_sendResponse(t *testing.T) { certResp := validCertificationResponse(t) t.Run("invalid peer ID", func(t *testing.T) { - node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs, nil) + node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs) require.NoError(t, err) err = node.sendResponse(t.Context(), "", &certResp) @@ -147,7 +147,7 @@ func Test_sendResponse(t *testing.T) { t.Run("invalid CertificationResponse", func(t *testing.T) { // CertResp is coming from ConsensusManager so this should be impossible? // just send it out and shard nodes should be able to ignore invalid CRsp? 
- node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs, nil) + node, err := New(&nwPeer, mockPartitionNet{}, cm, nopObs) require.NoError(t, err) cr := certResp @@ -163,7 +163,7 @@ func Test_sendResponse(t *testing.T) { return expErr }, } - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) err = node.sendResponse(t.Context(), nodeID, &certResp) require.ErrorIs(t, err, expErr) @@ -178,7 +178,7 @@ func Test_sendResponse(t *testing.T) { return nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) require.NoError(t, node.sendResponse(t.Context(), nodeID, &certResp)) }) @@ -202,7 +202,7 @@ func Test_onHandshake(t *testing.T) { t.Run("invalid handshake msg", func(t *testing.T) { partNet := mockPartitionNet{} cm := mockConsensusManager{} - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) msg := handshake.Handshake{ PartitionID: 0, // invalid partition ID @@ -228,7 +228,7 @@ func Test_onHandshake(t *testing.T) { return nil, expErr }, } - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) err = node.onHandshake(t.Context(), &msg) require.EqualError(t, err, fmt.Errorf(`reading partition %s certificate: %w`, msg.PartitionID, expErr).Error()) @@ -245,7 +245,7 @@ func Test_onHandshake(t *testing.T) { return newMockShardInfo(t, nodeID.String(), publicKey, certResp), nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) msg := handshake.Handshake{ @@ -271,7 +271,7 @@ func Test_onHandshake(t *testing.T) { return newMockShardInfo(t, nodeID.String(), publicKey, certResp), nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) 
require.NoError(t, err) msg := handshake.Handshake{ @@ -298,7 +298,7 @@ func Test_handlePartitionMsg(t *testing.T) { t.Run("unsupported message", func(t *testing.T) { partNet := mockPartitionNet{} cm := mockConsensusManager{} - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) err = node.handlePartitionMsg(t.Context(), 555) require.EqualError(t, err, `unknown message type int`) @@ -320,7 +320,7 @@ func Test_handlePartitionMsg(t *testing.T) { return newMockShardInfo(t, nodeID.String(), publicKey, certResp), nil }, } - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) msg := handshake.Handshake{ @@ -343,7 +343,7 @@ func Test_handlePartitionMsg(t *testing.T) { return nil, expErr }, } - node, err := New(&nwPeer, partNet, cm, nopObs, nil) + node, err := New(&nwPeer, partNet, cm, nopObs) require.NoError(t, err) msg := certification.BlockCertificationRequest{ @@ -363,7 +363,7 @@ func Test_partitionMsgLoop(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) ctx, cancel := context.WithCancel(t.Context()) @@ -385,7 +385,7 @@ func Test_partitionMsgLoop(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) go func() { @@ -410,7 +410,7 @@ func Test_partitionMsgLoop(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) ctx, cancel := context.WithCancel(t.Context()) @@ 
-521,7 +521,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) err = node.onBlockCertificationRequest(t.Context(), &validCertRequest) require.EqualError(t, err, `acquiring shard 00000001 - info: no SI`) @@ -534,7 +534,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) cr := validCertRequest cr.NodeID = "not valid ID" @@ -563,7 +563,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { return &storage.ShardInfo{LastCR: &certResp}, nil }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) err = node.onBlockCertificationRequest(t.Context(), &validCertRequest) @@ -590,7 +590,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { return si, nil }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) err = node.onBlockCertificationRequest(t.Context(), &validCertRequest) @@ -613,7 +613,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { } key := partitionShard{validCertRequest.PartitionID, validCertRequest.ShardID.Key()} - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) require.NotContains(t, node.incomingRequests.store, key) @@ -657,7 +657,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { } key := partitionShard{validCertRequest.PartitionID, validCertRequest.ShardID.Key()} - node, err := New(&nwPeer, partNet, cm, 
testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) require.NotContains(t, node.incomingRequests.store, key) @@ -714,7 +714,7 @@ func Test_onBlockCertificationRequest(t *testing.T) { } key := partitionShard{validCertRequest.PartitionID, validCertRequest.ShardID.Key()} - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) require.NotContains(t, node.incomingRequests.store, key) @@ -754,7 +754,7 @@ func Test_handleConsensus(t *testing.T) { cm := mockConsensusManager{} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) ctx, cancel := context.WithCancel(t.Context()) @@ -775,7 +775,7 @@ func Test_handleConsensus(t *testing.T) { cm := mockConsensusManager{certificationResult: make(chan *certification.CertificationResponse)} done := make(chan struct{}) - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) go func() { @@ -806,7 +806,7 @@ func Test_handleConsensus(t *testing.T) { }, } - node, err := New(&nwPeer, partNet, cm, testobservability.Default(t), nil) + node, err := New(&nwPeer, partNet, cm, testobservability.Default(t)) require.NoError(t, err) go func() { diff --git a/rootchain/partitions/orchestration.go b/rootchain/partitions/orchestration.go index 59b373e1..19607c00 100644 --- a/rootchain/partitions/orchestration.go +++ b/rootchain/partitions/orchestration.go @@ -9,6 +9,7 @@ import ( "time" "github.com/unicitynetwork/bft-core/logger" + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" "github.com/unicitynetwork/bft-go-base/types" bolt "go.etcd.io/bbolt" ) @@ -210,7 +211,10 @@ func 
storeShardConf(tx *bolt.Tx, shardConf *types.PartitionDescriptionRecord) er func verifyShardConf(tx *bolt.Tx, shardConf *types.PartitionDescriptionRecord) error { if shardConf.Epoch == 0 { - return shardConf.IsValid() + if err := shardConf.IsValid(); err != nil { + return err + } + return verifyProofConfig(shardConf) } lastShardConf, err := getShardConf(tx, shardConf.PartitionID, shardConf.ShardID, math.MaxUint64) @@ -223,7 +227,35 @@ func verifyShardConf(tx *bolt.Tx, shardConf *types.PartitionDescriptionRecord) e if err = shardConf.Verify(lastShardConf); err != nil { return fmt.Errorf("shard conf does not extend previous shard conf: %w", err) } - return err + return verifyProofConfig(shardConf) +} + +// verifyProofConfig validates the ZK proof configuration in partition params. +// Returns error if: +// - proof_type is specified but not available (FFI not built) +// - SP1 proof_type is specified but vkey_path is missing +func verifyProofConfig(shardConf *types.PartitionDescriptionRecord) error { + proofType := zkverifier.ParseProofTypeFromParams(shardConf.PartitionParams) + + // Empty/none proof type is always valid (m-of-n mode) + if proofType == zkverifier.ProofTypeNone || proofType == "" { + return nil + } + + // Check if proof type is available in current build + if !zkverifier.IsProofTypeAvailable(proofType) { + return fmt.Errorf("proof type %q not available (build with -tags zkverifier_ffi to enable)", proofType) + } + + // SP1 requires verification key path + if proofType == zkverifier.ProofTypeSP1 { + vkeyPath := zkverifier.ParseVKeyPathFromParams(shardConf.PartitionParams) + if vkeyPath == "" { + return fmt.Errorf("vkey_path required for SP1 proof type") + } + } + + return nil } // schema: From 62c674a1a2419d4a074935f40e31b7bccd164c78 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Fri, 16 Jan 2026 18:12:24 +0200 Subject: [PATCH 10/17] signed zk proof in BlockCertificationMsg --- network/protocol/certification/block_certification_request.go | 4 +--- 1 
file changed, 1 insertion(+), 3 deletions(-) diff --git a/network/protocol/certification/block_certification_request.go b/network/protocol/certification/block_certification_request.go index 3218d4e0..65c6bd4b 100644 --- a/network/protocol/certification/block_certification_request.go +++ b/network/protocol/certification/block_certification_request.go @@ -85,10 +85,8 @@ func (x *BlockCertificationRequest) Sign(signer crypto.Signer) error { } func (x BlockCertificationRequest) Bytes() ([]byte, error) { - // Exclude signature and ZK proof from signed data - // ZK proof is validated separately by the verifier + // Exclude signature from signed data x.Signature = nil - x.ZkProof = nil return types.Cbor.Marshal(x) } From 7b59b8fe148686d8b3a93277844ae6de6f1faad1 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Wed, 21 Jan 2026 12:30:31 +0200 Subject: [PATCH 11/17] chain_id's of evm partitions --- rootchain/consensus/storage/block_store.go | 6 +- .../consensus/zkverifier/capabilities_test.go | 72 ++++++++ .../consensus/zkverifier/config.example.yaml | 162 ------------------ .../light-client-verifier-ffi/Cargo.toml | 10 +- .../light-client-verifier-ffi/README.md | 11 -- .../light_client_verifier.h | 2 + .../light-client-verifier-ffi/src/lib.rs | 15 +- .../zkverifier/light_client_verifier.go | 7 +- .../zkverifier/light_client_verifier_ffi.go | 8 +- .../light_client_verifier_ffi_stub.go | 7 +- .../consensus/zkverifier/partition_config.go | 25 +++ rootchain/consensus/zkverifier/registry.go | 12 +- .../consensus/zkverifier/registry_test.go | 31 ++++ .../sp1-verifier-ffi/sp1_verifier.h | 2 + .../zkverifier/sp1-verifier-ffi/src/lib.rs | 27 ++- .../consensus/zkverifier/sp1_verifier.go | 7 +- .../consensus/zkverifier/sp1_verifier_ffi.go | 10 +- .../zkverifier/sp1_verifier_ffi_stub.go | 6 +- rootchain/consensus/zkverifier/verifier.go | 8 +- .../consensus/zkverifier/verifier_ffi_test.go | 6 +- .../zkverifier/verifier_stub_test.go | 6 +- rootchain/partitions/orchestration.go | 12 +- 
22 files changed, 238 insertions(+), 214 deletions(-) delete mode 100644 rootchain/consensus/zkverifier/config.example.yaml diff --git a/rootchain/consensus/storage/block_store.go b/rootchain/consensus/storage/block_store.go index f92cb919..6fbaa8c5 100644 --- a/rootchain/consensus/storage/block_store.go +++ b/rootchain/consensus/storage/block_store.go @@ -197,7 +197,8 @@ func (x *BlockStore) GetCertificate(id types.PartitionID, shard types.ShardID) ( defer x.lock.RUnlock() committedBlock := x.blockTree.Root() - if si, ok := committedBlock.ShardState.States[types.PartitionShardID{PartitionID: id, ShardID: shard.Key()}]; ok { + key := types.PartitionShardID{PartitionID: id, ShardID: shard.Key()} + if si, ok := committedBlock.ShardState.States[key]; ok { return si.LastCR, nil } return nil, fmt.Errorf("no certificate found for shard %s - %s", id, shard) @@ -222,7 +223,8 @@ func (x *BlockStore) ShardInfo(partition types.PartitionID, shard types.ShardID) defer x.lock.RUnlock() committedBlock := x.blockTree.Root() - if si, ok := committedBlock.ShardState.States[types.PartitionShardID{PartitionID: partition, ShardID: shard.Key()}]; ok { + key := types.PartitionShardID{PartitionID: partition, ShardID: shard.Key()} + if si, ok := committedBlock.ShardState.States[key]; ok { return si } return nil diff --git a/rootchain/consensus/zkverifier/capabilities_test.go b/rootchain/consensus/zkverifier/capabilities_test.go index 025100a4..395df1ec 100644 --- a/rootchain/consensus/zkverifier/capabilities_test.go +++ b/rootchain/consensus/zkverifier/capabilities_test.go @@ -115,3 +115,75 @@ func TestParseVKeyPathFromParams(t *testing.T) { }) } } + +func TestParseChainIDFromParams(t *testing.T) { + testCases := []struct { + name string + params map[string]string + expectedID uint64 + expectedOK bool + }{ + { + name: "nil params", + params: nil, + expectedID: 0, + expectedOK: false, + }, + { + name: "empty params", + params: map[string]string{}, + expectedID: 0, + expectedOK: false, + }, 
+ { + name: "no chain_id", + params: map[string]string{ParamProofType: "sp1"}, + expectedID: 0, + expectedOK: false, + }, + { + name: "empty chain_id", + params: map[string]string{ParamChainID: ""}, + expectedID: 0, + expectedOK: false, + }, + { + name: "invalid chain_id", + params: map[string]string{ParamChainID: "invalid"}, + expectedID: 0, + expectedOK: false, + }, + { + name: "negative chain_id", + params: map[string]string{ParamChainID: "-1"}, + expectedID: 0, + expectedOK: false, + }, + { + name: "valid chain_id 1", + params: map[string]string{ParamChainID: "1"}, + expectedID: 1, + expectedOK: true, + }, + { + name: "valid chain_id mainnet", + params: map[string]string{ParamChainID: "1337"}, + expectedID: 1337, + expectedOK: true, + }, + { + name: "valid large chain_id", + params: map[string]string{ParamChainID: "999999999"}, + expectedID: 999999999, + expectedOK: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, ok := ParseChainIDFromParams(tc.params) + require.Equal(t, tc.expectedOK, ok) + require.Equal(t, tc.expectedID, result) + }) + } +} diff --git a/rootchain/consensus/zkverifier/config.example.yaml b/rootchain/consensus/zkverifier/config.example.yaml deleted file mode 100644 index c50f09f5..00000000 --- a/rootchain/consensus/zkverifier/config.example.yaml +++ /dev/null @@ -1,162 +0,0 @@ -# BFT Core ZK Verification Configuration Example -# -# ZK proof verification is configured PER-PARTITION via PartitionDescriptionRecord.PartitionParams. -# This allows different partitions to use different proof types or no ZK proofs at all. -# -# There are NO global CLI flags for ZK verification - all configuration is per-partition. - -# ============================================================================ -# PER-PARTITION CONFIGURATION -# ============================================================================ - -# Proof configuration is specified in the PartitionDescriptionRecord.PartitionParams map. 
-# This is set when creating or updating a partition configuration. -# -# Supported partition params: -# proof_type - Type of ZK proof (sp1, light_client, exec, or empty for m-of-n only) -# vkey_path - Path to verification key file (required for SP1) - -# ============================================================================ -# PARTITION CONFIGURATION EXAMPLES -# ============================================================================ - -# Example 1: Partition with m-of-n signature verification only (no ZK proofs) -# PartitionDescriptionRecord: -# partition_id: 1 -# partition_params: {} -# # or omit partition_params entirely - -# Example 2: Partition with SP1 ZK proof verification -# PartitionDescriptionRecord: -# partition_id: 2 -# partition_params: -# proof_type: "sp1" -# vkey_path: "/etc/bft-core/partition-2.vkey" - -# Example 3: Partition with Light Client proof verification -# PartitionDescriptionRecord: -# partition_id: 3 -# partition_params: -# proof_type: "light_client" - -# Example 4: Partition with exec (development/testing - accepts all proofs) -# PartitionDescriptionRecord: -# partition_id: 4 -# partition_params: -# proof_type: "exec" - -# ============================================================================ -# PROOF TYPE OPTIONS -# ============================================================================ - -# proof_type values: -# -# (empty) - M-of-n validator signature verification only (default) -# No ZK proof required in BlockCertificationRequest -# -# "sp1" - SP1 ZK proof verification -# Requires: vkey_path pointing to SP1 verification key -# Requires: Build with -tags zkverifier_ffi -# -# "light_client" - Light client proof verification -# Requires: Build with -tags zkverifier_ffi -# -# "exec" - Development/testing mode -# Accepts all well-formed proofs without verification - -# ============================================================================ -# BUILD TAGS -# 
============================================================================ - -# SP1 and Light Client verification require FFI support: -# -# make build-ffi # Build with FFI support -# # or -# go build -tags zkverifier_ffi ./... -# -# Without FFI support, only "exec" proof type is available. -# Attempting to configure a partition with "sp1" or "light_client" will fail -# at startup if FFI is not built in. - -# ============================================================================ -# VERIFICATION KEY MANAGEMENT -# ============================================================================ - -# For SP1 proof type, each partition needs its own verification key. -# -# 1. Generate vkey from the partition's prover: -# cd partition-prover -# cargo run --release --bin generate-vkey -- --output partition.vkey -# -# 2. Deploy vkey to all root chain nodes: -# scp partition.vkey root-node-1:/etc/bft-core/partition-X.vkey -# scp partition.vkey root-node-2:/etc/bft-core/partition-X.vkey -# scp partition.vkey root-node-3:/etc/bft-core/partition-X.vkey -# -# 3. Verify file permissions: -# chmod 644 /etc/bft-core/partition-X.vkey -# -# 4. Verify checksum across all nodes: -# sha256sum /etc/bft-core/partition-X.vkey -# -# 5. Configure partition with vkey_path in PartitionParams - -# ============================================================================ -# FAIL-HARD BEHAVIOR -# ============================================================================ - -# If a partition is configured with a proof type that's unavailable: -# -# - At config load time: Partition configuration is REJECTED -# - At runtime: This cannot happen (caught at config load) -# -# There is NO fallback to m-of-n if proof verification is configured. -# If you want m-of-n only, don't set proof_type or set it to empty string. 
- -# ============================================================================ -# MONITORING -# ============================================================================ - -# Metrics to monitor: -# - bft_zk_verification_total{partition="X", result="success"} -# - bft_zk_verification_total{partition="X", result="failure"} -# - bft_zk_verification_duration_seconds{partition="X"} -# -# Log messages: -# - INFO: "Root node initialized with per-partition ZK proof verification" -# - INFO: "ZK proof verified successfully" (partition=X, round=Y) -# - WARN: "ZK proof verification failed" (partition=X) -# - DEBUG: "Verifying ZK proof" (partition=X, proof_size=N) - -# ============================================================================ -# TROUBLESHOOTING -# ============================================================================ - -# Error: "proof type \"sp1\" not available (build with -tags zkverifier_ffi to enable)" -# Solution: Rebuild with FFI support: make build-ffi -# -# Error: "vkey_path required for SP1 proof type" -# Solution: Add vkey_path to partition_params in PartitionDescriptionRecord -# -# Error: "failed to read verification key" -# Solution: Verify vkey_path points to valid file with correct permissions -# -# Error: "getting verifier for partition X: ..." -# Solution: Check partition configuration in orchestration database - -# ============================================================================ -# MIGRATION FROM GLOBAL CONFIG -# ============================================================================ - -# If you were using the old global CLI flags: -# --zk-verification-enabled=true -# --zk-proof-type=sp1 -# --zk-vkey-path=/etc/bft-core/sp1.vkey -# -# Migration steps: -# 1. Remove the CLI flags (they no longer exist) -# 2. For each partition that needs ZK verification: -# - Update PartitionDescriptionRecord.PartitionParams: -# proof_type: "sp1" -# vkey_path: "/etc/bft-core/partition-X.vkey" -# 3. 
Partitions without ZK verification need no changes (m-of-n by default) diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml index 61d6b5a8..a896d98f 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml @@ -11,12 +11,14 @@ crate-type = ["cdylib", "staticlib"] [dependencies] # Core dependencies for witness deserialization and validation -rkyv = { version = "0.8.10", features = ["std", "unaligned"] } +# Pin rkyv to same version as uni-evm workspace to ensure compatible serialization +rkyv = { version = "=0.8.10", features = ["std", "unaligned"] } anyhow = "1.0" -# ethrex dependencies for validation logic (use GitHub fork) -ethrex-core = { git = "https://github.com/ristik/ethrex", branch = "uni-evm", package = "ethrex-common" } -guest_program = { git = "https://github.com/ristik/ethrex", branch = "uni-evm" } +# TODO: use github fork instead of local +ethrex-core = { path = "../../../../../ethrex/crates/common", package = "ethrex-common" } +# The l2 feature adds blob_commitment and blob_proof fields to ProgramInput +guest_program = { path = "../../../../../ethrex/crates/l2/prover/src/guest_program", features = ["l2"] } [profile.release] opt-level = 3 diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md index 9d4cdea4..f51ca283 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md @@ -182,17 +182,6 @@ light-client-verifier-ffi/ └── README.md # This file ``` -## Configuration - -### Chain ID - -Currently hardcoded to `1` (matching uni-evm default). TODO: Make configurable via BFT Core config. 
- -```rust -// In lib.rs -let chain_id = 1; // TODO: Get from BFT Core configuration -``` - ## Troubleshooting ### Build Errors diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h b/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h index f1c35829..778fec73 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h @@ -38,6 +38,7 @@ typedef enum { * @param prev_state_root Pointer to 32-byte previous state root * @param new_state_root Pointer to 32-byte new state root * @param block_hash Pointer to 32-byte block hash + * @param chain_id Chain ID of EVM instance from partition config * @param error_out Output pointer for error message (must be freed with light_client_free_string) * @return LightClientVerifyResult status code */ @@ -47,6 +48,7 @@ LightClientVerifyResult light_client_verify_proof( const uint8_t* prev_state_root, const uint8_t* new_state_root, const uint8_t* block_hash, + uint64_t chain_id, char** error_out ); diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs index 108cf72d..0488380d 100644 --- a/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs @@ -23,6 +23,7 @@ pub enum LightClientVerifyResult { /// * `prev_state_root` - Pointer to 32-byte previous state root /// * `new_state_root` - Pointer to 32-byte new state root /// * `block_hash` - Pointer to 32-byte block hash +/// * `chain_id` - EVM chain ID from partition config /// * `error_out` - Output pointer for error message (caller must free with light_client_free_string) /// /// # Returns @@ -34,6 +35,7 @@ pub extern "C" fn light_client_verify_proof( prev_state_root: *const u8, new_state_root: *const u8, block_hash: 
*const u8, + chain_id: u64, error_out: *mut *mut c_char, ) -> LightClientVerifyResult { // Safety checks @@ -54,7 +56,7 @@ pub extern "C" fn light_client_verify_proof( let blk_hash = unsafe { std::slice::from_raw_parts(block_hash, 32) }; // Perform verification - match verify_light_client_proof_internal(payload_data, prev_root, new_root, blk_hash) { + match verify_light_client_proof_internal(payload_data, prev_root, new_root, blk_hash, chain_id) { Ok(()) => LightClientVerifyResult::Success, Err(e) => { set_error(error_out, &e.to_string()); @@ -75,6 +77,7 @@ fn verify_light_client_proof_internal( prev_state_root: &[u8], new_state_root: &[u8], block_hash: &[u8], + chain_id: u64, ) -> anyhow::Result<()> { // 1. Check magic header if payload_data.len() < 8 { @@ -102,12 +105,7 @@ fn verify_light_client_proof_internal( return Err(anyhow::anyhow!("No blocks in ProgramInput")); } - // 4. Use chain_id from blocks[0].header (assuming it's stored in number for now) - // TODO: Get chain_id from BFT Core configuration instead of hardcoding - // For now, use the default chain_id from uni-evm config (1) - let chain_id = 1; - - // 5. Execute stateless validation + // 4. 
Execute stateless validation let output = guest_program::execution::stateless_validation_l1( program_input.blocks, program_input.execution_witness, @@ -263,6 +261,7 @@ mod tests { ptr::null(), ptr::null(), ptr::null(), + 1, // chain_id &mut error, ); assert_eq!(result as i32, LightClientVerifyResult::InternalError as i32); @@ -294,6 +293,7 @@ mod tests { prev_root.as_ptr(), new_root.as_ptr(), block_hash.as_ptr(), + 1, // chain_id &mut error, ); @@ -318,6 +318,7 @@ mod tests { prev_root.as_ptr(), new_root.as_ptr(), block_hash.as_ptr(), + 1, // chain_id &mut error, ); diff --git a/rootchain/consensus/zkverifier/light_client_verifier.go b/rootchain/consensus/zkverifier/light_client_verifier.go index 3bba62f0..abe37e82 100644 --- a/rootchain/consensus/zkverifier/light_client_verifier.go +++ b/rootchain/consensus/zkverifier/light_client_verifier.go @@ -13,10 +13,11 @@ type LightClientVerifier struct { } // NewLightClientVerifier creates a new light client verifier -func NewLightClientVerifier() (*LightClientVerifier, error) { +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewLightClientVerifier(chainID uint64) (*LightClientVerifier, error) { // Try to create FFI verifier - if ffiVerifier, err := NewLightClientVerifierFFI(); err == nil { - slog.Info("Using Light Client FFI verifier", "version", GetLightClientFFIVersion()) + if ffiVerifier, err := NewLightClientVerifierFFI(chainID); err == nil { + slog.Info("Using Light Client FFI verifier", "version", GetLightClientFFIVersion(), "chain_id", chainID) return &LightClientVerifier{ enabled: true, ffiVerifier: ffiVerifier, diff --git a/rootchain/consensus/zkverifier/light_client_verifier_ffi.go b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go index 4393575e..5176e9a8 100644 --- a/rootchain/consensus/zkverifier/light_client_verifier_ffi.go +++ b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go @@ -13,11 +13,13 @@ import ( // LightClientVerifierFFI wraps 
the Rust FFI library for light client proof verification type LightClientVerifierFFI struct { - enabled bool + enabled bool + chainID uint64 } // NewLightClientVerifierFFI creates a new FFI-based light client verifier -func NewLightClientVerifierFFI() (*LightClientVerifierFFI, error) { +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewLightClientVerifierFFI(chainID uint64) (*LightClientVerifierFFI, error) { // Verify FFI library is available version := C.light_client_ffi_version() if version == nil { @@ -26,6 +28,7 @@ func NewLightClientVerifierFFI() (*LightClientVerifierFFI, error) { return &LightClientVerifierFFI{ enabled: true, + chainID: chainID, }, nil } @@ -60,6 +63,7 @@ func (v *LightClientVerifierFFI) VerifyProof(proof []byte, previousStateRoot []b (*C.uint8_t)(unsafe.Pointer(&previousStateRoot[0])), (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), (*C.uint8_t)(unsafe.Pointer(&blockHash[0])), + C.uint64_t(v.chainID), &errorOut, ) diff --git a/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go b/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go index 1d7a0f50..a84b3433 100644 --- a/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go +++ b/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go @@ -5,10 +5,13 @@ package zkverifier import "fmt" // LightClientVerifierFFI is a stub when FFI is not available -type LightClientVerifierFFI struct{} +type LightClientVerifierFFI struct { + chainID uint64 +} // NewLightClientVerifierFFI returns an error when FFI is not available -func NewLightClientVerifierFFI() (*LightClientVerifierFFI, error) { +// chainID: chain identifier of EVM partition from the partition config (invariant) +func NewLightClientVerifierFFI(chainID uint64) (*LightClientVerifierFFI, error) { return nil, fmt.Errorf("Light Client FFI verifier not available: build with -tags zkverifier_ffi to enable") } diff --git 
a/rootchain/consensus/zkverifier/partition_config.go b/rootchain/consensus/zkverifier/partition_config.go index 5f8213d2..17e98bf3 100644 --- a/rootchain/consensus/zkverifier/partition_config.go +++ b/rootchain/consensus/zkverifier/partition_config.go @@ -1,5 +1,7 @@ package zkverifier +import "strconv" + // Partition params keys for proof verification configuration. // These are stored in PartitionDescriptionRecord.PartitionParams. const ( @@ -11,6 +13,11 @@ const ( // ParamVerificationKeyPath specifies the path to the verification key file. // Required for SP1 proof type. ParamVerificationKeyPath = "vkey_path" + + // ParamChainID specifies the EVM chain ID for the partition. + // Required for SP1 and light_client proof types. + // This is different from the BFT Core network ID - each EVM partition has its own chain ID. + ParamChainID = "chain_id" ) // ParseProofTypeFromParams extracts the ProofType from partition params. @@ -34,3 +41,21 @@ func ParseVKeyPathFromParams(params map[string]string) string { } return params[ParamVerificationKeyPath] } + +// ParseChainIDFromParams extracts the EVM chain ID from partition params. +// Returns 0 and false if not set or invalid. +// The chain_id is specific to the EVM partition and verified against ZK proof public values. 
+func ParseChainIDFromParams(params map[string]string) (uint64, bool) { + if params == nil { + return 0, false + } + cidStr, ok := params[ParamChainID] + if !ok || cidStr == "" { + return 0, false + } + cid, err := strconv.ParseUint(cidStr, 10, 64) + if err != nil { + return 0, false + } + return cid, true +} diff --git a/rootchain/consensus/zkverifier/registry.go b/rootchain/consensus/zkverifier/registry.go index e8d92e05..7ba56239 100644 --- a/rootchain/consensus/zkverifier/registry.go +++ b/rootchain/consensus/zkverifier/registry.go @@ -87,10 +87,18 @@ func (r *Registry) createVerifier(params map[string]string) (ZKVerifier, error) if vkeyPath == "" { return nil, fmt.Errorf("vkey_path required for SP1 proof type") } - return NewSP1Verifier(vkeyPath) + chainID, ok := ParseChainIDFromParams(params) + if !ok { + return nil, fmt.Errorf("chain_id required for SP1 proof type") + } + return NewSP1Verifier(vkeyPath, chainID) case ProofTypeLightClient: - return NewLightClientVerifier() + chainID, ok := ParseChainIDFromParams(params) + if !ok { + return nil, fmt.Errorf("chain_id required for light_client proof type") + } + return NewLightClientVerifier(chainID) default: return nil, fmt.Errorf("unknown proof type: %s", proofType) diff --git a/rootchain/consensus/zkverifier/registry_test.go b/rootchain/consensus/zkverifier/registry_test.go index 798b840a..7353bfc7 100644 --- a/rootchain/consensus/zkverifier/registry_test.go +++ b/rootchain/consensus/zkverifier/registry_test.go @@ -130,3 +130,34 @@ func TestRegistry_GetVerifier_UnavailableProofType(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "not available") } + +func TestRegistry_GetVerifier_SP1MissingChainID(t *testing.T) { + if !IsFFIAvailable() { + t.Skip("FFI not available, skipping chain_id requirement test") + } + + r := NewRegistry() + + // SP1 with vkey_path but without chain_id should fail + params := map[string]string{ + ParamProofType: string(ProofTypeSP1), + ParamVerificationKeyPath: 
"/path/to/vkey", + } + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + require.Contains(t, err.Error(), "chain_id required") +} + +func TestRegistry_GetVerifier_LightClientMissingChainID(t *testing.T) { + if !IsFFIAvailable() { + t.Skip("FFI not available, skipping chain_id requirement test") + } + + r := NewRegistry() + + // light_client without chain_id should fail + params := map[string]string{ParamProofType: string(ProofTypeLightClient)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + require.Contains(t, err.Error(), "chain_id required") +} diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h index 2e47fd2c..96183890 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h @@ -36,6 +36,7 @@ typedef enum { * @param prev_state_root Pointer to 32-byte previous state root * @param new_state_root Pointer to 32-byte new state root * @param block_hash Pointer to 32-byte block hash + * @param chain_id EVM Chain ID from partition config * @param error_out Output pointer for error message (must be freed with sp1_free_string) * @return SP1VerifyResult status code */ @@ -47,6 +48,7 @@ SP1VerifyResult sp1_verify_proof( const uint8_t* prev_state_root, const uint8_t* new_state_root, const uint8_t* block_hash, + uint64_t chain_id, char** error_out ); diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs index 58c8a370..40d15b30 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs @@ -23,6 +23,7 @@ pub enum SP1VerifyResult { /// * `prev_state_root` - Pointer to 32-byte previous state root /// * `new_state_root` - Pointer to 32-byte new state root /// * `block_hash` - Pointer to 
32-byte block hash +/// * `chain_id` - Chain ID from partition config /// * `error_out` - Output pointer for error message (caller must free with sp1_free_string) /// /// # Returns @@ -36,6 +37,7 @@ pub extern "C" fn sp1_verify_proof( prev_state_root: *const u8, new_state_root: *const u8, block_hash: *const u8, + chain_id: u64, error_out: *mut *mut c_char, ) -> SP1VerifyResult { // Safety checks @@ -57,7 +59,7 @@ pub extern "C" fn sp1_verify_proof( let blk_hash = unsafe { std::slice::from_raw_parts(block_hash, 32) }; // Perform verification - match verify_proof_internal(vkey_data, proof_data, prev_root, new_root, blk_hash) { + match verify_proof_internal(vkey_data, proof_data, prev_root, new_root, blk_hash, chain_id) { Ok(()) => SP1VerifyResult::Success, Err(e) => { set_error(error_out, &e.to_string()); @@ -79,6 +81,7 @@ fn verify_proof_internal( prev_state_root: &[u8], new_state_root: &[u8], block_hash: &[u8], + chain_id: u64, ) -> anyhow::Result<()> { // Deserialize verification key let vkey: sp1_sdk::SP1VerifyingKey = bincode::deserialize(vkey_data) @@ -106,13 +109,14 @@ fn verify_proof_internal( // - 96-127: l1_in_messages_rolling_hash (L2 feature) // - 128-159: blob_versioned_hash (L2 feature) // - 160-191: last_block_hash (block_hash) - // - 192+: chain_id, non_privileged_count, etc. + // - 192-199: chain_id (u64, little-endian) + // - 200+: non_privileged_count, etc. // // Note: ethrex's guest program has the 'l2' feature enabled by default, // which adds 3 H256 fields (96 bytes) before the block hash. 
- if public_values.len() < 192 { + if public_values.len() < 200 { return Err(anyhow::anyhow!( - "Public values too short: expected at least 192 bytes for ethrex l2 format, got {}", + "Public values too short: expected at least 200 bytes for ethrex l2 format (including chain_id), got {}", public_values.len() )); } @@ -144,6 +148,20 @@ fn verify_proof_internal( )); } + // Check chain_id matches + let proof_chain_id = u64::from_le_bytes( + public_values[192..200] + .try_into() + .expect("slice is exactly 8 bytes") + ); + if proof_chain_id != chain_id { + return Err(anyhow::anyhow!( + "Chain ID mismatch: expected {} (chain_id from partition config), got {} (from proof)", + chain_id, + proof_chain_id + )); + } + Ok(()) } @@ -255,6 +273,7 @@ mod tests { ptr::null(), ptr::null(), ptr::null(), + 1, // chain_id &mut error, ); assert_eq!(result as i32, SP1VerifyResult::InternalError as i32); diff --git a/rootchain/consensus/zkverifier/sp1_verifier.go b/rootchain/consensus/zkverifier/sp1_verifier.go index 80d6808d..dd7073f5 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier.go +++ b/rootchain/consensus/zkverifier/sp1_verifier.go @@ -17,14 +17,15 @@ type SP1Verifier struct { // NewSP1Verifier creates a new SP1 verifier // vkeyPath: path to the SP1 verification key file (.vkey) -func NewSP1Verifier(vkeyPath string) (*SP1Verifier, error) { +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewSP1Verifier(vkeyPath string, chainID uint64) (*SP1Verifier, error) { if vkeyPath == "" { return nil, fmt.Errorf("verification key path is required for SP1 verifier") } // Try to create FFI verifier first - if ffiVerifier, err := NewSP1VerifierFFI(vkeyPath); err == nil { - slog.Info("Using SP1 FFI verifier", "path", vkeyPath, "version", GetFFIVersion()) + if ffiVerifier, err := NewSP1VerifierFFI(vkeyPath, chainID); err == nil { + slog.Info("Using SP1 FFI verifier", "path", vkeyPath, "version", GetFFIVersion(), "chain_id", chainID) return 
&SP1Verifier{ vkey: ffiVerifier.vkey, enabled: true, diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go index 716d1d67..c062697d 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go @@ -13,11 +13,13 @@ import ( // SP1VerifierFFI wraps the Rust FFI library for SP1 proof verification type SP1VerifierFFI struct { - vkey []byte + vkey []byte + chainID uint64 } // NewSP1VerifierFFI creates a new FFI-based SP1 verifier -func NewSP1VerifierFFI(vkeyPath string) (*SP1VerifierFFI, error) { +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewSP1VerifierFFI(vkeyPath string, chainID uint64) (*SP1VerifierFFI, error) { // Load verification key vkey, err := loadVerificationKey(vkeyPath) if err != nil { @@ -56,7 +58,8 @@ func NewSP1VerifierFFI(vkeyPath string) (*SP1VerifierFFI, error) { } return &SP1VerifierFFI{ - vkey: vkey, + vkey: vkey, + chainID: chainID, }, nil } @@ -93,6 +96,7 @@ func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, new (*C.uint8_t)(unsafe.Pointer(&previousStateRoot[0])), (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), (*C.uint8_t)(unsafe.Pointer(&blockHash[0])), + C.uint64_t(v.chainID), &errorOut, ) diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go b/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go index 7b8d0018..d900bf2f 100644 --- a/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go @@ -6,11 +6,13 @@ import "fmt" // SP1VerifierFFI is a stub when FFI is not available type SP1VerifierFFI struct { - vkey []byte + vkey []byte + chainID uint64 } // NewSP1VerifierFFI returns an error when FFI is not available -func NewSP1VerifierFFI(vkeyPath string) (*SP1VerifierFFI, error) { +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func 
NewSP1VerifierFFI(vkeyPath string, chainID uint64) (*SP1VerifierFFI, error) { return nil, fmt.Errorf("SP1 FFI verifier not available: build with -tags zkverifier_ffi to enable") } diff --git a/rootchain/consensus/zkverifier/verifier.go b/rootchain/consensus/zkverifier/verifier.go index 1d037e1f..d71a47d4 100644 --- a/rootchain/consensus/zkverifier/verifier.go +++ b/rootchain/consensus/zkverifier/verifier.go @@ -60,6 +60,10 @@ type Config struct { // For RISC0: path to the verification key VerificationKeyPath string + // chainID: chain identifier of the EVM partition from the partition config (invariant) + // must match the chain_id in proof public values + ChainID uint64 + // AdditionalConfig holds prover-specific configuration AdditionalConfig map[string]interface{} } @@ -86,9 +90,9 @@ func NewVerifier(cfg *Config) (ZKVerifier, error) { switch cfg.ProofType { case ProofTypeSP1: - return NewSP1Verifier(cfg.VerificationKeyPath) + return NewSP1Verifier(cfg.VerificationKeyPath, cfg.ChainID) case ProofTypeLightClient: - return NewLightClientVerifier() + return NewLightClientVerifier(cfg.ChainID) case ProofTypeRISC0: return nil, fmt.Errorf("RISC0 verifier not implemented") case ProofTypeExec, ProofTypeNone: diff --git a/rootchain/consensus/zkverifier/verifier_ffi_test.go b/rootchain/consensus/zkverifier/verifier_ffi_test.go index 02b0a2dc..5822e991 100644 --- a/rootchain/consensus/zkverifier/verifier_ffi_test.go +++ b/rootchain/consensus/zkverifier/verifier_ffi_test.go @@ -23,6 +23,7 @@ func TestNewVerifier_SP1_WithFFI(t *testing.T) { Enabled: true, ProofType: ProofTypeSP1, VerificationKeyPath: vkeyPath, + ChainID: 1, } verifier, err := NewVerifier(cfg) @@ -37,6 +38,7 @@ func TestNewVerifier_SP1_MissingVKey_WithFFI(t *testing.T) { Enabled: true, ProofType: ProofTypeSP1, VerificationKeyPath: "/nonexistent/path/test.vkey", + ChainID: 1, } verifier, err := NewVerifier(cfg) @@ -52,7 +54,7 @@ func TestSP1Verifier_InvalidInputs_WithFFI(t *testing.T) { err := 
os.WriteFile(vkeyPath, make([]byte, 64), 0644) require.NoError(t, err) - verifier, err := NewSP1Verifier(vkeyPath) + verifier, err := NewSP1Verifier(vkeyPath, 1) require.NoError(t, err) testCases := []struct { @@ -121,7 +123,7 @@ func TestSP1Verifier_EmptyVKey_WithFFI(t *testing.T) { err := os.WriteFile(vkeyPath, []byte{}, 0644) require.NoError(t, err) - verifier, err := NewSP1Verifier(vkeyPath) + verifier, err := NewSP1Verifier(vkeyPath, 1) require.Error(t, err) require.Nil(t, verifier) // FFI will detect empty vkey diff --git a/rootchain/consensus/zkverifier/verifier_stub_test.go b/rootchain/consensus/zkverifier/verifier_stub_test.go index 413532b8..62ff1ffe 100644 --- a/rootchain/consensus/zkverifier/verifier_stub_test.go +++ b/rootchain/consensus/zkverifier/verifier_stub_test.go @@ -21,6 +21,7 @@ func TestNewVerifier_SP1_WithoutFFI(t *testing.T) { Enabled: true, ProofType: ProofTypeSP1, VerificationKeyPath: vkeyPath, + ChainID: 1, } verifier, err := NewVerifier(cfg) @@ -34,6 +35,7 @@ func TestNewVerifier_SP1_MissingVKey_WithoutFFI(t *testing.T) { Enabled: true, ProofType: ProofTypeSP1, VerificationKeyPath: "/nonexistent/path/test.vkey", + ChainID: 1, } verifier, err := NewVerifier(cfg) @@ -49,7 +51,7 @@ func TestSP1Verifier_InvalidInputs_WithoutFFI(t *testing.T) { err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) require.NoError(t, err) - verifier, err := NewSP1Verifier(vkeyPath) + verifier, err := NewSP1Verifier(vkeyPath, 1) require.Error(t, err) require.Nil(t, verifier) require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") @@ -61,7 +63,7 @@ func TestSP1Verifier_EmptyVKey_WithoutFFI(t *testing.T) { err := os.WriteFile(vkeyPath, []byte{}, 0644) require.NoError(t, err) - verifier, err := NewSP1Verifier(vkeyPath) + verifier, err := NewSP1Verifier(vkeyPath, 1) require.Error(t, err) require.Nil(t, verifier) require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") diff --git a/rootchain/partitions/orchestration.go 
b/rootchain/partitions/orchestration.go index 19607c00..6ee342f5 100644 --- a/rootchain/partitions/orchestration.go +++ b/rootchain/partitions/orchestration.go @@ -247,12 +247,22 @@ func verifyProofConfig(shardConf *types.PartitionDescriptionRecord) error { return fmt.Errorf("proof type %q not available (build with -tags zkverifier_ffi to enable)", proofType) } - // SP1 requires verification key path + // SP1 requires verification key path and chain_id if proofType == zkverifier.ProofTypeSP1 { vkeyPath := zkverifier.ParseVKeyPathFromParams(shardConf.PartitionParams) if vkeyPath == "" { return fmt.Errorf("vkey_path required for SP1 proof type") } + if _, ok := zkverifier.ParseChainIDFromParams(shardConf.PartitionParams); !ok { + return fmt.Errorf("chain_id required for SP1 proof type") + } + } + + // LightClient requires chain_id + if proofType == zkverifier.ProofTypeLightClient { + if _, ok := zkverifier.ParseChainIDFromParams(shardConf.PartitionParams); !ok { + return fmt.Errorf("chain_id required for light_client proof type") + } } return nil From f52e5009116ccac69f6ae4bb3bb9041dd6c37d02 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Thu, 26 Mar 2026 12:28:23 +0200 Subject: [PATCH 12/17] fix wrong advice --- CLAUDE.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 2b4b70b4..cec76591 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -274,6 +274,7 @@ DOCKER_GO_DEPENDENCY=../bft-go-base make build-docker ## Common Pitfalls + 1. **Round Synchronization**: Partitions must use `TechnicalRecord.Round` for next certification request, not block number or self-incremented counter 2. **CBOR Serialization**: Use `cbor:",toarray"` for struct tags and ensure nil values serialize as CBOR null (0xf6), not empty byte strings @@ -284,11 +285,11 @@ DOCKER_GO_DEPENDENCY=../bft-go-base make build-docker 5. **Timestamp Source**: Use UnicitySeal.timestamp from last UC, not system time -6. 
**Previous Hash**: For certification requests, use UC.InputRecord.Hash (the certified state), not block.parent_state_root +6. **Previous Hash**: For certification requests, use previous round's state root hash as the PreviousHash. It MUST match the previous UC's luc.InputRecord.Hash to be successful. That is, rounds' root hashes must form a continuous chain certified by InputRecords. 7. **First Block**: Send previous_hash=nil to let BFT Core use genesis state -8. **Database Cleanup**: When testing, clean both partition AND root chain databases for fresh state +8. **Database Cleanup**: When testing, clean both partition AND root chain databases for fresh state. Otherwise, the BFT Core and partition can not produce a synchronized chain of root hashes, following the ledger rules. ## Related Repositories From 28cf8a61e17c01d9b947e9d0db4b8b6b3f2588b9 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Sun, 5 Apr 2026 01:18:42 +0300 Subject: [PATCH 13/17] ProofTypeAggregatorRSMT --- .gitignore | 1 + cli/ubft/cmd/partition_params.go | 178 --------- .../block_certification_request.go | 3 +- rootchain/consensus/zkverifier/README.md | 61 +++- .../zkverifier/aggregator_rsmt_verifier.go | 61 ++++ .../aggregator_rsmt_verifier_test.go | 112 ++++++ .../consensus/zkverifier/capabilities_ffi.go | 7 +- .../consensus/zkverifier/capabilities_stub.go | 9 +- .../consensus/zkverifier/partition_config.go | 2 +- rootchain/consensus/zkverifier/registry.go | 5 + rootchain/consensus/zkverifier/rsmt/doc.go | 30 ++ .../consensus/zkverifier/rsmt/envelope.go | 105 ++++++ .../zkverifier/rsmt/fixtures_test.go | 86 +++++ rootchain/consensus/zkverifier/rsmt/hash.go | 27 ++ .../consensus/zkverifier/rsmt/sortkey.go | 40 +++ rootchain/consensus/zkverifier/rsmt/verify.go | 171 +++++++++ .../consensus/zkverifier/rsmt/verify_test.go | 338 ++++++++++++++++++ rootchain/consensus/zkverifier/verifier.go | 6 + rootchain/node.go | 33 +- 19 files changed, 1077 insertions(+), 198 deletions(-) delete mode 100644 
cli/ubft/cmd/partition_params.go create mode 100644 rootchain/consensus/zkverifier/aggregator_rsmt_verifier.go create mode 100644 rootchain/consensus/zkverifier/aggregator_rsmt_verifier_test.go create mode 100644 rootchain/consensus/zkverifier/rsmt/doc.go create mode 100644 rootchain/consensus/zkverifier/rsmt/envelope.go create mode 100644 rootchain/consensus/zkverifier/rsmt/fixtures_test.go create mode 100644 rootchain/consensus/zkverifier/rsmt/hash.go create mode 100644 rootchain/consensus/zkverifier/rsmt/sortkey.go create mode 100644 rootchain/consensus/zkverifier/rsmt/verify.go create mode 100644 rootchain/consensus/zkverifier/rsmt/verify_test.go diff --git a/.gitignore b/.gitignore index 753014f5..664e27f1 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ test-coverage.html gosec_report.json .trivycache/ /test-nodes/ +rootchain/consensus/zkverifier/rsmt/testdata/ *.log diff --git a/cli/ubft/cmd/partition_params.go b/cli/ubft/cmd/partition_params.go deleted file mode 100644 index 5bbad053..00000000 --- a/cli/ubft/cmd/partition_params.go +++ /dev/null @@ -1,178 +0,0 @@ -package cmd - -import ( - "fmt" - "strconv" - - "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" - "github.com/unicitynetwork/bft-core/txsystem" - "github.com/unicitynetwork/bft-go-base/types" - "github.com/unicitynetwork/bft-go-base/types/hex" -) - -const ( - moneyInitialBillValue = "initialBillValue" - moneyInitialBillOwnerPredicate = "initialBillOwnerPredicate" - moneyDCMoneySupplyValue = "dcMoneySupplyValue" - - tokensAdminOwnerPredicate = "adminOwnerPredicate" - tokensFeelessMode = "feeless-mode" - - orchestrationOwnerPredicate = "ownerPredicate" -) - -type MoneyPartitionParams struct { - InitialBillValue uint64 - InitialBillOwnerPredicate types.PredicateBytes - DCMoneySupplyValue uint64 // The initial value for Dust Collector money supply. Total money supply is initial bill + DC money supply. 
-}
-
-type OrchestrationPartitionParams struct {
-	OwnerPredicate types.PredicateBytes // the Proof-of-Authority owner predicate
-}
-
-type TokensPartitionParams struct {
-	AdminOwnerPredicate types.PredicateBytes // the admin owner predicate for permissioned mode
-	FeelessMode bool // if true then fees are not charged (applies only in permissioned mode)
-}
-
-func ParseMoneyPartitionParams(shardConf *types.PartitionDescriptionRecord) (*MoneyPartitionParams, error) {
-	var params MoneyPartitionParams
-	for key, valueStr := range shardConf.PartitionParams {
-		switch key {
-		case moneyInitialBillValue:
-			parsedValue, err := parseUint64(key, valueStr)
-			if err != nil {
-				return nil, err
-			}
-			params.InitialBillValue = parsedValue
-		case moneyInitialBillOwnerPredicate:
-			value, err := hex.Decode([]byte(valueStr))
-			if err != nil {
-				return nil, fmt.Errorf("failed to parse param %q value: %w", key, err)
-			}
-			params.InitialBillOwnerPredicate = value
-		case moneyDCMoneySupplyValue:
-			parsedValue, err := parseUint64(key, valueStr)
-			if err != nil {
-				return nil, err
-			}
-			params.DCMoneySupplyValue = parsedValue
-		default:
-			return nil, fmt.Errorf("unexpected partition param: %s", key)
-		}
-	}
-	return &params, nil
-}
-
-func ParseOrchestrationPartitionParams(shardConf txsystem.ShardConf) (*OrchestrationPartitionParams, error) {
-	var params OrchestrationPartitionParams
-	for key, valueStr := range shardConf.GetPartitionParams() {
-		switch key {
-		case orchestrationOwnerPredicate:
-			value, err := hex.Decode([]byte(valueStr))
-			if err != nil {
-				return nil, fmt.Errorf("failed to parse param %q value: %w", key, err)
-			}
-			params.OwnerPredicate = value
-		default:
-			return nil, fmt.Errorf("unexpected partition param: %s", key)
-		}
-	}
-	return &params, nil
-}
-
-func ParseTokensPartitionParams(shardConf txsystem.ShardConf) (*TokensPartitionParams, error) {
-	var params TokensPartitionParams
-	for key, valueStr := range shardConf.GetPartitionParams() {
-		switch key {
-		case tokensAdminOwnerPredicate:
-			{
-				value, err := hex.Decode([]byte(valueStr))
-				if err != nil {
-					return nil, fmt.Errorf("failed to parse param %q value: %w", key, err)
-				}
-				params.AdminOwnerPredicate = value
-			}
-		case tokensFeelessMode:
-			{
-				value, err := strconv.ParseBool(valueStr)
-				if err != nil {
-					return nil, fmt.Errorf("failed to parse param %q value: %w", key, err)
-				}
-				params.FeelessMode = value
-			}
-		default:
-			return nil, fmt.Errorf("unexpected partition param: %s", key)
-		}
-	}
-	return &params, nil
-}
-
-func parseUint64(key, value string) (uint64, error) {
-	ret, err := strconv.ParseUint(value, 10, 64)
-	if err != nil {
-		return 0, fmt.Errorf("failed to parse param %q value: %w", key, err)
-	}
-	return ret, nil
-}
-
-// ProofPartitionParams holds parsed proof configuration from partition params.
-type ProofPartitionParams struct {
-	// ProofType specifies the proof type for the partition.
-	// Empty/none means m-of-n signature verification only.
-	ProofType zkverifier.ProofType
-
-	// VerificationKeyPath is the path to the verification key file.
-	// Required for SP1 proof type.
-	VerificationKeyPath string
-}
-
-// ParseProofPartitionParams extracts proof configuration from partition params.
-// Returns error if the configuration is invalid.
-func ParseProofPartitionParams(params map[string]string) (*ProofPartitionParams, error) { - result := &ProofPartitionParams{ - ProofType: zkverifier.ParseProofTypeFromParams(params), - VerificationKeyPath: zkverifier.ParseVKeyPathFromParams(params), - } - - // Validate the configuration - if result.ProofType != zkverifier.ProofTypeNone && result.ProofType != "" { - if !zkverifier.IsProofTypeAvailable(result.ProofType) { - return nil, fmt.Errorf("proof type %q not available (build with -tags zkverifier_ffi to enable)", result.ProofType) - } - - if result.ProofType == zkverifier.ProofTypeSP1 && result.VerificationKeyPath == "" { - return nil, fmt.Errorf("vkey_path required for SP1 proof type") - } - } - - return result, nil -} - -// IsEnabled returns true if ZK proof verification is enabled for this configuration. -func (p *ProofPartitionParams) IsEnabled() bool { - switch p.ProofType { - case zkverifier.ProofTypeNone, zkverifier.ProofTypeExec, "": - return false - default: - return true - } -} - -// ToPartitionParams converts the proof configuration to a partition params map. 
-func (p *ProofPartitionParams) ToPartitionParams() map[string]string { - if p.ProofType == zkverifier.ProofTypeNone || p.ProofType == "" { - return nil - } - - params := map[string]string{ - zkverifier.ParamProofType: string(p.ProofType), - } - - if p.VerificationKeyPath != "" { - params[zkverifier.ParamVerificationKeyPath] = p.VerificationKeyPath - } - - return params -} diff --git a/network/protocol/certification/block_certification_request.go b/network/protocol/certification/block_certification_request.go index 65c6bd4b..92dc0501 100644 --- a/network/protocol/certification/block_certification_request.go +++ b/network/protocol/certification/block_certification_request.go @@ -22,7 +22,7 @@ type BlockCertificationRequest struct { ShardID types.ShardID `json:"shardId"` NodeID string `json:"nodeId"` InputRecord *types.InputRecord `json:"inputRecord"` - ZkProof []byte `json:"zkProof"` // ZK proof for state transition validation + ZkProof []byte `json:"zkProof"` // (ZK) proof for state transition validation BlockSize uint64 `json:"blockSize"` StateSize uint64 `json:"stateSize"` Signature hex.Bytes `json:"signature"` @@ -91,6 +91,7 @@ func (x BlockCertificationRequest) Bytes() ([]byte, error) { } // UnmarshalCBOR provides backward compatibility for old database format (before ZkProof field was added) +// TODO: remove eventually func (x *BlockCertificationRequest) UnmarshalCBOR(data []byte) error { // Try new format first (8 elements with ZkProof) type newFormat BlockCertificationRequest diff --git a/rootchain/consensus/zkverifier/README.md b/rootchain/consensus/zkverifier/README.md index e84bff03..5a1f01e2 100644 --- a/rootchain/consensus/zkverifier/README.md +++ b/rootchain/consensus/zkverifier/README.md @@ -4,11 +4,64 @@ This directory contains optional Rust FFI components for ZK proof verification. ## Architecture -The ZK verifier supports multiple proof types through a common interface: +The ZK verifier supports multiple proof types through a common interface. 
+Verifiers fall into two families: -- **SP1 Verifier**: Verifies SP1 zkVM proofs using Rust FFI (optional) -- **Light Client Verifier**: Executes full witness validation using Rust FFI (optional) -- **No-Op Verifier**: Disabled verification for testing (always available) +**Pure-Go, always compiled in (no build tag):** +- **No-Op Verifier** (`proof_type` unset or `none`): Disabled verification for testing. +- **Aggregator RSMT Verifier** (`proof_type=aggregator_rsmt_v1`): Verifies a + Radix Sparse Merkle Tree consistency proof produced by the Rust aggregator + (`crates/rsmt/src/consistency.rs`). Recomputes the `prev → new` root + transition for a batch of newly inserted leaves. Implementation lives in + the `rsmt/` sub-package. + +**FFI-gated (`-tags zkverifier_ffi`):** +- **SP1 Verifier** (`proof_type=sp1`): Verifies SP1 zkVM proofs. +- **Light Client Verifier** (`proof_type=light_client`): Executes full witness validation. + +### Aggregator RSMT verifier + +Enable it on a partition by setting `proof_type=aggregator_rsmt_v1` in the +partition's `PartitionParams` when generating the shard config, e.g.: + +```bash +ubft shard-conf generate \ + ... \ + --partition-params "proof_type=aggregator_rsmt_v1" +``` + +Once set, `node.verifyZKProof()` rejects any `BlockCertificationRequest` whose +`ZkProof` envelope does not recompute `InputRecord.PreviousHash → +InputRecord.Hash` for the carried batch — no UC is issued. + +**Wire format of `ZkProof`** (no version tag; the format is selected by +`proof_type`): + +``` +offset size field +0 4 leaf_count (big-endian u32) +4 ... leaves: leaf_count × { key[32] || value_len (u16 BE) || value[value_len] } +... 
to end-of-buf consistency-proof opcode stream (flat bytes) +``` + +Opcodes (post-order stack machine): +- `0x00 || h[32]` — `S`: unchanged subtree hash +- `0x01` — `L`: pop next leaf from the wire batch +- `0x02 || depth` — `N`: inner node at `depth ∈ 0..=255`, pops 2 children + +Invariants enforced by the verifier: +- Leaves MUST be pre-sorted by `SortKey` (per-byte bit-reversal, LSB-first + lexicographic order). Unsorted or duplicate leaves → `ErrLeavesUnsorted`. +- Empty batch ⇒ empty proof and `prev == new`; otherwise `ErrEmptyBatchNonEmptyProof` / + `ErrEmptyBatchRootChange`. +- After stream consumption: stack size 1, leaves fully consumed, bytes fully + consumed, and `stack[0] == (prev, new)`. +- Leaf count is capped at `MaxLeafCount = 1<<20` to prevent OOM from malicious + inputs. Value length is naturally capped at 65 535 by `u16`. + +Hash functions (SHA-256, matching `crates/rsmt/src/hash.rs`): +- `HashLeaf(key, value) = SHA256(0x00 || key[32] || value)` +- `HashNode(left, right, depth) = SHA256(0x01 || depth || left[32] || right[32])` ## Build Configurations diff --git a/rootchain/consensus/zkverifier/aggregator_rsmt_verifier.go b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier.go new file mode 100644 index 00000000..b3c71794 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier.go @@ -0,0 +1,61 @@ +package zkverifier + +import ( + "fmt" + + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier/rsmt" +) + +// AggregatorRSMTVerifier verifies Radix SMT consistency proofs produced by +// the Rust aggregator's `rsmt` crate (proof type "aggregator_rsmt_v1"). +// +// The verifier is pure Go, always compiled in (no build tag, no FFI). It +// recomputes both the pre- and post-insertion SMT roots from the envelope +// and checks them against the claimed InputRecord.PreviousHash / Hash. +// +// See rootchain/consensus/zkverifier/rsmt for the canonical wire format. 
+type AggregatorRSMTVerifier struct{} + +// NewAggregatorRSMTVerifier constructs a stateless RSMT consistency verifier. +// No configuration or verification key is required — the consistency proof +// is self-contained and verified against root hashes from the InputRecord. +func NewAggregatorRSMTVerifier() *AggregatorRSMTVerifier { + return &AggregatorRSMTVerifier{} +} + +// VerifyProof decodes the zk_proof envelope and verifies the +// previousStateRoot → newStateRoot transition. The blockHash argument is +// unused: the aggregator's state transition is validated independently of +// the block header hash, which is covered by the normal InputRecord rules. +// +// An empty previousStateRoot (len == 0) is reserved for genesis / sync UCs +// and is filtered out earlier in Node.verifyZKProof, so both roots are +// expected to be 32 bytes here in practice. +func (v *AggregatorRSMTVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, _ []byte) error { + env, err := rsmt.DecodeEnvelope(proof) + if err != nil { + return fmt.Errorf("%w: %v", ErrInvalidProofFormat, err) + } + oldRoot, err := rsmt.RootFromBytes(previousStateRoot) + if err != nil { + return fmt.Errorf("%w: previous state root: %v", ErrInvalidProofFormat, err) + } + newRoot, err := rsmt.RootFromBytes(newStateRoot) + if err != nil { + return fmt.Errorf("%w: new state root: %v", ErrInvalidProofFormat, err) + } + if err := rsmt.Verify(env, oldRoot, newRoot); err != nil { + return fmt.Errorf("%w: %v", ErrProofVerificationFailed, err) + } + return nil +} + +// ProofType returns ProofTypeAggregatorRSMTv1. +func (*AggregatorRSMTVerifier) ProofType() ProofType { + return ProofTypeAggregatorRSMTv1 +} + +// IsEnabled reports that aggregator RSMT verification is active. 
+func (*AggregatorRSMTVerifier) IsEnabled() bool { + return true +} diff --git a/rootchain/consensus/zkverifier/aggregator_rsmt_verifier_test.go b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier_test.go new file mode 100644 index 00000000..38341fd9 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier_test.go @@ -0,0 +1,112 @@ +package zkverifier + +import ( + "bytes" + "errors" + "testing" + + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier/rsmt" + "github.com/unicitynetwork/bft-go-base/types" +) + +func TestAggregatorRSMTVerifier_SingleLeafIntoEmptyTree(t *testing.T) { + var k [32]byte + k[0] = 0x05 + v := []byte("hello") + leafHash := rsmt.HashLeaf(k, v) + + env, err := rsmt.EncodeEnvelope( + []rsmt.Leaf{{Key: k, Value: v}}, + []byte{0x01}, // L + ) + if err != nil { + t.Fatal(err) + } + + ver := NewAggregatorRSMTVerifier() + if !ver.IsEnabled() { + t.Fatal("expected IsEnabled()") + } + if ver.ProofType() != ProofTypeAggregatorRSMTv1 { + t.Fatalf("unexpected ProofType %q", ver.ProofType()) + } + + // Genesis-to-first-leaf: prev nil, new = hashLeaf. + if err := ver.VerifyProof(env, nil, leafHash[:], nil); err != nil { + t.Fatalf("VerifyProof: %v", err) + } + + // Wrong new root. + bad := make([]byte, 32) + if err := ver.VerifyProof(env, nil, bad, nil); !errors.Is(err, ErrProofVerificationFailed) { + t.Fatalf("wrong root: got %v, want ErrProofVerificationFailed", err) + } + + // Malformed envelope. + if err := ver.VerifyProof([]byte{0x00}, nil, leafHash[:], nil); !errors.Is(err, ErrInvalidProofFormat) { + t.Fatalf("malformed envelope: got %v, want ErrInvalidProofFormat", err) + } + + // Wrong-length previous root. 
+ if err := ver.VerifyProof(env, []byte{1, 2, 3}, leafHash[:], nil); !errors.Is(err, ErrInvalidProofFormat) { + t.Fatalf("bad prev root length: got %v, want ErrInvalidProofFormat", err) + } +} + +func TestAggregatorRSMTVerifier_TwoLeaves(t *testing.T) { + var k0, k1 [32]byte + k0[0] = 0x00 // bit 0 = 0 → left under depth-0 split + k1[0] = 0x01 // bit 0 = 1 → right + v0 := []byte("v0") + v1 := []byte("v1") + + h0 := rsmt.HashLeaf(k0, v0) + h1 := rsmt.HashLeaf(k1, v1) + newRoot := rsmt.HashNode(h0, h1, 0) + + var proof bytes.Buffer + proof.WriteByte(0x01) // L (k0) + proof.WriteByte(0x01) // L (k1) + proof.WriteByte(0x02) // N + proof.WriteByte(0x00) // depth=0 + + env, err := rsmt.EncodeEnvelope( + []rsmt.Leaf{{Key: k0, Value: v0}, {Key: k1, Value: v1}}, + proof.Bytes(), + ) + if err != nil { + t.Fatal(err) + } + + ver := NewAggregatorRSMTVerifier() + if err := ver.VerifyProof(env, nil, newRoot[:], nil); err != nil { + t.Fatalf("VerifyProof: %v", err) + } +} + +func TestRegistry_AggregatorRSMT(t *testing.T) { + reg := NewRegistry() + params := map[string]string{ParamProofType: string(ProofTypeAggregatorRSMTv1)} + v, err := reg.GetVerifier(types.PartitionID(42), types.ShardID{}, 0, params) + if err != nil { + t.Fatalf("GetVerifier: %v", err) + } + if _, ok := v.(*AggregatorRSMTVerifier); !ok { + t.Fatalf("registry returned %T, want *AggregatorRSMTVerifier", v) + } + if !v.IsEnabled() { + t.Fatalf("verifier not enabled") + } + if v.ProofType() != ProofTypeAggregatorRSMTv1 { + t.Fatalf("wrong proof type %q", v.ProofType()) + } + + // Cached on repeat call. 
+ v2, err := reg.GetVerifier(types.PartitionID(42), types.ShardID{}, 0, params) + if err != nil { + t.Fatal(err) + } + if v != v2 { + t.Fatalf("registry did not cache verifier") + } +} diff --git a/rootchain/consensus/zkverifier/capabilities_ffi.go b/rootchain/consensus/zkverifier/capabilities_ffi.go index f2c9db1e..79f24214 100644 --- a/rootchain/consensus/zkverifier/capabilities_ffi.go +++ b/rootchain/consensus/zkverifier/capabilities_ffi.go @@ -6,7 +6,7 @@ package zkverifier // in the current build. With FFI, SP1 and LightClient are available. func IsProofTypeAvailable(pt ProofType) bool { switch pt { - case ProofTypeSP1, ProofTypeLightClient, ProofTypeExec, ProofTypeNone, "": + case ProofTypeSP1, ProofTypeLightClient, ProofTypeAggregatorRSMTv1, ProofTypeExec, ProofTypeNone, "": return true default: return false @@ -14,9 +14,10 @@ func IsProofTypeAvailable(pt ProofType) bool { } // AvailableProofTypes returns the list of proof types available in the current build. -// With FFI, SP1 and LightClient are available (besides m-of-n signature mode). +// With FFI, SP1 and LightClient are available (besides m-of-n signature mode +// and the pure-Go aggregator RSMT verifier). func AvailableProofTypes() []ProofType { - return []ProofType{ProofTypeSP1, ProofTypeLightClient, ProofTypeExec} + return []ProofType{ProofTypeSP1, ProofTypeLightClient, ProofTypeAggregatorRSMTv1, ProofTypeExec} } // IsFFIAvailable returns whether FFI support is built in. diff --git a/rootchain/consensus/zkverifier/capabilities_stub.go b/rootchain/consensus/zkverifier/capabilities_stub.go index 1837b25e..2349f798 100644 --- a/rootchain/consensus/zkverifier/capabilities_stub.go +++ b/rootchain/consensus/zkverifier/capabilities_stub.go @@ -3,10 +3,11 @@ package zkverifier // IsProofTypeAvailable returns whether the given proof type is available -// in the current build. Without FFI, only Exec (no-op) is available. +// in the current build. 
Without FFI, the pure-Go aggregator RSMT verifier +// is available alongside the m-of-n "exec" mode. func IsProofTypeAvailable(pt ProofType) bool { switch pt { - case ProofTypeExec, ProofTypeNone, "": + case ProofTypeAggregatorRSMTv1, ProofTypeExec, ProofTypeNone, "": return true default: return false @@ -14,9 +15,9 @@ func IsProofTypeAvailable(pt ProofType) bool { } // AvailableProofTypes returns the list of proof types available in the current build. -// Without FFI, only Exec is available (besides m-of-n signature mode). +// Without FFI, the pure-Go aggregator RSMT verifier and Exec (m-of-n) are available. func AvailableProofTypes() []ProofType { - return []ProofType{ProofTypeExec} + return []ProofType{ProofTypeAggregatorRSMTv1, ProofTypeExec} } // IsFFIAvailable returns whether FFI support is built in. diff --git a/rootchain/consensus/zkverifier/partition_config.go b/rootchain/consensus/zkverifier/partition_config.go index 17e98bf3..655fef7b 100644 --- a/rootchain/consensus/zkverifier/partition_config.go +++ b/rootchain/consensus/zkverifier/partition_config.go @@ -6,7 +6,7 @@ import "strconv" // These are stored in PartitionDescriptionRecord.PartitionParams. const ( // ParamProofType specifies the proof type for the partition. - // Valid values: "sp1", "light_client", "exec" + // Valid values: "sp1", "light_client", "aggregator_rsmt_v1", "exec" // If empty or not set, m-of-n signature verification only (no ZK proof required). ParamProofType = "proof_type" diff --git a/rootchain/consensus/zkverifier/registry.go b/rootchain/consensus/zkverifier/registry.go index 7ba56239..c32488ba 100644 --- a/rootchain/consensus/zkverifier/registry.go +++ b/rootchain/consensus/zkverifier/registry.go @@ -100,6 +100,11 @@ func (r *Registry) createVerifier(params map[string]string) (ZKVerifier, error) } return NewLightClientVerifier(chainID) + case ProofTypeAggregatorRSMTv1: + // Pure-Go verifier: no vkey, no chain_id. 
Consistency proof is + // self-contained and recomputes roots from the envelope. + return NewAggregatorRSMTVerifier(), nil + default: return nil, fmt.Errorf("unknown proof type: %s", proofType) } diff --git a/rootchain/consensus/zkverifier/rsmt/doc.go b/rootchain/consensus/zkverifier/rsmt/doc.go new file mode 100644 index 00000000..0347c6b8 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/doc.go @@ -0,0 +1,30 @@ +// Package rsmt implements verification of consistency proofs produced by the +// Rust Radix Sparse Merkle Tree library (`crates/rsmt` in the aggregator +// repository). It is the Go counterpart of `crates/rsmt/src/consistency.rs`. +// +// The verifier consumes a compact binary envelope (`DecodeEnvelope`) that +// carries the batch of newly inserted leaves followed by the flat +// post-order opcode stream, and recomputes the old and new SMT roots with a +// stack machine (`Verify`). +// +// Wire format (aggregator_rsmt_v1): +// +// offset size field +// 0 4 leaf_count (big-endian u32) +// 4 ... leaves: leaf_count × { key[32] || value_len (u16 BE) || value[value_len] } +// ... ... opcode stream (flat bytes, runs to end of buffer) +// +// Leaves must be pre-sorted by SortKey (per-byte bit-reversed key, LSB-first +// traversal order), this package does not reorder them. 
+// +// Opcodes: +// +// S(h) 0x00 || h[32] - unchanged subtree hash +// L 0x01 - new leaf; next batch entry +// N(d) 0x02 || d - internal node at depth d, pops two children +// +// Hashes (SHA-256, matching `crates/rsmt/src/hash.rs`): +// +// hash_leaf(key, value) = SHA256(0x00 || key || value) +// hash_node(l, r, d) = SHA256(0x01 || d || l || r) +package rsmt diff --git a/rootchain/consensus/zkverifier/rsmt/envelope.go b/rootchain/consensus/zkverifier/rsmt/envelope.go new file mode 100644 index 00000000..d3f24387 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/envelope.go @@ -0,0 +1,105 @@ +package rsmt + +import ( + "encoding/binary" + "errors" + "fmt" + "math" +) + +// MaxLeafCount bounds the number of leaves in a single envelope to prevent +// pathological allocations from malicious inputs. A batch of 1M leaves would +// already dwarf any realistic aggregator round. +const MaxLeafCount = 1 << 20 + +// Leaf is a single (key, value) pair in the batch carried by the envelope. +// Value is a slice into the original envelope buffer; callers must copy it +// if they need to retain it after the buffer is reused. +type Leaf struct { + Key [32]byte + Value []byte +} + +// Envelope is the decoded contents of a zk_proof field for the +// `aggregator_rsmt_v1` proof type. +// +// Leaves are in wire order (caller-sorted by SortKey); Proof is the flat +// opcode stream. See package doc for the full wire format. +type Envelope struct { + Leaves []Leaf + Proof []byte +} + +// Envelope decoding errors. +var ( + ErrEnvelopeTruncated = errors.New("rsmt: envelope truncated") + ErrEnvelopeTooManyLeaves = errors.New("rsmt: envelope leaf count exceeds maximum") +) + +// DecodeEnvelope parses the wire format described in the package doc. +// It returns a view over the input buffer: Leaf values alias into b. 
+func DecodeEnvelope(b []byte) (*Envelope, error) { + if len(b) < 4 { + return nil, fmt.Errorf("%w: missing leaf_count", ErrEnvelopeTruncated) + } + count := binary.BigEndian.Uint32(b[0:4]) + if count > MaxLeafCount { + return nil, fmt.Errorf("%w: %d > %d", ErrEnvelopeTooManyLeaves, count, MaxLeafCount) + } + pos := 4 + leaves := make([]Leaf, 0, count) + for i := uint32(0); i < count; i++ { + if pos+32+2 > len(b) { + return nil, fmt.Errorf("%w: leaf %d header", ErrEnvelopeTruncated, i) + } + var key [32]byte + copy(key[:], b[pos:pos+32]) + pos += 32 + vlen := int(binary.BigEndian.Uint16(b[pos : pos+2])) + pos += 2 + if pos+vlen > len(b) { + return nil, fmt.Errorf("%w: leaf %d value (need %d, have %d)", + ErrEnvelopeTruncated, i, vlen, len(b)-pos) + } + leaves = append(leaves, Leaf{Key: key, Value: b[pos : pos+vlen]}) + pos += vlen + } + return &Envelope{Leaves: leaves, Proof: b[pos:]}, nil +} + +// EncodeEnvelope produces the wire format for the given (already sorted) +// leaves and flat opcode stream. Provided primarily for tests and fixtures; +// production envelopes are built by the Rust aggregator. +func EncodeEnvelope(leaves []Leaf, proof []byte) ([]byte, error) { + numLeaves := len(leaves) + if numLeaves > MaxLeafCount { + return nil, fmt.Errorf("%w: %d > %d", ErrEnvelopeTooManyLeaves, numLeaves, MaxLeafCount) + } + size := 4 + len(proof) + for i := range leaves { + vlen := len(leaves[i].Value) + if vlen > math.MaxUint16 { + return nil, fmt.Errorf("rsmt: leaf %d value length %d exceeds u16 max", + i, vlen) + } + size += 32 + 2 + vlen + } + out := make([]byte, 0, size) + var hdr [4]byte + binary.BigEndian.PutUint32(hdr[:], uint32(numLeaves)) + out = append(out, hdr[:]...) + for i := range leaves { + out = append(out, leaves[i].Key[:]...) 
+ var lhdr [2]byte + vlen := len(leaves[i].Value) + if vlen > math.MaxUint16 { + return nil, fmt.Errorf("rsmt: leaf %d value length %d exceeds u16 max", + i, vlen) + } + binary.BigEndian.PutUint16(lhdr[:], uint16(vlen)) + out = append(out, lhdr[:]...) + out = append(out, leaves[i].Value...) + } + out = append(out, proof...) + return out, nil +} diff --git a/rootchain/consensus/zkverifier/rsmt/fixtures_test.go b/rootchain/consensus/zkverifier/rsmt/fixtures_test.go new file mode 100644 index 00000000..9b1c1817 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/fixtures_test.go @@ -0,0 +1,86 @@ +package rsmt + +import ( + "encoding/hex" + "encoding/json" + "os" + "path/filepath" + "testing" +) + +// TestCrossLanguageFixtures loads envelope fixtures produced by the Rust +// side (crates/rsmt/examples/dump_envelope_fixtures.rs) and verifies that +// the Go implementation accepts each one. This is the authoritative check +// that the two implementations stay wire-compatible. +// +// Regenerate with: +// +// cargo run --example dump_envelope_fixtures -- \ +// bft-core/rootchain/consensus/zkverifier/rsmt/testdata +func TestCrossLanguageFixtures(t *testing.T) { + path := filepath.Join("testdata", "fixtures.json") + raw, err := os.ReadFile(path) + if err != nil { + t.Fatalf("read %s: %v", path, err) + } + + var doc struct { + Fixtures []struct { + Name string `json:"name"` + PrevRoot string `json:"prev_root"` + NewRoot string `json:"new_root"` + Envelope string `json:"envelope"` + } `json:"fixtures"` + } + if err := json.Unmarshal(raw, &doc); err != nil { + t.Fatalf("parse fixtures.json: %v", err) + } + if len(doc.Fixtures) == 0 { + t.Fatal("no fixtures loaded") + } + + decodeRoot := func(s string) (Root, error) { + if s == "" { + return Root{}, nil + } + b, err := hex.DecodeString(s) + if err != nil { + return Root{}, err + } + return RootFromBytes(b) + } + + for _, f := range doc.Fixtures { + t.Run(f.Name, func(t *testing.T) { + envBytes, err := 
hex.DecodeString(f.Envelope) + if err != nil { + t.Fatalf("decode envelope: %v", err) + } + env, err := DecodeEnvelope(envBytes) + if err != nil { + t.Fatalf("DecodeEnvelope: %v", err) + } + prev, err := decodeRoot(f.PrevRoot) + if err != nil { + t.Fatalf("prev_root: %v", err) + } + newR, err := decodeRoot(f.NewRoot) + if err != nil { + t.Fatalf("new_root: %v", err) + } + if err := Verify(env, prev, newR); err != nil { + t.Fatalf("Verify(%s): %v", f.Name, err) + } + + // Round-trip: re-encode the decoded envelope and confirm it + // matches the original bytes exactly, locking the wire format. + reenc, err := EncodeEnvelope(env.Leaves, env.Proof) + if err != nil { + t.Fatalf("re-encode: %v", err) + } + if string(reenc) != string(envBytes) { + t.Fatalf("envelope re-encode mismatch for %s", f.Name) + } + }) + } +} diff --git a/rootchain/consensus/zkverifier/rsmt/hash.go b/rootchain/consensus/zkverifier/rsmt/hash.go new file mode 100644 index 00000000..3c791462 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/hash.go @@ -0,0 +1,27 @@ +package rsmt + +import "crypto/sha256" + +// HashLeaf computes SHA256(0x00 || key || value). +// Matches `Sha256Hasher::hash_leaf` in crates/rsmt/src/hash.rs. +func HashLeaf(key [32]byte, value []byte) [32]byte { + h := sha256.New() + h.Write([]byte{0x00}) + h.Write(key[:]) + h.Write(value) + var out [32]byte + h.Sum(out[:0]) + return out +} + +// HashNode computes SHA256(0x01 || depth || left || right). +// Matches `Sha256Hasher::hash_node` in crates/rsmt/src/hash.rs. 
+func HashNode(left, right [32]byte, depth uint8) [32]byte { + h := sha256.New() + h.Write([]byte{0x01, depth}) + h.Write(left[:]) + h.Write(right[:]) + var out [32]byte + h.Sum(out[:0]) + return out +} diff --git a/rootchain/consensus/zkverifier/rsmt/sortkey.go b/rootchain/consensus/zkverifier/rsmt/sortkey.go new file mode 100644 index 00000000..89d66f66 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/sortkey.go @@ -0,0 +1,40 @@ +package rsmt + +// bitReverseTable reverses the bits within a single byte. +// bitReverseTable[0b0000_0001] == 0b1000_0000, etc. +var bitReverseTable [256]byte + +func init() { + for i := 0; i < 256; i++ { + var r byte + for bit := 0; bit < 8; bit++ { + if (i>>bit)&1 != 0 { + r |= 1 << (7 - bit) + } + } + bitReverseTable[i] = r + } +} + +// SortKey converts a 256-bit SMT key into its LSB-first lexicographic sort +// order by bit-reversing each byte in place (no byte-order reversal). +// Matches `get_sort_key` in crates/rsmt/src/path.rs. +func SortKey(key [32]byte) [32]byte { + var out [32]byte + for i := 0; i < 32; i++ { + out[i] = bitReverseTable[key[i]] + } + return out +} + +// sortKeyLess reports whether SortKey(a) < SortKey(b). +func sortKeyLess(a, b [32]byte) bool { + sa := SortKey(a) + sb := SortKey(b) + for i := 0; i < 32; i++ { + if sa[i] != sb[i] { + return sa[i] < sb[i] + } + } + return false +} diff --git a/rootchain/consensus/zkverifier/rsmt/verify.go b/rootchain/consensus/zkverifier/rsmt/verify.go new file mode 100644 index 00000000..854a42b6 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/verify.go @@ -0,0 +1,171 @@ +package rsmt + +import ( + "bytes" + "errors" + "fmt" +) + +// Verification errors. 
+var ( + ErrBadOpcode = errors.New("rsmt: bad opcode") + ErrOpcodeTruncated = errors.New("rsmt: opcode stream truncated") + ErrStackUnderflow = errors.New("rsmt: stack underflow") + ErrStackFinal = errors.New("rsmt: stack not reduced to single element") + ErrRootMismatch = errors.New("rsmt: recomputed root does not match claimed root") + ErrBatchUnderrun = errors.New("rsmt: opcode stream references more leaves than provided") + ErrBatchUnused = errors.New("rsmt: not all leaves consumed by opcode stream") + ErrPostStateMissing = errors.New("rsmt: N opcode with missing post-state child") + ErrLeavesUnsorted = errors.New("rsmt: leaves not sorted by SortKey") + ErrEmptyBatchNonEmptyProof = errors.New("rsmt: empty batch but non-empty proof") + ErrEmptyBatchRootChange = errors.New("rsmt: empty batch but root changed") +) + +// Root represents an optional 32-byte SMT root: Set == false models the +// None case (empty tree). Matches `Option<[u8;32]>` on the Rust side. +type Root struct { + Hash [32]byte + Set bool +} + +// RootFromBytes constructs a Root from a 0- or 32-byte slice. Empty slice or +// nil is treated as "no root" (empty tree); any other length is an error. +func RootFromBytes(b []byte) (Root, error) { + if len(b) == 0 { + return Root{}, nil + } + if len(b) != 32 { + return Root{}, fmt.Errorf("rsmt: root must be 32 bytes, got %d", len(b)) + } + var r Root + copy(r.Hash[:], b) + r.Set = true + return r, nil +} + +// stackEntry is a (pre_hash, post_hash) pair; flags track the Option<..> side. +// Matches `(Option<[u8;32]>, Option<[u8;32]>)` in crates/rsmt/src/consistency.rs. +type stackEntry struct { + pre, post [32]byte + preSet, postSet bool +} + +// Verify recomputes the old and new SMT roots from the envelope and checks +// them against oldRoot / newRoot. Returns nil iff the envelope is a valid +// consistency proof for the claimed transition. +// +// Leaves in env.Leaves MUST already be sorted by SortKey (with no duplicates). 
+// The verifier performs a single linear pre-check to enforce this invariant. +func Verify(env *Envelope, oldRoot, newRoot Root) error { + if env == nil { + return errors.New("rsmt: nil envelope") + } + + // Empty batch: must have empty proof and unchanged root. + if len(env.Leaves) == 0 { + if len(env.Proof) != 0 { + return ErrEmptyBatchNonEmptyProof + } + if oldRoot.Set != newRoot.Set || (oldRoot.Set && oldRoot.Hash != newRoot.Hash) { + return ErrEmptyBatchRootChange + } + return nil + } + + // Assert leaves are in SortKey order (also rejects duplicates). + // TODO: remove if implementation is stable + for i := 1; i < len(env.Leaves); i++ { + if !sortKeyLess(env.Leaves[i-1].Key, env.Leaves[i].Key) { + return fmt.Errorf("%w: at index %d", ErrLeavesUnsorted, i) + } + } + + stack := make([]stackEntry, 0, 64) + proof := env.Proof + bi := 0 + pi := 0 + + for pi < len(proof) { + op := proof[pi] + pi++ + switch op { + case 0x00: // S(h): push (h, h) + if pi+32 > len(proof) { + return fmt.Errorf("%w: S payload", ErrOpcodeTruncated) + } + var h [32]byte + copy(h[:], proof[pi:pi+32]) + pi += 32 + stack = append(stack, stackEntry{pre: h, post: h, preSet: true, postSet: true}) + + case 0x01: // L: consume next leaf + if bi >= len(env.Leaves) { + return ErrBatchUnderrun + } + leaf := &env.Leaves[bi] + bi++ + lh := HashLeaf(leaf.Key, leaf.Value) + stack = append(stack, stackEntry{post: lh, postSet: true}) + + case 0x02: // N(depth): pop right, pop left, push combined + if pi >= len(proof) { + return fmt.Errorf("%w: N depth", ErrOpcodeTruncated) + } + depth := proof[pi] + pi++ + if len(stack) < 2 { + return ErrStackUnderflow + } + right := stack[len(stack)-1] + left := stack[len(stack)-2] + stack = stack[:len(stack)-2] + + // pre-state: None children propagate their sibling's pre-hash. + var combined stackEntry + switch { + case !left.preSet && !right.preSet: + // Both children new — no pre-state hash at this level. 
+ case !left.preSet: + combined.pre = right.pre + combined.preSet = right.preSet + case !right.preSet: + combined.pre = left.pre + combined.preSet = left.preSet + default: + combined.pre = HashNode(left.pre, right.pre, depth) + combined.preSet = true + } + + // post-state: both children MUST have a post-hash. + if !left.postSet || !right.postSet { + return ErrPostStateMissing + } + combined.post = HashNode(left.post, right.post, depth) + combined.postSet = true + + stack = append(stack, combined) + + default: + return fmt.Errorf("%w: 0x%02x", ErrBadOpcode, op) + } + } + + if bi != len(env.Leaves) { + return ErrBatchUnused + } + if len(stack) != 1 { + return ErrStackFinal + } + + top := stack[0] + if top.preSet != oldRoot.Set || top.postSet != newRoot.Set { + return ErrRootMismatch + } + if top.preSet && !bytes.Equal(top.pre[:], oldRoot.Hash[:]) { + return ErrRootMismatch + } + if top.postSet && !bytes.Equal(top.post[:], newRoot.Hash[:]) { + return ErrRootMismatch + } + return nil +} diff --git a/rootchain/consensus/zkverifier/rsmt/verify_test.go b/rootchain/consensus/zkverifier/rsmt/verify_test.go new file mode 100644 index 00000000..11c65aa2 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/verify_test.go @@ -0,0 +1,338 @@ +package rsmt + +import ( + "bytes" + "errors" + "testing" +) + +// Helpers to build opcode streams. + +func opS(h [32]byte) []byte { + out := make([]byte, 33) + out[0] = 0x00 + copy(out[1:], h[:]) + return out +} + +func opL() []byte { return []byte{0x01} } + +func opN(depth uint8) []byte { return []byte{0x02, depth} } + +func key(b byte) [32]byte { + var k [32]byte + k[0] = b + return k +} + +// TestSortKey_MatchesRust locks in the bit-reverse-per-byte behavior. 
+func TestSortKey_MatchesRust(t *testing.T) { + var k [32]byte + k[0] = 0b0000_0001 // bit 0 set + sk := SortKey(k) + if sk[0] != 0b1000_0000 { + t.Fatalf("SortKey bit 0 set → sk[0]=%#b, want 0b1000_0000", sk[0]) + } + if sk[31] != 0 { + t.Fatalf("SortKey trailing byte = %d, want 0", sk[31]) + } +} + +func TestSortKey_Ordering(t *testing.T) { + // Two keys differing only at bit 0: bit-0-clear sorts before bit-0-set. + k0 := [32]byte{} + k1 := [32]byte{} + k1[0] = 0x01 + if !sortKeyLess(k0, k1) { + t.Fatalf("expected sortKeyLess(k0, k1)") + } + if sortKeyLess(k1, k0) { + t.Fatalf("expected !sortKeyLess(k1, k0)") + } +} + +// TestVerify_EmptyBatch exercises the short-circuit: empty envelope, unchanged root. +func TestVerify_EmptyBatch(t *testing.T) { + env := &Envelope{} + var h [32]byte + for i := range h { + h[i] = 0xab + } + r := Root{Hash: h, Set: true} + if err := Verify(env, r, r); err != nil { + t.Fatalf("empty batch, equal roots: %v", err) + } + + // Empty envelope with different roots must fail. + var h2 [32]byte + h2[0] = 0xff + if err := Verify(env, r, Root{Hash: h2, Set: true}); !errors.Is(err, ErrEmptyBatchRootChange) { + t.Fatalf("empty batch, different roots: got %v, want ErrEmptyBatchRootChange", err) + } + + // Empty batch but non-empty proof must fail. + env2 := &Envelope{Proof: []byte{0x00}} + if err := Verify(env2, r, r); !errors.Is(err, ErrEmptyBatchNonEmptyProof) { + t.Fatalf("empty batch, non-empty proof: got %v, want ErrEmptyBatchNonEmptyProof", err) + } +} + +// TestVerify_SingleLeafIntoEmptyTree inserts a single leaf into an empty tree. +// The proof stream is just [L]; after running, stack top is (None, HashLeaf(k,v)). 
+func TestVerify_SingleLeafIntoEmptyTree(t *testing.T) { + k := key(0x05) + v := []byte("hello") + expected := HashLeaf(k, v) + + env := &Envelope{ + Leaves: []Leaf{{Key: k, Value: v}}, + Proof: opL(), + } + if err := Verify(env, Root{}, Root{Hash: expected, Set: true}); err != nil { + t.Fatalf("single leaf: %v", err) + } + + // Wrong new root → ErrRootMismatch. + var bad [32]byte + if err := Verify(env, Root{}, Root{Hash: bad, Set: true}); !errors.Is(err, ErrRootMismatch) { + t.Fatalf("wrong new root: got %v, want ErrRootMismatch", err) + } + // Wrong old root (claims tree non-empty) → ErrRootMismatch. + if err := Verify(env, Root{Hash: bad, Set: true}, Root{Hash: expected, Set: true}); !errors.Is(err, ErrRootMismatch) { + t.Fatalf("wrong old root: got %v, want ErrRootMismatch", err) + } +} + +// TestVerify_TwoLeavesIntoEmptyTree: two leaves diverging at bit 0. +// Proof stream is [L, L, N(depth=0)]. The split bit (depth) is the index of +// the first set bit in the XOR of the two sorted keys, which is 0 here +// (k0=0x00 vs k1=0x01 → bit 0 differs). The left child is the leaf whose +// bit 0 is 0 (k0); right child is k1. +func TestVerify_TwoLeavesIntoEmptyTree(t *testing.T) { + k0 := key(0x00) // bit 0 = 0 → goes left under a depth=0 split + k1 := key(0x01) // bit 0 = 1 → goes right + v0 := []byte("v0") + v1 := []byte("v1") + + // Leaves must be in SortKey order. bit-0-clear sorts before bit-0-set. + leaves := []Leaf{{Key: k0, Value: v0}, {Key: k1, Value: v1}} + + l0 := HashLeaf(k0, v0) + l1 := HashLeaf(k1, v1) + root := HashNode(l0, l1, 0) + + // Build [L, L, N(0)] + var proof bytes.Buffer + proof.Write(opL()) + proof.Write(opL()) + proof.Write(opN(0)) + + env := &Envelope{Leaves: leaves, Proof: proof.Bytes()} + if err := Verify(env, Root{}, Root{Hash: root, Set: true}); err != nil { + t.Fatalf("two leaves: %v", err) + } +} + +// TestVerify_UnsortedLeavesRejected feeds two leaves in reverse SortKey order. 
+func TestVerify_UnsortedLeavesRejected(t *testing.T) { + k0 := key(0x00) + k1 := key(0x01) + leaves := []Leaf{ + {Key: k1, Value: []byte("v1")}, // sorts AFTER k0 — wrong order + {Key: k0, Value: []byte("v0")}, + } + var proof bytes.Buffer + proof.Write(opL()) + proof.Write(opL()) + proof.Write(opN(0)) + env := &Envelope{Leaves: leaves, Proof: proof.Bytes()} + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrLeavesUnsorted) { + t.Fatalf("unsorted: got %v, want ErrLeavesUnsorted", err) + } +} + +// TestVerify_DuplicateLeavesRejected: duplicate key violates strict ordering. +func TestVerify_DuplicateLeavesRejected(t *testing.T) { + k := key(0x01) + env := &Envelope{ + Leaves: []Leaf{ + {Key: k, Value: []byte("a")}, + {Key: k, Value: []byte("b")}, + }, + Proof: append(append(opL(), opL()...), opN(0)...), + } + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrLeavesUnsorted) { + t.Fatalf("duplicate key: got %v, want ErrLeavesUnsorted", err) + } +} + +// TestVerify_InsertIntoExistingTree: existing single leaf at key(0x02); +// new leaf at key(0x01). After insertion the tree is a 2-leaf tree; the +// consistency proof is [L, S(h_existing), N(depth=0)]. +func TestVerify_InsertIntoExistingTree(t *testing.T) { + kOld := key(0x02) // bit 0 = 0 → left + vOld := []byte("old") + // Actually, key(0x02) has byte0 = 0b0000_0010, bit 0 (LSB) = 0 → left. + kNew := key(0x01) // bit 0 = 1 → right + vNew := []byte("new") + + hOld := HashLeaf(kOld, vOld) + hNew := HashLeaf(kNew, vNew) + oldRoot := hOld // single-leaf tree + newRoot := HashNode(hOld, hNew, 0) + + // Sort new leaves by SortKey (trivially one leaf). + leaves := []Leaf{{Key: kNew, Value: vNew}} + + // Proof order: left subtree first (kOld, bit 0 = 0, unchanged → S), + // then right subtree (kNew, bit 0 = 1, new leaf → L), then N(0). 
+ var proof bytes.Buffer + proof.Write(opS(hOld)) + proof.Write(opL()) + proof.Write(opN(0)) + + env := &Envelope{Leaves: leaves, Proof: proof.Bytes()} + if err := Verify(env, Root{Hash: oldRoot, Set: true}, Root{Hash: newRoot, Set: true}); err != nil { + t.Fatalf("insert into existing tree: %v", err) + } +} + +// TestVerify_BatchUnderrun: proof references more leaves than provided. +func TestVerify_BatchUnderrun(t *testing.T) { + env := &Envelope{ + Leaves: []Leaf{{Key: key(0x01), Value: []byte("v")}}, + Proof: append(opL(), opL()...), + } + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrBatchUnderrun) { + t.Fatalf("got %v, want ErrBatchUnderrun", err) + } +} + +// TestVerify_BatchUnused: more leaves than opcode L references. +func TestVerify_BatchUnused(t *testing.T) { + env := &Envelope{ + Leaves: []Leaf{ + {Key: key(0x00), Value: []byte("a")}, + {Key: key(0x01), Value: []byte("b")}, + }, + Proof: opL(), + } + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrBatchUnused) { + t.Fatalf("got %v, want ErrBatchUnused", err) + } +} + +// singleLeafEnvelope returns an envelope with one leaf so opcode-level tests +// bypass the empty-batch short-circuit. +func singleLeafEnvelope(proof []byte) *Envelope { + return &Envelope{ + Leaves: []Leaf{{Key: key(0x01), Value: []byte("v")}}, + Proof: proof, + } +} + +// TestVerify_TruncatedOpcodeStream: S without its 32-byte payload. +func TestVerify_TruncatedOpcodeStream(t *testing.T) { + env := singleLeafEnvelope([]byte{0x00, 0x01, 0x02}) // S, truncated + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrOpcodeTruncated) { + t.Fatalf("got %v, want ErrOpcodeTruncated", err) + } +} + +// TestVerify_BadOpcode. 
+func TestVerify_BadOpcode(t *testing.T) { + env := singleLeafEnvelope([]byte{0x7f}) + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrBadOpcode) { + t.Fatalf("got %v, want ErrBadOpcode", err) + } +} + +// TestVerify_StackUnderflow: N without two children. +func TestVerify_StackUnderflow(t *testing.T) { + env := singleLeafEnvelope(opN(0)) + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrStackUnderflow) { + t.Fatalf("got %v, want ErrStackUnderflow", err) + } +} + +func TestEnvelope_RoundTrip(t *testing.T) { + leaves := []Leaf{ + {Key: key(0x00), Value: []byte("alpha")}, + {Key: key(0x01), Value: []byte{}}, + {Key: key(0x02), Value: bytes.Repeat([]byte{0xAB}, 1234)}, + } + proof := []byte{0x01, 0x01, 0x01, 0x02, 0x05} + buf, err := EncodeEnvelope(leaves, proof) + if err != nil { + t.Fatal(err) + } + env, err := DecodeEnvelope(buf) + if err != nil { + t.Fatal(err) + } + if len(env.Leaves) != len(leaves) { + t.Fatalf("leaf count: got %d, want %d", len(env.Leaves), len(leaves)) + } + for i := range leaves { + if env.Leaves[i].Key != leaves[i].Key { + t.Errorf("leaf %d key mismatch", i) + } + if !bytes.Equal(env.Leaves[i].Value, leaves[i].Value) { + t.Errorf("leaf %d value mismatch", i) + } + } + if !bytes.Equal(env.Proof, proof) { + t.Errorf("proof mismatch") + } +} + +func TestEnvelope_Truncated(t *testing.T) { + if _, err := DecodeEnvelope([]byte{0x00, 0x00, 0x00}); !errors.Is(err, ErrEnvelopeTruncated) { + t.Fatalf("short header: got %v", err) + } + // leaf_count = 1, but only half a key present + buf := []byte{0x00, 0x00, 0x00, 0x01, 0x00, 0x00} + if _, err := DecodeEnvelope(buf); !errors.Is(err, ErrEnvelopeTruncated) { + t.Fatalf("short leaf header: got %v", err) + } + // leaf_count = 1, full key, value_len=10, but value missing + buf = make([]byte, 4+32+2) + buf[3] = 0x01 // leaf_count = 1 + buf[4+32+1] = 10 // value_len = 10 + if _, err := DecodeEnvelope(buf); !errors.Is(err, ErrEnvelopeTruncated) { + t.Fatalf("short value: 
got %v", err) + } +} + +func TestEnvelope_TooManyLeaves(t *testing.T) { + buf := []byte{0xFF, 0xFF, 0xFF, 0xFF} // leaf_count = 2^32-1 + if _, err := DecodeEnvelope(buf); !errors.Is(err, ErrEnvelopeTooManyLeaves) { + t.Fatalf("got %v, want ErrEnvelopeTooManyLeaves", err) + } +} + +func TestEnvelope_MaxValueLen(t *testing.T) { + oversize := make([]byte, 0x10000) // 65536 > u16::max + _, err := EncodeEnvelope([]Leaf{{Value: oversize}}, nil) + if err == nil { + t.Fatalf("expected oversize value to fail encode") + } +} + +func TestRootFromBytes(t *testing.T) { + if r, err := RootFromBytes(nil); err != nil || r.Set { + t.Fatalf("nil: got %+v, %v", r, err) + } + if r, err := RootFromBytes([]byte{}); err != nil || r.Set { + t.Fatalf("empty: got %+v, %v", r, err) + } + if _, err := RootFromBytes([]byte{1, 2, 3}); err == nil { + t.Fatalf("expected error on 3-byte input") + } + thirtyTwo := make([]byte, 32) + thirtyTwo[5] = 0xAA + r, err := RootFromBytes(thirtyTwo) + if err != nil || !r.Set || r.Hash[5] != 0xAA { + t.Fatalf("32 bytes: got %+v, %v", r, err) + } +} diff --git a/rootchain/consensus/zkverifier/verifier.go b/rootchain/consensus/zkverifier/verifier.go index d71a47d4..56e380e4 100644 --- a/rootchain/consensus/zkverifier/verifier.go +++ b/rootchain/consensus/zkverifier/verifier.go @@ -26,6 +26,10 @@ const ( ProofTypeExec ProofType = "exec" // ProofTypeLightClient indicates light client mode (full witness validation) ProofTypeLightClient ProofType = "light_client" + // ProofTypeAggregatorRSMTv1 indicates aggregator Radix SMT consistency proof + // (flat opcode stream with batch of new leaves). Verified in-process in pure + // Go; see rootchain/consensus/zkverifier/rsmt for the wire format. 
+ ProofTypeAggregatorRSMTv1 ProofType = "aggregator_rsmt_v1" // ProofTypeNone indicates no proof verification (disabled) ProofTypeNone ProofType = "none" ) @@ -93,6 +97,8 @@ func NewVerifier(cfg *Config) (ZKVerifier, error) { return NewSP1Verifier(cfg.VerificationKeyPath, cfg.ChainID) case ProofTypeLightClient: return NewLightClientVerifier(cfg.ChainID) + case ProofTypeAggregatorRSMTv1: + return NewAggregatorRSMTVerifier(), nil case ProofTypeRISC0: return nil, fmt.Errorf("RISC0 verifier not implemented") case ProofTypeExec, ProofTypeNone: diff --git a/rootchain/node.go b/rootchain/node.go index d07fb890..60136d9e 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "log/slog" + "time" "github.com/libp2p/go-libp2p/core/peer" "go.opentelemetry.io/otel/attribute" @@ -337,7 +338,7 @@ func (v *Node) verifyZKProof(ctx context.Context, req *certification.BlockCertif // Skip verification for sync UCs and genesis blocks: // 1. Sync UCs: both hashes are null/empty (handshake/subscription requests) - // 2. Genesis block: previousHash is null/empty (first block with no parent) + // 2. 
Genesis block: previousHash is null/empty (first genesis block is sent from heaven) if len(previousStateRoot) == 0 && len(newStateRoot) == 0 { v.log.DebugContext(ctx, "Skipping ZK proof verification for sync UC", logger.Shard(req.PartitionID, req.ShardID)) @@ -349,21 +350,39 @@ func (v *Node) verifyZKProof(ctx context.Context, req *certification.BlockCertif return nil } + proofType := string(verifier.ProofType()) + proofSize := len(req.ZkProof) + v.log.DebugContext(ctx, "Verifying ZK proof", logger.Shard(req.PartitionID, req.ShardID), - logger.Data(slog.Int("proof_size", len(req.ZkProof))), - logger.Data(slog.String("proof_type", string(verifier.ProofType()))), - logger.Data(slog.Uint64("round", ir.RoundNumber))) + slog.String("verifier_type", proofType), + slog.Int("proof_size", proofSize), + slog.Uint64("round", ir.RoundNumber)) // Verify proof: previousStateRoot -> newStateRoot transition with block hash blockHash := ir.BlockHash - if err := verifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot, blockHash); err != nil { - return fmt.Errorf("ZK proof verification failed: %w", err) + start := time.Now() + verifyErr := verifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot, blockHash) + elapsed := time.Since(start) + + if verifyErr != nil { + v.log.WarnContext(ctx, "ZK proof verification failed", + logger.Shard(req.PartitionID, req.ShardID), + slog.String("verifier_type", proofType), + slog.Int("proof_size", proofSize), + slog.Duration("verification_time", elapsed), + slog.Uint64("round", ir.RoundNumber), + logger.Error(verifyErr)) + return fmt.Errorf("ZK proof verification failed: %w", verifyErr) } v.log.InfoContext(ctx, "ZK proof verified successfully", logger.Shard(req.PartitionID, req.ShardID), - logger.Data(slog.Uint64("round", ir.RoundNumber))) + slog.String("verifier_type", proofType), + slog.Int("proof_size", proofSize), + slog.Uint64("num_leaves", req.BlockSize), + slog.Duration("verification_time", elapsed), + slog.Uint64("round", 
ir.RoundNumber)) return nil } From abafb14584c86e90aa28caa3eb0e3662a3710876 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Sun, 5 Apr 2026 14:01:06 +0300 Subject: [PATCH 14/17] err info in uc response --- .../certification/certification_response.go | 90 +++++++++++++++++++ .../certification_response_test.go | 78 ++++++++++++++++ rootchain/consensus/storage/sharding.go | 1 + rootchain/node.go | 31 ++++++- rootchain/node_test.go | 10 ++- 5 files changed, 205 insertions(+), 5 deletions(-) diff --git a/network/protocol/certification/certification_response.go b/network/protocol/certification/certification_response.go index 1d8c6356..9d296680 100644 --- a/network/protocol/certification/certification_response.go +++ b/network/protocol/certification/certification_response.go @@ -7,9 +7,40 @@ import ( "github.com/unicitynetwork/bft-go-base/types" ) +// Status codes for CertificationResponse.Status. Status is a transport-level +// field on the outer response wrapper — it is NEVER hashed into the UC — and +// it only describes why *this particular response message* was generated. +// The wrapped UC is still the last-good certificate regardless of status. +const ( + // CertStatusOK — request was accepted and UC is the newly certified one. + CertStatusOK uint32 = 0 + // CertStatusTransient — root-side transient error (consensus manager unavailable, + // send failure, etc). Submitter SHOULD retry with the same batch. + CertStatusTransient uint32 = 1 + // CertStatusRequestInvalid — ValidRequest failed (stale round/epoch, timestamp + // drift, bad signature, bad prev-hash). Submitter SHOULD resync from the + // attached UC and retry. + CertStatusRequestInvalid uint32 = 2 + // CertStatusProofInvalid — ZK proof verification failed. The batch and its + // proof are inconsistent. Submitter SHOULD drop this batch; a new batch may succeed. 
+ CertStatusProofInvalid uint32 = 3 + // CertStatusFatal — unrecoverable (unknown proof type, mandatory verifier not + // configured, schema mismatch). Submitter SHOULD stop retrying and alert. + CertStatusFatal uint32 = 255 +) + +// MaxStatusMessageLen caps the free-form diagnostic string on the wire so a +// misbehaving or malicious root cannot blast unbounded strings at partitions. +const MaxStatusMessageLen = 512 + /* Certification response is sent by the root partition to validators of a shard of a partition as a response to a certification request message. + +Status and Message are outer transport-level fields. Status == CertStatusOK means +the request was accepted and UC is the newly certified one. A non-zero Status means +the request was rejected for the reason encoded in Status/Message; the wrapped UC +in that case is the last-good certificate so the submitter can resync its state. */ type CertificationResponse struct { _ struct{} `cbor:",toarray"` @@ -17,8 +48,19 @@ type CertificationResponse struct { Shard types.ShardID Technical TechnicalRecord UC types.UnicityCertificate + Status uint32 + Message string } +// IsAccepted reports whether the wrapped UC represents acceptance of the +// request that triggered this response. +func (cr *CertificationResponse) IsAccepted() bool { + return cr != nil && cr.Status == CertStatusOK +} + +// IsValid validates the structural integrity of the wrapped UC and technical +// record. It intentionally does NOT reject non-OK Status values: the wrapped +// UC is still the last-good certificate even on a rejection. func (cr *CertificationResponse) IsValid() error { if cr == nil { return errors.New("nil CertificationResponse") @@ -52,3 +94,51 @@ func (cr *CertificationResponse) SetTechnicalRecord(tr TechnicalRecord) error { cr.Technical = tr return nil } + +// UnmarshalCBOR provides backward compatibility for the pre-status wire format +// (4 array elements, before Status/Message were added). 
Existing rootchain.db +// snapshots and older peers encode the old shape; we decode either and fill +// Status/Message with zero values when they're absent. +// TODO: remove eventually. +func (cr *CertificationResponse) UnmarshalCBOR(data []byte) error { + // Try the new 6-element format first. + type newFormat struct { + _ struct{} `cbor:",toarray"` + Partition types.PartitionID + Shard types.ShardID + Technical TechnicalRecord + UC types.UnicityCertificate + Status uint32 + Message string + } + var nf newFormat + if err := types.Cbor.Unmarshal(data, &nf); err == nil { + cr.Partition = nf.Partition + cr.Shard = nf.Shard + cr.Technical = nf.Technical + cr.UC = nf.UC + cr.Status = nf.Status + cr.Message = nf.Message + return nil + } + + // Fall back to the old 4-element format. + type oldFormat struct { + _ struct{} `cbor:",toarray"` + Partition types.PartitionID + Shard types.ShardID + Technical TechnicalRecord + UC types.UnicityCertificate + } + var of oldFormat + if err := types.Cbor.Unmarshal(data, &of); err != nil { + return err + } + cr.Partition = of.Partition + cr.Shard = of.Shard + cr.Technical = of.Technical + cr.UC = of.UC + cr.Status = CertStatusOK + cr.Message = "" + return nil +} diff --git a/network/protocol/certification/certification_response_test.go b/network/protocol/certification/certification_response_test.go index ad66a726..395e82a5 100644 --- a/network/protocol/certification/certification_response_test.go +++ b/network/protocol/certification/certification_response_test.go @@ -1,6 +1,7 @@ package certification import ( + "strings" "testing" "github.com/stretchr/testify/require" @@ -65,6 +66,83 @@ func Test_CertificationResponse_IsValid(t *testing.T) { }) } +func Test_CertificationResponse_IsValid_NonOKStatus(t *testing.T) { + // Status != OK must NOT cause IsValid to fail — the wrapped UC is still + // the last-good certificate and callers may want to forward it. 
+ cr := &CertificationResponse{ + Partition: 1, + Shard: types.ShardID{}, + UC: types.UnicityCertificate{ + Version: 1, + UnicityTreeCertificate: &types.UnicityTreeCertificate{ + Version: 1, + Partition: 1, + }, + }, + Status: CertStatusProofInvalid, + Message: "envelope truncated: missing leaf_count", + } + require.NoError(t, cr.SetTechnicalRecord(TechnicalRecord{ + Round: 99, Epoch: 8, Leader: "1", StatHash: []byte{1}, FeeHash: []byte{2}, + })) + require.NoError(t, cr.IsValid()) + require.False(t, cr.IsAccepted()) + + cr.Status = CertStatusOK + require.True(t, cr.IsAccepted()) +} + +func Test_CertificationResponse_CBOR_RoundTrip_WithStatus(t *testing.T) { + orig := &CertificationResponse{ + Partition: 1, + Shard: types.ShardID{}, + UC: types.UnicityCertificate{ + Version: 1, + UnicityTreeCertificate: &types.UnicityTreeCertificate{ + Version: 1, Partition: 1, + }, + }, + Status: CertStatusRequestInvalid, + Message: "stale round: expected 42 got 41", + } + require.NoError(t, orig.SetTechnicalRecord(TechnicalRecord{ + Round: 99, Epoch: 8, Leader: "1", StatHash: []byte{1}, FeeHash: []byte{2}, + })) + + buf, err := types.Cbor.Marshal(orig) + require.NoError(t, err) + + var decoded CertificationResponse + require.NoError(t, types.Cbor.Unmarshal(buf, &decoded)) + require.Equal(t, orig.Status, decoded.Status) + require.Equal(t, orig.Message, decoded.Message) + require.Equal(t, orig.Partition, decoded.Partition) + require.Equal(t, orig.Technical.Round, decoded.Technical.Round) +} + +func Test_SendRejection_TruncationBoundary(t *testing.T) { + // Sanity: verify that MaxStatusMessageLen is a reasonable cap and that + // a message exactly at the cap survives round-trip. 
+ msg := strings.Repeat("x", MaxStatusMessageLen) + cr := &CertificationResponse{ + Partition: 1, + UC: types.UnicityCertificate{ + Version: 1, + UnicityTreeCertificate: &types.UnicityTreeCertificate{Version: 1, Partition: 1}, + }, + Status: CertStatusFatal, + Message: msg, + } + require.NoError(t, cr.SetTechnicalRecord(TechnicalRecord{ + Round: 1, Epoch: 1, Leader: "1", StatHash: []byte{1}, FeeHash: []byte{1}, + })) + buf, err := types.Cbor.Marshal(cr) + require.NoError(t, err) + var out CertificationResponse + require.NoError(t, types.Cbor.Unmarshal(buf, &out)) + require.Len(t, out.Message, MaxStatusMessageLen) +} + func Test_CertificationResponse_SetTechnicalRecord(t *testing.T) { tr := TechnicalRecord{Round: 123, Epoch: 4, Leader: "567890"} cr := CertificationResponse{} diff --git a/rootchain/consensus/storage/sharding.go b/rootchain/consensus/storage/sharding.go index c5b7fc71..9f31cb4d 100644 --- a/rootchain/consensus/storage/sharding.go +++ b/rootchain/consensus/storage/sharding.go @@ -150,6 +150,7 @@ func (ss ShardStates) certificationResponses(algo crypto.Hash) ([]*certification UnicityTreeCertificate: utCert, ShardTreeCertificate: stCert, }, + Status: certification.CertStatusOK, }) } diff --git a/rootchain/node.go b/rootchain/node.go index 60136d9e..6df8b59a 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -191,6 +191,27 @@ func (v *Node) sendResponse(ctx context.Context, nodeID string, cr *certificatio return v.net.Send(ctx, cr, peerID) } +// sendRejection attaches a status code and diagnostic message to a copy of the +// last-good CertificationResponse (so the submitter can still resync from the +// wrapped UC) and sends it. The shared `last` pointer is NOT mutated — we only +// ever write to a shallow copy. 
+func (v *Node) sendRejection(ctx context.Context, nodeID string, last *certification.CertificationResponse, status uint32, cause error) error { + if last == nil { + return fmt.Errorf("no last CR available to attach to rejection") + } + msg := "" + if cause != nil { + msg = cause.Error() + if len(msg) > certification.MaxStatusMessageLen { + msg = msg[:certification.MaxStatusMessageLen] + } + } + resp := *last + resp.Status = status + resp.Message = msg + return v.sendResponse(ctx, nodeID, &resp) +} + func (v *Node) onHandshake(ctx context.Context, req *handshake.Handshake) error { ctx, span := v.tracer.Start(ctx, "node.onHandshake") defer span.End() @@ -238,7 +259,7 @@ func (v *Node) onBlockCertificationRequest(ctx context.Context, req *certificati // we got the shard info thus it's a valid partition/shard if err := si.ValidRequest(req); err != nil { err = fmt.Errorf("invalid block certification request: %w", err) - if se := v.sendResponse(ctx, req.NodeID, si.LastCR); se != nil { + if se := v.sendRejection(ctx, req.NodeID, si.LastCR, certification.CertStatusRequestInvalid, err); se != nil { err = errors.Join(err, fmt.Errorf("sending latest cert: %w", se)) } return err @@ -250,9 +271,11 @@ func (v *Node) onBlockCertificationRequest(ctx context.Context, req *certificati logger.Error(err), logger.Shard(req.PartitionID, req.ShardID)) - // Send last valid UC immediately when proof verification fails - // This allows the partition to sync back to the last certified state - if se := v.sendResponse(ctx, req.NodeID, si.LastCR); se != nil { + // Send last valid UC immediately when proof verification fails so the + // partition can sync back to the last certified state. The outer + // response carries CertStatusProofInvalid + the verifier's error so + // the submitter can distinguish this from a timeout repeat UC. 
+ if se := v.sendRejection(ctx, req.NodeID, si.LastCR, certification.CertStatusProofInvalid, err); se != nil { err = errors.Join(err, fmt.Errorf("failed to send last valid UC: %w", se)) } return fmt.Errorf("ZK proof verification failed: %w", err) diff --git a/rootchain/node_test.go b/rootchain/node_test.go index eabcae53..63e2512d 100644 --- a/rootchain/node_test.go +++ b/rootchain/node_test.go @@ -544,10 +544,18 @@ func Test_onBlockCertificationRequest(t *testing.T) { t.Run("invalid request", func(t *testing.T) { // in case of invalid request we respond with the latest cert of the shard + // wrapped in a rejection envelope (Status=RequestInvalid, Message=why). sendCallCnt := 0 partNet := mockPartitionNet{ send: func(ctx context.Context, msg any, receivers ...p2peer.ID) error { - require.Equal(t, &certResp, msg) + resp, ok := msg.(*certification.CertificationResponse) + require.True(t, ok, "expected *CertificationResponse, got %T", msg) + require.Equal(t, certification.CertStatusRequestInvalid, resp.Status) + require.NotEmpty(t, resp.Message) + // The wrapped UC/Technical must still be the last-good certificate. 
+ require.Equal(t, certResp.Partition, resp.Partition) + require.Equal(t, certResp.Technical, resp.Technical) + require.Equal(t, certResp.UC.TRHash, resp.UC.TRHash) sendCallCnt++ return nil }, From 36e89e1af565f7fa6226737a349ede4cfbbba830 Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Sun, 5 Apr 2026 14:10:48 +0300 Subject: [PATCH 15/17] rust/go compat test data --- .gitignore | 1 - .../zkverifier/rsmt/testdata/fixtures.json | 34 +++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 rootchain/consensus/zkverifier/rsmt/testdata/fixtures.json diff --git a/.gitignore b/.gitignore index 664e27f1..753014f5 100644 --- a/.gitignore +++ b/.gitignore @@ -16,7 +16,6 @@ test-coverage.html gosec_report.json .trivycache/ /test-nodes/ -rootchain/consensus/zkverifier/rsmt/testdata/ *.log diff --git a/rootchain/consensus/zkverifier/rsmt/testdata/fixtures.json b/rootchain/consensus/zkverifier/rsmt/testdata/fixtures.json new file mode 100644 index 00000000..df01ecd9 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/testdata/fixtures.json @@ -0,0 +1,34 @@ +{ + "fixtures": [ + { + "name": "empty_batch_empty_tree", + "prev_root": "", + "new_root": "", + "envelope": "00000000" + }, + { + "name": "single_leaf_into_empty", + "prev_root": "", + "new_root": "37456985272abf5c393d420e08b83520e0110857fe1fdcaed96bb2c5a13aa0df", + "envelope": "000000010500000000000000000000000000000000000000000000000000000000000000000568656c6c6f01" + }, + { + "name": "two_leaves_into_empty", + "prev_root": "", + "new_root": "f276ee0665e2474e4eff31d9681640463dfde89ea35b0a98e7bc370c3b95d6a4", + "envelope": "0000000200000000000000000000000000000000000000000000000000000000000000010002763080000000000000000000000000000000000000000000000000000000000000020002763101010207" + }, + { + "name": "insert_into_existing", + "prev_root": "8b3c07970a80a31948fa634bbba4132f6f75957b35f1750f7fd04070badaa69d", + "new_root": "7ebfa43283488c37459bbf6c91159f7885c21e78d134a73d4040301ead1686bc", 
+ "envelope": "000000024000000000000000000000000000000000000000000000000000000000000004000164500000000000000000000000000000000000000000000000000000000000000500016501007f7988e562af77b1ed732f7e2dfc4e7c5394267eea169af325d952f4fa843df2020500f380a374af649fefeff0c7f374a5a6455df030ab218981b88e1c16659100e6d601020600a7f302c9e9ea09e6925e772a168dae23620ad9f753bfd367001d74b06f8fca9a02050204" + }, + { + "name": "fifty_leaves_into_empty", + "prev_root": "", + "new_root": "30bdc202cfe1ace9d2f10b598b2231ab09cd99968171d475af2aa49d5c300def", + "envelope": "0000003200000000000000000000000000000000000000000000000000000000000000aa000300550120000000000000000000000000000020000000000000000000000000000000aa000320752110000000000000000000000000000010000000000000000000000000000000aa000310451130000000000000000000000000000030000000000000000000000000000000aa000330653188000000000000000000000000000008000000000000000000000000000000aa0003085d09a8000000000000000000000000000028000000000000000000000000000000aa0003287d2998000000000000000000000000000018000000000000000000000000000000aa0003184d1944000000000000000000000000000004000000000000000000000000000000aa000304510564000000000000000000000000000024000000000000000000000000000000aa000324712554000000000000000000000000000014000000000000000000000000000000aa0003144115cc00000000000000000000000000000c000000000000000000000000000000aa00030c590dec00000000000000000000000000002c000000000000000000000000000000aa00032c792ddc00000000000000000000000000001c000000000000000000000000000000aa00031c491d42000000000000000000000000000022000000000000000000000000000000aa000322772322000000000000000000000000000002000000000000000000000000000000aa000302570332000000000000000000000000000012000000000000000000000000000000aa0003124713ca00000000000000000000000000002a000000000000000000000000000000aa00032a7f2baa00000000000000000000000000000a000000000000000000000000000000aa00030a5f0bba00000000000000000000000000001a000000000000000000000000000000aa00031a4f1b86000000000000000000000000000026000000000
000000000000000000000aa000326732766000000000000000000000000000006000000000000000000000000000000aa000306530776000000000000000000000000000016000000000000000000000000000000aa00031643170e00000000000000000000000000002e000000000000000000000000000000aa00032e7b2fee00000000000000000000000000000e000000000000000000000000000000aa00030e5b0ffe00000000000000000000000000001e000000000000000000000000000000aa00031e4b1f41000000000000000000000000000031000000000000000000000000000000aa000331643221000000000000000000000000000011000000000000000000000000000000aa000311441211000000000000000000000000000001000000000000000000000000000000aa000301540231000000000000000000000000000021000000000000000000000000000000aa0003217422a9000000000000000000000000000019000000000000000000000000000000aa0003194c1a99000000000000000000000000000009000000000000000000000000000000aa0003095c0ab9000000000000000000000000000029000000000000000000000000000000aa0003297c2a65000000000000000000000000000015000000000000000000000000000000aa000315401655000000000000000000000000000005000000000000000000000000000000aa000305500675000000000000000000000000000025000000000000000000000000000000aa0003257026ed00000000000000000000000000001d000000000000000000000000000000aa00031d481edd00000000000000000000000000000d000000000000000000000000000000aa00030d580efd00000000000000000000000000002d000000000000000000000000000000aa00032d782e43000000000000000000000000000013000000000000000000000000000000aa000313461453000000000000000000000000000023000000000000000000000000000000aa000323762433000000000000000000000000000003000000000000000000000000000000aa0003035604cb00000000000000000000000000001b000000000000000000000000000000aa00031b4e1cdb00000000000000000000000000002b000000000000000000000000000000aa00032b7e2cbb00000000000000000000000000000b000000000000000000000000000000aa00030b5e0c87000000000000000000000000000017000000000000000000000000000000aa000317421897000000000000000000000000000027000000000000000000000000000000aa00032772287700000000000000000000000000000700000000000
0000000000000000000aa00030752080f00000000000000000000000000001f000000000000000000000000000000aa00031f4a201f00000000000000000000000000002f000000000000000000000000000000aa00032f7a30ff00000000000000000000000000000f000000000000000000000000000000aa00030f5a1001010205010102050204010102050102040203010102050102040101020501020402030202010102050102040101020501020402030101020501020401010205010204020302020201010102050101020502040101010205020402030101010205020401010102050204020302020101010205020401010102050204020301010102050204010101020502040203020202010200" + } + ] +} From 3cea485aae49a2a731d7279ed504f073dce5845b Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Sun, 5 Apr 2026 22:25:39 +0300 Subject: [PATCH 16/17] handshake subscribes to uc feed always. \ TODO: test with multiple rc nodes --- rootchain/node.go | 16 ++++++++++------ rootchain/node_test.go | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/rootchain/node.go b/rootchain/node.go index 6df8b59a..14fb2fdc 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -231,12 +231,16 @@ func (v *Node) onHandshake(ctx context.Context, req *handshake.Handshake) error return fmt.Errorf("node ID is not in active validator set %s - %s - %s", req.PartitionID, req.ShardID, req.NodeID) } - if si.LastCR == nil || si.LastCR.UC.GetRoundNumber() == 0 { - // Make sure shard nodes get CertificationResponses even - // before they send the first BlockCertificationRequests - if err := v.subscription.Subscribe(req.PartitionID, req.ShardID, req.NodeID); err != nil { - return fmt.Errorf("subscribing the sender: %w", err) - } + // (Re)subscribe on every handshake. 
Subscriptions have a bounded response + // quota (see responsesPerSubscription in subscription.go) which is only + // refilled by Subscribe; repeat UCs sent to an idle partition consume quota + // without refilling it, so after ~responsesPerSubscription T2 timeouts the + // partition would otherwise fall off the subscriber map entirely. The + // handshake is the partition's only refresh mechanism when it has no + // BlockCertificationRequests to send, so it must always re-subscribe — + // not just before the first certified block. + if err := v.subscription.Subscribe(req.PartitionID, req.ShardID, req.NodeID); err != nil { + return fmt.Errorf("subscribing the sender: %w", err) } if err = v.sendResponse(ctx, req.NodeID, si.LastCR); err != nil { return fmt.Errorf("failed to send response: %w", err) diff --git a/rootchain/node_test.go b/rootchain/node_test.go index 63e2512d..3a7c5d69 100644 --- a/rootchain/node_test.go +++ b/rootchain/node_test.go @@ -281,6 +281,48 @@ func Test_onHandshake(t *testing.T) { } require.NoError(t, node.onHandshake(t.Context(), &msg)) }) + + // Regression: a partition that has already produced certified blocks must + // still be (re)subscribed on handshake. The subscription has a bounded + // per-peer response quota (see responsesPerSubscription) which repeat UCs + // on T2 timeout drain without refilling; handshake is the only refresh + // mechanism for an idle partition. If onHandshake skips Subscribe once + // LastCR.UC.GetRoundNumber() > 0, the partition silently falls off the + // subscriber map and BFT Core logs "0 receivers" on subsequent repeat UCs. 
+ t.Run("post-genesis handshake re-subscribes", func(t *testing.T) { + cr := validCertificationResponse(t) + cr.UC.InputRecord.RoundNumber = 42 // post-genesis: GetRoundNumber() > 0 + + partNet := mockPartitionNet{ + send: func(ctx context.Context, msg any, receivers ...p2peer.ID) error { + return nil + }, + } + cm := mockConsensusManager{ + shardInfo: func(partition types.PartitionID, shard types.ShardID) (*storage.ShardInfo, error) { + return newMockShardInfo(t, nodeID.String(), publicKey, cr), nil + }, + } + node, err := New(&nwPeer, partNet, cm, nopObs) + require.NoError(t, err) + + msg := handshake.Handshake{ + PartitionID: cr.Partition, + ShardID: cr.Shard, + NodeID: nodeID.String(), + } + require.NoError(t, node.onHandshake(t.Context(), &msg)) + + // Subscribe must have been called — the peer should be registered with + // a full response quota, not absent from the subs map. + key := partitionShard{cr.Partition, cr.Shard.Key()} + node.subscription.mu.RLock() + defer node.subscription.mu.RUnlock() + peers, ok := node.subscription.subs[key] + require.True(t, ok, "partition must be present in subs map after post-genesis handshake") + require.Equal(t, responsesPerSubscription, peers[nodeID], + "peer quota must be refilled by handshake") + }) } func Test_handlePartitionMsg(t *testing.T) { From 2efac44936b2edd539fa8afb3fe7fa0040b9ab2b Mon Sep 17 00:00:00 2001 From: Risto Laanoja Date: Tue, 7 Apr 2026 12:54:28 +0300 Subject: [PATCH 17/17] AggregatorZKVerifier --- Makefile | 59 ++++- README.md | 15 +- .../aggregator-zk-verifier-ffi/Cargo.toml | 26 ++ .../aggregator_zk_verifier.h | 90 +++++++ .../aggregator-zk-verifier-ffi/build.sh | 67 +++++ .../aggregator-zk-verifier-ffi/src/lib.rs | 250 ++++++++++++++++++ .../zkverifier/aggregator_zk_verifier.go | 65 +++++ .../zkverifier/aggregator_zk_verifier_ffi.go | 119 +++++++++ .../aggregator_zk_verifier_ffi_stub.go | 23 ++ .../consensus/zkverifier/capabilities.go | 43 +++ .../capabilities_aggregator_zk_ffi.go | 6 + 
.../capabilities_aggregator_zk_stub.go | 6 + .../consensus/zkverifier/capabilities_ffi.go | 25 +- .../consensus/zkverifier/capabilities_stub.go | 25 +- rootchain/consensus/zkverifier/registry.go | 7 + .../zkverifier/sp1-verifier-ffi/src/lib.rs | 2 + rootchain/consensus/zkverifier/verifier.go | 7 + 17 files changed, 778 insertions(+), 57 deletions(-) create mode 100644 rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/Cargo.toml create mode 100644 rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/aggregator_zk_verifier.h create mode 100755 rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/build.sh create mode 100644 rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/src/lib.rs create mode 100644 rootchain/consensus/zkverifier/aggregator_zk_verifier.go create mode 100644 rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi.go create mode 100644 rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi_stub.go create mode 100644 rootchain/consensus/zkverifier/capabilities.go create mode 100644 rootchain/consensus/zkverifier/capabilities_aggregator_zk_ffi.go create mode 100644 rootchain/consensus/zkverifier/capabilities_aggregator_zk_stub.go diff --git a/Makefile b/Makefile index 8761750b..be7b3086 100644 --- a/Makefile +++ b/Makefile @@ -7,21 +7,40 @@ endif # ZK Verifier FFI configuration # Set ZKVERIFIER_FFI=1 to enable Rust FFI components (SP1 and light-client verifiers) -# Default: disabled (builds without Rust dependencies) +# Set ZKVERIFIER_AGGREGATOR_ZK_FFI=1 to enable the aggregator ZK verifier FFI (SP1 6.0.2) +# Default: all disabled (builds without Rust dependencies) ZKVERIFIER_FFI ?= 0 +ZKVERIFIER_AGGREGATOR_ZK_FFI ?= 0 + +# Accumulate Go build tags +GO_BUILD_TAGS_LIST = +GO_TEST_TAGS_LIST = -# Go build tags based on FFI configuration ifeq ($(ZKVERIFIER_FFI),1) - GO_BUILD_TAGS = -tags zkverifier_ffi - GO_TEST_TAGS = -tags zkverifier_ffi + GO_BUILD_TAGS_LIST += zkverifier_ffi + GO_TEST_TAGS_LIST += zkverifier_ffi +endif + 
+ifeq ($(ZKVERIFIER_AGGREGATOR_ZK_FFI),1) + GO_BUILD_TAGS_LIST += zkverifier_aggregator_zk_ffi + GO_TEST_TAGS_LIST += zkverifier_aggregator_zk_ffi +endif + +ifneq ($(strip $(GO_BUILD_TAGS_LIST)),) + GO_BUILD_TAGS = -tags $(subst $(space),$(comma),$(strip $(GO_BUILD_TAGS_LIST))) + GO_TEST_TAGS = -tags $(subst $(space),$(comma),$(strip $(GO_TEST_TAGS_LIST))) else GO_BUILD_TAGS = - GO_TEST_TAGS = + GO_TEST_TAGS = endif +comma = , +space = $(empty) $(empty) + # FFI library paths -SP1_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/sp1-verifier-ffi -LIGHT_CLIENT_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/light-client-verifier-ffi +SP1_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/sp1-verifier-ffi +LIGHT_CLIENT_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/light-client-verifier-ffi +AGGREGATOR_ZK_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi all: clean tools test build gosec @@ -36,6 +55,9 @@ clean-ffi: @if [ -d "$(LIGHT_CLIENT_VERIFIER_FFI_DIR)" ]; then \ cd $(LIGHT_CLIENT_VERIFIER_FFI_DIR) && cargo clean; \ fi + @if [ -d "$(AGGREGATOR_ZK_VERIFIER_FFI_DIR)" ]; then \ + cd $(AGGREGATOR_ZK_VERIFIER_FFI_DIR) && cargo clean; \ + fi test: go test $(GO_TEST_TAGS) ./... -coverpkg=./... 
-count=1 -coverprofile test-coverage.out @@ -45,11 +67,19 @@ build: # https://github.com/golang/go/issues/51279 cd ./cli/ubft && go build $(GO_BUILD_TAGS) -o ../../build/ubft -# Build with ZK verifier FFI support (requires Rust toolchain) +# Build with ZK verifier FFI support (SP1 + light-client, requires Rust toolchain) build-with-ffi: build-rust-ffi $(MAKE) build ZKVERIFIER_FFI=1 -# Build Rust FFI libraries +# Build with aggregator ZK verifier FFI support only (SP1 6.0.2, requires Rust toolchain) +build-with-aggregator-zk-ffi: build-aggregator-zk-ffi + $(MAKE) build ZKVERIFIER_AGGREGATOR_ZK_FFI=1 + +# Build with all FFI verifiers enabled +build-with-all-ffi: build-rust-ffi build-aggregator-zk-ffi + $(MAKE) build ZKVERIFIER_FFI=1 ZKVERIFIER_AGGREGATOR_ZK_FFI=1 + +# Build all Rust FFI libraries (SP1 + light-client) build-rust-ffi: check-rust build-sp1-ffi build-light-client-ffi build-sp1-ffi: @@ -68,6 +98,14 @@ build-light-client-ffi: echo "Warning: $(LIGHT_CLIENT_VERIFIER_FFI_DIR) not found"; \ fi +build-aggregator-zk-ffi: check-rust + @echo "Building Aggregator ZK verifier FFI (SP1 6.0.2)..." + @if [ -d "$(AGGREGATOR_ZK_VERIFIER_FFI_DIR)" ]; then \ + cd $(AGGREGATOR_ZK_VERIFIER_FFI_DIR) && cargo build --release; \ + else \ + echo "Warning: $(AGGREGATOR_ZK_VERIFIER_FFI_DIR) not found"; \ + fi + # Check if Rust toolchain is available check-rust: @command -v cargo >/dev/null 2>&1 || { \ @@ -93,9 +131,12 @@ tools: test \ build \ build-with-ffi \ + build-with-aggregator-zk-ffi \ + build-with-all-ffi \ build-rust-ffi \ build-sp1-ffi \ build-light-client-ffi \ + build-aggregator-zk-ffi \ check-rust \ build-docker \ gosec diff --git a/README.md b/README.md index e622772c..15d803c2 100644 --- a/README.md +++ b/README.md @@ -12,12 +12,15 @@ Run `make build` to build the application. 
Executable will be built to `build/ub | Target | Description | |--------|-------------| | `make build` | Build without FFI (default, no Rust required) | -| `make build-with-ffi` | Build with FFI support | -| `make build-rust-ffi` | Build Rust FFI libraries only | -| `make build-sp1-ffi` | Build SP1 verifier FFI | -| `make build-light-client-ffi` | Build Light Client verifier FFI | -| `make clean-ffi` | Clean Rust build artifacts | -| `make check-rust` | Verify Rust toolchain | +| `make build-with-ffi` | Build with SP1 + Light Client FFI verifiers | +| `make build-with-aggregator-zk-ffi` | Build with aggregator ZK verifier FFI (SP1 6.0.2) | +| `make build-with-all-ffi` | Build with all FFI verifiers enabled | +| `make build-rust-ffi` | Build SP1 + Light Client Rust FFI libraries only | +| `make build-aggregator-zk-ffi` | Build aggregator ZK verifier Rust FFI library only | +| `make build-sp1-ffi` | Build SP1 verifier FFI library only | +| `make build-light-client-ffi` | Build Light Client verifier FFI library only | +| `make clean-ffi` | Clean all Rust FFI build artifacts | +| `make check-rust` | Verify Rust toolchain is available | # Configuration diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/Cargo.toml new file mode 100644 index 00000000..483ae317 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "aggregator-zk-verifier-ffi" +version = "0.1.0" +edition = "2021" + +# Standalone workspace — does not participate in the bft-core Go module build. +[workspace] + +[lib] +crate-type = ["cdylib", "staticlib"] + +[dependencies] +# Must match the sp1-sdk version used in rugregator/crates/zk-host (6.0.2). +# This is independent of sp1-verifier-ffi which uses 5.0.8 for the EVM prover. +# No "blocking" feature — we only need the type layer (SP1VerifyingKey, +# SP1ProofWithPublicValues, SP1Proof, HashableKey). 
Verification is handled +# by the lightweight sp1-verifier crate without any CpuProver/worker overhead. +sp1-sdk = { version = "6.0.2", features = ["blocking"] } +sp1-verifier = { version = "6.0.2", features = ["std"] } +anyhow = "1.0" +bincode = "1.3" + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/aggregator_zk_verifier.h b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/aggregator_zk_verifier.h new file mode 100644 index 00000000..20765707 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/aggregator_zk_verifier.h @@ -0,0 +1,90 @@ +/** + * Aggregator ZK Verifier FFI + * + * C header for FFI interface to aggregator SP1 ZK proof verification. + * Uses SP1 6.0.2; independent of sp1_verifier.h which uses SP1 5.0.8. + */ + +#ifndef AGGREGATOR_ZK_VERIFIER_H +#define AGGREGATOR_ZK_VERIFIER_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Result codes for aggregator ZK verification. + * + * The AGGZK_ prefix distinguishes these from SP1_VERIFY_* constants so that + * both libraries can be linked into the same binary without symbol conflicts. + */ +typedef enum { + AGGZK_VERIFY_SUCCESS = 0, + AGGZK_VERIFY_INVALID_PROOF = 1, + AGGZK_VERIFY_INVALID_VKEY = 2, + AGGZK_VERIFY_INVALID_PUBLIC_INPUTS = 3, + AGGZK_VERIFY_VERIFICATION_FAILED = 4, + AGGZK_VERIFY_INTERNAL_ERROR = 5, +} AggZkVerifyResult; + +/** + * Verify an aggregator SP1 ZK consistency proof. + * + * The proof was produced by rugregator's zk-host crate (SP1 6.0.2). + * Public values layout: prev_root[32] || new_root[32] (64 bytes total). 
+ * + * @param vkey_bytes Bincode-serialized SP1VerifyingKey + * @param vkey_len Length of vkey_bytes + * @param proof_bytes Bincode-serialized SP1ProofWithPublicValues + * @param proof_len Length of proof_bytes + * @param prev_root Pointer to 32-byte previous SMT root + * @param new_root Pointer to 32-byte new SMT root + * @param error_out On error, set to a malloc'd C string (free with aggzk_free_string) + * @return AggZkVerifyResult status code + */ +AggZkVerifyResult aggzk_verify_proof( + const uint8_t* vkey_bytes, + size_t vkey_len, + const uint8_t* proof_bytes, + size_t proof_len, + const uint8_t* prev_root, + const uint8_t* new_root, + char** error_out +); + +/** + * Validate a bincode-serialized SP1VerifyingKey without running a proof. + * + * @param vkey_bytes Pointer to vkey bytes + * @param vkey_len Length of vkey_bytes + * @param error_out On error, set to a malloc'd C string (free with aggzk_free_string) + * @return AGGZK_VERIFY_SUCCESS or AGGZK_VERIFY_INVALID_VKEY + */ +AggZkVerifyResult aggzk_validate_vkey( + const uint8_t* vkey_bytes, + size_t vkey_len, + char** error_out +); + +/** + * Free a string allocated by this library. + * + * @param s Pointer to string to free (may be NULL) + */ +void aggzk_free_string(char* s); + +/** + * Return the version of this FFI library. + * + * @return Pointer to a static null-terminated version string (do not free) + */ +const char* aggzk_ffi_version(void); + +#ifdef __cplusplus +} +#endif + +#endif /* AGGREGATOR_ZK_VERIFIER_H */ diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/build.sh b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/build.sh new file mode 100755 index 00000000..6133d813 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/build.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# +# Build script for the Aggregator ZK Verifier FFI library. +# Uses SP1 6.0.2; independent of sp1-verifier-ffi (SP1 5.0.8). 
+# + +set -e + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +echo -e "${GREEN}Building Aggregator ZK Verifier FFI Library${NC}" +echo "=============================================" + +if ! command -v cargo &> /dev/null; then + echo -e "${RED}Error: Rust/Cargo not found${NC}" + echo "Please install Rust from https://rustup.rs/" + exit 1 +fi + +RUST_VERSION=$(cargo --version | cut -d' ' -f2) +echo -e "${GREEN}Rust version: ${RUST_VERSION}${NC}" + +echo -e "\n${YELLOW}Building Rust library...${NC}" +cargo build --release + +echo -e "${GREEN}✓ Build successful${NC}" + +LIB_PATH="target/release" +if [[ "$OSTYPE" == "darwin"* ]]; then + LIB_FILE="libaggregator_zk_verifier_ffi.dylib" + STATIC_LIB="libaggregator_zk_verifier_ffi.a" +elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + LIB_FILE="libaggregator_zk_verifier_ffi.so" + STATIC_LIB="libaggregator_zk_verifier_ffi.a" +else + echo -e "${YELLOW}Warning: Unknown OS type, library names may differ${NC}" + LIB_FILE="libaggregator_zk_verifier_ffi.*" + STATIC_LIB="libaggregator_zk_verifier_ffi.a" +fi + +echo -e "\n${YELLOW}Build artifacts:${NC}" +if [ -f "${LIB_PATH}/${LIB_FILE}" ]; then + ls -lh "${LIB_PATH}/${LIB_FILE}" + echo -e "${GREEN}✓ Dynamic library created${NC}" +else + echo -e "${RED}✗ Dynamic library not found${NC}" +fi + +if [ -f "${LIB_PATH}/${STATIC_LIB}" ]; then + ls -lh "${LIB_PATH}/${STATIC_LIB}" + echo -e "${GREEN}✓ Static library created${NC}" +else + echo -e "${YELLOW}⚠ Static library not found (optional)${NC}" +fi + +echo -e "\n${YELLOW}Running Rust tests...${NC}" +cargo test + +echo -e "${GREEN}✓ All tests passed${NC}" + +echo -e "\n${GREEN}Build complete!${NC}" +echo -e "\nTo use with Go:" +echo -e " export CGO_LDFLAGS=\"-L\$(pwd)/${LIB_PATH}\"" +echo -e " cd .. && go build -tags zkverifier_aggregator_zk_ffi ./..." 
diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/src/lib.rs new file mode 100644 index 00000000..2b95e935 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/src/lib.rs @@ -0,0 +1,250 @@ +use std::ffi::CString; +use std::os::raw::c_char; +use std::sync::OnceLock; +use sp1_sdk::{HashableKey, SP1Proof, SP1ProofWithPublicValues}; +use sp1_sdk::blocking::{CpuProver, Prover, ProverClient}; +use sp1_verifier::{Groth16Verifier, PlonkVerifier, GROTH16_VK_BYTES, PLONK_VK_BYTES}; + +/// Global CpuProver for Compressed proof verification. +/// +/// `SP1CompressedVerifierRaw` in sp1-verifier 6.0.2 uses a placeholder all-zero +/// vk_merkle_root (TODO) and therefore rejects all proofs from the CPU prover. +/// We must use CpuProver::verify() for Compressed proofs. Initialization takes +/// ~15 seconds but happens only once per process lifetime. +static CPU_PROVER: OnceLock = OnceLock::new(); + +fn cpu_prover() -> &'static CpuProver { + CPU_PROVER.get_or_init(|| ProverClient::builder().cpu().build()) +} + +/// Error codes for the aggregator ZK verifier FFI. +/// +/// The `AGGZK_` prefix keeps these distinct from the SP1 verifier FFI symbols +/// (`SP1_VERIFY_*`) so both libraries can coexist in the same Go binary. +#[repr(C)] +pub enum AggZkVerifyResult { + Success = 0, + InvalidProof = 1, + InvalidVKey = 2, + InvalidPublicInputs = 3, + VerificationFailed = 4, + InternalError = 5, +} + +/// Verify an aggregator SP1 ZK consistency proof. +/// +/// The proof was produced by `rugregator/crates/zk-host` using SP1 6.0.2. +/// The guest program committed exactly 64 public-value bytes: +/// bytes 0–31: previous SMT root +/// bytes 32–63: new SMT root +/// +/// Supported proof kinds: Groth16, Plonk, Compressed. +/// Core proofs are rejected (return `InternalError`). 
+/// +/// # Arguments +/// * `vkey_bytes` / `vkey_len` — bincode-serialized `SP1VerifyingKey` +/// * `proof_bytes` / `proof_len` — bincode-serialized `SP1ProofWithPublicValues` +/// * `prev_root` — pointer to 32-byte previous state root +/// * `new_root` — pointer to 32-byte new state root +/// * `error_out` — on error, set to a malloc'd C string (caller frees with `aggzk_free_string`) +/// +/// # Returns +/// `AggZkVerifyResult` status code. +#[no_mangle] +pub extern "C" fn aggzk_verify_proof( + vkey_bytes: *const u8, + vkey_len: usize, + proof_bytes: *const u8, + proof_len: usize, + prev_root: *const u8, + new_root: *const u8, + error_out: *mut *mut c_char, +) -> AggZkVerifyResult { + if vkey_bytes.is_null() || proof_bytes.is_null() { + set_error(error_out, "null pointer passed to aggzk_verify_proof"); + return AggZkVerifyResult::InternalError; + } + if prev_root.is_null() || new_root.is_null() { + set_error(error_out, "null state root pointer"); + return AggZkVerifyResult::InvalidPublicInputs; + } + + let vkey_data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) }; + let proof_data = unsafe { std::slice::from_raw_parts(proof_bytes, proof_len) }; + let prev = unsafe { std::slice::from_raw_parts(prev_root, 32) }; + let new = unsafe { std::slice::from_raw_parts(new_root, 32) }; + + match verify_internal(vkey_data, proof_data, prev, new) { + Ok(()) => AggZkVerifyResult::Success, + Err(e) => { + set_error(error_out, &e.to_string()); + classify(&e) + } + } +} + +fn verify_internal( + vkey_data: &[u8], + proof_data: &[u8], + prev_root: &[u8], + new_root: &[u8], +) -> anyhow::Result<()> { + let vkey: sp1_sdk::SP1VerifyingKey = bincode::deserialize(vkey_data) + .map_err(|e| anyhow::anyhow!("failed to deserialize vkey: {e}"))?; + + let proof: SP1ProofWithPublicValues = bincode::deserialize(proof_data) + .map_err(|e| anyhow::anyhow!("failed to deserialize proof: {e}"))?; + + // Public values layout: prev_root[32] || new_root[32] — exactly 64 bytes. 
+ let pv = proof.public_values.as_slice(); + if pv.len() != 64 { + anyhow::bail!( + "public values length mismatch: expected 64 bytes, got {}", + pv.len() + ); + } + if &pv[0..32] != prev_root { + anyhow::bail!("previous state root mismatch in public values"); + } + if &pv[32..64] != new_root { + anyhow::bail!("new state root mismatch in public values"); + } + + let pv_bytes = proof.public_values.to_vec(); + + match &proof.proof { + SP1Proof::Core(_) => { + anyhow::bail!("Core proofs are not supported; regenerate with Groth16, Plonk, or Compressed"); + } + SP1Proof::Groth16(_) => { + let wire = proof.bytes(); + let vkey_hash = vkey.bytes32(); // "0x<64 hex chars>" + Groth16Verifier::verify(&wire, &pv_bytes, &vkey_hash, &GROTH16_VK_BYTES) + .map_err(|e| anyhow::anyhow!("Groth16 verification failed: {e:?}"))?; + } + SP1Proof::Plonk(_) => { + let wire = proof.bytes(); + let vkey_hash = vkey.bytes32(); + PlonkVerifier::verify(&wire, &pv_bytes, &vkey_hash, &PLONK_VK_BYTES) + .map_err(|e| anyhow::anyhow!("Plonk verification failed: {e:?}"))?; + } + SP1Proof::Compressed(_) => { + // SP1CompressedVerifierRaw in sp1-verifier 6.0.2 uses a placeholder + // all-zero vk_merkle_root and rejects all CPU-generated proofs. + // Fall back to the cached CpuProver which skips the Merkle check when + // vk_verification is disabled (the CPU prover default). + cpu_prover() + .verify(&proof, &vkey, None) + .map_err(|e| anyhow::anyhow!("Compressed proof verification failed: {e}"))?; + } + } + + Ok(()) +} + +/// Validate a bincode-serialized `SP1VerifyingKey` without running a proof. 
+#[no_mangle] +pub extern "C" fn aggzk_validate_vkey( + vkey_bytes: *const u8, + vkey_len: usize, + error_out: *mut *mut c_char, +) -> AggZkVerifyResult { + if vkey_bytes.is_null() { + set_error(error_out, "null pointer passed to aggzk_validate_vkey"); + return AggZkVerifyResult::InternalError; + } + if vkey_len == 0 { + set_error(error_out, "vkey is empty"); + return AggZkVerifyResult::InvalidVKey; + } + let data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) }; + match bincode::deserialize::(data) { + Ok(_) => AggZkVerifyResult::Success, + Err(e) => { + set_error(error_out, &format!("failed to deserialize vkey: {e}")); + AggZkVerifyResult::InvalidVKey + } + } +} + +/// Free a string allocated by this library. +#[no_mangle] +pub extern "C" fn aggzk_free_string(s: *mut c_char) { + if !s.is_null() { + unsafe { let _ = CString::from_raw(s); } + } +} + +/// Return the version of this FFI library (static string, do not free). +#[no_mangle] +pub extern "C" fn aggzk_ffi_version() -> *const c_char { + const VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + VERSION.as_ptr() as *const c_char +} + +// ── Error helpers ───────────────────────────────────────────────────────────── + +fn classify(err: &anyhow::Error) -> AggZkVerifyResult { + let msg = err.to_string().to_lowercase(); + if msg.contains("vkey") || msg.contains("verifying key") { + AggZkVerifyResult::InvalidVKey + } else if msg.contains("deserialize proof") { + AggZkVerifyResult::InvalidProof + } else if msg.contains("state root") || msg.contains("public values") { + AggZkVerifyResult::InvalidPublicInputs + } else if msg.contains("verification failed") { + AggZkVerifyResult::VerificationFailed + } else if msg.contains("not supported") { + AggZkVerifyResult::InternalError + } else { + AggZkVerifyResult::InternalError + } +} + +fn set_error(error_out: *mut *mut c_char, message: &str) { + if !error_out.is_null() { + if let Ok(s) = CString::new(message) { + unsafe { *error_out = s.into_raw(); } + 
} + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ptr; + + #[test] + fn test_null_pointers() { + let mut error: *mut c_char = ptr::null_mut(); + let result = aggzk_verify_proof( + ptr::null(), 0, + ptr::null(), 0, + ptr::null(), + ptr::null(), + &mut error, + ); + assert!(matches!(result, AggZkVerifyResult::InternalError)); + if !error.is_null() { + aggzk_free_string(error); + } + } + + #[test] + fn test_version() { + let version = aggzk_ffi_version(); + assert!(!version.is_null()); + let s = unsafe { std::ffi::CStr::from_ptr(version) }; + assert!(s.to_str().unwrap().starts_with("0.1.0")); + } + + #[test] + fn test_empty_vkey() { + let mut error: *mut c_char = ptr::null_mut(); + let result = aggzk_validate_vkey(ptr::null(), 0, &mut error); + assert!(matches!(result, AggZkVerifyResult::InternalError)); + if !error.is_null() { + aggzk_free_string(error); + } + } +} diff --git a/rootchain/consensus/zkverifier/aggregator_zk_verifier.go b/rootchain/consensus/zkverifier/aggregator_zk_verifier.go new file mode 100644 index 00000000..b9c91cbf --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_zk_verifier.go @@ -0,0 +1,65 @@ +package zkverifier + +import ( + "encoding/hex" + "fmt" + "log/slog" +) + +// AggregatorZKVerifier verifies SP1 ZK consistency proofs produced by the +// rugregator aggregator (SP1 6.0.2). +// +// The proof commits exactly 64 public-value bytes: +// +// bytes 0–31: previous SMT root (must match previousStateRoot arg) +// bytes 32–63: new SMT root (must match newStateRoot arg) +// +// blockHash is accepted by the ZKVerifier interface but ignored — aggregator +// ZK proofs do not commit a block hash. +type AggregatorZKVerifier struct { + enabled bool + ffiVerifier *AggregatorZKVerifierFFI +} + +// NewAggregatorZKVerifier creates a new aggregator ZK verifier. +// vkeyPath must point to a bincode-serialized SP1VerifyingKey (see extract-vkey). 
+func NewAggregatorZKVerifier(vkeyPath string) (*AggregatorZKVerifier, error) { + if vkeyPath == "" { + return nil, fmt.Errorf("vkey_path is required for aggregator_zk_v1 proof type") + } + ffi, err := NewAggregatorZKVerifierFFI(vkeyPath) + if err != nil { + return nil, fmt.Errorf("aggregator ZK FFI verifier not available: %w", err) + } + slog.Info("Using aggregator ZK verifier", "path", vkeyPath, "version", GetAggregatorZKFFIVersion()) + return &AggregatorZKVerifier{enabled: true, ffiVerifier: ffi}, nil +} + +// VerifyProof verifies an aggregator SP1 ZK consistency proof. +func (v *AggregatorZKVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + if !v.enabled { + return ErrVerifierNotConfigured + } + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(previousStateRoot)) + } + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(newStateRoot)) + } + + slog.Debug("Verifying aggregator ZK proof", + "proof_size", len(proof), + "prev_root", hex.EncodeToString(previousStateRoot[:8]), + "new_root", hex.EncodeToString(newStateRoot[:8])) + + return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot) +} + +// ProofType returns ProofTypeAggregatorZKv1. +func (v *AggregatorZKVerifier) ProofType() ProofType { return ProofTypeAggregatorZKv1 } + +// IsEnabled returns true if the verifier is configured and the FFI library is available. 
+func (v *AggregatorZKVerifier) IsEnabled() bool { return v.enabled } diff --git a/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi.go b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi.go new file mode 100644 index 00000000..a6b7b8ee --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi.go @@ -0,0 +1,119 @@ +//go:build zkverifier_aggregator_zk_ffi + +package zkverifier + +// #cgo LDFLAGS: -L${SRCDIR}/aggregator-zk-verifier-ffi/target/release -laggregator_zk_verifier_ffi -ldl -lm +// #include "aggregator-zk-verifier-ffi/aggregator_zk_verifier.h" +// #include +import "C" +import ( + "fmt" + "unsafe" +) + +// AggregatorZKVerifierFFI wraps the Rust FFI library for aggregator ZK proof verification. +type AggregatorZKVerifierFFI struct { + vkey []byte +} + +// NewAggregatorZKVerifierFFI creates a new FFI-based aggregator ZK verifier. +func NewAggregatorZKVerifierFFI(vkeyPath string) (*AggregatorZKVerifierFFI, error) { + vkey, err := readFile(vkeyPath) + if err != nil { + return nil, fmt.Errorf("failed to load vkey: %w", err) + } + if len(vkey) == 0 { + return nil, fmt.Errorf("vkey file is empty") + } + + var errorOut *C.char + defer func() { + if errorOut != nil { + C.aggzk_free_string(errorOut) + } + }() + + result := C.aggzk_validate_vkey( + (*C.uint8_t)(unsafe.Pointer(&vkey[0])), + C.size_t(len(vkey)), + &errorOut, + ) + if result != C.AGGZK_VERIFY_SUCCESS { + if errorOut != nil { + return nil, fmt.Errorf("invalid vkey: %s", C.GoString(errorOut)) + } + return nil, fmt.Errorf("invalid vkey") + } + + return &AggregatorZKVerifierFFI{vkey: vkey}, nil +} + +// VerifyProof verifies an aggregator ZK proof via the Rust FFI library. +// blockHash is unused by aggregator ZK proofs and not passed to the FFI. 
+func (v *AggregatorZKVerifierFFI) VerifyProof(proof []byte, prevRoot []byte, newRoot []byte) error { + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + if len(prevRoot) != 32 { + return fmt.Errorf("%w: prevRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(newRoot) != 32 { + return fmt.Errorf("%w: newRoot must be 32 bytes", ErrInvalidProofFormat) + } + + var errorOut *C.char + defer func() { + if errorOut != nil { + C.aggzk_free_string(errorOut) + } + }() + + result := C.aggzk_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&v.vkey[0])), + C.size_t(len(v.vkey)), + (*C.uint8_t)(unsafe.Pointer(&proof[0])), + C.size_t(len(proof)), + (*C.uint8_t)(unsafe.Pointer(&prevRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&newRoot[0])), + &errorOut, + ) + + switch result { + case C.AGGZK_VERIFY_SUCCESS: + return nil + case C.AGGZK_VERIFY_INVALID_PROOF: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrInvalidProofFormat, C.GoString(errorOut)) + } + return ErrInvalidProofFormat + case C.AGGZK_VERIFY_INVALID_VKEY: + if errorOut != nil { + return fmt.Errorf("invalid vkey: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid vkey") + case C.AGGZK_VERIFY_INVALID_PUBLIC_INPUTS: + if errorOut != nil { + return fmt.Errorf("invalid public inputs: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid public inputs") + case C.AGGZK_VERIFY_VERIFICATION_FAILED: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrProofVerificationFailed, C.GoString(errorOut)) + } + return ErrProofVerificationFailed + default: + if errorOut != nil { + return fmt.Errorf("internal error: %s", C.GoString(errorOut)) + } + return fmt.Errorf("internal error") + } +} + +// GetAggregatorZKFFIVersion returns the version of the Rust FFI library. 
+func GetAggregatorZKFFIVersion() string { + v := C.aggzk_ffi_version() + if v == nil { + return "unknown" + } + return C.GoString(v) +} diff --git a/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi_stub.go b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi_stub.go new file mode 100644 index 00000000..283c5b96 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi_stub.go @@ -0,0 +1,23 @@ +//go:build !zkverifier_aggregator_zk_ffi + +package zkverifier + +import "fmt" + +// AggregatorZKVerifierFFI is a stub when the FFI library is not compiled in. +type AggregatorZKVerifierFFI struct { + vkey []byte +} + +// NewAggregatorZKVerifierFFI returns an error when the FFI library is not available. +func NewAggregatorZKVerifierFFI(_ string) (*AggregatorZKVerifierFFI, error) { + return nil, fmt.Errorf("aggregator ZK FFI verifier not available: build with -tags zkverifier_aggregator_zk_ffi to enable") +} + +// VerifyProof always returns an error in the stub. +func (v *AggregatorZKVerifierFFI) VerifyProof(_ []byte, _ []byte, _ []byte) error { + return fmt.Errorf("aggregator ZK FFI verifier not available") +} + +// GetAggregatorZKFFIVersion returns "unavailable" in the stub. +func GetAggregatorZKFFIVersion() string { return "unavailable" } diff --git a/rootchain/consensus/zkverifier/capabilities.go b/rootchain/consensus/zkverifier/capabilities.go new file mode 100644 index 00000000..1bf56adb --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities.go @@ -0,0 +1,43 @@ +package zkverifier + +// IsProofTypeAvailable reports whether pt can be used in the current binary. +// Each FFI verifier is independently toggled by its own build tag: +// - SP1, LightClient: -tags zkverifier_ffi +// - AggregatorZKv1: -tags zkverifier_aggregator_zk_ffi +// +// Pure-Go verifiers (AggregatorRSMTv1, Exec, None) are always available. 
+func IsProofTypeAvailable(pt ProofType) bool { + switch pt { + case ProofTypeAggregatorRSMTv1, ProofTypeExec, ProofTypeNone, "": + return true + case ProofTypeSP1: + return isSP1Available() + case ProofTypeLightClient: + return isLightClientAvailable() + case ProofTypeAggregatorZKv1: + return isAggregatorZKv1Available() + default: + return false + } +} + +// AvailableProofTypes returns the proof types that can be instantiated in the +// current binary. +func AvailableProofTypes() []ProofType { + types := []ProofType{ProofTypeAggregatorRSMTv1, ProofTypeExec} + if isSP1Available() { + types = append(types, ProofTypeSP1) + } + if isLightClientAvailable() { + types = append(types, ProofTypeLightClient) + } + if isAggregatorZKv1Available() { + types = append(types, ProofTypeAggregatorZKv1) + } + return types +} + +// IsFFIAvailable reports whether any FFI-backed verifier is available. +func IsFFIAvailable() bool { + return isSP1Available() || isLightClientAvailable() || isAggregatorZKv1Available() +} diff --git a/rootchain/consensus/zkverifier/capabilities_aggregator_zk_ffi.go b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_ffi.go new file mode 100644 index 00000000..6fcc42a5 --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_ffi.go @@ -0,0 +1,6 @@ +//go:build zkverifier_aggregator_zk_ffi + +package zkverifier + +// isAggregatorZKv1Available returns true when the aggregator ZK FFI verifier was compiled in. +func isAggregatorZKv1Available() bool { return true } diff --git a/rootchain/consensus/zkverifier/capabilities_aggregator_zk_stub.go b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_stub.go new file mode 100644 index 00000000..7c3193df --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_stub.go @@ -0,0 +1,6 @@ +//go:build !zkverifier_aggregator_zk_ffi + +package zkverifier + +// isAggregatorZKv1Available returns false when the aggregator ZK FFI verifier was not compiled in. 
+func isAggregatorZKv1Available() bool { return false } diff --git a/rootchain/consensus/zkverifier/capabilities_ffi.go b/rootchain/consensus/zkverifier/capabilities_ffi.go index 79f24214..80387324 100644 --- a/rootchain/consensus/zkverifier/capabilities_ffi.go +++ b/rootchain/consensus/zkverifier/capabilities_ffi.go @@ -2,25 +2,8 @@ package zkverifier -// IsProofTypeAvailable returns whether the given proof type is available -// in the current build. With FFI, SP1 and LightClient are available. -func IsProofTypeAvailable(pt ProofType) bool { - switch pt { - case ProofTypeSP1, ProofTypeLightClient, ProofTypeAggregatorRSMTv1, ProofTypeExec, ProofTypeNone, "": - return true - default: - return false - } -} +// isSP1Available returns true when the SP1 FFI verifier was compiled in. +func isSP1Available() bool { return true } -// AvailableProofTypes returns the list of proof types available in the current build. -// With FFI, SP1 and LightClient are available (besides m-of-n signature mode -// and the pure-Go aggregator RSMT verifier). -func AvailableProofTypes() []ProofType { - return []ProofType{ProofTypeSP1, ProofTypeLightClient, ProofTypeAggregatorRSMTv1, ProofTypeExec} -} - -// IsFFIAvailable returns whether FFI support is built in. -func IsFFIAvailable() bool { - return true -} +// isLightClientAvailable returns true when the LightClient FFI verifier was compiled in. +func isLightClientAvailable() bool { return true } diff --git a/rootchain/consensus/zkverifier/capabilities_stub.go b/rootchain/consensus/zkverifier/capabilities_stub.go index 2349f798..b000d7c2 100644 --- a/rootchain/consensus/zkverifier/capabilities_stub.go +++ b/rootchain/consensus/zkverifier/capabilities_stub.go @@ -2,25 +2,8 @@ package zkverifier -// IsProofTypeAvailable returns whether the given proof type is available -// in the current build. Without FFI, the pure-Go aggregator RSMT verifier -// is available alongside the m-of-n "exec" mode. 
-func IsProofTypeAvailable(pt ProofType) bool { - switch pt { - case ProofTypeAggregatorRSMTv1, ProofTypeExec, ProofTypeNone, "": - return true - default: - return false - } -} +// isSP1Available returns false when the SP1 FFI verifier was not compiled in. +func isSP1Available() bool { return false } -// AvailableProofTypes returns the list of proof types available in the current build. -// Without FFI, the pure-Go aggregator RSMT verifier and Exec (m-of-n) are available. -func AvailableProofTypes() []ProofType { - return []ProofType{ProofTypeAggregatorRSMTv1, ProofTypeExec} -} - -// IsFFIAvailable returns whether FFI support is built in. -func IsFFIAvailable() bool { - return false -} +// isLightClientAvailable returns false when the LightClient FFI verifier was not compiled in. +func isLightClientAvailable() bool { return false } diff --git a/rootchain/consensus/zkverifier/registry.go b/rootchain/consensus/zkverifier/registry.go index c32488ba..735c07d6 100644 --- a/rootchain/consensus/zkverifier/registry.go +++ b/rootchain/consensus/zkverifier/registry.go @@ -105,6 +105,13 @@ func (r *Registry) createVerifier(params map[string]string) (ZKVerifier, error) // self-contained and recomputes roots from the envelope. 
return NewAggregatorRSMTVerifier(), nil + case ProofTypeAggregatorZKv1: + vkeyPath := ParseVKeyPathFromParams(params) + if vkeyPath == "" { + return nil, fmt.Errorf("vkey_path required for aggregator_zk_v1 proof type") + } + return NewAggregatorZKVerifier(vkeyPath) + default: return nil, fmt.Errorf("unknown proof type: %s", proofType) } diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs index 40d15b30..d7960413 100644 --- a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs @@ -92,6 +92,8 @@ fn verify_proof_internal( .map_err(|e| anyhow::anyhow!("Failed to deserialize proof: {}", e))?; // Create prover client (used for verification) + // TODO: use the sp1-verifier crate instead https://github.com/succinctlabs/sp1/tree/v6.0.2/crates/verifier + // or at least cache the client let client = ProverClient::from_env(); // Verify the proof diff --git a/rootchain/consensus/zkverifier/verifier.go b/rootchain/consensus/zkverifier/verifier.go index 56e380e4..718b6de2 100644 --- a/rootchain/consensus/zkverifier/verifier.go +++ b/rootchain/consensus/zkverifier/verifier.go @@ -30,6 +30,11 @@ const ( // (flat opcode stream with batch of new leaves). Verified in-process in pure // Go; see rootchain/consensus/zkverifier/rsmt for the wire format. ProofTypeAggregatorRSMTv1 ProofType = "aggregator_rsmt_v1" + // ProofTypeAggregatorZKv1 indicates an SP1 ZK proof of aggregator SMT + // consistency produced by rugregator's zk-host crate (SP1 6.0.2). + // Public values: prev_root[32] || new_root[32] (64 bytes). + // Requires the binary to be built with -tags zkverifier_aggregator_zk_ffi. 
+ ProofTypeAggregatorZKv1 ProofType = "aggregator_zk_v1" // ProofTypeNone indicates no proof verification (disabled) ProofTypeNone ProofType = "none" ) @@ -99,6 +104,8 @@ func NewVerifier(cfg *Config) (ZKVerifier, error) { return NewLightClientVerifier(cfg.ChainID) case ProofTypeAggregatorRSMTv1: return NewAggregatorRSMTVerifier(), nil + case ProofTypeAggregatorZKv1: + return NewAggregatorZKVerifier(cfg.VerificationKeyPath) case ProofTypeRISC0: return nil, fmt.Errorf("RISC0 verifier not implemented") case ProofTypeExec, ProofTypeNone: