diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7f6fb5fe..048ca3af 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,6 +4,7 @@ on: [push] env: GO_VERSION: 1.24 + RUST_VERSION: stable RETENTION-DAYS: 1 jobs: @@ -15,8 +16,30 @@ jobs: uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - - name: build + - name: build (without FFI) run: make build + + build-with-ffi: + if: false + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Setup Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Rust ${{ env.RUST_VERSION }} + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + - name: Cache Rust dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: | + rootchain/consensus/zkverifier/sp1-verifier-ffi + rootchain/consensus/zkverifier/light-client-verifier-ffi + - name: build (with FFI) + run: make build-with-ffi test: runs-on: ubuntu-latest steps: @@ -25,9 +48,9 @@ jobs: uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - - name: vet + - name: vet (without FFI) run: go vet ./... 
- - name: test + - name: test (without FFI) run: make test - name: upload test coverage uses: actions/upload-artifact@v4 @@ -36,6 +59,38 @@ jobs: path: test-coverage.out retention-days: ${{ env.RETENTION-DAYS }} + test-with-ffi: + if: false + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Setup Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Rust ${{ env.RUST_VERSION }} + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + - name: Cache Rust dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: | + rootchain/consensus/zkverifier/sp1-verifier-ffi + rootchain/consensus/zkverifier/light-client-verifier-ffi + - name: Build Rust FFI libraries + run: make build-rust-ffi + - name: vet (with FFI) + run: go vet -tags zkverifier_ffi ./... + - name: test (with FFI) + run: make test ZKVERIFIER_FFI=1 + - name: upload test coverage (with FFI) + uses: actions/upload-artifact@v4 + with: + name: test-coverage-ffi + path: test-coverage.out + retention-days: ${{ env.RETENTION-DAYS }} + analyze: runs-on: ubuntu-latest continue-on-error: true diff --git a/.gitignore b/.gitignore index 0849678a..753014f5 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,12 @@ .DS_Store build/ +# rust +target/ +Cargo.lock +*.pdb +config.toml + # Test artifacts test-coverage.out test-coverage-cobertura.xml diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..cec76591 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,297 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Overview + +Unicity BFT Core is a Byzantine Fault Tolerant (BFT) consensus system implementing a two-layer architecture: a root chain for consensus coordination and partitions for transaction processing. This is the reference implementation in Go. 
+ +## Build and Development + +### Prerequisites + +- Go 1.24 or higher +- C compiler (GCC recommended - part of build-essential on Debian/Ubuntu, available via Homebrew on macOS) +- For ZK proof verification: SP1 verifier dependencies + +### Essential Commands + +```bash +# Build the ubft binary +make build # Outputs to build/ubft + +# Run tests with coverage +make test # Uses -count=1 to disable caching + +# Run single test +go test ./path/to/package -run TestName + +# Run security analysis +make gosec + +# Clean build artifacts and test nodes +make clean + +# Full build pipeline +make all # clean + tools + test + build + gosec +``` + +### Running Nodes + +The CLI binary `ubft` provides commands for different node types: + +```bash +# Root node (consensus coordinator) +./build/ubft root-node run --home path/to/node-dir + +# Shard/partition node (transaction processor) +./build/ubft shard-node run --home path/to/node-dir + +# View available commands +./build/ubft -h +``` + +### Test Environment Setup + +```bash +# Set up root chain + 3 money partition nodes +./setup-nodes.sh -m 3 -t 0 + +# Set up root + money + token partitions +./setup-nodes.sh -m 3 -t 3 + +# Start nodes +./start.sh -r -p money # root + money partitions +./start.sh -r -p money -p tokens # root + money + tokens + +# Stop all +./stop.sh -a +``` + +Generated node configurations are in `test-nodes/` directory. 
+ +## Architecture + +### Two-Layer BFT System + +**Root Chain** (`rootchain/`): +- Coordinates consensus across all partitions +- Maintains trust bases and validator sets +- Processes block certification requests from partitions +- Returns Unicity Certificates (UCs) to certify partition state +- Single root chain can coordinate multiple partitions + +**Partitions/Shards** (`partition/`): +- Process transactions independently +- Submit block certification requests to root chain +- Receive UCs to finalize blocks +- Types: Money partition, Token partition, Orchestration partition, Custom partitions + +### Key Components + +**Consensus** (`rootchain/consensus/`): +- Byzantine consensus algorithm for root chain +- Processes proposals, votes, quorum certificates (QCs) +- Leader election and rotation +- State machine: new round → propose → vote → commit + +**Networking** (`network/`): +- libp2p-based P2P networking +- Protocol definitions in `network/protocol/`: + - `certification/`: Block certification request/response + - `handshake/`: UC feed subscription + - `abdrc/`: Consensus messages (proposals, votes, recovery) + - `blockproposal/`: Block proposals + - `replication/`: Ledger replication + +**State Management** (`state/`): +- Partition state trees +- Merkle tree implementations for state commitments +- State replication and recovery + +**Transaction Systems** (`txsystem/`): +- Pluggable transaction processing +- Money partition: transfers, splits, swaps, fee credits +- Token partition: NFTs, fungible tokens +- Predicates: WASM-based smart contract execution + +**Storage** (`keyvaluedb/`): +- Abstraction over storage backends +- BoltDB implementation for production +- MemoryDB for testing +- Transaction-based read/write operations + +### Critical Data Structures + +**Unicity Certificate (UC)**: Proof that root chain reached consensus on a partition's state +- Contains `InputRecord` (partition's proposed state) +- Contains `UnicitySeal` (root chain's certification 
with signatures) +- Contains `TechnicalRecord` (synchronization data: next round, epoch, leader) + +**InputRecord (IR)**: Partition's state transition proposal +- Round number, epoch, timestamp +- Previous state hash, new state hash +- Block hash +- Validation rules (bft-go-base/types/input_record.go:75): + - If state hash unchanged: block hash must be nil (no transactions) + - If state hash changed: block hash must be non-nil (has transactions) + +**BlockCertificationRequest**: Partition sends to root chain +- InputRecord with proposed state +- ZK proof (optional, separate from IR) +- Signature from partition validator +- Uses CBOR serialization with tuple/array format + +**TechnicalRecord**: Root chain provides to partition for synchronization +- Next round number (partition must use this for next request) +- Current epoch +- Current leader +- Ensures partition stays synchronized with root chain rounds + +### CBOR Serialization + +All network messages use CBOR (Compact Binary Object Representation) with `toarray` format (array/tuple serialization, not maps). + +**Important**: Go structs use `cbor:",toarray"` tags. When implementing clients in other languages: +- Use array serialization (not map/object) +- Nil values serialize as CBOR null (0xf6), not empty byte strings (0x40) +- Byte slices use CBOR byte string type (major type 2) + +Example from certification protocol: +``` +[partition_id, shard_id, node_id, input_record, zk_proof, block_size, state_size, signature] +``` + +### Partition Integration Pattern + +When building a new partition/blockchain that integrates with BFT Core: + +1. **Initialization**: + - Subscribe to UC feed via Handshake message + - Receive initial sync UC (may have null hashes for pre-state) + - Store sync UC for timestamp/epoch but don't finalize blocks + +2. 
**Block Production**: + - Use `next_round` from last UC's TechnicalRecord + - Use `timestamp` from last UC's UnicitySeal + - Use `epoch` from last UC's InputRecord + - Previous hash = last certified state hash (from UC.InputRecord.Hash) + - For first block: previous_hash = None (let BFT Core use genesis) + +3. **Certification**: + - Build InputRecord with round from TechnicalRecord + - Set block_hash = actual block header hash (not state root!) + - Set hash = new state root + - Sign entire BlockCertificationRequest (with signature set to nil) + - Send via `/ab/block-certification/0.0.1` protocol + +4. **UC Validation**: + - Check UC.InputRecord.Hash matches proposed state + - Sync UCs (both hashes null): update round state, don't finalize + - Repeat UCs (same IR, higher root round): timeout, resync + - Valid UCs: finalize block, store as last UC + +5. **State Continuity**: + - Each block's previous_hash must equal last certified UC's hash + - Maintains chain of certified states + - Root chain validates this continuity + +## Configuration + +Configuration sources (in precedence order): +1. Command line flags: `--flag=value` +2. Environment variables: `UBFT_FLAG=value` +3. Config file: `$UBFT_HOME/config.props` +4. Default values + +Default `$UBFT_HOME` is `$HOME/.ubft` + +### Logging + +Logger config file: `$UBFT_HOME/logger-config.yaml` (see `cli/ubft/config/logger-config.yaml` for example) + +Log format options: text, json, console, ecs +Log level options: DEBUG, INFO, WARN, ERROR + +### Tracing + +Enable distributed tracing: +```bash +UBFT_TRACING=otlptracehttp ./build/ubft root-node run ... +``` + +Exporter options: stdout, otlptracehttp, zipkin + +For tests: +```bash +UBFT_TEST_TRACER=otlptracehttp go test ./... 
+``` + +## Testing + +### Test Structure + +- Unit tests alongside production code (`*_test.go`) +- Test utilities in `internal/testutils/` +- Integration tests use real network components with mock partitions + +### Test Helpers + +- `internal/testutils/eventually.go`: Async condition checking +- `internal/testutils/logger/`: Test logger setup +- `internal/testutils/network/`: Mock network implementations +- `internal/testutils/trustbase/`: Test trust base generation +- `internal/testutils/txsystem/`: Counter-based test transaction system + +### Running Tests + +```bash +# All tests with coverage +make test + +# Specific package +go test ./rootchain/consensus + +# Specific test +go test ./partition -run TestNode_StartAndStop + +# With race detector +go test -race ./... + +# Generate tests for Rust SDK +UBFT_RUST_SDK_ROOT="/path/to/rust-sdk" go test ./... +``` + +## Docker + +```bash +# Build Docker image +make build-docker + +# With local go dependencies +DOCKER_GO_DEPENDENCY=../bft-go-base make build-docker +``` + +## Common Pitfalls + + +1. **Round Synchronization**: Partitions must use `TechnicalRecord.Round` for next certification request, not block number or self-incremented counter + +2. **CBOR Serialization**: Use `cbor:",toarray"` for struct tags and ensure nil values serialize as CBOR null (0xf6), not empty byte strings + +3. **InputRecord Validation**: State hash changes require non-nil block hash; unchanged state requires nil block hash + +4. **UC Types**: Distinguish between sync UCs (null hashes), repeat UCs (timeout), and valid UCs (certified blocks) + +5. **Timestamp Source**: Use UnicitySeal.timestamp from last UC, not system time + +6. **Previous Hash**: For certification requests, use previous round's state root hash as the PreviousHash. It MUST match the previous UC's luc.InputRecord.Hash to be successful. That is, rounds' root hashes must form a continuous chain certified by InputRecords. + +7. 
**First Block**: Send previous_hash=nil to let BFT Core use genesis state + +8. **Database Cleanup**: When testing, clean both partition AND root chain databases for fresh state. Otherwise, the BFT Core and partition can not produce a synchronized chain of root hashes, following the ledger rules. + +## Related Repositories + +- `bft-go-base`: Shared types and utilities (InputRecord, UnicityCertificate, validation rules) +- Integration clients should implement CBOR serialization matching Go's `toarray` format diff --git a/Makefile b/Makefile index 26d8db79..be7b3086 100644 --- a/Makefile +++ b/Makefile @@ -5,19 +5,114 @@ ifdef DOCKER_GO_DEPENDENCY DOCKER_ARGUMENTS += --build-context go-dependency=${DOCKER_GO_DEPENDENCY} --build-arg DOCKER_GO_DEPENDENCY=${DOCKER_GO_DEPENDENCY} endif +# ZK Verifier FFI configuration +# Set ZKVERIFIER_FFI=1 to enable Rust FFI components (SP1 and light-client verifiers) +# Set ZKVERIFIER_AGGREGATOR_ZK_FFI=1 to enable the aggregator ZK verifier FFI (SP1 6.0.2) +# Default: all disabled (builds without Rust dependencies) +ZKVERIFIER_FFI ?= 0 +ZKVERIFIER_AGGREGATOR_ZK_FFI ?= 0 + +# Accumulate Go build tags +GO_BUILD_TAGS_LIST = +GO_TEST_TAGS_LIST = + +ifeq ($(ZKVERIFIER_FFI),1) + GO_BUILD_TAGS_LIST += zkverifier_ffi + GO_TEST_TAGS_LIST += zkverifier_ffi +endif + +ifeq ($(ZKVERIFIER_AGGREGATOR_ZK_FFI),1) + GO_BUILD_TAGS_LIST += zkverifier_aggregator_zk_ffi + GO_TEST_TAGS_LIST += zkverifier_aggregator_zk_ffi +endif + +ifneq ($(strip $(GO_BUILD_TAGS_LIST)),) + GO_BUILD_TAGS = -tags $(subst $(space),$(comma),$(strip $(GO_BUILD_TAGS_LIST))) + GO_TEST_TAGS = -tags $(subst $(space),$(comma),$(strip $(GO_TEST_TAGS_LIST))) +else + GO_BUILD_TAGS = + GO_TEST_TAGS = +endif + +comma = , +space = $(empty) $(empty) + +# FFI library paths +SP1_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/sp1-verifier-ffi +LIGHT_CLIENT_VERIFIER_FFI_DIR = rootchain/consensus/zkverifier/light-client-verifier-ffi +AGGREGATOR_ZK_VERIFIER_FFI_DIR = 
rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi + all: clean tools test build gosec clean: rm -rf build/ rm -rf test-nodes/ +clean-ffi: + @if [ -d "$(SP1_VERIFIER_FFI_DIR)" ]; then \ + cd $(SP1_VERIFIER_FFI_DIR) && cargo clean; \ + fi + @if [ -d "$(LIGHT_CLIENT_VERIFIER_FFI_DIR)" ]; then \ + cd $(LIGHT_CLIENT_VERIFIER_FFI_DIR) && cargo clean; \ + fi + @if [ -d "$(AGGREGATOR_ZK_VERIFIER_FFI_DIR)" ]; then \ + cd $(AGGREGATOR_ZK_VERIFIER_FFI_DIR) && cargo clean; \ + fi + test: - go test ./... -coverpkg=./... -count=1 -coverprofile test-coverage.out + go test $(GO_TEST_TAGS) ./... -coverpkg=./... -count=1 -coverprofile test-coverage.out build: # cd to directory where main.go exits, hack fix for go bug to embed version control data # https://github.com/golang/go/issues/51279 - cd ./cli/ubft && go build -o ../../build/ubft + cd ./cli/ubft && go build $(GO_BUILD_TAGS) -o ../../build/ubft + +# Build with ZK verifier FFI support (SP1 + light-client, requires Rust toolchain) +build-with-ffi: build-rust-ffi + $(MAKE) build ZKVERIFIER_FFI=1 + +# Build with aggregator ZK verifier FFI support only (SP1 6.0.2, requires Rust toolchain) +build-with-aggregator-zk-ffi: build-aggregator-zk-ffi + $(MAKE) build ZKVERIFIER_AGGREGATOR_ZK_FFI=1 + +# Build with all FFI verifiers enabled +build-with-all-ffi: build-rust-ffi build-aggregator-zk-ffi + $(MAKE) build ZKVERIFIER_FFI=1 ZKVERIFIER_AGGREGATOR_ZK_FFI=1 + +# Build all Rust FFI libraries (SP1 + light-client) +build-rust-ffi: check-rust build-sp1-ffi build-light-client-ffi + +build-sp1-ffi: + @echo "Building SP1 verifier FFI..." + @if [ -d "$(SP1_VERIFIER_FFI_DIR)" ]; then \ + cd $(SP1_VERIFIER_FFI_DIR) && cargo build --release; \ + else \ + echo "Warning: $(SP1_VERIFIER_FFI_DIR) not found"; \ + fi + +build-light-client-ffi: + @echo "Building Light Client verifier FFI..." 
+ @if [ -d "$(LIGHT_CLIENT_VERIFIER_FFI_DIR)" ]; then \ + cd $(LIGHT_CLIENT_VERIFIER_FFI_DIR) && cargo build --release; \ + else \ + echo "Warning: $(LIGHT_CLIENT_VERIFIER_FFI_DIR) not found"; \ + fi + +build-aggregator-zk-ffi: check-rust + @echo "Building Aggregator ZK verifier FFI (SP1 6.0.2)..." + @if [ -d "$(AGGREGATOR_ZK_VERIFIER_FFI_DIR)" ]; then \ + cd $(AGGREGATOR_ZK_VERIFIER_FFI_DIR) && cargo build --release; \ + else \ + echo "Warning: $(AGGREGATOR_ZK_VERIFIER_FFI_DIR) not found"; \ + fi + +# Check if Rust toolchain is available +check-rust: + @command -v cargo >/dev/null 2>&1 || { \ + echo "Error: Rust toolchain not found. Install from https://rustup.rs"; \ + exit 1; \ + } + @echo "Rust toolchain found: $$(rustc --version)" build-docker: docker build ${DOCKER_ARGUMENTS} --file scripts/Dockerfile --tag unicity-bft:local . @@ -31,8 +126,17 @@ tools: .PHONY: \ all \ clean \ + clean-ffi \ tools \ test \ build \ + build-with-ffi \ + build-with-aggregator-zk-ffi \ + build-with-all-ffi \ + build-rust-ffi \ + build-sp1-ffi \ + build-light-client-ffi \ + build-aggregator-zk-ffi \ + check-rust \ build-docker \ gosec diff --git a/README.md b/README.md index 27e717ec..15d803c2 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,28 @@ # Build -Run `make build` to build the application. Executable will be built to `build/ubft`. +Run `make build` to build the application. Executable will be built to `build/ubft`. ### Build dependencies * [`Go`](https://go.dev/doc/install) version 1.24. * `C` compiler, recent versions of [GCC](https://gcc.gnu.org/) are recommended. In Debian and Ubuntu repositories, GCC is part of the build-essential package. On macOS, GCC can be installed with [Homebrew](https://formulae.brew.sh/formula/gcc). 
+### Build targets + +| Target | Description | +|--------|-------------| +| `make build` | Build without FFI (default, no Rust required) | +| `make build-with-ffi` | Build with SP1 + Light Client FFI verifiers | +| `make build-with-aggregator-zk-ffi` | Build with aggregator ZK verifier FFI (SP1 6.0.2) | +| `make build-with-all-ffi` | Build with all FFI verifiers enabled | +| `make build-rust-ffi` | Build SP1 + Light Client Rust FFI libraries only | +| `make build-aggregator-zk-ffi` | Build aggregator ZK verifier Rust FFI library only | +| `make build-sp1-ffi` | Build SP1 verifier FFI library only | +| `make build-light-client-ffi` | Build Light Client verifier FFI library only | +| `make clean-ffi` | Clean all Rust FFI build artifacts | +| `make check-rust` | Verify Rust toolchain is available | + + # Configuration It's possible to define the configuration values from (in the order of precedence): diff --git a/cli/ubft/cmd/root_node.go b/cli/ubft/cmd/root_node.go index 8cf255f8..0550747c 100644 --- a/cli/ubft/cmd/root_node.go +++ b/cli/ubft/cmd/root_node.go @@ -228,6 +228,7 @@ func rootNodeRun(ctx context.Context, flags *rootNodeRunFlags) error { if err = host.BootstrapConnect(ctx, log); err != nil { return err } + node, err := rootchain.New( host, partitionNet, diff --git a/go.mod b/go.mod index cb1d0102..40cabf03 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/spf13/pflag v1.0.6 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 - github.com/unicitynetwork/bft-go-base v1.0.3-0.20251230081246-e5204716ebf2 + github.com/tetratelabs/wazero v1.8.1 go.etcd.io/bbolt v1.4.0 go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 @@ -146,6 +146,9 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.9.0 // 
indirect + github.com/unicitynetwork/bft-go-base v1.0.3-0.20260113141611-ef8e60451f16 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/go.sum b/go.sum index 93939906..e57ceccf 100644 --- a/go.sum +++ b/go.sum @@ -462,8 +462,14 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/unicitynetwork/bft-go-base v1.0.3-0.20251230081246-e5204716ebf2 h1:loUqSmmtlkjpIYpfHF+OzHon6EU8fyszOMXj4jfOPBg= -github.com/unicitynetwork/bft-go-base v1.0.3-0.20251230081246-e5204716ebf2/go.mod h1:hBnOG52VRy/vpgIBUulTgk7PBTwODZ2xkVjCEu5yRcQ= +github.com/tetratelabs/wazero v1.8.1 h1:NrcgVbWfkWvVc4UtT4LRLDf91PsOzDzefMdwhLfA550= +github.com/tetratelabs/wazero v1.8.1/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= +github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= +github.com/unicitynetwork/bft-go-base v1.0.3-0.20260113141611-ef8e60451f16 h1:yixbhxRwxq4s/vMUvzkCnk/mI4+uM8CRzaOlw6/LQZ8= +github.com/unicitynetwork/bft-go-base v1.0.3-0.20260113141611-ef8e60451f16/go.mod h1:hBnOG52VRy/vpgIBUulTgk7PBTwODZ2xkVjCEu5yRcQ= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod 
h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= diff --git a/network/protocol/certification/block_certification_request.go b/network/protocol/certification/block_certification_request.go index 107bc16a..92dc0501 100644 --- a/network/protocol/certification/block_certification_request.go +++ b/network/protocol/certification/block_certification_request.go @@ -22,6 +22,7 @@ type BlockCertificationRequest struct { ShardID types.ShardID `json:"shardId"` NodeID string `json:"nodeId"` InputRecord *types.InputRecord `json:"inputRecord"` + ZkProof []byte `json:"zkProof"` // (ZK) proof for state transition validation BlockSize uint64 `json:"blockSize"` StateSize uint64 `json:"stateSize"` Signature hex.Bytes `json:"signature"` @@ -84,6 +85,48 @@ func (x *BlockCertificationRequest) Sign(signer crypto.Signer) error { } func (x BlockCertificationRequest) Bytes() ([]byte, error) { + // Exclude signature from signed data x.Signature = nil return types.Cbor.Marshal(x) } + +// UnmarshalCBOR provides backward compatibility for old database format (before ZkProof field was added) +// TODO: remove eventually +func (x *BlockCertificationRequest) UnmarshalCBOR(data []byte) error { + // Try new format first (8 elements with ZkProof) + type newFormat BlockCertificationRequest + var nf newFormat + if err := types.Cbor.Unmarshal(data, &nf); err == nil { + *x = BlockCertificationRequest(nf) + return nil + } + + // Try old format (7 elements without ZkProof) + type oldFormat struct { + _ struct{} `cbor:",toarray"` + PartitionID types.PartitionID `json:"partitionId"` + ShardID types.ShardID `json:"shardId"` + NodeID string `json:"nodeId"` + InputRecord *types.InputRecord `json:"inputRecord"` + BlockSize uint64 `json:"blockSize"` + StateSize uint64 `json:"stateSize"` + Signature hex.Bytes `json:"signature"` + } + var of oldFormat + if err := types.Cbor.Unmarshal(data, &of); err != nil { + return err // Return error from old format attempt + } + + // Convert old format to new format + *x = 
BlockCertificationRequest{ + PartitionID: of.PartitionID, + ShardID: of.ShardID, + NodeID: of.NodeID, + InputRecord: of.InputRecord, + ZkProof: nil, // Old format didn't have ZkProof + BlockSize: of.BlockSize, + StateSize: of.StateSize, + Signature: of.Signature, + } + return nil +} diff --git a/network/protocol/certification/certification_response.go b/network/protocol/certification/certification_response.go index 1d8c6356..9d296680 100644 --- a/network/protocol/certification/certification_response.go +++ b/network/protocol/certification/certification_response.go @@ -7,9 +7,40 @@ import ( "github.com/unicitynetwork/bft-go-base/types" ) +// Status codes for CertificationResponse.Status. Status is a transport-level +// field on the outer response wrapper — it is NEVER hashed into the UC — and +// it only describes why *this particular response message* was generated. +// The wrapped UC is still the last-good certificate regardless of status. +const ( + // CertStatusOK — request was accepted and UC is the newly certified one. + CertStatusOK uint32 = 0 + // CertStatusTransient — root-side transient error (consensus manager unavailable, + // send failure, etc). Submitter SHOULD retry with the same batch. + CertStatusTransient uint32 = 1 + // CertStatusRequestInvalid — ValidRequest failed (stale round/epoch, timestamp + // drift, bad signature, bad prev-hash). Submitter SHOULD resync from the + // attached UC and retry. + CertStatusRequestInvalid uint32 = 2 + // CertStatusProofInvalid — ZK proof verification failed. The batch and its + // proof are inconsistent. Submitter SHOULD drop this batch; a new batch may succeed. + CertStatusProofInvalid uint32 = 3 + // CertStatusFatal — unrecoverable (unknown proof type, mandatory verifier not + // configured, schema mismatch). Submitter SHOULD stop retrying and alert. 
+ CertStatusFatal uint32 = 255 +) + +// MaxStatusMessageLen caps the free-form diagnostic string on the wire so a +// misbehaving or malicious root cannot blast unbounded strings at partitions. +const MaxStatusMessageLen = 512 + /* Certification response is sent by the root partition to validators of a shard of a partition as a response to a certification request message. + +Status and Message are outer transport-level fields. Status == CertStatusOK means +the request was accepted and UC is the newly certified one. A non-zero Status means +the request was rejected for the reason encoded in Status/Message; the wrapped UC +in that case is the last-good certificate so the submitter can resync its state. */ type CertificationResponse struct { _ struct{} `cbor:",toarray"` @@ -17,8 +48,19 @@ type CertificationResponse struct { Shard types.ShardID Technical TechnicalRecord UC types.UnicityCertificate + Status uint32 + Message string } +// IsAccepted reports whether the wrapped UC represents acceptance of the +// request that triggered this response. +func (cr *CertificationResponse) IsAccepted() bool { + return cr != nil && cr.Status == CertStatusOK +} + +// IsValid validates the structural integrity of the wrapped UC and technical +// record. It intentionally does NOT reject non-OK Status values: the wrapped +// UC is still the last-good certificate even on a rejection. func (cr *CertificationResponse) IsValid() error { if cr == nil { return errors.New("nil CertificationResponse") @@ -52,3 +94,51 @@ func (cr *CertificationResponse) SetTechnicalRecord(tr TechnicalRecord) error { cr.Technical = tr return nil } + +// UnmarshalCBOR provides backward compatibility for the pre-status wire format +// (4 array elements, before Status/Message were added). Existing rootchain.db +// snapshots and older peers encode the old shape; we decode either and fill +// Status/Message with zero values when they're absent. +// TODO: remove eventually. 
+func (cr *CertificationResponse) UnmarshalCBOR(data []byte) error { + // Try the new 6-element format first. + type newFormat struct { + _ struct{} `cbor:",toarray"` + Partition types.PartitionID + Shard types.ShardID + Technical TechnicalRecord + UC types.UnicityCertificate + Status uint32 + Message string + } + var nf newFormat + if err := types.Cbor.Unmarshal(data, &nf); err == nil { + cr.Partition = nf.Partition + cr.Shard = nf.Shard + cr.Technical = nf.Technical + cr.UC = nf.UC + cr.Status = nf.Status + cr.Message = nf.Message + return nil + } + + // Fall back to the old 4-element format. + type oldFormat struct { + _ struct{} `cbor:",toarray"` + Partition types.PartitionID + Shard types.ShardID + Technical TechnicalRecord + UC types.UnicityCertificate + } + var of oldFormat + if err := types.Cbor.Unmarshal(data, &of); err != nil { + return err + } + cr.Partition = of.Partition + cr.Shard = of.Shard + cr.Technical = of.Technical + cr.UC = of.UC + cr.Status = CertStatusOK + cr.Message = "" + return nil +} diff --git a/network/protocol/certification/certification_response_test.go b/network/protocol/certification/certification_response_test.go index ad66a726..395e82a5 100644 --- a/network/protocol/certification/certification_response_test.go +++ b/network/protocol/certification/certification_response_test.go @@ -1,6 +1,7 @@ package certification import ( + "strings" "testing" "github.com/stretchr/testify/require" @@ -65,6 +66,83 @@ func Test_CertificationResponse_IsValid(t *testing.T) { }) } +func Test_CertificationResponse_IsValid_NonOKStatus(t *testing.T) { + // Status != OK must NOT cause IsValid to fail — the wrapped UC is still + // the last-good certificate and callers may want to forward it. 
+ cr := &CertificationResponse{ + Partition: 1, + Shard: types.ShardID{}, + UC: types.UnicityCertificate{ + Version: 1, + UnicityTreeCertificate: &types.UnicityTreeCertificate{ + Version: 1, + Partition: 1, + }, + }, + Status: CertStatusProofInvalid, + Message: "envelope truncated: missing leaf_count", + } + require.NoError(t, cr.SetTechnicalRecord(TechnicalRecord{ + Round: 99, Epoch: 8, Leader: "1", StatHash: []byte{1}, FeeHash: []byte{2}, + })) + require.NoError(t, cr.IsValid()) + require.False(t, cr.IsAccepted()) + + cr.Status = CertStatusOK + require.True(t, cr.IsAccepted()) +} + +func Test_CertificationResponse_CBOR_RoundTrip_WithStatus(t *testing.T) { + orig := &CertificationResponse{ + Partition: 1, + Shard: types.ShardID{}, + UC: types.UnicityCertificate{ + Version: 1, + UnicityTreeCertificate: &types.UnicityTreeCertificate{ + Version: 1, Partition: 1, + }, + }, + Status: CertStatusRequestInvalid, + Message: "stale round: expected 42 got 41", + } + require.NoError(t, orig.SetTechnicalRecord(TechnicalRecord{ + Round: 99, Epoch: 8, Leader: "1", StatHash: []byte{1}, FeeHash: []byte{2}, + })) + + buf, err := types.Cbor.Marshal(orig) + require.NoError(t, err) + + var decoded CertificationResponse + require.NoError(t, types.Cbor.Unmarshal(buf, &decoded)) + require.Equal(t, orig.Status, decoded.Status) + require.Equal(t, orig.Message, decoded.Message) + require.Equal(t, orig.Partition, decoded.Partition) + require.Equal(t, orig.Technical.Round, decoded.Technical.Round) +} + +func Test_SendRejection_TruncationBoundary(t *testing.T) { + // Sanity: verify that MaxStatusMessageLen is a reasonable cap and that + // a message exactly at the cap survives round-trip. 
+ msg := strings.Repeat("x", MaxStatusMessageLen) + cr := &CertificationResponse{ + Partition: 1, + UC: types.UnicityCertificate{ + Version: 1, + UnicityTreeCertificate: &types.UnicityTreeCertificate{Version: 1, Partition: 1}, + }, + Status: CertStatusFatal, + Message: msg, + } + require.NoError(t, cr.SetTechnicalRecord(TechnicalRecord{ + Round: 1, Epoch: 1, Leader: "1", StatHash: []byte{1}, FeeHash: []byte{1}, + })) + buf, err := types.Cbor.Marshal(cr) + require.NoError(t, err) + var out CertificationResponse + require.NoError(t, types.Cbor.Unmarshal(buf, &out)) + require.Len(t, out.Message, MaxStatusMessageLen) +} + func Test_CertificationResponse_SetTechnicalRecord(t *testing.T) { tr := TechnicalRecord{Round: 123, Epoch: 4, Leader: "567890"} cr := CertificationResponse{} diff --git a/rootchain/consensus/consensus_manager.go b/rootchain/consensus/consensus_manager.go index 71d96377..131d7d44 100644 --- a/rootchain/consensus/consensus_manager.go +++ b/rootchain/consensus/consensus_manager.go @@ -929,7 +929,7 @@ func (x *ConsensusManager) processNewRoundEvent(ctx context.Context) { } x.leaderCnt.Add(ctx, 1) - x.log.InfoContext(ctx, "new round start, node is leader") + // x.log.InfoContext(ctx, "new round start, node is leader") // find shards with T2 timeouts timedOutShards, err := x.t2Timeouts.GetT2Timeouts(round) diff --git a/rootchain/consensus/storage/block_store.go b/rootchain/consensus/storage/block_store.go index f92cb919..6fbaa8c5 100644 --- a/rootchain/consensus/storage/block_store.go +++ b/rootchain/consensus/storage/block_store.go @@ -197,7 +197,8 @@ func (x *BlockStore) GetCertificate(id types.PartitionID, shard types.ShardID) ( defer x.lock.RUnlock() committedBlock := x.blockTree.Root() - if si, ok := committedBlock.ShardState.States[types.PartitionShardID{PartitionID: id, ShardID: shard.Key()}]; ok { + key := types.PartitionShardID{PartitionID: id, ShardID: shard.Key()} + if si, ok := committedBlock.ShardState.States[key]; ok { return si.LastCR, nil } 
return nil, fmt.Errorf("no certificate found for shard %s - %s", id, shard) @@ -222,7 +223,8 @@ func (x *BlockStore) ShardInfo(partition types.PartitionID, shard types.ShardID) defer x.lock.RUnlock() committedBlock := x.blockTree.Root() - if si, ok := committedBlock.ShardState.States[types.PartitionShardID{PartitionID: partition, ShardID: shard.Key()}]; ok { + key := types.PartitionShardID{PartitionID: partition, ShardID: shard.Key()} + if si, ok := committedBlock.ShardState.States[key]; ok { return si } return nil diff --git a/rootchain/consensus/storage/sharding.go b/rootchain/consensus/storage/sharding.go index ce0cd10f..9f31cb4d 100644 --- a/rootchain/consensus/storage/sharding.go +++ b/rootchain/consensus/storage/sharding.go @@ -150,6 +150,7 @@ func (ss ShardStates) certificationResponses(algo crypto.Hash) ([]*certification UnicityTreeCertificate: utCert, ShardTreeCertificate: stCert, }, + Status: certification.CertStatusOK, }) } @@ -250,14 +251,15 @@ func NewShardInfo(shardConf *types.PartitionDescriptionRecord, hashAlg crypto.Ha return nil, fmt.Errorf("failed to calculate shard conf hash: %w", err) } si := &ShardInfo{ - PartitionID: shardConf.PartitionID, - ShardID: shardConf.ShardID, - T2Timeout: shardConf.T2Timeout, - ShardConfHash: shardConfHash, - RootHash: nil, - PrevEpochFees: types.RawCBOR{0xA0}, // CBOR map(0) - LastCR: nil, - IR: &types.InputRecord{Version: 1}, + PartitionID: shardConf.PartitionID, + ShardID: shardConf.ShardID, + T2Timeout: shardConf.T2Timeout, + ShardConfHash: shardConfHash, + RootHash: nil, + PrevEpochFees: types.RawCBOR{0xA0}, // CBOR map(0) + LastCR: nil, + IR: &types.InputRecord{Version: 1}, + PartitionParams: maps.Clone(shardConf.PartitionParams), } if si.PrevEpochStat, err = types.Cbor.Marshal(si.Stat); err != nil { @@ -334,10 +336,118 @@ type ShardInfo struct { IR *types.InputRecord TR certification.TechnicalRecord + // PartitionParams contains proof configuration from PartitionDescriptionRecord. 
+ // Used for per-partition ZK proof verification settings. + // NOTE: This field MUST remain at the end of the exported fields for backward + // compatibility with CBOR deserialization of older data. + PartitionParams map[string]string + nodeIDs []string // sorted list of partition node IDs trustBase map[string]abcrypto.Verifier } +// shardInfoV1 is used for CBOR serialization/deserialization with toarray format. +// This is the format without PartitionParams (pre-v2 format). +type shardInfoV1 struct { + _ struct{} `cbor:",toarray"` + PartitionID types.PartitionID + ShardID types.ShardID + T2Timeout time.Duration + ShardConfHash []byte + RootHash []byte + PrevEpochStat types.RawCBOR + Stat certification.StatisticalRecord + PrevEpochFees types.RawCBOR + Fees map[string]uint64 + LastCR *certification.CertificationResponse + IR *types.InputRecord + TR certification.TechnicalRecord +} + +// shardInfoV2 is used for CBOR serialization/deserialization with toarray format. +// This is the format with PartitionParams (v2 format). +type shardInfoV2 struct { + _ struct{} `cbor:",toarray"` + PartitionID types.PartitionID + ShardID types.ShardID + T2Timeout time.Duration + ShardConfHash []byte + RootHash []byte + PrevEpochStat types.RawCBOR + Stat certification.StatisticalRecord + PrevEpochFees types.RawCBOR + Fees map[string]uint64 + LastCR *certification.CertificationResponse + IR *types.InputRecord + TR certification.TechnicalRecord + PartitionParams map[string]string +} + +// MarshalCBOR implements cbor.Marshaler for ShardInfo. +// Always uses the v2 format (with PartitionParams). 
+func (si ShardInfo) MarshalCBOR() ([]byte, error) { + v2 := shardInfoV2{ + PartitionID: si.PartitionID, + ShardID: si.ShardID, + T2Timeout: si.T2Timeout, + ShardConfHash: si.ShardConfHash, + RootHash: si.RootHash, + PrevEpochStat: si.PrevEpochStat, + Stat: si.Stat, + PrevEpochFees: si.PrevEpochFees, + Fees: si.Fees, + LastCR: si.LastCR, + IR: si.IR, + TR: si.TR, + PartitionParams: si.PartitionParams, + } + return types.Cbor.Marshal(v2) +} + +// UnmarshalCBOR implements cbor.Unmarshaler for ShardInfo. +// Supports both v1 (without PartitionParams) and v2 (with PartitionParams) formats. +func (si *ShardInfo) UnmarshalCBOR(data []byte) error { + // Try v2 format first (with PartitionParams) + var v2 shardInfoV2 + if err := types.Cbor.Unmarshal(data, &v2); err == nil { + si.PartitionID = v2.PartitionID + si.ShardID = v2.ShardID + si.T2Timeout = v2.T2Timeout + si.ShardConfHash = v2.ShardConfHash + si.RootHash = v2.RootHash + si.PrevEpochStat = v2.PrevEpochStat + si.Stat = v2.Stat + si.PrevEpochFees = v2.PrevEpochFees + si.Fees = v2.Fees + si.LastCR = v2.LastCR + si.IR = v2.IR + si.TR = v2.TR + si.PartitionParams = v2.PartitionParams + return nil + } + + // Fall back to v1 format (without PartitionParams) + var v1 shardInfoV1 + if err := types.Cbor.Unmarshal(data, &v1); err != nil { + return fmt.Errorf("decoding ShardInfo: %w", err) + } + + si.PartitionID = v1.PartitionID + si.ShardID = v1.ShardID + si.T2Timeout = v1.T2Timeout + si.ShardConfHash = v1.ShardConfHash + si.RootHash = v1.RootHash + si.PrevEpochStat = v1.PrevEpochStat + si.Stat = v1.Stat + si.PrevEpochFees = v1.PrevEpochFees + si.Fees = v1.Fees + si.LastCR = v1.LastCR + si.IR = v1.IR + si.TR = v1.TR + si.PartitionParams = nil // v1 format doesn't have this field + return nil +} + func (si *ShardInfo) resetFeeList(shardConf *types.PartitionDescriptionRecord) { fees := make(map[string]uint64) for _, n := range shardConf.Validators { @@ -383,14 +493,15 @@ func (si *ShardInfo) nextEpoch(shardConf 
*types.PartitionDescriptionRecord, hash return nil, fmt.Errorf("failed to calculate shard conf hash: %w", err) } nextSI := &ShardInfo{ - PartitionID: shardConf.PartitionID, - ShardID: shardConf.ShardID, - T2Timeout: shardConf.T2Timeout, - ShardConfHash: shardConfHash, - RootHash: si.RootHash, - LastCR: si.LastCR, - IR: si.IR, - TR: si.TR, + PartitionID: shardConf.PartitionID, + ShardID: shardConf.ShardID, + T2Timeout: shardConf.T2Timeout, + ShardConfHash: shardConfHash, + RootHash: si.RootHash, + LastCR: si.LastCR, + IR: si.IR, + TR: si.TR, + PartitionParams: maps.Clone(shardConf.PartitionParams), } if nextSI.PrevEpochFees, err = types.Cbor.Marshal(si.Fees); err != nil { diff --git a/rootchain/consensus/storage/testdata/rootchain_v0.db b/rootchain/consensus/storage/testdata/rootchain_v0.db index 21f95210..eebf76f9 100644 Binary files a/rootchain/consensus/storage/testdata/rootchain_v0.db and b/rootchain/consensus/storage/testdata/rootchain_v0.db differ diff --git a/rootchain/consensus/zkverifier/FFI_INTEGRATION.md b/rootchain/consensus/zkverifier/FFI_INTEGRATION.md new file mode 100644 index 00000000..52d65bb0 --- /dev/null +++ b/rootchain/consensus/zkverifier/FFI_INTEGRATION.md @@ -0,0 +1,448 @@ +# SP1 FFI Integration Guide + +Complete guide for integrating SP1 proof verification via FFI (Foreign Function Interface). + +## Overview + +Since there's no native Go library for SP1 STARK proof verification, we use FFI to call the Rust SP1 SDK from Go. + +**Architecture:** +``` +Go (BFT Core) → CGO → C Header → Rust FFI → SP1 SDK +``` + +--- + +## Quick Start + +### 1. Build the FFI Library + +```bash +cd rootchain/consensus/zkverifier/sp1-verifier-ffi +./build.sh +``` + +This will: +- Compile the Rust library +- Run tests +- Create `libsp1_verifier_ffi.{so,dylib,a}` + +### 2. Test the Integration + +```bash +cd .. +go test -v ./... +``` + +The Go code automatically links to the Rust library via CGO directives. + +### 3. 
Run BFT Core with FFI Verification + +```bash +ubft root-node run \ + --zk-verification-enabled=true \ + --zk-proof-type=sp1 \ + --zk-vkey-path=/etc/bft-core/sp1.vkey +``` + +If the FFI library is built, you'll see: +``` +INFO Using SP1 FFI verifier path=/etc/bft-core/sp1.vkey version=0.1.0 +``` + +If FFI is not available: +``` +ERROR FFI verifier not available, error=... +``` + +--- + +## Detailed Setup + +### Prerequisites + +**System Requirements:** +- Rust 1.70+ (install from https://rustup.rs/) +- GCC/Clang (for CGO) +- Go 1.21+ + +**Library Dependencies:** +- SP1 SDK (automatically fetched by Cargo) +- System libraries: `libdl`, `libm` + +### Build Process + +#### Step 1: Configure Rust Environment + +```bash +# Install Rust (if not already installed) +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + +# Verify installation +rustc --version +cargo --version +``` + +#### Step 2: Build FFI Library + +```bash +cd rootchain/consensus/zkverifier/sp1-verifier-ffi + +# Development build (faster, larger) +cargo build + +# Production build (optimized) +cargo build --release +``` + +**Build artifacts:** +- Linux: `target/release/libsp1_verifier_ffi.so` +- macOS: `target/release/libsp1_verifier_ffi.dylib` +- Windows: `target/release/sp1_verifier_ffi.dll` + +#### Step 3: Verify CGO Linkage + +```bash +cd .. +go build ./... +``` + +If you see errors like "library not found": +```bash +export CGO_LDFLAGS="-L$(pwd)/sp1-verifier-ffi/target/release" +export LD_LIBRARY_PATH="$(pwd)/sp1-verifier-ffi/target/release" # Linux +export DYLD_LIBRARY_PATH="$(pwd)/sp1-verifier-ffi/target/release" # macOS +``` + +--- + +## How It Works + +### Data Flow + +``` +1. Go calls NewSP1Verifier(vkeyPath) + ↓ +2. Attempts to create SP1VerifierFFI + ↓ +3. Loads C library via CGO + ↓ +4. Calls sp1_verify_proof() in Rust + ↓ +5. Rust deserializes proof and vkey + ↓ +6. SP1 SDK verifies cryptographically + ↓ +7. 
Result returned to Go as error/nil + ``` + + ### Memory Management + + **Go → C → Rust:** + - Go passes pointers to byte slices (no copy) + - Rust reads via `std::slice::from_raw_parts` (unsafe) + - No ownership transfer (Go still owns memory) + + **Rust → C → Go:** + - Rust allocates error strings with `CString::into_raw()` + - Go receives pointer and reads with `C.GoString()` + - Go calls `sp1_free_string()` to deallocate + + **Safety guarantees:** + - All unsafe blocks have safety comments + - Pointer null checks before dereferencing + - Proper cleanup in all error paths + + ### Proof Format + + **Expected format:** + ```rust + SP1ProofWithPublicValues { + proof: <bincode-serialized SP1 proof>, + public_values: [ + prev_state_root[0..32], // 32 bytes + new_state_root[32..64], // 32 bytes + // ... additional public values + // TODO: at least block hash must be checked as well, think about others + ] + } + ``` + + **Serialization:** Bincode (Rust standard) + + + + ## Deployment + + ### Option 1: Static Linking (Recommended) + + Build with static library for easier deployment: + + ```bash + cd sp1-verifier-ffi + cargo build --release + + # Copy static library + sudo cp target/release/libsp1_verifier_ffi.a /usr/local/lib/ + + # Build Go with static linking + cd .. + CGO_ENABLED=1 CGO_LDFLAGS="-static" go build ./... + ``` + + **Pros:** + - Single binary deployment + - No runtime dependencies + + **Cons:** + - Larger binary size + - Longer build time + + ### Option 2: Dynamic Linking + + ```bash + # Install shared library + sudo cp target/release/libsp1_verifier_ffi.so /usr/local/lib/ + sudo ldconfig # Linux only + + # Build Go normally + go build ./... + ``` + + **Pros:** + - Smaller binary + - Faster builds + + **Cons:** + - Must deploy library separately + - Runtime library path issues + + ### Option 3: Bundled Distribution + + ```bash + # Build everything + cd sp1-verifier-ffi && ./build.sh && cd ..
+ +# Package for distribution +mkdir -p dist/lib +cp sp1-verifier-ffi/target/release/libsp1_verifier_ffi.* dist/lib/ + +# Set library path in startup script +cat > dist/run.sh << 'EOF' +#!/bin/bash +export LD_LIBRARY_PATH="$(dirname $0)/lib:$LD_LIBRARY_PATH" +exec ./ubft "$@" +EOF +chmod +x dist/run.sh +``` + +--- + +## Testing + +### Unit Tests (Rust) + +```bash +cd sp1-verifier-ffi +cargo test +``` + +Tests verify: +- ✅ FFI safety (null pointers, bounds) +- ✅ Memory management +- ✅ Error code mapping + +### Integration Tests (Go) + +```bash +cd .. +go test -v ./... +``` + +Tests verify: +- ✅ CGO linkage works +- ✅ Version retrieval +- ✅ Error propagation +- ⚠️ Proof verification (requires real proof) + +### E2E Test with Real Proof + +```go +func TestSP1Verifier_RealProof(t *testing.T) { + verifier, err := NewSP1Verifier("testdata/sp1.vkey") + require.NoError(t, err) + + // Load real proof from Uni-EVM + proof, err := os.ReadFile("testdata/proof.bin") + require.NoError(t, err) + + prevRoot := hexDecode("...") + newRoot := hexDecode("...") + + err = verifier.VerifyProof(proof, prevRoot, newRoot) + require.NoError(t, err) +} +``` + +--- + +## Performance + +### Benchmarks + +```bash +cd sp1-verifier-ffi +cargo bench + +cd .. +go test -bench=. -benchmem +``` + +**Typical performance:** +- Verification: 10-100ms (depends on proof complexity) +- Memory: 50-200MB peak during verification +- CGO overhead: <1ms + +### Optimization + +**Rust side:** +```toml +[profile.release] +opt-level = 3 # Maximum optimization +lto = true # Link-time optimization +codegen-units = 1 # Better optimization +``` + +**Go side:** +- Reuse verifier instances (verification key loaded once) +- Avoid copying proof data (pass slices directly) + +--- + +## Troubleshooting + +### Build Errors + +**"cannot find -lsp1_verifier_ffi"** +```bash +# Library not built +cd sp1-verifier-ffi && cargo build --release && cd .. 
+ +# Or set library path +export CGO_LDFLAGS="-L$(pwd)/sp1-verifier-ffi/target/release" +``` + +**"undefined reference to `sp1_verify_proof`"** +```bash +# Header/library mismatch - rebuild both +cd sp1-verifier-ffi +cargo clean +cargo build --release +cd .. && go build ./... +``` + +### Runtime Errors + +**"error while loading shared libraries"** +```bash +# Linux +export LD_LIBRARY_PATH="/path/to/lib:$LD_LIBRARY_PATH" +sudo ldconfig + +# macOS +export DYLD_LIBRARY_PATH="/path/to/lib:$DYLD_LIBRARY_PATH" +``` + +**"FFI verifier not available"** +- Check library is built: `ls sp1-verifier-ffi/target/release/libsp1_verifier_ffi.*` +- Check CGO is enabled: `go env CGO_ENABLED` (should be `1`) +- Check architecture match: `file libsp1_verifier_ffi.so` vs `go version` + +### Verification Errors + +**"Invalid proof format"** +- Proof must be serialized `SP1ProofWithPublicValues` +- Use bincode serialization +- Check proof is not corrupted + +**"State root mismatch"** +- Public values first 64 bytes must match expected roots +- Verify prover outputs correct public values +- Check byte ordering (big-endian vs little-endian) + +--- + +## Security Considerations + +### Memory Safety + +**Unsafe Rust blocks:** +- All marked with safety comments +- Reviewed for correctness +- Null pointer checks before dereferencing +- No use-after-free (Go owns memory) + +**FFI boundary:** +- All pointers validated +- Length parameters checked +- No buffer overflows + +### Cryptographic Security + +**Verification key:** +- Must be from trusted source +- Loaded once, reused for all proofs +- No modification after loading + +**Proof validation:** +- Full cryptographic verification via SP1 SDK +- Public inputs always validated (TODO: check more values) +- No trust in prover claims + +--- + +## Advanced Topics + +### Custom Public Values + +If your proofs have additional public values beyond state roots: + +```rust +// In lib.rs +fn verify_proof_internal(...) -> anyhow::Result<()> { + // ... 
existing code ... + + // Access additional public values + if public_values.len() > 64 { + let custom_data = &public_values[64..]; + // Process custom data + } + + Ok(()) +} +``` + +### Multiple Proof Types + +To support both SP1 and RISC0: + +```rust +#[no_mangle] +pub extern "C" fn risc0_verify_proof(...) -> SP1VerifyResult { + // RISC0 verification logic +} +``` + +```go +// In Go +type RISC0VerifierFFI struct { ... } +``` + +--- + +## References + +- [SP1 Documentation](https://docs.succinct.xyz/) +- [Rust FFI Nomicon](https://doc.rust-lang.org/nomicon/ffi.html) +- [CGO Documentation](https://pkg.go.dev/cmd/cgo) +- [sp1-verifier-ffi README](./sp1-verifier-ffi/README.md) diff --git a/rootchain/consensus/zkverifier/README.md b/rootchain/consensus/zkverifier/README.md new file mode 100644 index 00000000..5a1f01e2 --- /dev/null +++ b/rootchain/consensus/zkverifier/README.md @@ -0,0 +1,256 @@ +# ZK Verifier Build System + +This directory contains optional Rust FFI components for ZK proof verification. The build system is configurable and supports building with or without these Rust dependencies. + +## Architecture + +The ZK verifier supports multiple proof types through a common interface. +Verifiers fall into two families: + +**Pure-Go, always compiled in (no build tag):** +- **No-Op Verifier** (`proof_type` unset or `none`): Disabled verification for testing. +- **Aggregator RSMT Verifier** (`proof_type=aggregator_rsmt_v1`): Verifies a + Radix Sparse Merkle Tree consistency proof produced by the Rust aggregator + (`crates/rsmt/src/consistency.rs`). Recomputes the `prev → new` root + transition for a batch of newly inserted leaves. Implementation lives in + the `rsmt/` sub-package. + +**FFI-gated (`-tags zkverifier_ffi`):** +- **SP1 Verifier** (`proof_type=sp1`): Verifies SP1 zkVM proofs. +- **Light Client Verifier** (`proof_type=light_client`): Executes full witness validation. 
+ +### Aggregator RSMT verifier + +Enable it on a partition by setting `proof_type=aggregator_rsmt_v1` in the +partition's `PartitionParams` when generating the shard config, e.g.: + +```bash +ubft shard-conf generate \ + ... \ + --partition-params "proof_type=aggregator_rsmt_v1" +``` + +Once set, `node.verifyZKProof()` rejects any `BlockCertificationRequest` whose +`ZkProof` envelope does not recompute `InputRecord.PreviousHash → +InputRecord.Hash` for the carried batch — no UC is issued. + +**Wire format of `ZkProof`** (no version tag; the format is selected by +`proof_type`): + +``` +offset size field +0 4 leaf_count (big-endian u32) +4 ... leaves: leaf_count × { key[32] || value_len (u16 BE) || value[value_len] } +... to end-of-buf consistency-proof opcode stream (flat bytes) +``` + +Opcodes (post-order stack machine): +- `0x00 || h[32]` — `S`: unchanged subtree hash +- `0x01` — `L`: pop next leaf from the wire batch +- `0x02 || depth` — `N`: inner node at `depth ∈ 0..=255`, pops 2 children + +Invariants enforced by the verifier: +- Leaves MUST be pre-sorted by `SortKey` (per-byte bit-reversal, LSB-first + lexicographic order). Unsorted or duplicate leaves → `ErrLeavesUnsorted`. +- Empty batch ⇒ empty proof and `prev == new`; otherwise `ErrEmptyBatchNonEmptyProof` / + `ErrEmptyBatchRootChange`. +- After stream consumption: stack size 1, leaves fully consumed, bytes fully + consumed, and `stack[0] == (prev, new)`. +- Leaf count is capped at `MaxLeafCount = 1<<20` to prevent OOM from malicious + inputs. Value length is naturally capped at 65 535 by `u16`. + +Hash functions (SHA-256, matching `crates/rsmt/src/hash.rs`): +- `HashLeaf(key, value) = SHA256(0x00 || key[32] || value)` +- `HashNode(left, right, depth) = SHA256(0x01 || depth || left[32] || right[32])` + +## Build Configurations + +### Default Build (No FFI) + +Build without Rust dependencies (default behavior): + +```bash +make build +# or +go build ./... 
+``` + +This uses Go build tag stubs that return errors when FFI verifiers are requested. The system will still build and run, but cannot verify ZK proofs. + +### Build with FFI + +Build with Rust FFI support for full ZK verification: + +```bash +make build-with-ffi +``` + +This will: +1. Check for Rust toolchain +2. Build SP1 verifier FFI library +3. Build Light Client verifier FFI library +4. Build Go binary with `-tags zkverifier_ffi` + +**Requirements:** +- Rust toolchain (install from https://rustup.rs) +- C compiler (GCC/Clang) +- Internet connection (to fetch ethrex dependencies from GitHub) + +### Manual FFI Build + +Build individual FFI components: + +```bash +# Build SP1 verifier only +make build-sp1-ffi + +# Build Light Client verifier only +make build-light-client-ffi + +# Build both +make build-rust-ffi +``` + +Then build Go with FFI tags: + +```bash +cd cli/ubft && go build -tags zkverifier_ffi -o ../../build/ubft +``` + +## Testing + +### Test without FFI + +```bash +make test +# or +go test ./... +``` + +### Test with FFI + +```bash +make test ZKVERIFIER_FFI=1 +# or +go test -tags zkverifier_ffi ./... +``` + +## CI/CD + +The CI pipeline (`.github/workflows/ci.yml`) runs both configurations: + +1. **build** job: Builds without FFI (fast, no Rust required) +2. **build-with-ffi** job: Builds with FFI (requires Rust setup) +3. **test** job: Tests without FFI +4. **test-with-ffi** job: Tests with FFI + +This ensures the codebase works in both configurations. 
+ +## How It Works + +### Build Tags + +- **FFI files** (`*_ffi.go`): Tagged with `//go:build zkverifier_ffi` + - Only compiled when `-tags zkverifier_ffi` is used + - Contains cgo directives to link Rust libraries + +- **Stub files** (`*_ffi_stub.go`): Tagged with `//go:build !zkverifier_ffi` + - Compiled by default (without tags) + - Provides stub implementations that return errors + +### FFI Libraries + +Located in: +- `sp1-verifier-ffi/`: SP1 proof verification +- `light-client-verifier-ffi/`: Light client witness validation + +Built as static libraries (`.a` files) and linked via cgo: +```c +#cgo LDFLAGS: -L${SRCDIR}/sp1-verifier-ffi/target/release -lsp1_verifier_ffi -ldl -lm +``` + +**Dependencies:** +- `sp1-verifier-ffi`: Uses sp1-sdk from crates.io +- `light-client-verifier-ffi`: Uses ethrex fork from GitHub (https://github.com/ristik/ethrex branch uni-evm) + - Dependencies are fetched automatically during Rust build + - No local submodules or path dependencies required + +### Configuration + +The verifier factory (`NewVerifier()`) checks if FFI is available at runtime: + +```go +cfg := &zkverifier.Config{ + Enabled: true, + ProofType: zkverifier.ProofTypeSP1, + VerificationKeyPath: "/path/to/vkey", +} +verifier, err := zkverifier.NewVerifier(cfg) +``` + +Without FFI, this returns an error indicating FFI is not available. With FFI, it initializes the Rust verifier. 
+ +## Makefile Targets + +| Target | Description | +|--------|-------------| +| `make build` | Build without FFI (default) | +| `make build-with-ffi` | Build with FFI support | +| `make build-rust-ffi` | Build Rust FFI libraries only | +| `make build-sp1-ffi` | Build SP1 verifier FFI | +| `make build-light-client-ffi` | Build Light Client verifier FFI | +| `make test` | Run tests without FFI | +| `make test ZKVERIFIER_FFI=1` | Run tests with FFI | +| `make clean` | Clean Go build artifacts | +| `make clean-ffi` | Clean Rust build artifacts | +| `make check-rust` | Verify Rust toolchain is available | + +## Environment Variables + +- `ZKVERIFIER_FFI=1`: Enable FFI build (used internally by Makefile) +- `CGO_ENABLED=1`: Required for cgo (usually set by default) + +## Troubleshooting + +### "FFI verifier not available" error + +This means the binary was built without FFI support. Rebuild with: +```bash +make build-with-ffi +``` + +### Rust toolchain not found + +Install Rust: +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +### ethrex dependencies not found + +The Rust FFI uses ethrex dependencies from GitHub (https://github.com/ristik/ethrex branch uni-evm). If you see errors fetching these, ensure you have: +- Internet connectivity +- Git configured with GitHub access + +### Duplicate library warnings + +When building with FFI, you may see: +``` +ld: warning: ignoring duplicate libraries: '-ldl', '-lm' +``` + +This is harmless - both FFI libraries link these system libraries. + +## Production Deployment + +For production deployments that need ZK verification: + +1. Ensure Rust toolchain is available in build environment +2. Use `make build-with-ffi` in CI/CD +3. Distribute the binary with embedded FFI libraries +4. Provide appropriate verification keys at runtime + +For deployments that don't need ZK verification (e.g., testing environments): + +1. Use `make build` (no Rust required) +2. 
Configure verifier with `Enabled: false` or `ProofType: ProofTypeNone` diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/Cargo.toml new file mode 100644 index 00000000..483ae317 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "aggregator-zk-verifier-ffi" +version = "0.1.0" +edition = "2021" + +# Standalone workspace — does not participate in the bft-core Go module build. +[workspace] + +[lib] +crate-type = ["cdylib", "staticlib"] + +[dependencies] +# Must match the sp1-sdk version used in rugregator/crates/zk-host (6.0.2). +# This is independent of sp1-verifier-ffi which uses 5.0.8 for the EVM prover. +# The "blocking" feature IS required here: Groth16/Plonk proofs are checked by +# the lightweight sp1-verifier crate, but Compressed proofs fall back to the +# blocking CpuProver (see src/lib.rs), which lives behind this feature. +sp1-sdk = { version = "6.0.2", features = ["blocking"] } +sp1-verifier = { version = "6.0.2", features = ["std"] } +anyhow = "1.0" +bincode = "1.3" + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/aggregator_zk_verifier.h b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/aggregator_zk_verifier.h new file mode 100644 index 00000000..20765707 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/aggregator_zk_verifier.h @@ -0,0 +1,90 @@ +/** + * Aggregator ZK Verifier FFI + * + * C header for FFI interface to aggregator SP1 ZK proof verification. + * Uses SP1 6.0.2; independent of sp1_verifier.h which uses SP1 5.0.8. + */ + +#ifndef AGGREGATOR_ZK_VERIFIER_H +#define AGGREGATOR_ZK_VERIFIER_H + +#include <stdint.h> +#include <stddef.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Result codes for aggregator ZK verification.
+ * + * The AGGZK_ prefix distinguishes these from SP1_VERIFY_* constants so that + * both libraries can be linked into the same binary without symbol conflicts. + */ +typedef enum { + AGGZK_VERIFY_SUCCESS = 0, + AGGZK_VERIFY_INVALID_PROOF = 1, + AGGZK_VERIFY_INVALID_VKEY = 2, + AGGZK_VERIFY_INVALID_PUBLIC_INPUTS = 3, + AGGZK_VERIFY_VERIFICATION_FAILED = 4, + AGGZK_VERIFY_INTERNAL_ERROR = 5, +} AggZkVerifyResult; + +/** + * Verify an aggregator SP1 ZK consistency proof. + * + * The proof was produced by rugregator's zk-host crate (SP1 6.0.2). + * Public values layout: prev_root[32] || new_root[32] (64 bytes total). + * + * @param vkey_bytes Bincode-serialized SP1VerifyingKey + * @param vkey_len Length of vkey_bytes + * @param proof_bytes Bincode-serialized SP1ProofWithPublicValues + * @param proof_len Length of proof_bytes + * @param prev_root Pointer to 32-byte previous SMT root + * @param new_root Pointer to 32-byte new SMT root + * @param error_out On error, set to a malloc'd C string (free with aggzk_free_string) + * @return AggZkVerifyResult status code + */ +AggZkVerifyResult aggzk_verify_proof( + const uint8_t* vkey_bytes, + size_t vkey_len, + const uint8_t* proof_bytes, + size_t proof_len, + const uint8_t* prev_root, + const uint8_t* new_root, + char** error_out +); + +/** + * Validate a bincode-serialized SP1VerifyingKey without running a proof. + * + * @param vkey_bytes Pointer to vkey bytes + * @param vkey_len Length of vkey_bytes + * @param error_out On error, set to a malloc'd C string (free with aggzk_free_string) + * @return AGGZK_VERIFY_SUCCESS or AGGZK_VERIFY_INVALID_VKEY + */ +AggZkVerifyResult aggzk_validate_vkey( + const uint8_t* vkey_bytes, + size_t vkey_len, + char** error_out +); + +/** + * Free a string allocated by this library. + * + * @param s Pointer to string to free (may be NULL) + */ +void aggzk_free_string(char* s); + +/** + * Return the version of this FFI library. 
+ * + * @return Pointer to a static null-terminated version string (do not free) + */ +const char* aggzk_ffi_version(void); + +#ifdef __cplusplus +} +#endif + +#endif /* AGGREGATOR_ZK_VERIFIER_H */ diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/build.sh b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/build.sh new file mode 100755 index 00000000..6133d813 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/build.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# +# Build script for the Aggregator ZK Verifier FFI library. +# Uses SP1 6.0.2; independent of sp1-verifier-ffi (SP1 5.0.8). +# + +set -e + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +echo -e "${GREEN}Building Aggregator ZK Verifier FFI Library${NC}" +echo "=============================================" + +if ! command -v cargo &> /dev/null; then + echo -e "${RED}Error: Rust/Cargo not found${NC}" + echo "Please install Rust from https://rustup.rs/" + exit 1 +fi + +RUST_VERSION=$(cargo --version | cut -d' ' -f2) +echo -e "${GREEN}Rust version: ${RUST_VERSION}${NC}" + +echo -e "\n${YELLOW}Building Rust library...${NC}" +cargo build --release + +echo -e "${GREEN}✓ Build successful${NC}" + +LIB_PATH="target/release" +if [[ "$OSTYPE" == "darwin"* ]]; then + LIB_FILE="libaggregator_zk_verifier_ffi.dylib" + STATIC_LIB="libaggregator_zk_verifier_ffi.a" +elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + LIB_FILE="libaggregator_zk_verifier_ffi.so" + STATIC_LIB="libaggregator_zk_verifier_ffi.a" +else + echo -e "${YELLOW}Warning: Unknown OS type, library names may differ${NC}" + LIB_FILE="libaggregator_zk_verifier_ffi.*" + STATIC_LIB="libaggregator_zk_verifier_ffi.a" +fi + +echo -e "\n${YELLOW}Build artifacts:${NC}" +if [ -f "${LIB_PATH}/${LIB_FILE}" ]; then + ls -lh "${LIB_PATH}/${LIB_FILE}" + echo -e "${GREEN}✓ Dynamic library created${NC}" +else + echo -e "${RED}✗ Dynamic library not found${NC}" +fi + +if [ -f "${LIB_PATH}/${STATIC_LIB}" ]; then 
+ ls -lh "${LIB_PATH}/${STATIC_LIB}" + echo -e "${GREEN}✓ Static library created${NC}" +else + echo -e "${YELLOW}⚠ Static library not found (optional)${NC}" +fi + +echo -e "\n${YELLOW}Running Rust tests...${NC}" +cargo test + +echo -e "${GREEN}✓ All tests passed${NC}" + +echo -e "\n${GREEN}Build complete!${NC}" +echo -e "\nTo use with Go:" +echo -e " export CGO_LDFLAGS=\"-L\$(pwd)/${LIB_PATH}\"" +echo -e " cd .. && go build -tags zkverifier_aggregator_zk_ffi ./..." diff --git a/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/src/lib.rs new file mode 100644 index 00000000..2b95e935 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator-zk-verifier-ffi/src/lib.rs @@ -0,0 +1,250 @@ +use std::ffi::CString; +use std::os::raw::c_char; +use std::sync::OnceLock; +use sp1_sdk::{HashableKey, SP1Proof, SP1ProofWithPublicValues}; +use sp1_sdk::blocking::{CpuProver, Prover, ProverClient}; +use sp1_verifier::{Groth16Verifier, PlonkVerifier, GROTH16_VK_BYTES, PLONK_VK_BYTES}; + +/// Global CpuProver for Compressed proof verification. +/// +/// `SP1CompressedVerifierRaw` in sp1-verifier 6.0.2 uses a placeholder all-zero +/// vk_merkle_root (TODO) and therefore rejects all proofs from the CPU prover. +/// We must use CpuProver::verify() for Compressed proofs. Initialization takes +/// ~15 seconds but happens only once per process lifetime. +static CPU_PROVER: OnceLock<CpuProver> = OnceLock::new(); + +fn cpu_prover() -> &'static CpuProver { + CPU_PROVER.get_or_init(|| ProverClient::builder().cpu().build()) +} + +/// Error codes for the aggregator ZK verifier FFI. +/// +/// The `AGGZK_` prefix keeps these distinct from the SP1 verifier FFI symbols +/// (`SP1_VERIFY_*`) so both libraries can coexist in the same Go binary.
+#[repr(C)] +pub enum AggZkVerifyResult { + Success = 0, + InvalidProof = 1, + InvalidVKey = 2, + InvalidPublicInputs = 3, + VerificationFailed = 4, + InternalError = 5, +} + +/// Verify an aggregator SP1 ZK consistency proof. +/// +/// The proof was produced by `rugregator/crates/zk-host` using SP1 6.0.2. +/// The guest program committed exactly 64 public-value bytes: +/// bytes 0–31: previous SMT root +/// bytes 32–63: new SMT root +/// +/// Supported proof kinds: Groth16, Plonk, Compressed. +/// Core proofs are rejected (return `InternalError`). +/// +/// # Arguments +/// * `vkey_bytes` / `vkey_len` — bincode-serialized `SP1VerifyingKey` +/// * `proof_bytes` / `proof_len` — bincode-serialized `SP1ProofWithPublicValues` +/// * `prev_root` — pointer to 32-byte previous state root +/// * `new_root` — pointer to 32-byte new state root +/// * `error_out` — on error, set to a malloc'd C string (caller frees with `aggzk_free_string`) +/// +/// # Returns +/// `AggZkVerifyResult` status code. 
+#[no_mangle] +pub extern "C" fn aggzk_verify_proof( + vkey_bytes: *const u8, + vkey_len: usize, + proof_bytes: *const u8, + proof_len: usize, + prev_root: *const u8, + new_root: *const u8, + error_out: *mut *mut c_char, +) -> AggZkVerifyResult { + if vkey_bytes.is_null() || proof_bytes.is_null() { + set_error(error_out, "null pointer passed to aggzk_verify_proof"); + return AggZkVerifyResult::InternalError; + } + if prev_root.is_null() || new_root.is_null() { + set_error(error_out, "null state root pointer"); + return AggZkVerifyResult::InvalidPublicInputs; + } + + let vkey_data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) }; + let proof_data = unsafe { std::slice::from_raw_parts(proof_bytes, proof_len) }; + let prev = unsafe { std::slice::from_raw_parts(prev_root, 32) }; + let new = unsafe { std::slice::from_raw_parts(new_root, 32) }; + + match verify_internal(vkey_data, proof_data, prev, new) { + Ok(()) => AggZkVerifyResult::Success, + Err(e) => { + set_error(error_out, &e.to_string()); + classify(&e) + } + } +} + +fn verify_internal( + vkey_data: &[u8], + proof_data: &[u8], + prev_root: &[u8], + new_root: &[u8], +) -> anyhow::Result<()> { + let vkey: sp1_sdk::SP1VerifyingKey = bincode::deserialize(vkey_data) + .map_err(|e| anyhow::anyhow!("failed to deserialize vkey: {e}"))?; + + let proof: SP1ProofWithPublicValues = bincode::deserialize(proof_data) + .map_err(|e| anyhow::anyhow!("failed to deserialize proof: {e}"))?; + + // Public values layout: prev_root[32] || new_root[32] — exactly 64 bytes. 
+ let pv = proof.public_values.as_slice(); + if pv.len() != 64 { + anyhow::bail!( + "public values length mismatch: expected 64 bytes, got {}", + pv.len() + ); + } + if &pv[0..32] != prev_root { + anyhow::bail!("previous state root mismatch in public values"); + } + if &pv[32..64] != new_root { + anyhow::bail!("new state root mismatch in public values"); + } + + let pv_bytes = proof.public_values.to_vec(); + + match &proof.proof { + SP1Proof::Core(_) => { + anyhow::bail!("Core proofs are not supported; regenerate with Groth16, Plonk, or Compressed"); + } + SP1Proof::Groth16(_) => { + let wire = proof.bytes(); + let vkey_hash = vkey.bytes32(); // "0x<64 hex chars>" + Groth16Verifier::verify(&wire, &pv_bytes, &vkey_hash, &GROTH16_VK_BYTES) + .map_err(|e| anyhow::anyhow!("Groth16 verification failed: {e:?}"))?; + } + SP1Proof::Plonk(_) => { + let wire = proof.bytes(); + let vkey_hash = vkey.bytes32(); + PlonkVerifier::verify(&wire, &pv_bytes, &vkey_hash, &PLONK_VK_BYTES) + .map_err(|e| anyhow::anyhow!("Plonk verification failed: {e:?}"))?; + } + SP1Proof::Compressed(_) => { + // SP1CompressedVerifierRaw in sp1-verifier 6.0.2 uses a placeholder + // all-zero vk_merkle_root and rejects all CPU-generated proofs. + // Fall back to the cached CpuProver which skips the Merkle check when + // vk_verification is disabled (the CPU prover default). + cpu_prover() + .verify(&proof, &vkey, None) + .map_err(|e| anyhow::anyhow!("Compressed proof verification failed: {e}"))?; + } + } + + Ok(()) +} + +/// Validate a bincode-serialized `SP1VerifyingKey` without running a proof. 
+#[no_mangle] +pub extern "C" fn aggzk_validate_vkey( + vkey_bytes: *const u8, + vkey_len: usize, + error_out: *mut *mut c_char, +) -> AggZkVerifyResult { + if vkey_bytes.is_null() { + set_error(error_out, "null pointer passed to aggzk_validate_vkey"); + return AggZkVerifyResult::InternalError; + } + if vkey_len == 0 { + set_error(error_out, "vkey is empty"); + return AggZkVerifyResult::InvalidVKey; + } + let data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) }; + match bincode::deserialize::<sp1_sdk::SP1VerifyingKey>(data) { + Ok(_) => AggZkVerifyResult::Success, + Err(e) => { + set_error(error_out, &format!("failed to deserialize vkey: {e}")); + AggZkVerifyResult::InvalidVKey + } + } +} + +/// Free a string allocated by this library. +#[no_mangle] +pub extern "C" fn aggzk_free_string(s: *mut c_char) { + if !s.is_null() { + unsafe { let _ = CString::from_raw(s); } + } +} + +/// Return the version of this FFI library (static string, do not free). +#[no_mangle] +pub extern "C" fn aggzk_ffi_version() -> *const c_char { + const VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + VERSION.as_ptr() as *const c_char +} + +// ── Error helpers ───────────────────────────────────────────────────────────── + +fn classify(err: &anyhow::Error) -> AggZkVerifyResult { + let msg = err.to_string().to_lowercase(); + if msg.contains("vkey") || msg.contains("verifying key") { + AggZkVerifyResult::InvalidVKey + } else if msg.contains("deserialize proof") { + AggZkVerifyResult::InvalidProof + } else if msg.contains("state root") || msg.contains("public values") { + AggZkVerifyResult::InvalidPublicInputs + } else if msg.contains("verification failed") { + AggZkVerifyResult::VerificationFailed + } else if msg.contains("not supported") { + AggZkVerifyResult::InternalError + } else { + AggZkVerifyResult::InternalError + } +} + +fn set_error(error_out: *mut *mut c_char, message: &str) { + if !error_out.is_null() { + if let Ok(s) = CString::new(message) { + unsafe { *error_out = s.into_raw(); } + 
} + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ptr; + + #[test] + fn test_null_pointers() { + let mut error: *mut c_char = ptr::null_mut(); + let result = aggzk_verify_proof( + ptr::null(), 0, + ptr::null(), 0, + ptr::null(), + ptr::null(), + &mut error, + ); + assert!(matches!(result, AggZkVerifyResult::InternalError)); + if !error.is_null() { + aggzk_free_string(error); + } + } + + #[test] + fn test_version() { + let version = aggzk_ffi_version(); + assert!(!version.is_null()); + let s = unsafe { std::ffi::CStr::from_ptr(version) }; + assert!(s.to_str().unwrap().starts_with("0.1.0")); + } + + #[test] + fn test_empty_vkey() { + let mut error: *mut c_char = ptr::null_mut(); + let result = aggzk_validate_vkey(ptr::null(), 0, &mut error); + assert!(matches!(result, AggZkVerifyResult::InternalError)); + if !error.is_null() { + aggzk_free_string(error); + } + } +} diff --git a/rootchain/consensus/zkverifier/aggregator_rsmt_verifier.go b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier.go new file mode 100644 index 00000000..b3c71794 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier.go @@ -0,0 +1,61 @@ +package zkverifier + +import ( + "fmt" + + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier/rsmt" +) + +// AggregatorRSMTVerifier verifies Radix SMT consistency proofs produced by +// the Rust aggregator's `rsmt` crate (proof type "aggregator_rsmt_v1"). +// +// The verifier is pure Go, always compiled in (no build tag, no FFI). It +// recomputes both the pre- and post-insertion SMT roots from the envelope +// and checks them against the claimed InputRecord.PreviousHash / Hash. +// +// See rootchain/consensus/zkverifier/rsmt for the canonical wire format. +type AggregatorRSMTVerifier struct{} + +// NewAggregatorRSMTVerifier constructs a stateless RSMT consistency verifier. 
+// No configuration or verification key is required — the consistency proof +// is self-contained and verified against root hashes from the InputRecord. +func NewAggregatorRSMTVerifier() *AggregatorRSMTVerifier { + return &AggregatorRSMTVerifier{} +} + +// VerifyProof decodes the zk_proof envelope and verifies the +// previousStateRoot → newStateRoot transition. The blockHash argument is +// unused: the aggregator's state transition is validated independently of +// the block header hash, which is covered by the normal InputRecord rules. +// +// An empty previousStateRoot (len == 0) is reserved for genesis / sync UCs +// and is filtered out earlier in Node.verifyZKProof, so both roots are +// expected to be 32 bytes here in practice. +func (v *AggregatorRSMTVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, _ []byte) error { + env, err := rsmt.DecodeEnvelope(proof) + if err != nil { + return fmt.Errorf("%w: %v", ErrInvalidProofFormat, err) + } + oldRoot, err := rsmt.RootFromBytes(previousStateRoot) + if err != nil { + return fmt.Errorf("%w: previous state root: %v", ErrInvalidProofFormat, err) + } + newRoot, err := rsmt.RootFromBytes(newStateRoot) + if err != nil { + return fmt.Errorf("%w: new state root: %v", ErrInvalidProofFormat, err) + } + if err := rsmt.Verify(env, oldRoot, newRoot); err != nil { + return fmt.Errorf("%w: %v", ErrProofVerificationFailed, err) + } + return nil +} + +// ProofType returns ProofTypeAggregatorRSMTv1. +func (*AggregatorRSMTVerifier) ProofType() ProofType { + return ProofTypeAggregatorRSMTv1 +} + +// IsEnabled reports that aggregator RSMT verification is active. 
+func (*AggregatorRSMTVerifier) IsEnabled() bool { + return true +} diff --git a/rootchain/consensus/zkverifier/aggregator_rsmt_verifier_test.go b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier_test.go new file mode 100644 index 00000000..38341fd9 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_rsmt_verifier_test.go @@ -0,0 +1,112 @@ +package zkverifier + +import ( + "bytes" + "errors" + "testing" + + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier/rsmt" + "github.com/unicitynetwork/bft-go-base/types" +) + +func TestAggregatorRSMTVerifier_SingleLeafIntoEmptyTree(t *testing.T) { + var k [32]byte + k[0] = 0x05 + v := []byte("hello") + leafHash := rsmt.HashLeaf(k, v) + + env, err := rsmt.EncodeEnvelope( + []rsmt.Leaf{{Key: k, Value: v}}, + []byte{0x01}, // L + ) + if err != nil { + t.Fatal(err) + } + + ver := NewAggregatorRSMTVerifier() + if !ver.IsEnabled() { + t.Fatal("expected IsEnabled()") + } + if ver.ProofType() != ProofTypeAggregatorRSMTv1 { + t.Fatalf("unexpected ProofType %q", ver.ProofType()) + } + + // Genesis-to-first-leaf: prev nil, new = hashLeaf. + if err := ver.VerifyProof(env, nil, leafHash[:], nil); err != nil { + t.Fatalf("VerifyProof: %v", err) + } + + // Wrong new root. + bad := make([]byte, 32) + if err := ver.VerifyProof(env, nil, bad, nil); !errors.Is(err, ErrProofVerificationFailed) { + t.Fatalf("wrong root: got %v, want ErrProofVerificationFailed", err) + } + + // Malformed envelope. + if err := ver.VerifyProof([]byte{0x00}, nil, leafHash[:], nil); !errors.Is(err, ErrInvalidProofFormat) { + t.Fatalf("malformed envelope: got %v, want ErrInvalidProofFormat", err) + } + + // Wrong-length previous root. 
+ if err := ver.VerifyProof(env, []byte{1, 2, 3}, leafHash[:], nil); !errors.Is(err, ErrInvalidProofFormat) { + t.Fatalf("bad prev root length: got %v, want ErrInvalidProofFormat", err) + } +} + +func TestAggregatorRSMTVerifier_TwoLeaves(t *testing.T) { + var k0, k1 [32]byte + k0[0] = 0x00 // bit 0 = 0 → left under depth-0 split + k1[0] = 0x01 // bit 0 = 1 → right + v0 := []byte("v0") + v1 := []byte("v1") + + h0 := rsmt.HashLeaf(k0, v0) + h1 := rsmt.HashLeaf(k1, v1) + newRoot := rsmt.HashNode(h0, h1, 0) + + var proof bytes.Buffer + proof.WriteByte(0x01) // L (k0) + proof.WriteByte(0x01) // L (k1) + proof.WriteByte(0x02) // N + proof.WriteByte(0x00) // depth=0 + + env, err := rsmt.EncodeEnvelope( + []rsmt.Leaf{{Key: k0, Value: v0}, {Key: k1, Value: v1}}, + proof.Bytes(), + ) + if err != nil { + t.Fatal(err) + } + + ver := NewAggregatorRSMTVerifier() + if err := ver.VerifyProof(env, nil, newRoot[:], nil); err != nil { + t.Fatalf("VerifyProof: %v", err) + } +} + +func TestRegistry_AggregatorRSMT(t *testing.T) { + reg := NewRegistry() + params := map[string]string{ParamProofType: string(ProofTypeAggregatorRSMTv1)} + v, err := reg.GetVerifier(types.PartitionID(42), types.ShardID{}, 0, params) + if err != nil { + t.Fatalf("GetVerifier: %v", err) + } + if _, ok := v.(*AggregatorRSMTVerifier); !ok { + t.Fatalf("registry returned %T, want *AggregatorRSMTVerifier", v) + } + if !v.IsEnabled() { + t.Fatalf("verifier not enabled") + } + if v.ProofType() != ProofTypeAggregatorRSMTv1 { + t.Fatalf("wrong proof type %q", v.ProofType()) + } + + // Cached on repeat call. 
+ v2, err := reg.GetVerifier(types.PartitionID(42), types.ShardID{}, 0, params) + if err != nil { + t.Fatal(err) + } + if v != v2 { + t.Fatalf("registry did not cache verifier") + } +} diff --git a/rootchain/consensus/zkverifier/aggregator_zk_verifier.go b/rootchain/consensus/zkverifier/aggregator_zk_verifier.go new file mode 100644 index 00000000..b9c91cbf --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_zk_verifier.go @@ -0,0 +1,65 @@ +package zkverifier + +import ( + "encoding/hex" + "fmt" + "log/slog" +) + +// AggregatorZKVerifier verifies SP1 ZK consistency proofs produced by the +// rugregator aggregator (SP1 6.0.2). +// +// The proof commits exactly 64 public-value bytes: +// +// bytes 0–31: previous SMT root (must match previousStateRoot arg) +// bytes 32–63: new SMT root (must match newStateRoot arg) +// +// blockHash is accepted by the ZKVerifier interface but ignored — aggregator +// ZK proofs do not commit a block hash. +type AggregatorZKVerifier struct { + enabled bool + ffiVerifier *AggregatorZKVerifierFFI +} + +// NewAggregatorZKVerifier creates a new aggregator ZK verifier. +// vkeyPath must point to a bincode-serialized SP1VerifyingKey (see extract-vkey). +func NewAggregatorZKVerifier(vkeyPath string) (*AggregatorZKVerifier, error) { + if vkeyPath == "" { + return nil, fmt.Errorf("vkey_path is required for aggregator_zk_v1 proof type") + } + ffi, err := NewAggregatorZKVerifierFFI(vkeyPath) + if err != nil { + return nil, fmt.Errorf("aggregator ZK FFI verifier not available: %w", err) + } + slog.Info("Using aggregator ZK verifier", "path", vkeyPath, "version", GetAggregatorZKFFIVersion()) + return &AggregatorZKVerifier{enabled: true, ffiVerifier: ffi}, nil +} + +// VerifyProof verifies an aggregator SP1 ZK consistency proof. 
+func (v *AggregatorZKVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + if !v.enabled { + return ErrVerifierNotConfigured + } + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(previousStateRoot)) + } + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(newStateRoot)) + } + + slog.Debug("Verifying aggregator ZK proof", + "proof_size", len(proof), + "prev_root", hex.EncodeToString(previousStateRoot[:8]), + "new_root", hex.EncodeToString(newStateRoot[:8])) + + return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot) +} + +// ProofType returns ProofTypeAggregatorZKv1. +func (v *AggregatorZKVerifier) ProofType() ProofType { return ProofTypeAggregatorZKv1 } + +// IsEnabled returns true if the verifier is configured and the FFI library is available. +func (v *AggregatorZKVerifier) IsEnabled() bool { return v.enabled } diff --git a/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi.go b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi.go new file mode 100644 index 00000000..a6b7b8ee --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi.go @@ -0,0 +1,119 @@ +//go:build zkverifier_aggregator_zk_ffi + +package zkverifier + +// #cgo LDFLAGS: -L${SRCDIR}/aggregator-zk-verifier-ffi/target/release -laggregator_zk_verifier_ffi -ldl -lm +// #include "aggregator-zk-verifier-ffi/aggregator_zk_verifier.h" +// #include <stdlib.h> +import "C" +import ( + "fmt" + "unsafe" +) + +// AggregatorZKVerifierFFI wraps the Rust FFI library for aggregator ZK proof verification. +type AggregatorZKVerifierFFI struct { + vkey []byte +} + +// NewAggregatorZKVerifierFFI creates a new FFI-based aggregator ZK verifier.
+func NewAggregatorZKVerifierFFI(vkeyPath string) (*AggregatorZKVerifierFFI, error) { + vkey, err := readFile(vkeyPath) + if err != nil { + return nil, fmt.Errorf("failed to load vkey: %w", err) + } + if len(vkey) == 0 { + return nil, fmt.Errorf("vkey file is empty") + } + + var errorOut *C.char + defer func() { + if errorOut != nil { + C.aggzk_free_string(errorOut) + } + }() + + result := C.aggzk_validate_vkey( + (*C.uint8_t)(unsafe.Pointer(&vkey[0])), + C.size_t(len(vkey)), + &errorOut, + ) + if result != C.AGGZK_VERIFY_SUCCESS { + if errorOut != nil { + return nil, fmt.Errorf("invalid vkey: %s", C.GoString(errorOut)) + } + return nil, fmt.Errorf("invalid vkey") + } + + return &AggregatorZKVerifierFFI{vkey: vkey}, nil +} + +// VerifyProof verifies an aggregator ZK proof via the Rust FFI library. +// blockHash is unused by aggregator ZK proofs and not passed to the FFI. +func (v *AggregatorZKVerifierFFI) VerifyProof(proof []byte, prevRoot []byte, newRoot []byte) error { + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + if len(prevRoot) != 32 { + return fmt.Errorf("%w: prevRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(newRoot) != 32 { + return fmt.Errorf("%w: newRoot must be 32 bytes", ErrInvalidProofFormat) + } + + var errorOut *C.char + defer func() { + if errorOut != nil { + C.aggzk_free_string(errorOut) + } + }() + + result := C.aggzk_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&v.vkey[0])), + C.size_t(len(v.vkey)), + (*C.uint8_t)(unsafe.Pointer(&proof[0])), + C.size_t(len(proof)), + (*C.uint8_t)(unsafe.Pointer(&prevRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&newRoot[0])), + &errorOut, + ) + + switch result { + case C.AGGZK_VERIFY_SUCCESS: + return nil + case C.AGGZK_VERIFY_INVALID_PROOF: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrInvalidProofFormat, C.GoString(errorOut)) + } + return ErrInvalidProofFormat + case C.AGGZK_VERIFY_INVALID_VKEY: + if errorOut != nil { + return fmt.Errorf("invalid 
vkey: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid vkey") + case C.AGGZK_VERIFY_INVALID_PUBLIC_INPUTS: + if errorOut != nil { + return fmt.Errorf("invalid public inputs: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid public inputs") + case C.AGGZK_VERIFY_VERIFICATION_FAILED: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrProofVerificationFailed, C.GoString(errorOut)) + } + return ErrProofVerificationFailed + default: + if errorOut != nil { + return fmt.Errorf("internal error: %s", C.GoString(errorOut)) + } + return fmt.Errorf("internal error") + } +} + +// GetAggregatorZKFFIVersion returns the version of the Rust FFI library. +func GetAggregatorZKFFIVersion() string { + v := C.aggzk_ffi_version() + if v == nil { + return "unknown" + } + return C.GoString(v) +} diff --git a/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi_stub.go b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi_stub.go new file mode 100644 index 00000000..283c5b96 --- /dev/null +++ b/rootchain/consensus/zkverifier/aggregator_zk_verifier_ffi_stub.go @@ -0,0 +1,23 @@ +//go:build !zkverifier_aggregator_zk_ffi + +package zkverifier + +import "fmt" + +// AggregatorZKVerifierFFI is a stub when the FFI library is not compiled in. +type AggregatorZKVerifierFFI struct { + vkey []byte +} + +// NewAggregatorZKVerifierFFI returns an error when the FFI library is not available. +func NewAggregatorZKVerifierFFI(_ string) (*AggregatorZKVerifierFFI, error) { + return nil, fmt.Errorf("aggregator ZK FFI verifier not available: build with -tags zkverifier_aggregator_zk_ffi to enable") +} + +// VerifyProof always returns an error in the stub. +func (v *AggregatorZKVerifierFFI) VerifyProof(_ []byte, _ []byte, _ []byte) error { + return fmt.Errorf("aggregator ZK FFI verifier not available") +} + +// GetAggregatorZKFFIVersion returns "unavailable" in the stub. 
+func GetAggregatorZKFFIVersion() string { return "unavailable" } diff --git a/rootchain/consensus/zkverifier/capabilities.go b/rootchain/consensus/zkverifier/capabilities.go new file mode 100644 index 00000000..1bf56adb --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities.go @@ -0,0 +1,43 @@ +package zkverifier + +// IsProofTypeAvailable reports whether pt can be used in the current binary. +// Each FFI verifier is independently toggled by its own build tag: +// - SP1, LightClient: -tags zkverifier_ffi +// - AggregatorZKv1: -tags zkverifier_aggregator_zk_ffi +// +// Pure-Go verifiers (AggregatorRSMTv1, Exec, None) are always available. +func IsProofTypeAvailable(pt ProofType) bool { + switch pt { + case ProofTypeAggregatorRSMTv1, ProofTypeExec, ProofTypeNone, "": + return true + case ProofTypeSP1: + return isSP1Available() + case ProofTypeLightClient: + return isLightClientAvailable() + case ProofTypeAggregatorZKv1: + return isAggregatorZKv1Available() + default: + return false + } +} + +// AvailableProofTypes returns the proof types that can be instantiated in the +// current binary. +func AvailableProofTypes() []ProofType { + types := []ProofType{ProofTypeAggregatorRSMTv1, ProofTypeExec} + if isSP1Available() { + types = append(types, ProofTypeSP1) + } + if isLightClientAvailable() { + types = append(types, ProofTypeLightClient) + } + if isAggregatorZKv1Available() { + types = append(types, ProofTypeAggregatorZKv1) + } + return types +} + +// IsFFIAvailable reports whether any FFI-backed verifier is available. 
+func IsFFIAvailable() bool { + return isSP1Available() || isLightClientAvailable() || isAggregatorZKv1Available() +} diff --git a/rootchain/consensus/zkverifier/capabilities_aggregator_zk_ffi.go b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_ffi.go new file mode 100644 index 00000000..6fcc42a5 --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_ffi.go @@ -0,0 +1,6 @@ +//go:build zkverifier_aggregator_zk_ffi + +package zkverifier + +// isAggregatorZKv1Available returns true when the aggregator ZK FFI verifier was compiled in. +func isAggregatorZKv1Available() bool { return true } diff --git a/rootchain/consensus/zkverifier/capabilities_aggregator_zk_stub.go b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_stub.go new file mode 100644 index 00000000..7c3193df --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_aggregator_zk_stub.go @@ -0,0 +1,6 @@ +//go:build !zkverifier_aggregator_zk_ffi + +package zkverifier + +// isAggregatorZKv1Available returns false when the aggregator ZK FFI verifier was not compiled in. +func isAggregatorZKv1Available() bool { return false } diff --git a/rootchain/consensus/zkverifier/capabilities_ffi.go b/rootchain/consensus/zkverifier/capabilities_ffi.go new file mode 100644 index 00000000..80387324 --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_ffi.go @@ -0,0 +1,9 @@ +//go:build zkverifier_ffi + +package zkverifier + +// isSP1Available returns true when the SP1 FFI verifier was compiled in. +func isSP1Available() bool { return true } + +// isLightClientAvailable returns true when the LightClient FFI verifier was compiled in. 
+func isLightClientAvailable() bool { return true } diff --git a/rootchain/consensus/zkverifier/capabilities_stub.go b/rootchain/consensus/zkverifier/capabilities_stub.go new file mode 100644 index 00000000..b000d7c2 --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_stub.go @@ -0,0 +1,9 @@ +//go:build !zkverifier_ffi + +package zkverifier + +// isSP1Available returns false when the SP1 FFI verifier was not compiled in. +func isSP1Available() bool { return false } + +// isLightClientAvailable returns false when the LightClient FFI verifier was not compiled in. +func isLightClientAvailable() bool { return false } diff --git a/rootchain/consensus/zkverifier/capabilities_test.go b/rootchain/consensus/zkverifier/capabilities_test.go new file mode 100644 index 00000000..395df1ec --- /dev/null +++ b/rootchain/consensus/zkverifier/capabilities_test.go @@ -0,0 +1,189 @@ +package zkverifier + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsProofTypeAvailable(t *testing.T) { + // These types should always be available + require.True(t, IsProofTypeAvailable(ProofTypeExec)) + require.True(t, IsProofTypeAvailable(ProofTypeNone)) + require.True(t, IsProofTypeAvailable("")) + + // Unknown types should not be available + require.False(t, IsProofTypeAvailable(ProofType("unknown"))) + + // SP1 and LightClient availability depends on build tags + // The stub version returns false for these + if !IsFFIAvailable() { + require.False(t, IsProofTypeAvailable(ProofTypeSP1)) + require.False(t, IsProofTypeAvailable(ProofTypeLightClient)) + } +} + +func TestAvailableProofTypes(t *testing.T) { + types := AvailableProofTypes() + require.NotEmpty(t, types) + require.Contains(t, types, ProofTypeExec) +} + +func TestParseProofTypeFromParams(t *testing.T) { + testCases := []struct { + name string + params map[string]string + expected ProofType + }{ + { + name: "nil params", + params: nil, + expected: ProofTypeNone, + }, + { + name: "empty params", + 
params: map[string]string{}, + expected: ProofTypeNone, + }, + { + name: "empty proof_type", + params: map[string]string{ParamProofType: ""}, + expected: ProofTypeNone, + }, + { + name: "sp1", + params: map[string]string{ParamProofType: "sp1"}, + expected: ProofTypeSP1, + }, + { + name: "light_client", + params: map[string]string{ParamProofType: "light_client"}, + expected: ProofTypeLightClient, + }, + { + name: "exec", + params: map[string]string{ParamProofType: "exec"}, + expected: ProofTypeExec, + }, + { + name: "none", + params: map[string]string{ParamProofType: "none"}, + expected: ProofTypeNone, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := ParseProofTypeFromParams(tc.params) + require.Equal(t, tc.expected, result) + }) + } +} + +func TestParseVKeyPathFromParams(t *testing.T) { + testCases := []struct { + name string + params map[string]string + expected string + }{ + { + name: "nil params", + params: nil, + expected: "", + }, + { + name: "empty params", + params: map[string]string{}, + expected: "", + }, + { + name: "no vkey_path", + params: map[string]string{ParamProofType: "sp1"}, + expected: "", + }, + { + name: "with vkey_path", + params: map[string]string{ParamProofType: "sp1", ParamVerificationKeyPath: "/path/to/vkey"}, + expected: "/path/to/vkey", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := ParseVKeyPathFromParams(tc.params) + require.Equal(t, tc.expected, result) + }) + } +} + +func TestParseChainIDFromParams(t *testing.T) { + testCases := []struct { + name string + params map[string]string + expectedID uint64 + expectedOK bool + }{ + { + name: "nil params", + params: nil, + expectedID: 0, + expectedOK: false, + }, + { + name: "empty params", + params: map[string]string{}, + expectedID: 0, + expectedOK: false, + }, + { + name: "no chain_id", + params: map[string]string{ParamProofType: "sp1"}, + expectedID: 0, + expectedOK: false, + }, + { + name: "empty 
chain_id", + params: map[string]string{ParamChainID: ""}, + expectedID: 0, + expectedOK: false, + }, + { + name: "invalid chain_id", + params: map[string]string{ParamChainID: "invalid"}, + expectedID: 0, + expectedOK: false, + }, + { + name: "negative chain_id", + params: map[string]string{ParamChainID: "-1"}, + expectedID: 0, + expectedOK: false, + }, + { + name: "valid chain_id 1", + params: map[string]string{ParamChainID: "1"}, + expectedID: 1, + expectedOK: true, + }, + { + name: "valid chain_id mainnet", + params: map[string]string{ParamChainID: "1337"}, + expectedID: 1337, + expectedOK: true, + }, + { + name: "valid large chain_id", + params: map[string]string{ParamChainID: "999999999"}, + expectedID: 999999999, + expectedOK: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, ok := ParseChainIDFromParams(tc.params) + require.Equal(t, tc.expectedOK, ok) + require.Equal(t, tc.expectedID, result) + }) + } +} diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml new file mode 100644 index 00000000..a896d98f --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "light-client-verifier-ffi" +version = "0.1.0" +edition = "2021" + +# Make this package independent of parent workspace +[workspace] + +[lib] +crate-type = ["cdylib", "staticlib"] + +[dependencies] +# Core dependencies for witness deserialization and validation +# Pin rkyv to same version as uni-evm workspace to ensure compatible serialization +rkyv = { version = "=0.8.10", features = ["std", "unaligned"] } +anyhow = "1.0" + +# TODO: use github fork instead of local +ethrex-core = { path = "../../../../../ethrex/crates/common", package = "ethrex-common" } +# The l2 feature adds blob_commitment and blob_proof fields to ProgramInput +guest_program = { path = 
"../../../../../ethrex/crates/l2/prover/src/guest_program", features = ["l2"] } + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md new file mode 100644 index 00000000..f51ca283 --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/README.md @@ -0,0 +1,265 @@ +# Light Client Verifier FFI + +Rust FFI library for verifying uni-evm light client proofs in BFT Core. + +## Overview + +This library provides a Foreign Function Interface (FFI) for BFT Core (written in Go) to verify light client proofs from uni-evm. In light client mode, instead of generating zero-knowledge proofs (which take 5+ minutes), uni-evm sends the full witness data to BFT Core, which executes the validation logic directly. + +**Performance**: Light client mode is ~300x faster than SP1 mode for development: +- SP1 mode: 5+ minutes per block +- Light client mode: ~5 seconds per block + +## How It Works + +### Light Client Proof Format + +``` +┌─────────────┬──────────────────────────────────────┐ +│ Magic (8B) │ Serialized ProgramInput (varies) │ +│ "LCPROOF\0" │ (witness + blocks + config) │ +└─────────────┴──────────────────────────────────────┘ +``` + +### Verification Process + +1. **Magic Header Check**: Validates the first 8 bytes are `LCPROOF\0` +2. **Deserialization**: Deserializes `ProgramInput` from the payload (rkyv format) +3. **Execution**: Calls `guest_program::execution::stateless_validation_l1()` +4. 
**State Root Validation**: Verifies `prev_state_root` and `new_state_root` match + +## Building + +### Prerequisites + +- Rust stable or nightly toolchain +- Internet connection (to fetch ethrex dependencies from GitHub) +- C compiler (GCC or Clang) + +### Dependencies + +This library depends on ethrex components from the uni-evm fork: +- **Repository**: https://github.com/ristik/ethrex +- **Branch**: `uni-evm` +- **Components used**: + - `ethrex-common` (core types) + - `guest_program` (validation logic) + +Dependencies are fetched automatically via Cargo from GitHub - no local submodules required. + +### Build Steps + +```bash +# From this directory +./build.sh + +# Or manually +cargo build --release +``` + +Output: +- `target/release/liblight_client_verifier_ffi.a` - Static library +- `target/release/liblight_client_verifier_ffi.dylib` - Dynamic library (macOS) +- `target/release/liblight_client_verifier_ffi.so` - Dynamic library (Linux) + +## Usage from Go + +### Include in BFT Core + +The library is automatically linked when building BFT Core's zkverifier package: + +```go +// In bft-core/rootchain/consensus/zkverifier/verifier.go +cfg := &zkverifier.Config{ + Enabled: true, + ProofType: zkverifier.ProofTypeLightClient, +} + +verifier, err := zkverifier.NewVerifier(cfg) +// verifier will use light client FFI automatically +``` + +### Direct FFI Usage (Advanced) + +```go +import "C" +// #cgo LDFLAGS: -L${SRCDIR}/light-client-verifier-ffi/target/release -llight_client_verifier_ffi +// #include "light-client-verifier-ffi/light_client_verifier.h" + +// Verify a light client proof +result := C.light_client_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&payload[0])), + C.size_t(len(payload)), + (*C.uint8_t)(unsafe.Pointer(&prevStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), + &errorOut, +) +``` + +## API Reference + +### C Functions + +#### `light_client_verify_proof` + +Verifies a light client proof payload. 
+
+```c
+LightClientVerifyResult light_client_verify_proof(
+    const uint8_t* payload_bytes,
+    size_t payload_len,
+    const uint8_t* prev_state_root,  // 32 bytes
+    const uint8_t* new_state_root,   // 32 bytes
+    const uint8_t* block_hash,       // 32 bytes
+    uint64_t chain_id,               // EVM chain ID from partition config
+    char** error_out
+);
+```
+
+**Returns**:
+- `LIGHT_CLIENT_VERIFY_SUCCESS` (0) - Proof is valid
+- `LIGHT_CLIENT_VERIFY_INVALID_PROOF` (1) - Proof data is malformed
+- `LIGHT_CLIENT_VERIFY_INVALID_MAGIC_HEADER` (2) - Magic header mismatch
+- `LIGHT_CLIENT_VERIFY_INVALID_PUBLIC_INPUTS` (3) - State roots don't match
+- `LIGHT_CLIENT_VERIFY_VERIFICATION_FAILED` (4) - Validation logic failed
+- `LIGHT_CLIENT_VERIFY_INTERNAL_ERROR` (5) - Internal error
+
+#### `light_client_validate_payload`
+
+Validates payload format without executing validation logic.
+
+```c
+LightClientVerifyResult light_client_validate_payload(
+    const uint8_t* payload_bytes,
+    size_t payload_len,
+    char** error_out
+);
+```
+
+#### `light_client_ffi_version`
+
+Returns the FFI library version string.
+
+```c
+const char* light_client_ffi_version(void);
+```
+
+#### `light_client_free_string`
+
+Frees a string allocated by the library.
+
+```c
+void light_client_free_string(char* s);
+```
+
+## Testing
+
+### Unit Tests
+
+```bash
+cargo test
+```
+
+### Integration Tests
+
+See `bft-core/rootchain/consensus/zkverifier/verifier_test.go` for Go integration tests.
+
+## Architecture
+
+### Dependencies
+
+- **rkyv** - Zero-copy deserialization (matches uni-evm's serialization format)
+- **ethrex-common** - Core types (Block, H256, etc.)
+- **guest_program** - Validation logic (`stateless_validation_l1`) + +### File Structure + +``` +light-client-verifier-ffi/ +├── src/ +│ └── lib.rs # FFI implementation +├── light_client_verifier.h # C header +├── Cargo.toml # Dependencies +├── build.sh # Build script +└── README.md # This file +``` + +## Troubleshooting + +### Build Errors + +**Error**: `failed to load manifest for dependency 'ethrex-common'` + +**Solution**: Ensure you're building from within the uni-evm repository structure, where the ethrex submodule is available at `../../../../../ethrex/`. + +**Error**: `undefined reference to 'light_client_verify_proof'` + +**Solution**: Ensure the Rust library is built before building Go code: +```bash +cd light-client-verifier-ffi +cargo build --release +cd ../.. +go build +``` + +### Runtime Errors + +**Error**: "invalid magic header" + +**Cause**: Payload doesn't start with `LCPROOF\0` or is from SP1 mode. + +**Solution**: Ensure uni-evm is configured with `prover_type = "light_client"`. + +**Error**: "Failed to deserialize ProgramInput" + +**Cause**: Payload format mismatch between uni-evm and BFT Core versions. + +**Solution**: Ensure both uni-evm and BFT Core are using compatible ethrex versions. + +## Performance + +### Proof Sizes + +| Block Type | Payload Size | +|------------|-------------| +| Empty block | ~1.4 KB | +| 3 transactions | ~2 KB | +| 10 transactions | ~5-10 KB | +| 100 transactions | ~50-100 KB | + +Compare to: +- Exec mode: 4 bytes (dummy) +- SP1 mode: ~50 KB (compressed STARK) + +### Verification Time + +| Mode | Time | +|------|------| +| Light client | ~100-200ms | +| SP1 | ~10ms (proof verification only) | + +Light client is slower to verify but much faster to generate (no proving overhead). + +## Development Workflow + +### Modify Validation Logic + +1. Edit `guest_program` crate in ethrex +2. Rebuild this FFI library: `cargo build --release` +3. Rebuild BFT Core: `cd ../.. && go build` + +### Add New Exports + +1. 
Add Rust function with `#[no_mangle]` and `extern "C"` +2. Add declaration to `light_client_verifier.h` +3. Add Go wrapper in `light_client_verifier_ffi.go` + +## Security Considerations + +- **Not succinct**: Full witness data is transmitted (1-5MB vs 50KB for SP1) +- **Development only**: Recommended for local development and testing +- **Production use**: Switch to SP1 mode for production deployments + +## See Also + +- [LIGHT_CLIENT_MODE.md](../../../../../LIGHT_CLIENT_MODE.md) - User documentation +- [LIGHT_CLIENT_MODE_PLAN.md](../../../../../LIGHT_CLIENT_MODE_PLAN.md) - Implementation plan +- [SP1 Verifier FFI](../sp1-verifier-ffi/README.md) - Similar FFI for SP1 proofs diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/build.sh b/rootchain/consensus/zkverifier/light-client-verifier-ffi/build.sh new file mode 100755 index 00000000..807a342b --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/build.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Build script for light-client-verifier-ffi +# This builds the Rust FFI library that BFT Core uses to verify light client proofs + +set -e + +echo "Building light-client-verifier-ffi..." + +# Build in release mode for optimal performance +cargo build --release + +echo "Build complete!" 
+echo "Library: target/release/liblight_client_verifier_ffi.a" +echo " target/release/liblight_client_verifier_ffi.so (Linux)" +echo " target/release/liblight_client_verifier_ffi.dylib (macOS)" diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h b/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h new file mode 100644 index 00000000..778fec73 --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/light_client_verifier.h @@ -0,0 +1,89 @@ +/** + * Light Client Verifier FFI + * + * C header for FFI interface to light client proof verification + */ + +#ifndef LIGHT_CLIENT_VERIFIER_H +#define LIGHT_CLIENT_VERIFIER_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Result codes for light client verification + */ +typedef enum { + LIGHT_CLIENT_VERIFY_SUCCESS = 0, + LIGHT_CLIENT_VERIFY_INVALID_PROOF = 1, + LIGHT_CLIENT_VERIFY_INVALID_MAGIC_HEADER = 2, + LIGHT_CLIENT_VERIFY_INVALID_PUBLIC_INPUTS = 3, + LIGHT_CLIENT_VERIFY_VERIFICATION_FAILED = 4, + LIGHT_CLIENT_VERIFY_INTERNAL_ERROR = 5, +} LightClientVerifyResult; + +/** + * Verify a light client proof payload + * + * The payload should contain: + * - Magic header: "LCPROOF\0" (8 bytes) + * - Serialized ProgramInput (rkyv format) + * + * @param payload_bytes Pointer to payload bytes + * @param payload_len Length of payload in bytes + * @param prev_state_root Pointer to 32-byte previous state root + * @param new_state_root Pointer to 32-byte new state root + * @param block_hash Pointer to 32-byte block hash + * @param chain_id Chain ID of EVM instance from partition config + * @param error_out Output pointer for error message (must be freed with light_client_free_string) + * @return LightClientVerifyResult status code + */ +LightClientVerifyResult light_client_verify_proof( + const uint8_t* payload_bytes, + size_t payload_len, + const uint8_t* prev_state_root, + const uint8_t* new_state_root, + const 
uint8_t* block_hash, + uint64_t chain_id, + char** error_out +); + +/** + * Free a string allocated by light_client_verify_proof + * + * @param s Pointer to string to free + */ +void light_client_free_string(char* s); + +/** + * Get the version of the FFI library + * + * @return Version string (do not free) + */ +const char* light_client_ffi_version(void); + +/** + * Validate a light client payload format + * + * Checks magic header and ProgramInput deserialization without executing validation. + * + * @param payload_bytes Pointer to payload bytes + * @param payload_len Length of payload in bytes + * @param error_out Output pointer for error message (must be freed with light_client_free_string) + * @return LightClientVerifyResult status code (SUCCESS or error) + */ +LightClientVerifyResult light_client_validate_payload( + const uint8_t* payload_bytes, + size_t payload_len, + char** error_out +); + +#ifdef __cplusplus +} +#endif + +#endif /* LIGHT_CLIENT_VERIFIER_H */ diff --git a/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs new file mode 100644 index 00000000..0488380d --- /dev/null +++ b/rootchain/consensus/zkverifier/light-client-verifier-ffi/src/lib.rs @@ -0,0 +1,333 @@ +use std::ffi::CString; +use std::os::raw::c_char; + +// Magic header for light client proofs: "LCPROOF\0" +const LIGHT_CLIENT_MAGIC: &[u8; 8] = b"LCPROOF\0"; + +/// Error codes for FFI interface +#[repr(C)] +pub enum LightClientVerifyResult { + Success = 0, + InvalidProof = 1, + InvalidMagicHeader = 2, + InvalidPublicInputs = 3, + VerificationFailed = 4, + InternalError = 5, +} + +/// Verify a light client proof payload +/// +/// # Arguments +/// * `payload_bytes` - Pointer to payload bytes (magic header + serialized ProgramInput) +/// * `payload_len` - Length of payload +/// * `prev_state_root` - Pointer to 32-byte previous state root +/// * `new_state_root` - Pointer to 32-byte new state root +/// * 
`block_hash` - Pointer to 32-byte block hash +/// * `chain_id` - EVM chain ID from partition config +/// * `error_out` - Output pointer for error message (caller must free with light_client_free_string) +/// +/// # Returns +/// LightClientVerifyResult code +#[no_mangle] +pub extern "C" fn light_client_verify_proof( + payload_bytes: *const u8, + payload_len: usize, + prev_state_root: *const u8, + new_state_root: *const u8, + block_hash: *const u8, + chain_id: u64, + error_out: *mut *mut c_char, +) -> LightClientVerifyResult { + // Safety checks + if payload_bytes.is_null() { + set_error(error_out, "null pointer passed to light_client_verify_proof"); + return LightClientVerifyResult::InternalError; + } + + if prev_state_root.is_null() || new_state_root.is_null() || block_hash.is_null() { + set_error(error_out, "null state root or block hash pointer"); + return LightClientVerifyResult::InvalidPublicInputs; + } + + // Convert C pointers to Rust slices + let payload_data = unsafe { std::slice::from_raw_parts(payload_bytes, payload_len) }; + let prev_root = unsafe { std::slice::from_raw_parts(prev_state_root, 32) }; + let new_root = unsafe { std::slice::from_raw_parts(new_state_root, 32) }; + let blk_hash = unsafe { std::slice::from_raw_parts(block_hash, 32) }; + + // Perform verification + match verify_light_client_proof_internal(payload_data, prev_root, new_root, blk_hash, chain_id) { + Ok(()) => LightClientVerifyResult::Success, + Err(e) => { + set_error(error_out, &e.to_string()); + match classify_error(&e) { + ErrorType::InvalidMagicHeader => LightClientVerifyResult::InvalidMagicHeader, + ErrorType::InvalidProof => LightClientVerifyResult::InvalidProof, + ErrorType::InvalidPublicInputs => LightClientVerifyResult::InvalidPublicInputs, + ErrorType::VerificationFailed => LightClientVerifyResult::VerificationFailed, + ErrorType::Internal => LightClientVerifyResult::InternalError, + } + } + } +} + +/// Internal verification logic +fn verify_light_client_proof_internal( + 
payload_data: &[u8], + prev_state_root: &[u8], + new_state_root: &[u8], + block_hash: &[u8], + chain_id: u64, +) -> anyhow::Result<()> { + // 1. Check magic header + if payload_data.len() < 8 { + return Err(anyhow::anyhow!( + "Payload too short: expected at least 8 bytes for magic header, got {}", + payload_data.len() + )); + } + + if &payload_data[0..8] != LIGHT_CLIENT_MAGIC.as_slice() { + return Err(anyhow::anyhow!( + "Invalid magic header: expected {:?}, got {:?}", + LIGHT_CLIENT_MAGIC, + &payload_data[0..8] + )); + } + + // 2. Deserialize ProgramInput (skip 8-byte magic header) + let input_bytes = &payload_data[8..]; + let program_input = rkyv::from_bytes::(input_bytes) + .map_err(|e| anyhow::anyhow!("Failed to deserialize ProgramInput: {}", e))?; + + // 3. Validate that we have blocks + if program_input.blocks.is_empty() { + return Err(anyhow::anyhow!("No blocks in ProgramInput")); + } + + // 4. Execute stateless validation + let output = guest_program::execution::stateless_validation_l1( + program_input.blocks, + program_input.execution_witness, + program_input.elasticity_multiplier, + chain_id, + ) + .map_err(|e| anyhow::anyhow!("Stateless validation failed: {}", e))?; + + // 6. Convert public inputs to H256 + let prev_root_h256 = ethrex_core::H256::from_slice(prev_state_root); + let new_root_h256 = ethrex_core::H256::from_slice(new_state_root); + let block_hash_h256 = ethrex_core::H256::from_slice(block_hash); + + // 7. Verify state roots match + if output.initial_state_hash != prev_root_h256 { + return Err(anyhow::anyhow!( + "Previous state root mismatch: expected {:?}, got {:?}", + prev_root_h256, + output.initial_state_hash + )); + } + + if output.final_state_hash != new_root_h256 { + return Err(anyhow::anyhow!( + "New state root mismatch: expected {:?}, got {:?}", + new_root_h256, + output.final_state_hash + )); + } + + // 8. 
Verify block hash matches + if output.last_block_hash != block_hash_h256 { + return Err(anyhow::anyhow!( + "Block hash mismatch: expected {:?}, got {:?}", + block_hash_h256, + output.last_block_hash + )); + } + + Ok(()) +} + +/// Free a string allocated by this library +#[no_mangle] +pub extern "C" fn light_client_free_string(s: *mut c_char) { + if !s.is_null() { + unsafe { + let _ = CString::from_raw(s); + } + } +} + +/// Get the version of this FFI library +#[no_mangle] +pub extern "C" fn light_client_ffi_version() -> *const c_char { + const VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + VERSION.as_ptr() as *const c_char +} + +/// Validate a light client payload format +/// +/// # Arguments +/// * `payload_bytes` - Pointer to payload bytes +/// * `payload_len` - Length of payload +/// * `error_out` - Output pointer for error message (caller must free with light_client_free_string) +/// +/// # Returns +/// LightClientVerifyResult code (Success or InvalidProof) +#[no_mangle] +pub extern "C" fn light_client_validate_payload( + payload_bytes: *const u8, + payload_len: usize, + error_out: *mut *mut c_char, +) -> LightClientVerifyResult { + // Safety checks + if payload_bytes.is_null() { + set_error(error_out, "null pointer passed to light_client_validate_payload"); + return LightClientVerifyResult::InternalError; + } + + if payload_len < 8 { + set_error(error_out, "payload too short (need at least 8 bytes for magic)"); + return LightClientVerifyResult::InvalidProof; + } + + // Convert C pointer to Rust slice + let payload_data = unsafe { std::slice::from_raw_parts(payload_bytes, payload_len) }; + + // Check magic header + if &payload_data[0..8] != LIGHT_CLIENT_MAGIC.as_slice() { + set_error(error_out, "invalid magic header"); + return LightClientVerifyResult::InvalidMagicHeader; + } + + // Try to deserialize ProgramInput + let input_bytes = &payload_data[8..]; + match rkyv::from_bytes::(input_bytes) { + Ok(_) => LightClientVerifyResult::Success, + Err(e) 
=> { + set_error(error_out, &format!("Failed to deserialize ProgramInput: {}", e)); + LightClientVerifyResult::InvalidProof + } + } +} + +// Helper functions + +enum ErrorType { + InvalidMagicHeader, + InvalidProof, + InvalidPublicInputs, + VerificationFailed, + Internal, +} + +fn classify_error(err: &anyhow::Error) -> ErrorType { + let msg = err.to_string().to_lowercase(); + if msg.contains("magic header") { + ErrorType::InvalidMagicHeader + } else if msg.contains("deserialize") { + ErrorType::InvalidProof + } else if msg.contains("state root mismatch") || msg.contains("public values") { + ErrorType::InvalidPublicInputs + } else if msg.contains("validation failed") { + ErrorType::VerificationFailed + } else { + ErrorType::Internal + } +} + +fn set_error(error_out: *mut *mut c_char, message: &str) { + if !error_out.is_null() { + if let Ok(c_string) = CString::new(message) { + unsafe { + *error_out = c_string.into_raw(); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ffi::CStr; + use std::ptr; + + #[test] + fn test_null_pointers() { + let mut error: *mut c_char = ptr::null_mut(); + let result = light_client_verify_proof( + ptr::null(), + 0, + ptr::null(), + ptr::null(), + ptr::null(), + 1, // chain_id + &mut error, + ); + assert_eq!(result as i32, LightClientVerifyResult::InternalError as i32); + + if !error.is_null() { + light_client_free_string(error); + } + } + + #[test] + fn test_version() { + let version = light_client_ffi_version(); + assert!(!version.is_null()); + let version_str = unsafe { CStr::from_ptr(version) }; + assert!(version_str.to_str().unwrap().starts_with("0.1.0")); + } + + #[test] + fn test_invalid_magic_header() { + let payload = vec![0u8; 100]; // Invalid magic + let prev_root = [0u8; 32]; + let new_root = [0u8; 32]; + let block_hash = [0u8; 32]; + let mut error: *mut c_char = ptr::null_mut(); + + let result = light_client_verify_proof( + payload.as_ptr(), + payload.len(), + prev_root.as_ptr(), + new_root.as_ptr(), + 
block_hash.as_ptr(), + 1, // chain_id + &mut error, + ); + + assert_eq!(result as i32, LightClientVerifyResult::InvalidMagicHeader as i32); + + if !error.is_null() { + light_client_free_string(error); + } + } + + #[test] + fn test_payload_too_short() { + let payload = vec![0u8; 5]; // Too short for magic + let prev_root = [0u8; 32]; + let new_root = [0u8; 32]; + let block_hash = [0u8; 32]; + let mut error: *mut c_char = ptr::null_mut(); + + let result = light_client_verify_proof( + payload.as_ptr(), + payload.len(), + prev_root.as_ptr(), + new_root.as_ptr(), + block_hash.as_ptr(), + 1, // chain_id + &mut error, + ); + + // Payload too short should return InvalidMagicHeader (checked first) or InvalidProof + // The actual error is InvalidMagicHeader (2) because we check magic header first + assert_eq!(result as i32, LightClientVerifyResult::InvalidMagicHeader as i32); + + if !error.is_null() { + light_client_free_string(error); + } + } +} diff --git a/rootchain/consensus/zkverifier/light_client_verifier.go b/rootchain/consensus/zkverifier/light_client_verifier.go new file mode 100644 index 00000000..abe37e82 --- /dev/null +++ b/rootchain/consensus/zkverifier/light_client_verifier.go @@ -0,0 +1,90 @@ +package zkverifier + +import ( + "encoding/hex" + "fmt" + "log/slog" +) + +// LightClientVerifier verifies light client proofs by executing validation logic directly +type LightClientVerifier struct { + enabled bool + ffiVerifier *LightClientVerifierFFI +} + +// NewLightClientVerifier creates a new light client verifier +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewLightClientVerifier(chainID uint64) (*LightClientVerifier, error) { + // Try to create FFI verifier + if ffiVerifier, err := NewLightClientVerifierFFI(chainID); err == nil { + slog.Info("Using Light Client FFI verifier", "version", GetLightClientFFIVersion(), "chain_id", chainID) + return &LightClientVerifier{ + enabled: true, + ffiVerifier: ffiVerifier, + }, 
nil + } else { + return nil, fmt.Errorf("Light Client FFI verifier not available: %w", err) + } +} + +// VerifyProof verifies a light client proof payload +// +// The proof payload should contain: +// - Magic header: "LCPROOF\0" (8 bytes) +// - Serialized ProgramInput (rkyv format) +// +// This function: +// 1. Validates the magic header +// 2. Deserializes the ProgramInput +// 3. Executes stateless_validation_l1() +// 4. Verifies the state roots and block hash match +func (v *LightClientVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + if !v.enabled { + return ErrVerifierNotConfigured + } + + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(previousStateRoot)) + } + + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(newStateRoot)) + } + + if len(blockHash) != 32 { + return fmt.Errorf("%w: blockHash must be 32 bytes, got %d", ErrInvalidProofFormat, len(blockHash)) + } + + // Check magic header + if len(proof) < 8 { + return fmt.Errorf("%w: payload too short for magic header", ErrInvalidProofFormat) + } + + magic := proof[0:8] + expectedMagic := []byte("LCPROOF\x00") + if string(magic) != string(expectedMagic) { + return fmt.Errorf("%w: invalid magic header: expected %v, got %v", + ErrInvalidProofFormat, expectedMagic, magic) + } + + slog.Debug("Verifying light client proof", + "payload_size", len(proof), + "witness_size", len(proof)-8, + "prev_root", hex.EncodeToString(previousStateRoot[:8]), + "new_root", hex.EncodeToString(newStateRoot[:8]), + "block_hash", hex.EncodeToString(blockHash[:8])) + + return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot, blockHash) +} + +func (v *LightClientVerifier) ProofType() ProofType { + return 
ProofTypeLightClient +} + +func (v *LightClientVerifier) IsEnabled() bool { + return v.enabled +} diff --git a/rootchain/consensus/zkverifier/light_client_verifier_ffi.go b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go new file mode 100644 index 00000000..5176e9a8 --- /dev/null +++ b/rootchain/consensus/zkverifier/light_client_verifier_ffi.go @@ -0,0 +1,119 @@ +//go:build zkverifier_ffi + +package zkverifier + +// #cgo LDFLAGS: -L${SRCDIR}/light-client-verifier-ffi/target/release -llight_client_verifier_ffi -ldl -lm +// #include "light-client-verifier-ffi/light_client_verifier.h" +// #include +import "C" +import ( + "fmt" + "unsafe" +) + +// LightClientVerifierFFI wraps the Rust FFI library for light client proof verification +type LightClientVerifierFFI struct { + enabled bool + chainID uint64 +} + +// NewLightClientVerifierFFI creates a new FFI-based light client verifier +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewLightClientVerifierFFI(chainID uint64) (*LightClientVerifierFFI, error) { + // Verify FFI library is available + version := C.light_client_ffi_version() + if version == nil { + return nil, fmt.Errorf("FFI library not available") + } + + return &LightClientVerifierFFI{ + enabled: true, + chainID: chainID, + }, nil +} + +// VerifyProof verifies a light client proof using the Rust FFI library +func (v *LightClientVerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + // Validate inputs + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(blockHash) != 32 { + return fmt.Errorf("%w: blockHash must be 32 bytes", ErrInvalidProofFormat) + } + + // 
Prepare C pointers + var errorOut *C.char + defer func() { + if errorOut != nil { + C.light_client_free_string(errorOut) + } + }() + + // Call FFI verification function + result := C.light_client_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&proof[0])), + C.size_t(len(proof)), + (*C.uint8_t)(unsafe.Pointer(&previousStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&blockHash[0])), + C.uint64_t(v.chainID), + &errorOut, + ) + + // Check result + switch result { + case C.LIGHT_CLIENT_VERIFY_SUCCESS: + return nil + case C.LIGHT_CLIENT_VERIFY_INVALID_PROOF: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrInvalidProofFormat, C.GoString(errorOut)) + } + return ErrInvalidProofFormat + case C.LIGHT_CLIENT_VERIFY_INVALID_MAGIC_HEADER: + if errorOut != nil { + return fmt.Errorf("invalid magic header: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid magic header") + case C.LIGHT_CLIENT_VERIFY_INVALID_PUBLIC_INPUTS: + if errorOut != nil { + return fmt.Errorf("invalid public inputs: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid public inputs") + case C.LIGHT_CLIENT_VERIFY_VERIFICATION_FAILED: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrProofVerificationFailed, C.GoString(errorOut)) + } + return ErrProofVerificationFailed + default: + if errorOut != nil { + return fmt.Errorf("internal error: %s", C.GoString(errorOut)) + } + return fmt.Errorf("internal error") + } +} + +// ProofType returns the proof type +func (v *LightClientVerifierFFI) ProofType() ProofType { + return ProofTypeLightClient +} + +// IsEnabled returns true if the verifier is enabled +func (v *LightClientVerifierFFI) IsEnabled() bool { + return v.enabled +} + +// GetLightClientFFIVersion returns the version of the FFI library +func GetLightClientFFIVersion() string { + version := C.light_client_ffi_version() + if version == nil { + return "unknown" + } + return C.GoString(version) +} diff --git 
a/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go b/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go new file mode 100644 index 00000000..a84b3433 --- /dev/null +++ b/rootchain/consensus/zkverifier/light_client_verifier_ffi_stub.go @@ -0,0 +1,36 @@ +//go:build !zkverifier_ffi + +package zkverifier + +import "fmt" + +// LightClientVerifierFFI is a stub when FFI is not available +type LightClientVerifierFFI struct { + chainID uint64 +} + +// NewLightClientVerifierFFI returns an error when FFI is not available +// chainID: chain identifier of EVM partition from the partition config (invariant) +func NewLightClientVerifierFFI(chainID uint64) (*LightClientVerifierFFI, error) { + return nil, fmt.Errorf("Light Client FFI verifier not available: build with -tags zkverifier_ffi to enable") +} + +// VerifyProof returns an error when FFI is not available +func (v *LightClientVerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + return fmt.Errorf("Light Client FFI verifier not available") +} + +// ProofType returns the proof type +func (v *LightClientVerifierFFI) ProofType() ProofType { + return ProofTypeLightClient +} + +// IsEnabled returns false when FFI is not available +func (v *LightClientVerifierFFI) IsEnabled() bool { + return false +} + +// GetLightClientFFIVersion returns "unavailable" when FFI is not built +func GetLightClientFFIVersion() string { + return "unavailable" +} diff --git a/rootchain/consensus/zkverifier/partition_config.go b/rootchain/consensus/zkverifier/partition_config.go new file mode 100644 index 00000000..655fef7b --- /dev/null +++ b/rootchain/consensus/zkverifier/partition_config.go @@ -0,0 +1,61 @@ +package zkverifier + +import "strconv" + +// Partition params keys for proof verification configuration. +// These are stored in PartitionDescriptionRecord.PartitionParams. +const ( + // ParamProofType specifies the proof type for the partition. 
+	// Valid values: "sp1", "light_client", "aggregator_rsmt_v1", "aggregator_zk_v1", "exec", "none"
+	// If empty or not set, m-of-n signature verification only (no ZK proof required).
+	ParamProofType = "proof_type"
+
+	// ParamVerificationKeyPath specifies the path to the verification key file.
+	// Required for SP1 proof type.
+	ParamVerificationKeyPath = "vkey_path"
+
+	// ParamChainID specifies the EVM chain ID for the partition.
+	// Required for SP1 and light_client proof types.
+	// This is different from the BFT Core network ID - each EVM partition has its own chain ID.
+	ParamChainID = "chain_id"
+)
+
+// ParseProofTypeFromParams extracts the ProofType from partition params.
+// Returns ProofTypeNone if proof_type is not set or empty.
+func ParseProofTypeFromParams(params map[string]string) ProofType {
+	if params == nil {
+		return ProofTypeNone
+	}
+	pt, ok := params[ParamProofType]
+	if !ok || pt == "" {
+		return ProofTypeNone
+	}
+	return ProofType(pt)
+}
+
+// ParseVKeyPathFromParams extracts the verification key path from partition params.
+// Returns empty string if not set.
+func ParseVKeyPathFromParams(params map[string]string) string {
+	if params == nil {
+		return ""
+	}
+	return params[ParamVerificationKeyPath]
+}
+
+// ParseChainIDFromParams extracts the EVM chain ID from partition params.
+// Returns 0 and false if not set or invalid.
+// The chain_id is specific to the EVM partition and verified against ZK proof public values.
+func ParseChainIDFromParams(params map[string]string) (uint64, bool) { + if params == nil { + return 0, false + } + cidStr, ok := params[ParamChainID] + if !ok || cidStr == "" { + return 0, false + } + cid, err := strconv.ParseUint(cidStr, 10, 64) + if err != nil { + return 0, false + } + return cid, true +} diff --git a/rootchain/consensus/zkverifier/registry.go b/rootchain/consensus/zkverifier/registry.go new file mode 100644 index 00000000..735c07d6 --- /dev/null +++ b/rootchain/consensus/zkverifier/registry.go @@ -0,0 +1,139 @@ +package zkverifier + +import ( + "fmt" + "sync" + + "github.com/unicitynetwork/bft-go-base/types" +) + +// registryCacheKey uniquely identifies a verifier configuration for a partition/shard/epoch. +type registryCacheKey struct { + PartitionID types.PartitionID + ShardID string // ShardID.Key() + Epoch uint64 +} + +// Registry manages ZK verifiers for partitions, caching them by partition+shard+epoch. +type Registry struct { + cache map[registryCacheKey]ZKVerifier + mu sync.RWMutex +} + +// NewRegistry creates a new ZK verifier registry. +func NewRegistry() *Registry { + return &Registry{ + cache: make(map[registryCacheKey]ZKVerifier), + } +} + +// GetVerifier returns a ZK verifier for the given partition configuration. +// It caches verifiers by partition+shard+epoch to avoid recreating them. 
+// +// Returns: +// - NoOpVerifier when proof_type is empty, "none", or "exec" (m-of-n mode) +// - The appropriate verifier for sp1/light_client +// - Error if the requested proof type is unavailable (FFI not built) or misconfigured +func (r *Registry) GetVerifier(partitionID types.PartitionID, shardID types.ShardID, epoch uint64, params map[string]string) (ZKVerifier, error) { + key := registryCacheKey{ + PartitionID: partitionID, + ShardID: shardID.Key(), + Epoch: epoch, + } + + // Check cache first + r.mu.RLock() + if v, ok := r.cache[key]; ok { + r.mu.RUnlock() + return v, nil + } + r.mu.RUnlock() + + // Create new verifier + verifier, err := r.createVerifier(params) + if err != nil { + return nil, err + } + + // Cache the verifier + r.mu.Lock() + // Double-check in case another goroutine created it + if v, ok := r.cache[key]; ok { + r.mu.Unlock() + return v, nil + } + r.cache[key] = verifier + r.mu.Unlock() + + return verifier, nil +} + +// createVerifier creates a new verifier based on partition params. 
+func (r *Registry) createVerifier(params map[string]string) (ZKVerifier, error) { + proofType := ParseProofTypeFromParams(params) + + // Check availability before attempting to create + if !IsProofTypeAvailable(proofType) { + return nil, fmt.Errorf("proof type %q not available (build with -tags zkverifier_ffi to enable)", proofType) + } + + switch proofType { + case ProofTypeNone, ProofTypeExec, "": + // m-of-n mode - no ZK proof verification + return &NoOpVerifier{}, nil + + case ProofTypeSP1: + vkeyPath := ParseVKeyPathFromParams(params) + if vkeyPath == "" { + return nil, fmt.Errorf("vkey_path required for SP1 proof type") + } + chainID, ok := ParseChainIDFromParams(params) + if !ok { + return nil, fmt.Errorf("chain_id required for SP1 proof type") + } + return NewSP1Verifier(vkeyPath, chainID) + + case ProofTypeLightClient: + chainID, ok := ParseChainIDFromParams(params) + if !ok { + return nil, fmt.Errorf("chain_id required for light_client proof type") + } + return NewLightClientVerifier(chainID) + + case ProofTypeAggregatorRSMTv1: + // Pure-Go verifier: no vkey, no chain_id. Consistency proof is + // self-contained and recomputes roots from the envelope. + return NewAggregatorRSMTVerifier(), nil + + case ProofTypeAggregatorZKv1: + vkeyPath := ParseVKeyPathFromParams(params) + if vkeyPath == "" { + return nil, fmt.Errorf("vkey_path required for aggregator_zk_v1 proof type") + } + return NewAggregatorZKVerifier(vkeyPath) + + default: + return nil, fmt.Errorf("unknown proof type: %s", proofType) + } +} + +// InvalidateCache removes the cached verifier for the given partition+shard+epoch. +// Call this when partition configuration changes. +func (r *Registry) InvalidateCache(partitionID types.PartitionID, shardID types.ShardID, epoch uint64) { + key := registryCacheKey{ + PartitionID: partitionID, + ShardID: shardID.Key(), + Epoch: epoch, + } + + r.mu.Lock() + delete(r.cache, key) + r.mu.Unlock() +} + +// ClearCache removes all cached verifiers. 
+func (r *Registry) ClearCache() { + r.mu.Lock() + r.cache = make(map[registryCacheKey]ZKVerifier) + r.mu.Unlock() +} diff --git a/rootchain/consensus/zkverifier/registry_test.go b/rootchain/consensus/zkverifier/registry_test.go new file mode 100644 index 00000000..7353bfc7 --- /dev/null +++ b/rootchain/consensus/zkverifier/registry_test.go @@ -0,0 +1,163 @@ +package zkverifier + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/unicitynetwork/bft-go-base/types" +) + +func TestNewRegistry(t *testing.T) { + r := NewRegistry() + require.NotNil(t, r) + require.NotNil(t, r.cache) + require.Empty(t, r.cache) +} + +func TestRegistry_GetVerifier_NoProofType(t *testing.T) { + r := NewRegistry() + + // Empty params should return NoOpVerifier + v, err := r.GetVerifier(1, types.ShardID{}, 0, nil) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) + require.False(t, v.IsEnabled()) + + // Empty proof_type should return NoOpVerifier + v, err = r.GetVerifier(1, types.ShardID{}, 0, map[string]string{}) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) + + // proof_type = "none" should return NoOpVerifier + v, err = r.GetVerifier(1, types.ShardID{}, 0, map[string]string{ParamProofType: string(ProofTypeNone)}) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) + + // proof_type = "exec" should return NoOpVerifier + v, err = r.GetVerifier(1, types.ShardID{}, 0, map[string]string{ParamProofType: string(ProofTypeExec)}) + require.NoError(t, err) + require.NotNil(t, v) + require.IsType(t, &NoOpVerifier{}, v) +} + +func TestRegistry_GetVerifier_Caching(t *testing.T) { + r := NewRegistry() + + // Get verifier twice - should be cached + params := map[string]string{ParamProofType: string(ProofTypeExec)} + v1, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + + v2, err := r.GetVerifier(1, types.ShardID{}, 0, params) + 
require.NoError(t, err) + + // Same verifier should be returned from cache (type check is sufficient for NoOpVerifier) + require.IsType(t, v1, v2) + + // Check that cache has entry + require.Len(t, r.cache, 1) + + // Different epoch should create new cache entry + _, err = r.GetVerifier(1, types.ShardID{}, 1, params) + require.NoError(t, err) + require.Len(t, r.cache, 2) + + // Different partition should create new cache entry + _, err = r.GetVerifier(2, types.ShardID{}, 0, params) + require.NoError(t, err) + require.Len(t, r.cache, 3) +} + +func TestRegistry_InvalidateCache(t *testing.T) { + r := NewRegistry() + + params := map[string]string{ParamProofType: string(ProofTypeExec)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + require.Len(t, r.cache, 1) + + // Invalidate cache + r.InvalidateCache(1, types.ShardID{}, 0) + require.Len(t, r.cache, 0) + + // Getting verifier again should recreate cache entry + _, err = r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + require.Len(t, r.cache, 1) +} + +func TestRegistry_ClearCache(t *testing.T) { + r := NewRegistry() + + params := map[string]string{ParamProofType: string(ProofTypeExec)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.NoError(t, err) + _, err = r.GetVerifier(2, types.ShardID{}, 0, params) + require.NoError(t, err) + + require.Len(t, r.cache, 2) + + r.ClearCache() + require.Empty(t, r.cache) +} + +func TestRegistry_GetVerifier_SP1MissingVKey(t *testing.T) { + r := NewRegistry() + + // SP1 without vkey_path should fail + params := map[string]string{ParamProofType: string(ProofTypeSP1)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + + // In stub mode (without FFI), error is "not available" + // In FFI mode, error is "vkey_path required" + if IsFFIAvailable() { + require.Contains(t, err.Error(), "vkey_path required") + } else { + require.Contains(t, err.Error(), "not available") + } +} + +func 
TestRegistry_GetVerifier_UnavailableProofType(t *testing.T) { + r := NewRegistry() + + // Test with an unknown proof type + params := map[string]string{ParamProofType: "unknown_type"} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + require.Contains(t, err.Error(), "not available") +} + +func TestRegistry_GetVerifier_SP1MissingChainID(t *testing.T) { + if !IsFFIAvailable() { + t.Skip("FFI not available, skipping chain_id requirement test") + } + + r := NewRegistry() + + // SP1 with vkey_path but without chain_id should fail + params := map[string]string{ + ParamProofType: string(ProofTypeSP1), + ParamVerificationKeyPath: "/path/to/vkey", + } + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + require.Contains(t, err.Error(), "chain_id required") +} + +func TestRegistry_GetVerifier_LightClientMissingChainID(t *testing.T) { + if !IsFFIAvailable() { + t.Skip("FFI not available, skipping chain_id requirement test") + } + + r := NewRegistry() + + // light_client without chain_id should fail + params := map[string]string{ParamProofType: string(ProofTypeLightClient)} + _, err := r.GetVerifier(1, types.ShardID{}, 0, params) + require.Error(t, err) + require.Contains(t, err.Error(), "chain_id required") +} diff --git a/rootchain/consensus/zkverifier/rsmt/doc.go b/rootchain/consensus/zkverifier/rsmt/doc.go new file mode 100644 index 00000000..0347c6b8 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/doc.go @@ -0,0 +1,30 @@ +// Package rsmt implements verification of consistency proofs produced by the +// Rust Radix Sparse Merkle Tree library (`crates/rsmt` in the aggregator +// repository). It is the Go counterpart of `crates/rsmt/src/consistency.rs`. +// +// The verifier consumes a compact binary envelope (`DecodeEnvelope`) that +// carries the batch of newly inserted leaves followed by the flat +// post-order opcode stream, and recomputes the old and new SMT roots with a +// stack machine (`Verify`). 
+// +// Wire format (aggregator_rsmt_v1): +// +// offset size field +// 0 4 leaf_count (big-endian u32) +// 4 ... leaves: leaf_count × { key[32] || value_len (u16 BE) || value[value_len] } +// ... ... opcode stream (flat bytes, runs to end of buffer) +// +// Leaves must be pre-sorted by SortKey (per-byte bit-reversed key, LSB-first +// traversal order), this package does not reorder them. +// +// Opcodes: +// +// S(h) 0x00 || h[32] - unchanged subtree hash +// L 0x01 - new leaf; next batch entry +// N(d) 0x02 || d - internal node at depth d, pops two children +// +// Hashes (SHA-256, matching `crates/rsmt/src/hash.rs`): +// +// hash_leaf(key, value) = SHA256(0x00 || key || value) +// hash_node(l, r, d) = SHA256(0x01 || d || l || r) +package rsmt diff --git a/rootchain/consensus/zkverifier/rsmt/envelope.go b/rootchain/consensus/zkverifier/rsmt/envelope.go new file mode 100644 index 00000000..d3f24387 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/envelope.go @@ -0,0 +1,105 @@ +package rsmt + +import ( + "encoding/binary" + "errors" + "fmt" + "math" +) + +// MaxLeafCount bounds the number of leaves in a single envelope to prevent +// pathological allocations from malicious inputs. A batch of 1M leaves would +// already dwarf any realistic aggregator round. +const MaxLeafCount = 1 << 20 + +// Leaf is a single (key, value) pair in the batch carried by the envelope. +// Value is a slice into the original envelope buffer; callers must copy it +// if they need to retain it after the buffer is reused. +type Leaf struct { + Key [32]byte + Value []byte +} + +// Envelope is the decoded contents of a zk_proof field for the +// `aggregator_rsmt_v1` proof type. +// +// Leaves are in wire order (caller-sorted by SortKey); Proof is the flat +// opcode stream. See package doc for the full wire format. +type Envelope struct { + Leaves []Leaf + Proof []byte +} + +// Envelope decoding errors. 
+var ( + ErrEnvelopeTruncated = errors.New("rsmt: envelope truncated") + ErrEnvelopeTooManyLeaves = errors.New("rsmt: envelope leaf count exceeds maximum") +) + +// DecodeEnvelope parses the wire format described in the package doc. +// It returns a view over the input buffer: Leaf values alias into b. +func DecodeEnvelope(b []byte) (*Envelope, error) { + if len(b) < 4 { + return nil, fmt.Errorf("%w: missing leaf_count", ErrEnvelopeTruncated) + } + count := binary.BigEndian.Uint32(b[0:4]) + if count > MaxLeafCount { + return nil, fmt.Errorf("%w: %d > %d", ErrEnvelopeTooManyLeaves, count, MaxLeafCount) + } + pos := 4 + leaves := make([]Leaf, 0, count) + for i := uint32(0); i < count; i++ { + if pos+32+2 > len(b) { + return nil, fmt.Errorf("%w: leaf %d header", ErrEnvelopeTruncated, i) + } + var key [32]byte + copy(key[:], b[pos:pos+32]) + pos += 32 + vlen := int(binary.BigEndian.Uint16(b[pos : pos+2])) + pos += 2 + if pos+vlen > len(b) { + return nil, fmt.Errorf("%w: leaf %d value (need %d, have %d)", + ErrEnvelopeTruncated, i, vlen, len(b)-pos) + } + leaves = append(leaves, Leaf{Key: key, Value: b[pos : pos+vlen]}) + pos += vlen + } + return &Envelope{Leaves: leaves, Proof: b[pos:]}, nil +} + +// EncodeEnvelope produces the wire format for the given (already sorted) +// leaves and flat opcode stream. Provided primarily for tests and fixtures; +// production envelopes are built by the Rust aggregator. 
+func EncodeEnvelope(leaves []Leaf, proof []byte) ([]byte, error) { + numLeaves := len(leaves) + if numLeaves > MaxLeafCount { + return nil, fmt.Errorf("%w: %d > %d", ErrEnvelopeTooManyLeaves, numLeaves, MaxLeafCount) + } + size := 4 + len(proof) + for i := range leaves { + vlen := len(leaves[i].Value) + if vlen > math.MaxUint16 { + return nil, fmt.Errorf("rsmt: leaf %d value length %d exceeds u16 max", + i, vlen) + } + size += 32 + 2 + vlen + } + out := make([]byte, 0, size) + var hdr [4]byte + binary.BigEndian.PutUint32(hdr[:], uint32(numLeaves)) + out = append(out, hdr[:]...) + for i := range leaves { + out = append(out, leaves[i].Key[:]...) + var lhdr [2]byte + vlen := len(leaves[i].Value) + if vlen > math.MaxUint16 { + return nil, fmt.Errorf("rsmt: leaf %d value length %d exceeds u16 max", + i, vlen) + } + binary.BigEndian.PutUint16(lhdr[:], uint16(vlen)) + out = append(out, lhdr[:]...) + out = append(out, leaves[i].Value...) + } + out = append(out, proof...) + return out, nil +} diff --git a/rootchain/consensus/zkverifier/rsmt/fixtures_test.go b/rootchain/consensus/zkverifier/rsmt/fixtures_test.go new file mode 100644 index 00000000..9b1c1817 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/fixtures_test.go @@ -0,0 +1,86 @@ +package rsmt + +import ( + "encoding/hex" + "encoding/json" + "os" + "path/filepath" + "testing" +) + +// TestCrossLanguageFixtures loads envelope fixtures produced by the Rust +// side (crates/rsmt/examples/dump_envelope_fixtures.rs) and verifies that +// the Go implementation accepts each one. This is the authoritative check +// that the two implementations stay wire-compatible. 
+// +// Regenerate with: +// +// cargo run --example dump_envelope_fixtures -- \ +// bft-core/rootchain/consensus/zkverifier/rsmt/testdata +func TestCrossLanguageFixtures(t *testing.T) { + path := filepath.Join("testdata", "fixtures.json") + raw, err := os.ReadFile(path) + if err != nil { + t.Fatalf("read %s: %v", path, err) + } + + var doc struct { + Fixtures []struct { + Name string `json:"name"` + PrevRoot string `json:"prev_root"` + NewRoot string `json:"new_root"` + Envelope string `json:"envelope"` + } `json:"fixtures"` + } + if err := json.Unmarshal(raw, &doc); err != nil { + t.Fatalf("parse fixtures.json: %v", err) + } + if len(doc.Fixtures) == 0 { + t.Fatal("no fixtures loaded") + } + + decodeRoot := func(s string) (Root, error) { + if s == "" { + return Root{}, nil + } + b, err := hex.DecodeString(s) + if err != nil { + return Root{}, err + } + return RootFromBytes(b) + } + + for _, f := range doc.Fixtures { + t.Run(f.Name, func(t *testing.T) { + envBytes, err := hex.DecodeString(f.Envelope) + if err != nil { + t.Fatalf("decode envelope: %v", err) + } + env, err := DecodeEnvelope(envBytes) + if err != nil { + t.Fatalf("DecodeEnvelope: %v", err) + } + prev, err := decodeRoot(f.PrevRoot) + if err != nil { + t.Fatalf("prev_root: %v", err) + } + newR, err := decodeRoot(f.NewRoot) + if err != nil { + t.Fatalf("new_root: %v", err) + } + if err := Verify(env, prev, newR); err != nil { + t.Fatalf("Verify(%s): %v", f.Name, err) + } + + // Round-trip: re-encode the decoded envelope and confirm it + // matches the original bytes exactly, locking the wire format. 
+ reenc, err := EncodeEnvelope(env.Leaves, env.Proof) + if err != nil { + t.Fatalf("re-encode: %v", err) + } + if string(reenc) != string(envBytes) { + t.Fatalf("envelope re-encode mismatch for %s", f.Name) + } + }) + } +} diff --git a/rootchain/consensus/zkverifier/rsmt/hash.go b/rootchain/consensus/zkverifier/rsmt/hash.go new file mode 100644 index 00000000..3c791462 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/hash.go @@ -0,0 +1,27 @@ +package rsmt + +import "crypto/sha256" + +// HashLeaf computes SHA256(0x00 || key || value). +// Matches `Sha256Hasher::hash_leaf` in crates/rsmt/src/hash.rs. +func HashLeaf(key [32]byte, value []byte) [32]byte { + h := sha256.New() + h.Write([]byte{0x00}) + h.Write(key[:]) + h.Write(value) + var out [32]byte + h.Sum(out[:0]) + return out +} + +// HashNode computes SHA256(0x01 || depth || left || right). +// Matches `Sha256Hasher::hash_node` in crates/rsmt/src/hash.rs. +func HashNode(left, right [32]byte, depth uint8) [32]byte { + h := sha256.New() + h.Write([]byte{0x01, depth}) + h.Write(left[:]) + h.Write(right[:]) + var out [32]byte + h.Sum(out[:0]) + return out +} diff --git a/rootchain/consensus/zkverifier/rsmt/sortkey.go b/rootchain/consensus/zkverifier/rsmt/sortkey.go new file mode 100644 index 00000000..89d66f66 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/sortkey.go @@ -0,0 +1,40 @@ +package rsmt + +// bitReverseTable reverses the bits within a single byte. +// bitReverseTable[0b0000_0001] == 0b1000_0000, etc. +var bitReverseTable [256]byte + +func init() { + for i := 0; i < 256; i++ { + var r byte + for bit := 0; bit < 8; bit++ { + if (i>>bit)&1 != 0 { + r |= 1 << (7 - bit) + } + } + bitReverseTable[i] = r + } +} + +// SortKey converts a 256-bit SMT key into its LSB-first lexicographic sort +// order by bit-reversing each byte in place (no byte-order reversal). +// Matches `get_sort_key` in crates/rsmt/src/path.rs. 
+func SortKey(key [32]byte) [32]byte { + var out [32]byte + for i := 0; i < 32; i++ { + out[i] = bitReverseTable[key[i]] + } + return out +} + +// sortKeyLess reports whether SortKey(a) < SortKey(b). +func sortKeyLess(a, b [32]byte) bool { + sa := SortKey(a) + sb := SortKey(b) + for i := 0; i < 32; i++ { + if sa[i] != sb[i] { + return sa[i] < sb[i] + } + } + return false +} diff --git a/rootchain/consensus/zkverifier/rsmt/testdata/fixtures.json b/rootchain/consensus/zkverifier/rsmt/testdata/fixtures.json new file mode 100644 index 00000000..df01ecd9 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/testdata/fixtures.json @@ -0,0 +1,34 @@ +{ + "fixtures": [ + { + "name": "empty_batch_empty_tree", + "prev_root": "", + "new_root": "", + "envelope": "00000000" + }, + { + "name": "single_leaf_into_empty", + "prev_root": "", + "new_root": "37456985272abf5c393d420e08b83520e0110857fe1fdcaed96bb2c5a13aa0df", + "envelope": "000000010500000000000000000000000000000000000000000000000000000000000000000568656c6c6f01" + }, + { + "name": "two_leaves_into_empty", + "prev_root": "", + "new_root": "f276ee0665e2474e4eff31d9681640463dfde89ea35b0a98e7bc370c3b95d6a4", + "envelope": "0000000200000000000000000000000000000000000000000000000000000000000000010002763080000000000000000000000000000000000000000000000000000000000000020002763101010207" + }, + { + "name": "insert_into_existing", + "prev_root": "8b3c07970a80a31948fa634bbba4132f6f75957b35f1750f7fd04070badaa69d", + "new_root": "7ebfa43283488c37459bbf6c91159f7885c21e78d134a73d4040301ead1686bc", + "envelope": "000000024000000000000000000000000000000000000000000000000000000000000004000164500000000000000000000000000000000000000000000000000000000000000500016501007f7988e562af77b1ed732f7e2dfc4e7c5394267eea169af325d952f4fa843df2020500f380a374af649fefeff0c7f374a5a6455df030ab218981b88e1c16659100e6d601020600a7f302c9e9ea09e6925e772a168dae23620ad9f753bfd367001d74b06f8fca9a02050204" + }, + { + "name": "fifty_leaves_into_empty", + "prev_root": 
"", + "new_root": "30bdc202cfe1ace9d2f10b598b2231ab09cd99968171d475af2aa49d5c300def", + "envelope": "0000003200000000000000000000000000000000000000000000000000000000000000aa000300550120000000000000000000000000000020000000000000000000000000000000aa000320752110000000000000000000000000000010000000000000000000000000000000aa000310451130000000000000000000000000000030000000000000000000000000000000aa000330653188000000000000000000000000000008000000000000000000000000000000aa0003085d09a8000000000000000000000000000028000000000000000000000000000000aa0003287d2998000000000000000000000000000018000000000000000000000000000000aa0003184d1944000000000000000000000000000004000000000000000000000000000000aa000304510564000000000000000000000000000024000000000000000000000000000000aa000324712554000000000000000000000000000014000000000000000000000000000000aa0003144115cc00000000000000000000000000000c000000000000000000000000000000aa00030c590dec00000000000000000000000000002c000000000000000000000000000000aa00032c792ddc00000000000000000000000000001c000000000000000000000000000000aa00031c491d42000000000000000000000000000022000000000000000000000000000000aa000322772322000000000000000000000000000002000000000000000000000000000000aa000302570332000000000000000000000000000012000000000000000000000000000000aa0003124713ca00000000000000000000000000002a000000000000000000000000000000aa00032a7f2baa00000000000000000000000000000a000000000000000000000000000000aa00030a5f0bba00000000000000000000000000001a000000000000000000000000000000aa00031a4f1b86000000000000000000000000000026000000000000000000000000000000aa000326732766000000000000000000000000000006000000000000000000000000000000aa000306530776000000000000000000000000000016000000000000000000000000000000aa00031643170e00000000000000000000000000002e000000000000000000000000000000aa00032e7b2fee00000000000000000000000000000e000000000000000000000000000000aa00030e5b0ffe00000000000000000000000000001e000000000000000000000000000000aa00031e4b1f41000000000000000000000000000031000000000
000000000000000000000aa000331643221000000000000000000000000000011000000000000000000000000000000aa000311441211000000000000000000000000000001000000000000000000000000000000aa000301540231000000000000000000000000000021000000000000000000000000000000aa0003217422a9000000000000000000000000000019000000000000000000000000000000aa0003194c1a99000000000000000000000000000009000000000000000000000000000000aa0003095c0ab9000000000000000000000000000029000000000000000000000000000000aa0003297c2a65000000000000000000000000000015000000000000000000000000000000aa000315401655000000000000000000000000000005000000000000000000000000000000aa000305500675000000000000000000000000000025000000000000000000000000000000aa0003257026ed00000000000000000000000000001d000000000000000000000000000000aa00031d481edd00000000000000000000000000000d000000000000000000000000000000aa00030d580efd00000000000000000000000000002d000000000000000000000000000000aa00032d782e43000000000000000000000000000013000000000000000000000000000000aa000313461453000000000000000000000000000023000000000000000000000000000000aa000323762433000000000000000000000000000003000000000000000000000000000000aa0003035604cb00000000000000000000000000001b000000000000000000000000000000aa00031b4e1cdb00000000000000000000000000002b000000000000000000000000000000aa00032b7e2cbb00000000000000000000000000000b000000000000000000000000000000aa00030b5e0c87000000000000000000000000000017000000000000000000000000000000aa000317421897000000000000000000000000000027000000000000000000000000000000aa000327722877000000000000000000000000000007000000000000000000000000000000aa00030752080f00000000000000000000000000001f000000000000000000000000000000aa00031f4a201f00000000000000000000000000002f000000000000000000000000000000aa00032f7a30ff00000000000000000000000000000f000000000000000000000000000000aa00030f5a1001010205010102050204010102050102040203010102050102040101020501020402030202010102050102040101020501020402030101020501020401010205010204020302020201010102050101020502040101010205020402030101010
205020401010102050204020302020101010205020401010102050204020301010102050204010101020502040203020202010200" + } + ] +} diff --git a/rootchain/consensus/zkverifier/rsmt/verify.go b/rootchain/consensus/zkverifier/rsmt/verify.go new file mode 100644 index 00000000..854a42b6 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/verify.go @@ -0,0 +1,171 @@ +package rsmt + +import ( + "bytes" + "errors" + "fmt" +) + +// Verification errors. +var ( + ErrBadOpcode = errors.New("rsmt: bad opcode") + ErrOpcodeTruncated = errors.New("rsmt: opcode stream truncated") + ErrStackUnderflow = errors.New("rsmt: stack underflow") + ErrStackFinal = errors.New("rsmt: stack not reduced to single element") + ErrRootMismatch = errors.New("rsmt: recomputed root does not match claimed root") + ErrBatchUnderrun = errors.New("rsmt: opcode stream references more leaves than provided") + ErrBatchUnused = errors.New("rsmt: not all leaves consumed by opcode stream") + ErrPostStateMissing = errors.New("rsmt: N opcode with missing post-state child") + ErrLeavesUnsorted = errors.New("rsmt: leaves not sorted by SortKey") + ErrEmptyBatchNonEmptyProof = errors.New("rsmt: empty batch but non-empty proof") + ErrEmptyBatchRootChange = errors.New("rsmt: empty batch but root changed") +) + +// Root represents an optional 32-byte SMT root: Set == false models the +// None case (empty tree). Matches `Option<[u8;32]>` on the Rust side. +type Root struct { + Hash [32]byte + Set bool +} + +// RootFromBytes constructs a Root from a 0- or 32-byte slice. Empty slice or +// nil is treated as "no root" (empty tree); any other length is an error. +func RootFromBytes(b []byte) (Root, error) { + if len(b) == 0 { + return Root{}, nil + } + if len(b) != 32 { + return Root{}, fmt.Errorf("rsmt: root must be 32 bytes, got %d", len(b)) + } + var r Root + copy(r.Hash[:], b) + r.Set = true + return r, nil +} + +// stackEntry is a (pre_hash, post_hash) pair; flags track the Option<..> side. 
+// Matches `(Option<[u8;32]>, Option<[u8;32]>)` in crates/rsmt/src/consistency.rs. +type stackEntry struct { + pre, post [32]byte + preSet, postSet bool +} + +// Verify recomputes the old and new SMT roots from the envelope and checks +// them against oldRoot / newRoot. Returns nil iff the envelope is a valid +// consistency proof for the claimed transition. +// +// Leaves in env.Leaves MUST already be sorted by SortKey (with no duplicates). +// The verifier performs a single linear pre-check to enforce this invariant. +func Verify(env *Envelope, oldRoot, newRoot Root) error { + if env == nil { + return errors.New("rsmt: nil envelope") + } + + // Empty batch: must have empty proof and unchanged root. + if len(env.Leaves) == 0 { + if len(env.Proof) != 0 { + return ErrEmptyBatchNonEmptyProof + } + if oldRoot.Set != newRoot.Set || (oldRoot.Set && oldRoot.Hash != newRoot.Hash) { + return ErrEmptyBatchRootChange + } + return nil + } + + // Assert leaves are in SortKey order (also rejects duplicates). 
+ // TODO: remove if implementation is stable + for i := 1; i < len(env.Leaves); i++ { + if !sortKeyLess(env.Leaves[i-1].Key, env.Leaves[i].Key) { + return fmt.Errorf("%w: at index %d", ErrLeavesUnsorted, i) + } + } + + stack := make([]stackEntry, 0, 64) + proof := env.Proof + bi := 0 + pi := 0 + + for pi < len(proof) { + op := proof[pi] + pi++ + switch op { + case 0x00: // S(h): push (h, h) + if pi+32 > len(proof) { + return fmt.Errorf("%w: S payload", ErrOpcodeTruncated) + } + var h [32]byte + copy(h[:], proof[pi:pi+32]) + pi += 32 + stack = append(stack, stackEntry{pre: h, post: h, preSet: true, postSet: true}) + + case 0x01: // L: consume next leaf + if bi >= len(env.Leaves) { + return ErrBatchUnderrun + } + leaf := &env.Leaves[bi] + bi++ + lh := HashLeaf(leaf.Key, leaf.Value) + stack = append(stack, stackEntry{post: lh, postSet: true}) + + case 0x02: // N(depth): pop right, pop left, push combined + if pi >= len(proof) { + return fmt.Errorf("%w: N depth", ErrOpcodeTruncated) + } + depth := proof[pi] + pi++ + if len(stack) < 2 { + return ErrStackUnderflow + } + right := stack[len(stack)-1] + left := stack[len(stack)-2] + stack = stack[:len(stack)-2] + + // pre-state: None children propagate their sibling's pre-hash. + var combined stackEntry + switch { + case !left.preSet && !right.preSet: + // Both children new — no pre-state hash at this level. + case !left.preSet: + combined.pre = right.pre + combined.preSet = right.preSet + case !right.preSet: + combined.pre = left.pre + combined.preSet = left.preSet + default: + combined.pre = HashNode(left.pre, right.pre, depth) + combined.preSet = true + } + + // post-state: both children MUST have a post-hash. 
+ if !left.postSet || !right.postSet { + return ErrPostStateMissing + } + combined.post = HashNode(left.post, right.post, depth) + combined.postSet = true + + stack = append(stack, combined) + + default: + return fmt.Errorf("%w: 0x%02x", ErrBadOpcode, op) + } + } + + if bi != len(env.Leaves) { + return ErrBatchUnused + } + if len(stack) != 1 { + return ErrStackFinal + } + + top := stack[0] + if top.preSet != oldRoot.Set || top.postSet != newRoot.Set { + return ErrRootMismatch + } + if top.preSet && !bytes.Equal(top.pre[:], oldRoot.Hash[:]) { + return ErrRootMismatch + } + if top.postSet && !bytes.Equal(top.post[:], newRoot.Hash[:]) { + return ErrRootMismatch + } + return nil +} diff --git a/rootchain/consensus/zkverifier/rsmt/verify_test.go b/rootchain/consensus/zkverifier/rsmt/verify_test.go new file mode 100644 index 00000000..11c65aa2 --- /dev/null +++ b/rootchain/consensus/zkverifier/rsmt/verify_test.go @@ -0,0 +1,338 @@ +package rsmt + +import ( + "bytes" + "errors" + "testing" +) + +// Helpers to build opcode streams. + +func opS(h [32]byte) []byte { + out := make([]byte, 33) + out[0] = 0x00 + copy(out[1:], h[:]) + return out +} + +func opL() []byte { return []byte{0x01} } + +func opN(depth uint8) []byte { return []byte{0x02, depth} } + +func key(b byte) [32]byte { + var k [32]byte + k[0] = b + return k +} + +// TestSortKey_MatchesRust locks in the bit-reverse-per-byte behavior. +func TestSortKey_MatchesRust(t *testing.T) { + var k [32]byte + k[0] = 0b0000_0001 // bit 0 set + sk := SortKey(k) + if sk[0] != 0b1000_0000 { + t.Fatalf("SortKey bit 0 set → sk[0]=%#b, want 0b1000_0000", sk[0]) + } + if sk[31] != 0 { + t.Fatalf("SortKey trailing byte = %d, want 0", sk[31]) + } +} + +func TestSortKey_Ordering(t *testing.T) { + // Two keys differing only at bit 0: bit-0-clear sorts before bit-0-set. 
+ k0 := [32]byte{} + k1 := [32]byte{} + k1[0] = 0x01 + if !sortKeyLess(k0, k1) { + t.Fatalf("expected sortKeyLess(k0, k1)") + } + if sortKeyLess(k1, k0) { + t.Fatalf("expected !sortKeyLess(k1, k0)") + } +} + +// TestVerify_EmptyBatch exercises the short-circuit: empty envelope, unchanged root. +func TestVerify_EmptyBatch(t *testing.T) { + env := &Envelope{} + var h [32]byte + for i := range h { + h[i] = 0xab + } + r := Root{Hash: h, Set: true} + if err := Verify(env, r, r); err != nil { + t.Fatalf("empty batch, equal roots: %v", err) + } + + // Empty envelope with different roots must fail. + var h2 [32]byte + h2[0] = 0xff + if err := Verify(env, r, Root{Hash: h2, Set: true}); !errors.Is(err, ErrEmptyBatchRootChange) { + t.Fatalf("empty batch, different roots: got %v, want ErrEmptyBatchRootChange", err) + } + + // Empty batch but non-empty proof must fail. + env2 := &Envelope{Proof: []byte{0x00}} + if err := Verify(env2, r, r); !errors.Is(err, ErrEmptyBatchNonEmptyProof) { + t.Fatalf("empty batch, non-empty proof: got %v, want ErrEmptyBatchNonEmptyProof", err) + } +} + +// TestVerify_SingleLeafIntoEmptyTree inserts a single leaf into an empty tree. +// The proof stream is just [L]; after running, stack top is (None, HashLeaf(k,v)). +func TestVerify_SingleLeafIntoEmptyTree(t *testing.T) { + k := key(0x05) + v := []byte("hello") + expected := HashLeaf(k, v) + + env := &Envelope{ + Leaves: []Leaf{{Key: k, Value: v}}, + Proof: opL(), + } + if err := Verify(env, Root{}, Root{Hash: expected, Set: true}); err != nil { + t.Fatalf("single leaf: %v", err) + } + + // Wrong new root → ErrRootMismatch. + var bad [32]byte + if err := Verify(env, Root{}, Root{Hash: bad, Set: true}); !errors.Is(err, ErrRootMismatch) { + t.Fatalf("wrong new root: got %v, want ErrRootMismatch", err) + } + // Wrong old root (claims tree non-empty) → ErrRootMismatch. 
+ if err := Verify(env, Root{Hash: bad, Set: true}, Root{Hash: expected, Set: true}); !errors.Is(err, ErrRootMismatch) { + t.Fatalf("wrong old root: got %v, want ErrRootMismatch", err) + } +} + +// TestVerify_TwoLeavesIntoEmptyTree: two leaves diverging at bit 0. +// Proof stream is [L, L, N(depth=0)]. The split bit (depth) is the index of +// the first set bit in the XOR of the two sorted keys, which is 0 here +// (k0=0x00 vs k1=0x01 → bit 0 differs). The left child is the leaf whose +// bit 0 is 0 (k0); right child is k1. +func TestVerify_TwoLeavesIntoEmptyTree(t *testing.T) { + k0 := key(0x00) // bit 0 = 0 → goes left under a depth=0 split + k1 := key(0x01) // bit 0 = 1 → goes right + v0 := []byte("v0") + v1 := []byte("v1") + + // Leaves must be in SortKey order. bit-0-clear sorts before bit-0-set. + leaves := []Leaf{{Key: k0, Value: v0}, {Key: k1, Value: v1}} + + l0 := HashLeaf(k0, v0) + l1 := HashLeaf(k1, v1) + root := HashNode(l0, l1, 0) + + // Build [L, L, N(0)] + var proof bytes.Buffer + proof.Write(opL()) + proof.Write(opL()) + proof.Write(opN(0)) + + env := &Envelope{Leaves: leaves, Proof: proof.Bytes()} + if err := Verify(env, Root{}, Root{Hash: root, Set: true}); err != nil { + t.Fatalf("two leaves: %v", err) + } +} + +// TestVerify_UnsortedLeavesRejected feeds two leaves in reverse SortKey order. +func TestVerify_UnsortedLeavesRejected(t *testing.T) { + k0 := key(0x00) + k1 := key(0x01) + leaves := []Leaf{ + {Key: k1, Value: []byte("v1")}, // sorts AFTER k0 — wrong order + {Key: k0, Value: []byte("v0")}, + } + var proof bytes.Buffer + proof.Write(opL()) + proof.Write(opL()) + proof.Write(opN(0)) + env := &Envelope{Leaves: leaves, Proof: proof.Bytes()} + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrLeavesUnsorted) { + t.Fatalf("unsorted: got %v, want ErrLeavesUnsorted", err) + } +} + +// TestVerify_DuplicateLeavesRejected: duplicate key violates strict ordering. 
+func TestVerify_DuplicateLeavesRejected(t *testing.T) { + k := key(0x01) + env := &Envelope{ + Leaves: []Leaf{ + {Key: k, Value: []byte("a")}, + {Key: k, Value: []byte("b")}, + }, + Proof: append(append(opL(), opL()...), opN(0)...), + } + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrLeavesUnsorted) { + t.Fatalf("duplicate key: got %v, want ErrLeavesUnsorted", err) + } +} + +// TestVerify_InsertIntoExistingTree: existing single leaf at key(0x02); +// new leaf at key(0x01). After insertion the tree is a 2-leaf tree; the +// consistency proof is [L, S(h_existing), N(depth=0)]. +func TestVerify_InsertIntoExistingTree(t *testing.T) { + kOld := key(0x02) // bit 0 = 0 → left + vOld := []byte("old") + // Actually, key(0x02) has byte0 = 0b0000_0010, bit 0 (LSB) = 0 → left. + kNew := key(0x01) // bit 0 = 1 → right + vNew := []byte("new") + + hOld := HashLeaf(kOld, vOld) + hNew := HashLeaf(kNew, vNew) + oldRoot := hOld // single-leaf tree + newRoot := HashNode(hOld, hNew, 0) + + // Sort new leaves by SortKey (trivially one leaf). + leaves := []Leaf{{Key: kNew, Value: vNew}} + + // Proof order: left subtree first (kOld, bit 0 = 0, unchanged → S), + // then right subtree (kNew, bit 0 = 1, new leaf → L), then N(0). + var proof bytes.Buffer + proof.Write(opS(hOld)) + proof.Write(opL()) + proof.Write(opN(0)) + + env := &Envelope{Leaves: leaves, Proof: proof.Bytes()} + if err := Verify(env, Root{Hash: oldRoot, Set: true}, Root{Hash: newRoot, Set: true}); err != nil { + t.Fatalf("insert into existing tree: %v", err) + } +} + +// TestVerify_BatchUnderrun: proof references more leaves than provided. 
+func TestVerify_BatchUnderrun(t *testing.T) { + env := &Envelope{ + Leaves: []Leaf{{Key: key(0x01), Value: []byte("v")}}, + Proof: append(opL(), opL()...), + } + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrBatchUnderrun) { + t.Fatalf("got %v, want ErrBatchUnderrun", err) + } +} + +// TestVerify_BatchUnused: more leaves than opcode L references. +func TestVerify_BatchUnused(t *testing.T) { + env := &Envelope{ + Leaves: []Leaf{ + {Key: key(0x00), Value: []byte("a")}, + {Key: key(0x01), Value: []byte("b")}, + }, + Proof: opL(), + } + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrBatchUnused) { + t.Fatalf("got %v, want ErrBatchUnused", err) + } +} + +// singleLeafEnvelope returns an envelope with one leaf so opcode-level tests +// bypass the empty-batch short-circuit. +func singleLeafEnvelope(proof []byte) *Envelope { + return &Envelope{ + Leaves: []Leaf{{Key: key(0x01), Value: []byte("v")}}, + Proof: proof, + } +} + +// TestVerify_TruncatedOpcodeStream: S without its 32-byte payload. +func TestVerify_TruncatedOpcodeStream(t *testing.T) { + env := singleLeafEnvelope([]byte{0x00, 0x01, 0x02}) // S, truncated + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrOpcodeTruncated) { + t.Fatalf("got %v, want ErrOpcodeTruncated", err) + } +} + +// TestVerify_BadOpcode. +func TestVerify_BadOpcode(t *testing.T) { + env := singleLeafEnvelope([]byte{0x7f}) + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrBadOpcode) { + t.Fatalf("got %v, want ErrBadOpcode", err) + } +} + +// TestVerify_StackUnderflow: N without two children. 
+func TestVerify_StackUnderflow(t *testing.T) { + env := singleLeafEnvelope(opN(0)) + if err := Verify(env, Root{}, Root{Set: true}); !errors.Is(err, ErrStackUnderflow) { + t.Fatalf("got %v, want ErrStackUnderflow", err) + } +} + +func TestEnvelope_RoundTrip(t *testing.T) { + leaves := []Leaf{ + {Key: key(0x00), Value: []byte("alpha")}, + {Key: key(0x01), Value: []byte{}}, + {Key: key(0x02), Value: bytes.Repeat([]byte{0xAB}, 1234)}, + } + proof := []byte{0x01, 0x01, 0x01, 0x02, 0x05} + buf, err := EncodeEnvelope(leaves, proof) + if err != nil { + t.Fatal(err) + } + env, err := DecodeEnvelope(buf) + if err != nil { + t.Fatal(err) + } + if len(env.Leaves) != len(leaves) { + t.Fatalf("leaf count: got %d, want %d", len(env.Leaves), len(leaves)) + } + for i := range leaves { + if env.Leaves[i].Key != leaves[i].Key { + t.Errorf("leaf %d key mismatch", i) + } + if !bytes.Equal(env.Leaves[i].Value, leaves[i].Value) { + t.Errorf("leaf %d value mismatch", i) + } + } + if !bytes.Equal(env.Proof, proof) { + t.Errorf("proof mismatch") + } +} + +func TestEnvelope_Truncated(t *testing.T) { + if _, err := DecodeEnvelope([]byte{0x00, 0x00, 0x00}); !errors.Is(err, ErrEnvelopeTruncated) { + t.Fatalf("short header: got %v", err) + } + // leaf_count = 1, but only half a key present + buf := []byte{0x00, 0x00, 0x00, 0x01, 0x00, 0x00} + if _, err := DecodeEnvelope(buf); !errors.Is(err, ErrEnvelopeTruncated) { + t.Fatalf("short leaf header: got %v", err) + } + // leaf_count = 1, full key, value_len=10, but value missing + buf = make([]byte, 4+32+2) + buf[3] = 0x01 // leaf_count = 1 + buf[4+32+1] = 10 // value_len = 10 + if _, err := DecodeEnvelope(buf); !errors.Is(err, ErrEnvelopeTruncated) { + t.Fatalf("short value: got %v", err) + } +} + +func TestEnvelope_TooManyLeaves(t *testing.T) { + buf := []byte{0xFF, 0xFF, 0xFF, 0xFF} // leaf_count = 2^32-1 + if _, err := DecodeEnvelope(buf); !errors.Is(err, ErrEnvelopeTooManyLeaves) { + t.Fatalf("got %v, want ErrEnvelopeTooManyLeaves", err) + } 
+} + +func TestEnvelope_MaxValueLen(t *testing.T) { + oversize := make([]byte, 0x10000) // 65536 > u16::max + _, err := EncodeEnvelope([]Leaf{{Value: oversize}}, nil) + if err == nil { + t.Fatalf("expected oversize value to fail encode") + } +} + +func TestRootFromBytes(t *testing.T) { + if r, err := RootFromBytes(nil); err != nil || r.Set { + t.Fatalf("nil: got %+v, %v", r, err) + } + if r, err := RootFromBytes([]byte{}); err != nil || r.Set { + t.Fatalf("empty: got %+v, %v", r, err) + } + if _, err := RootFromBytes([]byte{1, 2, 3}); err == nil { + t.Fatalf("expected error on 3-byte input") + } + thirtyTwo := make([]byte, 32) + thirtyTwo[5] = 0xAA + r, err := RootFromBytes(thirtyTwo) + if err != nil || !r.Set || r.Hash[5] != 0xAA { + t.Fatalf("32 bytes: got %+v, %v", r, err) + } +} diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml new file mode 100644 index 00000000..57c6d667 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "sp1-verifier-ffi" +version = "0.1.0" +edition = "2021" + +# Make this package independent of parent workspace +[workspace] + +[lib] +crate-type = ["cdylib", "staticlib"] + +[dependencies] +sp1-sdk = "5.0.8" +anyhow = "1.0" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +bincode = "1.3" + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/README.md b/rootchain/consensus/zkverifier/sp1-verifier-ffi/README.md new file mode 100644 index 00000000..e9ee7603 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/README.md @@ -0,0 +1,259 @@ +# SP1 Verifier FFI Library + +Foreign Function Interface (FFI) library for verifying SP1 ZK proofs from Go. + +## Overview + +This Rust library provides C-compatible functions for verifying SP1 (Succinct Processor 1) zero-knowledge proofs. 
It wraps the SP1 SDK and exposes a simple interface that can be called from Go using CGO. + +## Architecture + +``` +┌─────────────────┐ +│ Go (BFT Core) │ +│ zkverifier │ +└────────┬────────┘ + │ CGO + ▼ +┌─────────────────┐ +│ C Header │ +│ sp1_verifier.h │ +└────────┬────────┘ + │ FFI + ▼ +┌─────────────────┐ +│ Rust Library │ +│ sp1-verifier │ +│ -ffi │ +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ +│ SP1 SDK │ +│ (Rust) │ +└─────────────────┘ +``` + +## Building + +### Prerequisites + +- Rust toolchain (1.70+): https://rustup.rs/ +- Cargo + +### Build Commands + +```bash +# Build release version +./build.sh + +# Or manually: +cargo build --release + +# Run tests +cargo test + +# Clean build +cargo clean +``` + +### Build Artifacts + +After building, you'll find: +- `target/release/libsp1_verifier_ffi.so` (Linux) +- `target/release/libsp1_verifier_ffi.dylib` (macOS) +- `target/release/libsp1_verifier_ffi.a` (static library) + +## API + +### C Interface + +```c +/** + * Verify an SP1 compressed proof + * + * Returns: SP1VerifyResult status code + */ +SP1VerifyResult sp1_verify_proof( + const uint8_t* vkey_bytes, + size_t vkey_len, + const uint8_t* proof_bytes, + size_t proof_len, + const uint8_t* prev_state_root, // 32 bytes + const uint8_t* new_state_root, // 32 bytes + char** error_out // Must free with sp1_free_string +); + +/** + * Free error string + */ +void sp1_free_string(char* s); + +/** + * Get library version + */ +const char* sp1_ffi_version(void); +``` + +### Result Codes + +| Code | Meaning | +|------|---------| +| `SP1_VERIFY_SUCCESS` (0) | Proof verified successfully | +| `SP1_VERIFY_INVALID_PROOF` (1) | Proof data is malformed | +| `SP1_VERIFY_INVALID_VKEY` (2) | Verification key is invalid | +| `SP1_VERIFY_INVALID_PUBLIC_INPUTS` (3) | Public inputs don't match | +| `SP1_VERIFY_VERIFICATION_FAILED` (4) | Cryptographic verification failed | +| `SP1_VERIFY_INTERNAL_ERROR` (5) | Internal error | + +## Usage from Go + +### Setup + +1. 
Build the Rust library: + ```bash + cd sp1-verifier-ffi + ./build.sh + ``` + +2. The Go code will automatically link to the library using CGO directives in `sp1_verifier_ffi.go`: + ```go + // #cgo LDFLAGS: -L${SRCDIR}/sp1-verifier-ffi/target/release -lsp1_verifier_ffi + // #include "sp1-verifier-ffi/sp1_verifier.h" + import "C" + ``` + +### Example + +```go +package main + +import ( + "fmt" + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" +) + +func main() { + // Create verifier + verifier, err := zkverifier.NewSP1Verifier("/path/to/verification.vkey") + if err != nil { + panic(err) + } + + // Verify proof + proof := loadProofBytes() + prevRoot := make([]byte, 32) // Previous state root + newRoot := make([]byte, 32) // New state root + + err = verifier.VerifyProof(proof, prevRoot, newRoot) + if err != nil { + fmt.Printf("Verification failed: %v\n", err) + } else { + fmt.Println("Proof verified successfully!") + } +} +``` + +## Proof Format + +The library expects SP1 compressed proofs in the following format: + +1. **Verification Key**: Serialized SP1 verification key (bincode format) +2. **Proof**: Serialized `SP1ProofWithPublicValues` (bincode format) +3. **Public Values**: First 64 bytes must be: + - Bytes 0-31: Previous state root + - Bytes 32-63: New state root + +## Development + +### Project Structure + +``` +sp1-verifier-ffi/ +├── Cargo.toml # Rust package configuration +├── build.sh # Build script +├── src/ +│ └── lib.rs # FFI implementation +├── sp1_verifier.h # C header file +└── README.md # This file +``` + +### Adding New Functions + +1. Add Rust function with `#[no_mangle]` and `extern "C"`: + ```rust + #[no_mangle] + pub extern "C" fn new_function() -> i32 { + // Implementation + } + ``` + +2. Add declaration to `sp1_verifier.h`: + ```c + int32_t new_function(void); + ``` + +3. Update Go bindings in `../sp1_verifier_ffi.go` + +### Testing + +```bash +# Rust tests +cargo test + +# Go integration tests (from parent directory) +cd .. 
+go test -v ./... +``` + +## Troubleshooting + +### "library not found" error + +Make sure the library is built and CGO can find it: +```bash +export CGO_LDFLAGS="-L$(pwd)/target/release" +export LD_LIBRARY_PATH="$(pwd)/target/release:$LD_LIBRARY_PATH" # Linux +export DYLD_LIBRARY_PATH="$(pwd)/target/release:$DYLD_LIBRARY_PATH" # macOS +``` + +### "undefined symbol" error + +The library may not be linked correctly. Check: +1. Library was built with same architecture as Go binary +2. CGO flags are correct +3. Header file matches library exports + +### SP1 SDK errors + +Make sure you're using a compatible SP1 SDK version: +```bash +cargo update +cargo build --release +``` + +## Performance + +Typical verification times on modern hardware: +- Compressed proof verification: 10-100ms +- Memory usage: ~50-200MB during verification + +## Security + +⚠️ **Important Security Notes:** + +1. **Verification Key**: Must be generated from trusted source +2. **Public Inputs**: Always validated against expected values +3. **Memory Safety**: FFI uses unsafe Rust - reviewed for safety +4. **Error Handling**: All errors propagated to Go caller + +## License + +Same license as BFT Core parent project. 
+ +## References + +- [SP1 Documentation](https://docs.succinct.xyz/) +- [SP1 GitHub](https://github.com/succinctlabs/sp1) +- [Rust FFI Guide](https://doc.rust-lang.org/nomicon/ffi.html) +- [CGO Documentation](https://pkg.go.dev/cmd/cgo) diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/build.sh b/rootchain/consensus/zkverifier/sp1-verifier-ffi/build.sh new file mode 100755 index 00000000..d7e1550d --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/build.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# +# Build script for SP1 Verifier FFI library +# + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}Building SP1 Verifier FFI Library${NC}" +echo "=====================================" + +# Check if Rust is installed +if ! command -v cargo &> /dev/null; then + echo -e "${RED}Error: Rust/Cargo not found${NC}" + echo "Please install Rust from https://rustup.rs/" + exit 1 +fi + +# Check Rust version +RUST_VERSION=$(cargo --version | cut -d' ' -f2) +echo -e "${GREEN}Rust version: ${RUST_VERSION}${NC}" + +# Build the library +echo -e "\n${YELLOW}Building Rust library...${NC}" +cargo build --release + +if [ $? 
-eq 0 ]; then + echo -e "${GREEN}✓ Build successful${NC}" +else + echo -e "${RED}✗ Build failed${NC}" + exit 1 +fi + +# Check build artifacts +LIB_PATH="target/release" +if [[ "$OSTYPE" == "darwin"* ]]; then + LIB_FILE="libsp1_verifier_ffi.dylib" + STATIC_LIB="libsp1_verifier_ffi.a" +elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + LIB_FILE="libsp1_verifier_ffi.so" + STATIC_LIB="libsp1_verifier_ffi.a" +else + echo -e "${YELLOW}Warning: Unknown OS type, library names may differ${NC}" + LIB_FILE="libsp1_verifier_ffi.*" + STATIC_LIB="libsp1_verifier_ffi.a" +fi + +echo -e "\n${YELLOW}Build artifacts:${NC}" +if [ -f "${LIB_PATH}/${LIB_FILE}" ]; then + ls -lh "${LIB_PATH}/${LIB_FILE}" + echo -e "${GREEN}✓ Dynamic library created${NC}" +else + echo -e "${RED}✗ Dynamic library not found${NC}" +fi + +if [ -f "${LIB_PATH}/${STATIC_LIB}" ]; then + ls -lh "${LIB_PATH}/${STATIC_LIB}" + echo -e "${GREEN}✓ Static library created${NC}" +else + echo -e "${YELLOW}⚠ Static library not found (optional)${NC}" +fi + +# Run tests +echo -e "\n${YELLOW}Running Rust tests...${NC}" +cargo test + +if [ $? -eq 0 ]; then + echo -e "${GREEN}✓ All tests passed${NC}" +else + echo -e "${RED}✗ Some tests failed${NC}" + exit 1 +fi + +echo -e "\n${GREEN}Build complete!${NC}" +echo -e "\nTo use this library with Go:" +echo -e " 1. Set CGO_LDFLAGS to point to ${LIB_PATH}" +echo -e " 2. Run: go test ./... in the parent directory" +echo -e "\nExample:" +echo -e " export CGO_LDFLAGS=\"-L$(pwd)/${LIB_PATH}\"" +echo -e " cd .. 
&& go test -v" diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h new file mode 100644 index 00000000..96183890 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/sp1_verifier.h @@ -0,0 +1,87 @@ +/** + * SP1 Proof Verifier FFI + * + * C header for FFI interface to SP1 proof verification + */ + +#ifndef SP1_VERIFIER_H +#define SP1_VERIFIER_H + +#include <stdint.h> +#include <stddef.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Result codes for SP1 verification + */ +typedef enum { + SP1_VERIFY_SUCCESS = 0, + SP1_VERIFY_INVALID_PROOF = 1, + SP1_VERIFY_INVALID_VKEY = 2, + SP1_VERIFY_INVALID_PUBLIC_INPUTS = 3, + SP1_VERIFY_VERIFICATION_FAILED = 4, + SP1_VERIFY_INTERNAL_ERROR = 5, +} SP1VerifyResult; + +/** + * Verify an SP1 compressed proof + * + * @param vkey_bytes Pointer to verification key bytes + * @param vkey_len Length of verification key in bytes + * @param proof_bytes Pointer to proof bytes + * @param proof_len Length of proof in bytes + * @param prev_state_root Pointer to 32-byte previous state root + * @param new_state_root Pointer to 32-byte new state root + * @param block_hash Pointer to 32-byte block hash + * @param chain_id EVM Chain ID from partition config + * @param error_out Output pointer for error message (must be freed with sp1_free_string) + * @return SP1VerifyResult status code + */ +SP1VerifyResult sp1_verify_proof( + const uint8_t* vkey_bytes, + size_t vkey_len, + const uint8_t* proof_bytes, + size_t proof_len, + const uint8_t* prev_state_root, + const uint8_t* new_state_root, + const uint8_t* block_hash, + uint64_t chain_id, + char** error_out +); + +/** + * Free a string allocated by sp1_verify_proof + * + * @param s Pointer to string to free + */ +void sp1_free_string(char* s); + +/** + * Get the version of the FFI library + * + * @return Version string (do not free) + */ +const char* sp1_ffi_version(void); + +/** + * Validate a verification key 
+ * + * @param vkey_bytes Pointer to verification key bytes + * @param vkey_len Length of verification key in bytes + * @param error_out Output pointer for error message (must be freed with sp1_free_string) + * @return SP1VerifyResult status code (SUCCESS or INVALID_VKEY) + */ +SP1VerifyResult sp1_validate_vkey( + const uint8_t* vkey_bytes, + size_t vkey_len, + char** error_out +); + +#ifdef __cplusplus +} +#endif + +#endif /* SP1_VERIFIER_H */ diff --git a/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs new file mode 100644 index 00000000..d7960413 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1-verifier-ffi/src/lib.rs @@ -0,0 +1,295 @@ +use std::ffi::{CStr, CString}; +use std::os::raw::c_char; +use sp1_sdk::{ProverClient, SP1ProofWithPublicValues}; + +/// Error codes for FFI interface +#[repr(C)] +pub enum SP1VerifyResult { + Success = 0, + InvalidProof = 1, + InvalidVKey = 2, + InvalidPublicInputs = 3, + VerificationFailed = 4, + InternalError = 5, +} + +/// Verify an SP1 compressed proof +/// +/// # Arguments +/// * `vkey_bytes` - Pointer to verification key bytes +/// * `vkey_len` - Length of verification key +/// * `proof_bytes` - Pointer to proof bytes +/// * `proof_len` - Length of proof +/// * `prev_state_root` - Pointer to 32-byte previous state root +/// * `new_state_root` - Pointer to 32-byte new state root +/// * `block_hash` - Pointer to 32-byte block hash +/// * `chain_id` - Chain ID from partition config +/// * `error_out` - Output pointer for error message (caller must free with sp1_free_string) +/// +/// # Returns +/// SP1VerifyResult code +#[no_mangle] +pub extern "C" fn sp1_verify_proof( + vkey_bytes: *const u8, + vkey_len: usize, + proof_bytes: *const u8, + proof_len: usize, + prev_state_root: *const u8, + new_state_root: *const u8, + block_hash: *const u8, + chain_id: u64, + error_out: *mut *mut c_char, +) -> SP1VerifyResult { + // Safety checks + if 
vkey_bytes.is_null() || proof_bytes.is_null() { + set_error(error_out, "null pointer passed to sp1_verify_proof"); + return SP1VerifyResult::InternalError; + } + + if prev_state_root.is_null() || new_state_root.is_null() || block_hash.is_null() { + set_error(error_out, "null state root or block hash pointer"); + return SP1VerifyResult::InvalidPublicInputs; + } + + // Convert C pointers to Rust slices + let vkey_data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) }; + let proof_data = unsafe { std::slice::from_raw_parts(proof_bytes, proof_len) }; + let prev_root = unsafe { std::slice::from_raw_parts(prev_state_root, 32) }; + let new_root = unsafe { std::slice::from_raw_parts(new_state_root, 32) }; + let blk_hash = unsafe { std::slice::from_raw_parts(block_hash, 32) }; + + // Perform verification + match verify_proof_internal(vkey_data, proof_data, prev_root, new_root, blk_hash, chain_id) { + Ok(()) => SP1VerifyResult::Success, + Err(e) => { + set_error(error_out, &e.to_string()); + match classify_error(&e) { + ErrorType::InvalidVKey => SP1VerifyResult::InvalidVKey, + ErrorType::InvalidProof => SP1VerifyResult::InvalidProof, + ErrorType::InvalidPublicInputs => SP1VerifyResult::InvalidPublicInputs, + ErrorType::VerificationFailed => SP1VerifyResult::VerificationFailed, + ErrorType::Internal => SP1VerifyResult::InternalError, + } + } + } +} + +/// Internal verification logic +fn verify_proof_internal( + vkey_data: &[u8], + proof_data: &[u8], + prev_state_root: &[u8], + new_state_root: &[u8], + block_hash: &[u8], + chain_id: u64, +) -> anyhow::Result<()> { + // Deserialize verification key + let vkey: sp1_sdk::SP1VerifyingKey = bincode::deserialize(vkey_data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize verification key: {}", e))?; + + // Deserialize proof + let proof: SP1ProofWithPublicValues = bincode::deserialize(proof_data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize proof: {}", e))?; + + // Create prover client (used for 
verification) + // TODO: use the sp1-verifier crate instead https://github.com/succinctlabs/sp1/tree/v6.0.2/crates/verifier + // or at least cache the client + let client = ProverClient::from_env(); + + // Verify the proof + client.verify(&proof, &vkey) + .map_err(|e| anyhow::anyhow!("Proof verification failed: {}", e))?; + + // Extract public values from proof + let public_values = proof.public_values.as_slice(); + + // Validate that public values contain expected data + // Expected format (from ProgramOutput::encode() with l2 feature): + // - 0-31: initial_state_hash (prev_state_root) + // - 32-63: final_state_hash (new_state_root) + // - 64-95: l1_out_messages_merkle_root (L2 feature) + // - 96-127: l1_in_messages_rolling_hash (L2 feature) + // - 128-159: blob_versioned_hash (L2 feature) + // - 160-191: last_block_hash (block_hash) + // - 192-199: chain_id (u64, little-endian) + // - 200+: non_privileged_count, etc. + // + // Note: ethrex's guest program has the 'l2' feature enabled by default, + // which adds 3 H256 fields (96 bytes) before the block hash. 
+ if public_values.len() < 200 { + return Err(anyhow::anyhow!( + "Public values too short: expected at least 200 bytes for ethrex l2 format (including chain_id), got {}", + public_values.len() + )); + } + + // Check previous state root matches + if &public_values[0..32] != prev_state_root { + return Err(anyhow::anyhow!( + "Previous state root mismatch: expected {:?}, got {:?}", + prev_state_root, + &public_values[0..32] + )); + } + + // Check new state root matches + if &public_values[32..64] != new_state_root { + return Err(anyhow::anyhow!( + "New state root mismatch: expected {:?}, got {:?}", + new_state_root, + &public_values[32..64] + )); + } + + // Check block hash matches (at offset 160 due to l2 feature fields) + if &public_values[160..192] != block_hash { + return Err(anyhow::anyhow!( + "Block hash mismatch: expected {:?}, got {:?}", + block_hash, + &public_values[160..192] + )); + } + + // Check chain_id matches + let proof_chain_id = u64::from_le_bytes( + public_values[192..200] + .try_into() + .expect("slice is exactly 8 bytes") + ); + if proof_chain_id != chain_id { + return Err(anyhow::anyhow!( + "Chain ID mismatch: expected {} (chain_id from partition config), got {} (from proof)", + chain_id, + proof_chain_id + )); + } + + Ok(()) +} + +/// Free a string allocated by this library +#[no_mangle] +pub extern "C" fn sp1_free_string(s: *mut c_char) { + if !s.is_null() { + unsafe { + let _ = CString::from_raw(s); + } + } +} + +/// Get the version of this FFI library +#[no_mangle] +pub extern "C" fn sp1_ffi_version() -> *const c_char { + const VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + VERSION.as_ptr() as *const c_char +} + +/// Validate a verification key +/// +/// # Arguments +/// * `vkey_bytes` - Pointer to verification key bytes +/// * `vkey_len` - Length of verification key +/// * `error_out` - Output pointer for error message (caller must free with sp1_free_string) +/// +/// # Returns +/// SP1VerifyResult code (Success or InvalidVKey) 
+#[no_mangle]
+pub extern "C" fn sp1_validate_vkey(
+ vkey_bytes: *const u8,
+ vkey_len: usize,
+ error_out: *mut *mut c_char,
+) -> SP1VerifyResult {
+ // Safety checks
+ if vkey_bytes.is_null() {
+ set_error(error_out, "null pointer passed to sp1_validate_vkey");
+ return SP1VerifyResult::InternalError;
+ }
+
+ if vkey_len == 0 {
+ set_error(error_out, "verification key is empty");
+ return SP1VerifyResult::InvalidVKey;
+ }
+
+ // Convert C pointer to Rust slice
+ let vkey_data = unsafe { std::slice::from_raw_parts(vkey_bytes, vkey_len) };
+
+ // Try to deserialize verification key
+ match bincode::deserialize::<sp1_sdk::SP1VerifyingKey>(vkey_data) {
+ Ok(_) => SP1VerifyResult::Success,
+ Err(e) => {
+ set_error(error_out, &format!("Failed to deserialize verification key: {}", e));
+ SP1VerifyResult::InvalidVKey
+ }
+ }
+}
+
+// Helper functions
+
+enum ErrorType {
+ InvalidVKey,
+ InvalidProof,
+ InvalidPublicInputs,
+ VerificationFailed,
+ Internal,
+}
+
+fn classify_error(err: &anyhow::Error) -> ErrorType {
+ let msg = err.to_string().to_lowercase();
+ if msg.contains("verification key") || msg.contains("vkey") {
+ ErrorType::InvalidVKey
+ } else if msg.contains("deserialize proof") {
+ ErrorType::InvalidProof
+ } else if msg.contains("state root") || msg.contains("public values") {
+ ErrorType::InvalidPublicInputs
+ } else if msg.contains("verification failed") {
+ ErrorType::VerificationFailed
+ } else {
+ ErrorType::Internal
+ }
+}
+
+fn set_error(error_out: *mut *mut c_char, message: &str) {
+ if !error_out.is_null() {
+ if let Ok(c_string) = CString::new(message) {
+ unsafe {
+ *error_out = c_string.into_raw();
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::ffi::CStr;
+ use std::ptr;
+
+ #[test]
+ fn test_null_pointers() {
+ let mut error: *mut c_char = ptr::null_mut();
+ let result = sp1_verify_proof(
+ ptr::null(),
+ 0,
+ ptr::null(),
+ 0,
+ ptr::null(),
+ ptr::null(),
+ ptr::null(),
+ 1, // chain_id
+ &mut error,
+ );
+ assert_eq!(result as i32, 
SP1VerifyResult::InternalError as i32); + + if !error.is_null() { + sp1_free_string(error); + } + } + + #[test] + fn test_version() { + let version = sp1_ffi_version(); + assert!(!version.is_null()); + let version_str = unsafe { CStr::from_ptr(version) }; + assert!(version_str.to_str().unwrap().starts_with("0.1.0")); + } +} diff --git a/rootchain/consensus/zkverifier/sp1_verifier.go b/rootchain/consensus/zkverifier/sp1_verifier.go new file mode 100644 index 00000000..dd7073f5 --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1_verifier.go @@ -0,0 +1,111 @@ +package zkverifier + +import ( + "encoding/hex" + "fmt" + "log/slog" + "os" + "path/filepath" +) + +// SP1Verifier verifies SP1 zkVM proofs +type SP1Verifier struct { + vkey []byte + enabled bool + ffiVerifier *SP1VerifierFFI +} + +// NewSP1Verifier creates a new SP1 verifier +// vkeyPath: path to the SP1 verification key file (.vkey) +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewSP1Verifier(vkeyPath string, chainID uint64) (*SP1Verifier, error) { + if vkeyPath == "" { + return nil, fmt.Errorf("verification key path is required for SP1 verifier") + } + + // Try to create FFI verifier first + if ffiVerifier, err := NewSP1VerifierFFI(vkeyPath, chainID); err == nil { + slog.Info("Using SP1 FFI verifier", "path", vkeyPath, "version", GetFFIVersion(), "chain_id", chainID) + return &SP1Verifier{ + vkey: ffiVerifier.vkey, + enabled: true, + ffiVerifier: ffiVerifier, + }, nil + } else { + return nil, fmt.Errorf("SP1 FFI verifier not available: %w, vkeyPath: %s", err, vkeyPath) + } + +} + +// readFile reads a file and returns its contents +// Sanitizes path to prevent directory traversal attacks (CWE-22) +func readFile(path string) ([]byte, error) { + // Clean and normalize the path + cleanPath := filepath.Clean(path) + + // Resolve to absolute path + absPath, err := filepath.Abs(cleanPath) + if err != nil { + return nil, fmt.Errorf("failed to resolve path: %w", err) 
+ } + + // Resolve any symlinks to prevent traversal via symlinks + realPath, err := filepath.EvalSymlinks(absPath) + if err != nil { + return nil, fmt.Errorf("failed to resolve symlinks: %w", err) + } + + data, err := os.ReadFile(realPath) + if err != nil { + return nil, fmt.Errorf("failed to read file %s: %w", realPath, err) + } + + return data, nil +} + +// VerifyProof verifies an SP1 compressed proof +// +// The proof should be a compressed SP1 proof generated by the prover. +// The proof includes: +// - Public inputs: previousStateRoot, newStateRoot, blockHash +// - Proof data: SP1 compressed proof bytes +// +// This function verifies that executing the program produces the expected +// state roots and block hash as public outputs. +func (v *SP1Verifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + if !v.enabled { + return ErrVerifierNotConfigured + } + + if len(proof) == 0 { + return fmt.Errorf("%w: proof is empty", ErrInvalidProofFormat) + } + + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(previousStateRoot)) + } + + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes, got %d", ErrInvalidProofFormat, len(newStateRoot)) + } + + if len(blockHash) != 32 { + return fmt.Errorf("%w: blockHash must be 32 bytes, got %d", ErrInvalidProofFormat, len(blockHash)) + } + + slog.Debug("Verifying SP1 proof", + "proof_size", len(proof), + "prev_root", hex.EncodeToString(previousStateRoot[:8]), + "new_root", hex.EncodeToString(newStateRoot[:8]), + "block_hash", hex.EncodeToString(blockHash[:8])) + + return v.ffiVerifier.VerifyProof(proof, previousStateRoot, newStateRoot, blockHash) +} + +func (v *SP1Verifier) ProofType() ProofType { + return ProofTypeSP1 +} + +func (v *SP1Verifier) IsEnabled() bool { + return v.enabled +} diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi.go 
b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go new file mode 100644 index 00000000..c062697d --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi.go @@ -0,0 +1,159 @@ +//go:build zkverifier_ffi
+
+package zkverifier
+
+// #cgo LDFLAGS: -L${SRCDIR}/sp1-verifier-ffi/target/release -lsp1_verifier_ffi -ldl -lm
+// #include "sp1-verifier-ffi/sp1_verifier.h"
+// #include <stdlib.h>
+import "C"
+import (
+ "fmt"
+ "unsafe"
+)
+
+// SP1VerifierFFI wraps the Rust FFI library for SP1 proof verification
+type SP1VerifierFFI struct {
+ vkey []byte
+ chainID uint64
+}
+
+// NewSP1VerifierFFI creates a new FFI-based SP1 verifier
+// chainID: chain identifier of the EVM partition from the partition config (invariant)
+func NewSP1VerifierFFI(vkeyPath string, chainID uint64) (*SP1VerifierFFI, error) {
+ // Load verification key
+ vkey, err := loadVerificationKey(vkeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load verification key: %w", err)
+ }
+
+ // Verify FFI library is available
+ version := C.sp1_ffi_version()
+ if version == nil {
+ return nil, fmt.Errorf("FFI library not available")
+ }
+
+ // Validate verification key
+ if len(vkey) == 0 {
+ return nil, fmt.Errorf("verification key is empty")
+ }
+
+ var errorOut *C.char
+ defer func() {
+ if errorOut != nil {
+ C.sp1_free_string(errorOut)
+ }
+ }()
+
+ result := C.sp1_validate_vkey(
+ (*C.uint8_t)(unsafe.Pointer(&vkey[0])),
+ C.size_t(len(vkey)),
+ &errorOut,
+ )
+
+ if result != C.SP1_VERIFY_SUCCESS {
+ if errorOut != nil {
+ return nil, fmt.Errorf("invalid verification key: %s", C.GoString(errorOut))
+ }
+ return nil, fmt.Errorf("invalid verification key")
+ }
+
+ return &SP1VerifierFFI{
+ vkey: vkey,
+ chainID: chainID,
+ }, nil
+}
+
+// VerifyProof verifies an SP1 proof using the Rust FFI library
+func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error {
+ // Validate inputs
+ if len(proof) == 0 {
+ return fmt.Errorf("%w: proof 
is empty", ErrInvalidProofFormat) + } + if len(previousStateRoot) != 32 { + return fmt.Errorf("%w: previousStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(newStateRoot) != 32 { + return fmt.Errorf("%w: newStateRoot must be 32 bytes", ErrInvalidProofFormat) + } + if len(blockHash) != 32 { + return fmt.Errorf("%w: blockHash must be 32 bytes", ErrInvalidProofFormat) + } + + // Prepare C pointers + var errorOut *C.char + defer func() { + if errorOut != nil { + C.sp1_free_string(errorOut) + } + }() + + // Call FFI verification function + result := C.sp1_verify_proof( + (*C.uint8_t)(unsafe.Pointer(&v.vkey[0])), + C.size_t(len(v.vkey)), + (*C.uint8_t)(unsafe.Pointer(&proof[0])), + C.size_t(len(proof)), + (*C.uint8_t)(unsafe.Pointer(&previousStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&newStateRoot[0])), + (*C.uint8_t)(unsafe.Pointer(&blockHash[0])), + C.uint64_t(v.chainID), + &errorOut, + ) + + // Check result + switch result { + case C.SP1_VERIFY_SUCCESS: + return nil + case C.SP1_VERIFY_INVALID_PROOF: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrInvalidProofFormat, C.GoString(errorOut)) + } + return ErrInvalidProofFormat + case C.SP1_VERIFY_INVALID_VKEY: + if errorOut != nil { + return fmt.Errorf("invalid verification key: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid verification key") + case C.SP1_VERIFY_INVALID_PUBLIC_INPUTS: + if errorOut != nil { + return fmt.Errorf("invalid public inputs: %s", C.GoString(errorOut)) + } + return fmt.Errorf("invalid public inputs") + case C.SP1_VERIFY_VERIFICATION_FAILED: + if errorOut != nil { + return fmt.Errorf("%w: %s", ErrProofVerificationFailed, C.GoString(errorOut)) + } + return ErrProofVerificationFailed + default: + if errorOut != nil { + return fmt.Errorf("internal error: %s", C.GoString(errorOut)) + } + return fmt.Errorf("internal error") + } +} + +// ProofType returns the proof type +func (v *SP1VerifierFFI) ProofType() ProofType { + return ProofTypeSP1 +} + +// IsEnabled 
returns true if the verifier is enabled +func (v *SP1VerifierFFI) IsEnabled() bool { + return len(v.vkey) > 0 +} + +// GetFFIVersion returns the version of the FFI library +func GetFFIVersion() string { + version := C.sp1_ffi_version() + if version == nil { + return "unknown" + } + return C.GoString(version) +} + +// Helper function to load verification key from file +func loadVerificationKey(path string) ([]byte, error) { + // This is implemented in sp1_verifier.go as readFile + // We reuse that function + return readFile(path) +} diff --git a/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go b/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go new file mode 100644 index 00000000..d900bf2f --- /dev/null +++ b/rootchain/consensus/zkverifier/sp1_verifier_ffi_stub.go @@ -0,0 +1,37 @@ +//go:build !zkverifier_ffi + +package zkverifier + +import "fmt" + +// SP1VerifierFFI is a stub when FFI is not available +type SP1VerifierFFI struct { + vkey []byte + chainID uint64 +} + +// NewSP1VerifierFFI returns an error when FFI is not available +// chainID: chain identifier of the EVM partition from the partition config (invariant) +func NewSP1VerifierFFI(vkeyPath string, chainID uint64) (*SP1VerifierFFI, error) { + return nil, fmt.Errorf("SP1 FFI verifier not available: build with -tags zkverifier_ffi to enable") +} + +// VerifyProof returns an error when FFI is not available +func (v *SP1VerifierFFI) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + return fmt.Errorf("SP1 FFI verifier not available") +} + +// ProofType returns the proof type +func (v *SP1VerifierFFI) ProofType() ProofType { + return ProofTypeSP1 +} + +// IsEnabled returns false when FFI is not available +func (v *SP1VerifierFFI) IsEnabled() bool { + return false +} + +// GetFFIVersion returns "unavailable" when FFI is not built +func GetFFIVersion() string { + return "unavailable" +} diff --git a/rootchain/consensus/zkverifier/verifier.go 
b/rootchain/consensus/zkverifier/verifier.go new file mode 100644 index 00000000..718b6de2 --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier.go @@ -0,0 +1,132 @@ +package zkverifier + +import ( + "errors" + "fmt" +) + +var ( + // ErrProofVerificationFailed is returned when proof verification fails + ErrProofVerificationFailed = errors.New("proof verification failed") + // ErrInvalidProofFormat is returned when proof data is malformed + ErrInvalidProofFormat = errors.New("invalid proof format") + // ErrVerifierNotConfigured is returned when no verifier is configured + ErrVerifierNotConfigured = errors.New("zk verifier not configured") +) + +// ProofType identifies the proving system used +type ProofType string + +const ( + // ProofTypeSP1 indicates SP1 zkVM proof + ProofTypeSP1 ProofType = "sp1" + // ProofTypeRISC0 indicates RISC0 zkVM proof + ProofTypeRISC0 ProofType = "risc0" + // ProofTypeExec indicates execution without proving (testing only) + ProofTypeExec ProofType = "exec" + // ProofTypeLightClient indicates light client mode (full witness validation) + ProofTypeLightClient ProofType = "light_client" + // ProofTypeAggregatorRSMTv1 indicates aggregator Radix SMT consistency proof + // (flat opcode stream with batch of new leaves). Verified in-process in pure + // Go; see rootchain/consensus/zkverifier/rsmt for the wire format. + ProofTypeAggregatorRSMTv1 ProofType = "aggregator_rsmt_v1" + // ProofTypeAggregatorZKv1 indicates an SP1 ZK proof of aggregator SMT + // consistency produced by rugregator's zk-host crate (SP1 6.0.2). + // Public values: prev_root[32] || new_root[32] (64 bytes). + // Requires the binary to be built with -tags zkverifier_aggregator_zk_ffi. 
+ ProofTypeAggregatorZKv1 ProofType = "aggregator_zk_v1" + // ProofTypeNone indicates no proof verification (disabled) + ProofTypeNone ProofType = "none" +) + +// ZKVerifier validates zero-knowledge proofs of state transitions +type ZKVerifier interface { + // VerifyProof verifies a ZK proof of state transition + // proof: The ZK proof bytes + // previousStateRoot: Hash of the previous state + // newStateRoot: Hash of the new state (claimed) + // blockHash: Hash of the block header (for light client mode) + // Returns nil if proof is valid, error otherwise + VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error + + // ProofType returns the type of proofs this verifier handles + ProofType() ProofType + + // IsEnabled returns true if verification is enabled + IsEnabled() bool +} + +// Config holds ZK verifier configuration +type Config struct { + // Enabled controls whether ZK verification is performed + Enabled bool + + // ProofType specifies which proof system to use + ProofType ProofType + + // VerificationKeyPath is the path to the verification key file + // For SP1: path to the .vkey file + // For RISC0: path to the verification key + VerificationKeyPath string + + // chainID: chain identifier of the EVM partition from the partition config (invariant) + // must match the chain_id in proof public values + ChainID uint64 + + // AdditionalConfig holds prover-specific configuration + AdditionalConfig map[string]interface{} +} + +// DefaultConfig returns a default configuration with verification disabled +func DefaultConfig() *Config { + return &Config{ + Enabled: false, + ProofType: ProofTypeNone, + VerificationKeyPath: "", + AdditionalConfig: make(map[string]interface{}), + } +} + +// NewVerifier creates a new ZK verifier based on configuration +func NewVerifier(cfg *Config) (ZKVerifier, error) { + if cfg == nil { + cfg = DefaultConfig() + } + + if !cfg.Enabled { + return &NoOpVerifier{}, nil + } + + switch cfg.ProofType { 
+ case ProofTypeSP1: + return NewSP1Verifier(cfg.VerificationKeyPath, cfg.ChainID) + case ProofTypeLightClient: + return NewLightClientVerifier(cfg.ChainID) + case ProofTypeAggregatorRSMTv1: + return NewAggregatorRSMTVerifier(), nil + case ProofTypeAggregatorZKv1: + return NewAggregatorZKVerifier(cfg.VerificationKeyPath) + case ProofTypeRISC0: + return nil, fmt.Errorf("RISC0 verifier not implemented") + case ProofTypeExec, ProofTypeNone: + return &NoOpVerifier{}, nil + default: + return nil, fmt.Errorf("unknown proof type: %s", cfg.ProofType) + } +} + +// NoOpVerifier is a verifier that always returns success (for testing/disabled mode) +type NoOpVerifier struct{} + +func (v *NoOpVerifier) VerifyProof(proof []byte, previousStateRoot []byte, newStateRoot []byte, blockHash []byte) error { + // No verification performed + return nil +} + +func (v *NoOpVerifier) ProofType() ProofType { + return ProofTypeNone +} + +func (v *NoOpVerifier) IsEnabled() bool { + return false +} diff --git a/rootchain/consensus/zkverifier/verifier_ffi_test.go b/rootchain/consensus/zkverifier/verifier_ffi_test.go new file mode 100644 index 00000000..5822e991 --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier_ffi_test.go @@ -0,0 +1,130 @@ +//go:build zkverifier_ffi + +package zkverifier + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewVerifier_SP1_WithFFI(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + + // Create a fake but valid-sized vkey file + err := os.WriteFile(vkeyPath, make([]byte, 64), 0644) + require.NoError(t, err) + + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: vkeyPath, + ChainID: 1, + } + + verifier, err := NewVerifier(cfg) + require.NoError(t, err) + require.NotNil(t, verifier) + require.True(t, verifier.IsEnabled()) + require.Equal(t, ProofTypeSP1, verifier.ProofType()) +} + +func 
TestNewVerifier_SP1_MissingVKey_WithFFI(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: "/nonexistent/path/test.vkey", + ChainID: 1, + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "failed to") +} + +func TestSP1Verifier_InvalidInputs_WithFFI(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, make([]byte, 64), 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath, 1) + require.NoError(t, err) + + testCases := []struct { + name string + proof []byte + previousStateRoot []byte + newStateRoot []byte + blockHash []byte + wantErr bool + errContains string + }{ + { + name: "empty proof", + proof: []byte{}, + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 32), + blockHash: make([]byte, 32), + wantErr: true, + errContains: "proof is empty", + }, + { + name: "invalid previous state root length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 16), + newStateRoot: make([]byte, 32), + blockHash: make([]byte, 32), + wantErr: true, + errContains: "previousStateRoot must be 32 bytes", + }, + { + name: "invalid new state root length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 16), + blockHash: make([]byte, 32), + wantErr: true, + errContains: "newStateRoot must be 32 bytes", + }, + { + name: "invalid block hash length", + proof: make([]byte, 100), + previousStateRoot: make([]byte, 32), + newStateRoot: make([]byte, 32), + blockHash: make([]byte, 16), + wantErr: true, + errContains: "blockHash must be 32 bytes", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := verifier.VerifyProof(tc.proof, tc.previousStateRoot, tc.newStateRoot, tc.blockHash) + if tc.wantErr { + require.Error(t, err) + 
require.Contains(t, err.Error(), tc.errContains) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestSP1Verifier_EmptyVKey_WithFFI(t *testing.T) { + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "empty.vkey") + err := os.WriteFile(vkeyPath, []byte{}, 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath, 1) + require.Error(t, err) + require.Nil(t, verifier) + // FFI will detect empty vkey +} diff --git a/rootchain/consensus/zkverifier/verifier_stub_test.go b/rootchain/consensus/zkverifier/verifier_stub_test.go new file mode 100644 index 00000000..62ff1ffe --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier_stub_test.go @@ -0,0 +1,70 @@ +//go:build !zkverifier_ffi + +package zkverifier + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewVerifier_SP1_WithoutFFI(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, []byte("fake_verification_key_data"), 0644) + require.NoError(t, err) + + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: vkeyPath, + ChainID: 1, + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} + +func TestNewVerifier_SP1_MissingVKey_WithoutFFI(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeSP1, + VerificationKeyPath: "/nonexistent/path/test.vkey", + ChainID: 1, + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} + +func TestSP1Verifier_InvalidInputs_WithoutFFI(t *testing.T) { + // Create temporary verification key file + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "test.vkey") + err := os.WriteFile(vkeyPath, 
[]byte("fake_verification_key_data"), 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath, 1) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} + +func TestSP1Verifier_EmptyVKey_WithoutFFI(t *testing.T) { + tmpDir := t.TempDir() + vkeyPath := filepath.Join(tmpDir, "empty.vkey") + err := os.WriteFile(vkeyPath, []byte{}, 0644) + require.NoError(t, err) + + verifier, err := NewSP1Verifier(vkeyPath, 1) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "build with -tags zkverifier_ffi") +} diff --git a/rootchain/consensus/zkverifier/verifier_test.go b/rootchain/consensus/zkverifier/verifier_test.go new file mode 100644 index 00000000..fb9ef9d3 --- /dev/null +++ b/rootchain/consensus/zkverifier/verifier_test.go @@ -0,0 +1,69 @@ +package zkverifier + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + require.False(t, cfg.Enabled) + require.Equal(t, ProofTypeNone, cfg.ProofType) + require.Empty(t, cfg.VerificationKeyPath) +} + +func TestNewVerifier_Disabled(t *testing.T) { + cfg := &Config{ + Enabled: false, + ProofType: ProofTypeSP1, + } + + verifier, err := NewVerifier(cfg) + require.NoError(t, err) + require.NotNil(t, verifier) + require.False(t, verifier.IsEnabled()) + require.Equal(t, ProofTypeNone, verifier.ProofType()) +} + +func TestNewVerifier_NoOpForExec(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: ProofTypeExec, + } + + verifier, err := NewVerifier(cfg) + require.NoError(t, err) + require.NotNil(t, verifier) + require.False(t, verifier.IsEnabled()) + + // Should accept any proof + err = verifier.VerifyProof([]byte("not a real proof"), make([]byte, 32), make([]byte, 32), make([]byte, 32)) + require.NoError(t, err) +} + +func TestNewVerifier_UnknownProofType(t *testing.T) { + cfg := &Config{ + Enabled: true, + ProofType: 
"unknown", + } + + verifier, err := NewVerifier(cfg) + require.Error(t, err) + require.Nil(t, verifier) + require.Contains(t, err.Error(), "unknown proof type") +} + +func TestNoOpVerifier(t *testing.T) { + v := &NoOpVerifier{} + + require.False(t, v.IsEnabled()) + require.Equal(t, ProofTypeNone, v.ProofType()) + + // Should accept any input + err := v.VerifyProof(nil, nil, nil, nil) + require.NoError(t, err) + + err = v.VerifyProof([]byte("test"), []byte("prev"), []byte("new"), []byte("block")) + require.NoError(t, err) +} diff --git a/rootchain/node.go b/rootchain/node.go index 8c80c4b1..14fb2fdc 100644 --- a/rootchain/node.go +++ b/rootchain/node.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "log/slog" + "time" "github.com/libp2p/go-libp2p/core/peer" "go.opentelemetry.io/otel/attribute" @@ -21,6 +22,7 @@ import ( "github.com/unicitynetwork/bft-core/observability" "github.com/unicitynetwork/bft-core/rootchain/consensus" "github.com/unicitynetwork/bft-core/rootchain/consensus/storage" + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" abcrypto "github.com/unicitynetwork/bft-go-base/crypto" "github.com/unicitynetwork/bft-go-base/types" ) @@ -53,6 +55,7 @@ type ( subscription *Subscriptions net PartitionNet consensusManager ConsensusManager + zkRegistry *zkverifier.Registry log *slog.Logger tracer trace.Tracer @@ -90,12 +93,16 @@ func New( subscription: subs, net: pNet, consensusManager: cm, + zkRegistry: zkverifier.NewRegistry(), log: observe.Logger(), tracer: observe.Tracer("rootchain.node"), } if err := node.initMetrics(meter); err != nil { return nil, fmt.Errorf("initializing metrics: %w", err) } + + observe.Logger().Info("Root node initialized with per-partition ZK proof verification") + return node, nil } @@ -184,6 +191,27 @@ func (v *Node) sendResponse(ctx context.Context, nodeID string, cr *certificatio return v.net.Send(ctx, cr, peerID) } +// sendRejection attaches a status code and diagnostic message to a copy of the +// last-good 
CertificationResponse (so the submitter can still resync from the +// wrapped UC) and sends it. The shared `last` pointer is NOT mutated — we only +// ever write to a shallow copy. +func (v *Node) sendRejection(ctx context.Context, nodeID string, last *certification.CertificationResponse, status uint32, cause error) error { + if last == nil { + return fmt.Errorf("no last CR available to attach to rejection") + } + msg := "" + if cause != nil { + msg = cause.Error() + if len(msg) > certification.MaxStatusMessageLen { + msg = msg[:certification.MaxStatusMessageLen] + } + } + resp := *last + resp.Status = status + resp.Message = msg + return v.sendResponse(ctx, nodeID, &resp) +} + func (v *Node) onHandshake(ctx context.Context, req *handshake.Handshake) error { ctx, span := v.tracer.Start(ctx, "node.onHandshake") defer span.End() @@ -203,12 +231,16 @@ func (v *Node) onHandshake(ctx context.Context, req *handshake.Handshake) error return fmt.Errorf("node ID is not in active validator set %s - %s - %s", req.PartitionID, req.ShardID, req.NodeID) } - if si.LastCR == nil || si.LastCR.UC.GetRoundNumber() == 0 { - // Make sure shard nodes get CertificationResponses even - // before they send the first BlockCertificationRequests - if err := v.subscription.Subscribe(req.PartitionID, req.ShardID, req.NodeID); err != nil { - return fmt.Errorf("subscribing the sender: %w", err) - } + // (Re)subscribe on every handshake. Subscriptions have a bounded response + // quota (see responsesPerSubscription in subscription.go) which is only + // refilled by Subscribe; repeat UCs sent to an idle partition consume quota + // without refilling it, so after ~responsesPerSubscription T2 timeouts the + // partition would otherwise fall off the subscriber map entirely. The + // handshake is the partition's only refresh mechanism when it has no + // BlockCertificationRequests to send, so it must always re-subscribe — + // not just before the first certified block. 
+ if err := v.subscription.Subscribe(req.PartitionID, req.ShardID, req.NodeID); err != nil { + return fmt.Errorf("subscribing the sender: %w", err) } if err = v.sendResponse(ctx, req.NodeID, si.LastCR); err != nil { return fmt.Errorf("failed to send response: %w", err) @@ -231,12 +263,28 @@ func (v *Node) onBlockCertificationRequest(ctx context.Context, req *certificati // we got the shard info thus it's a valid partition/shard if err := si.ValidRequest(req); err != nil { err = fmt.Errorf("invalid block certification request: %w", err) - if se := v.sendResponse(ctx, req.NodeID, si.LastCR); se != nil { + if se := v.sendRejection(ctx, req.NodeID, si.LastCR, certification.CertStatusRequestInvalid, err); se != nil { err = errors.Join(err, fmt.Errorf("sending latest cert: %w", se)) } return err } + // Verify ZK proof (if verifier is enabled) + if err := v.verifyZKProof(ctx, req, si); err != nil { + v.log.WarnContext(ctx, "ZK proof verification failed - sending last valid UC", + logger.Error(err), + logger.Shard(req.PartitionID, req.ShardID)) + + // Send last valid UC immediately when proof verification fails so the + // partition can sync back to the last certified state. The outer + // response carries CertStatusProofInvalid + the verifier's error so + // the submitter can distinguish this from a timeout repeat UC. 
+ if se := v.sendRejection(ctx, req.NodeID, si.LastCR, certification.CertStatusProofInvalid, err); se != nil { + err = errors.Join(err, fmt.Errorf("failed to send last valid UC: %w", se)) + } + return fmt.Errorf("ZK proof verification failed: %w", err) + } + if err := v.subscription.Subscribe(req.PartitionID, req.ShardID, req.NodeID); err != nil { return fmt.Errorf("subscribing the sender: %w", err) } @@ -292,3 +340,76 @@ func (v *Node) handleConsensus(ctx context.Context) error { } } } + +// verifyZKProof verifies the ZK proof in the block certification request +func (v *Node) verifyZKProof(ctx context.Context, req *certification.BlockCertificationRequest, si *storage.ShardInfo) error { + ir := req.InputRecord + if ir == nil { + return fmt.Errorf("input record is nil") + } + + // Get verifier for this partition's configuration + verifier, err := v.zkRegistry.GetVerifier(si.PartitionID, si.ShardID, si.IR.Epoch, si.PartitionParams) + if err != nil { + return fmt.Errorf("getting verifier for partition %s: %w", si.PartitionID, err) + } + + if !verifier.IsEnabled() { + // m-of-n mode - no ZK proof verification + return nil + } + + // Get state roots from InputRecord + previousStateRoot := ir.PreviousHash + newStateRoot := ir.Hash + + // Skip verification for sync UCs and genesis blocks: + // 1. Sync UCs: both hashes are null/empty (handshake/subscription requests) + // 2. 
Genesis block: previousHash is null/empty (first genesis block is sent from heaven) + if len(previousStateRoot) == 0 && len(newStateRoot) == 0 { + v.log.DebugContext(ctx, "Skipping ZK proof verification for sync UC", + logger.Shard(req.PartitionID, req.ShardID)) + return nil + } + if len(previousStateRoot) == 0 { + v.log.InfoContext(ctx, "Skipping ZK proof verification for genesis block", + logger.Shard(req.PartitionID, req.ShardID)) + return nil + } + + proofType := string(verifier.ProofType()) + proofSize := len(req.ZkProof) + + v.log.DebugContext(ctx, "Verifying ZK proof", + logger.Shard(req.PartitionID, req.ShardID), + slog.String("verifier_type", proofType), + slog.Int("proof_size", proofSize), + slog.Uint64("round", ir.RoundNumber)) + + // Verify proof: previousStateRoot -> newStateRoot transition with block hash + blockHash := ir.BlockHash + start := time.Now() + verifyErr := verifier.VerifyProof(req.ZkProof, previousStateRoot, newStateRoot, blockHash) + elapsed := time.Since(start) + + if verifyErr != nil { + v.log.WarnContext(ctx, "ZK proof verification failed", + logger.Shard(req.PartitionID, req.ShardID), + slog.String("verifier_type", proofType), + slog.Int("proof_size", proofSize), + slog.Duration("verification_time", elapsed), + slog.Uint64("round", ir.RoundNumber), + logger.Error(verifyErr)) + return fmt.Errorf("ZK proof verification failed: %w", verifyErr) + } + + v.log.InfoContext(ctx, "ZK proof verified successfully", + logger.Shard(req.PartitionID, req.ShardID), + slog.String("verifier_type", proofType), + slog.Int("proof_size", proofSize), + slog.Uint64("num_leaves", req.BlockSize), + slog.Duration("verification_time", elapsed), + slog.Uint64("round", ir.RoundNumber)) + + return nil +} diff --git a/rootchain/node_test.go b/rootchain/node_test.go index eabcae53..3a7c5d69 100644 --- a/rootchain/node_test.go +++ b/rootchain/node_test.go @@ -281,6 +281,48 @@ func Test_onHandshake(t *testing.T) { } require.NoError(t, node.onHandshake(t.Context(), 
&msg)) }) + + // Regression: a partition that has already produced certified blocks must + // still be (re)subscribed on handshake. The subscription has a bounded + // per-peer response quota (see responsesPerSubscription) which repeat UCs + // on T2 timeout drain without refilling; handshake is the only refresh + // mechanism for an idle partition. If onHandshake skips Subscribe once + // LastCR.UC.GetRoundNumber() > 0, the partition silently falls off the + // subscriber map and BFT Core logs "0 receivers" on subsequent repeat UCs. + t.Run("post-genesis handshake re-subscribes", func(t *testing.T) { + cr := validCertificationResponse(t) + cr.UC.InputRecord.RoundNumber = 42 // post-genesis: GetRoundNumber() > 0 + + partNet := mockPartitionNet{ + send: func(ctx context.Context, msg any, receivers ...p2peer.ID) error { + return nil + }, + } + cm := mockConsensusManager{ + shardInfo: func(partition types.PartitionID, shard types.ShardID) (*storage.ShardInfo, error) { + return newMockShardInfo(t, nodeID.String(), publicKey, cr), nil + }, + } + node, err := New(&nwPeer, partNet, cm, nopObs) + require.NoError(t, err) + + msg := handshake.Handshake{ + PartitionID: cr.Partition, + ShardID: cr.Shard, + NodeID: nodeID.String(), + } + require.NoError(t, node.onHandshake(t.Context(), &msg)) + + // Subscribe must have been called — the peer should be registered with + // a full response quota, not absent from the subs map. 
+ key := partitionShard{cr.Partition, cr.Shard.Key()} + node.subscription.mu.RLock() + defer node.subscription.mu.RUnlock() + peers, ok := node.subscription.subs[key] + require.True(t, ok, "partition must be present in subs map after post-genesis handshake") + require.Equal(t, responsesPerSubscription, peers[nodeID], + "peer quota must be refilled by handshake") + }) } func Test_handlePartitionMsg(t *testing.T) { @@ -544,10 +586,18 @@ func Test_onBlockCertificationRequest(t *testing.T) { t.Run("invalid request", func(t *testing.T) { // in case of invalid request we respond with the latest cert of the shard + // wrapped in a rejection envelope (Status=RequestInvalid, Message=why). sendCallCnt := 0 partNet := mockPartitionNet{ send: func(ctx context.Context, msg any, receivers ...p2peer.ID) error { - require.Equal(t, &certResp, msg) + resp, ok := msg.(*certification.CertificationResponse) + require.True(t, ok, "expected *CertificationResponse, got %T", msg) + require.Equal(t, certification.CertStatusRequestInvalid, resp.Status) + require.NotEmpty(t, resp.Message) + // The wrapped UC/Technical must still be the last-good certificate. 
+ require.Equal(t, certResp.Partition, resp.Partition) + require.Equal(t, certResp.Technical, resp.Technical) + require.Equal(t, certResp.UC.TRHash, resp.UC.TRHash) sendCallCnt++ return nil }, diff --git a/rootchain/partitions/orchestration.go b/rootchain/partitions/orchestration.go index 59b373e1..6ee342f5 100644 --- a/rootchain/partitions/orchestration.go +++ b/rootchain/partitions/orchestration.go @@ -9,6 +9,7 @@ import ( "time" "github.com/unicitynetwork/bft-core/logger" + "github.com/unicitynetwork/bft-core/rootchain/consensus/zkverifier" "github.com/unicitynetwork/bft-go-base/types" bolt "go.etcd.io/bbolt" ) @@ -210,7 +211,10 @@ func storeShardConf(tx *bolt.Tx, shardConf *types.PartitionDescriptionRecord) er func verifyShardConf(tx *bolt.Tx, shardConf *types.PartitionDescriptionRecord) error { if shardConf.Epoch == 0 { - return shardConf.IsValid() + if err := shardConf.IsValid(); err != nil { + return err + } + return verifyProofConfig(shardConf) } lastShardConf, err := getShardConf(tx, shardConf.PartitionID, shardConf.ShardID, math.MaxUint64) @@ -223,7 +227,45 @@ func verifyShardConf(tx *bolt.Tx, shardConf *types.PartitionDescriptionRecord) e if err = shardConf.Verify(lastShardConf); err != nil { return fmt.Errorf("shard conf does not extend previous shard conf: %w", err) } - return err + return verifyProofConfig(shardConf) +} + +// verifyProofConfig validates the ZK proof configuration in partition params. 
+// Returns error if: +// - proof_type is specified but not available (FFI not built) +// - SP1 proof_type is specified but vkey_path is missing +func verifyProofConfig(shardConf *types.PartitionDescriptionRecord) error { + proofType := zkverifier.ParseProofTypeFromParams(shardConf.PartitionParams) + + // Empty/none proof type is always valid (m-of-n mode) + if proofType == zkverifier.ProofTypeNone || proofType == "" { + return nil + } + + // Check if proof type is available in current build + if !zkverifier.IsProofTypeAvailable(proofType) { + return fmt.Errorf("proof type %q not available (build with -tags zkverifier_ffi to enable)", proofType) + } + + // SP1 requires verification key path and chain_id + if proofType == zkverifier.ProofTypeSP1 { + vkeyPath := zkverifier.ParseVKeyPathFromParams(shardConf.PartitionParams) + if vkeyPath == "" { + return fmt.Errorf("vkey_path required for SP1 proof type") + } + if _, ok := zkverifier.ParseChainIDFromParams(shardConf.PartitionParams); !ok { + return fmt.Errorf("chain_id required for SP1 proof type") + } + } + + // LightClient requires chain_id + if proofType == zkverifier.ProofTypeLightClient { + if _, ok := zkverifier.ParseChainIDFromParams(shardConf.PartitionParams); !ok { + return fmt.Errorf("chain_id required for light_client proof type") + } + } + + return nil } // schema: