From 3f27c906f6bcaf892de90ef2816c820e922d4657 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Mon, 19 Jan 2026 14:08:18 -0500 Subject: [PATCH 01/16] feat: working zk, needs clean up --- Cargo.toml | 1 + ZK_IMPLEMENTATION_AUDIT.md | 357 +++++++++++++ benches/arkworks_proof.rs | 13 +- dory_paper_reference.md | 176 +++++++ dory_zk_analysis.md | 363 +++++++++++++ examples/basic_e2e.rs | 5 +- examples/homomorphic.rs | 5 +- examples/homomorphic_mixed_sizes.rs | 5 +- examples/non_square.rs | 5 +- src/backends/arkworks/ark_poly.rs | 58 +++ src/backends/arkworks/ark_serde.rs | 2 + src/evaluation_proof.rs | 479 ++++++++++++++++- src/lib.rs | 19 +- src/messages.rs | 97 +++- src/mode.rs | 65 +++ src/primitives/poly.rs | 30 ++ src/proof.rs | 56 +- src/reduce_and_fold.rs | 783 +++++++++++++++++++++++++--- src/setup.rs | 21 +- tests/arkworks/evaluation.rs | 29 +- tests/arkworks/homomorphic.rs | 8 +- tests/arkworks/integration.rs | 20 +- tests/arkworks/mod.rs | 4 + tests/arkworks/non_square.rs | 14 +- tests/arkworks/soundness.rs | 6 +- tests/arkworks/zk.rs | 334 ++++++++++++ tests/arkworks/zk_statistical.rs | 562 ++++++++++++++++++++ zk.md | 296 +++++++++++ zk_refactor_plan.md | 737 ++++++++++++++++++++++++++ 29 files changed, 4419 insertions(+), 131 deletions(-) create mode 100644 ZK_IMPLEMENTATION_AUDIT.md create mode 100644 dory_paper_reference.md create mode 100644 dory_zk_analysis.md create mode 100644 src/mode.rs create mode 100644 tests/arkworks/zk.rs create mode 100644 tests/arkworks/zk_statistical.rs create mode 100644 zk.md create mode 100644 zk_refactor_plan.md diff --git a/Cargo.toml b/Cargo.toml index 2997987..9a6a11a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ all-features = true [features] default = [] +zk = [] backends = ["arkworks", "disk-persistence"] arkworks = [ "dep:ark-bn254", diff --git a/ZK_IMPLEMENTATION_AUDIT.md b/ZK_IMPLEMENTATION_AUDIT.md new file mode 100644 index 0000000..c176ef0 --- /dev/null +++ b/ZK_IMPLEMENTATION_AUDIT.md @@ -0,0 
+1,357 @@ +# Dory ZK Implementation Audit + +This document compares the current implementation against the Dory paper's ZK requirements. + +--- + +## Executive Summary + +**Current State: Partially Implemented (Phase 1 Complete, Phase 2 Missing)** + +The implementation has: +- ✅ Mode trait abstraction (`Transparent` / `ZK`) +- ✅ Blind sampling and masking for all protocol messages +- ✅ Blind accumulation logic in prover state +- ✅ Sigma proof structures defined (`Sigma1Proof`, `Sigma2Proof`, `ScalarProductProof`) +- ✅ Sigma proof generation functions implemented +- ✅ Sigma proof verification functions implemented + +**Critical Gap**: The ZK proofs produce the **same proof type** as transparent proofs (`DoryProof`), and verification uses the **same code path** that reveals `y`. The Sigma proofs exist but are **not wired into the main API**. + +--- + +## Detailed Analysis + +### 1. Mode Trait ✅ CORRECT + +**Implementation** (`src/mode.rs:14-65`): +```rust +pub trait Mode: 'static { + fn sample(rng: &mut R) -> F; + fn mask(value: G, base: &G, blind: &G::Scalar) -> G; +} + +impl Mode for Transparent { + fn sample(_rng: &mut R) -> F { F::zero() } + fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { value } +} + +impl Mode for ZK { + fn sample(rng: &mut R) -> F { F::random(rng) } + fn mask(value: G, base: &G, blind: &G::Scalar) -> G { value + base.scale(blind) } +} +``` + +**Assessment**: Correct. Blinds sampled from RNG (not transcript), masking is additive. + +--- + +### 2. VMV Message ⚠️ PARTIAL + +**What's Implemented** (`src/evaluation_proof.rs:141-165`): +```rust +let r_c: F = Mo::sample(rng); +let r_d2: F = Mo::sample(rng); +let r_e1: F = Mo::sample(rng); +let r_e2: F = Mo::sample(rng); + +let c = Mo::mask(c_raw, &setup.ht, &r_c); +let d2 = Mo::mask(d2_raw, &setup.ht, &r_d2); +let e1 = Mo::mask(e1_raw, &setup.h1, &r_e1); + +let vmv_message = VMVMessage { c, d2, e1 }; // <-- Uses non-ZK struct! +``` + +**What's Missing**: + +1. 
**`E_2` not computed/sent**: In ZK mode, the prover must send `E_2 = y·Γ_2,fin + r_e2·H_2`. Currently `r_e2` is sampled but `E_2` is not computed or included in the message. + +2. **`y_com` not computed**: The commitment `y_com = y·Γ_1,fin + r_y·H_1` is not created. + +3. **Wrong message type**: Uses `VMVMessage` instead of `ZkVMVMessage`. + +**Required Fix**: +```rust +// In ZK mode: +let y = polynomial.evaluate(point); +let e2 = Mo::mask(setup.g2_vec[0].scale(&y), &setup.h2, &r_e2); +let r_y: F = Mo::sample(rng); +let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); + +let vmv_message = ZkVMVMessage { c, d2, e1, e2, y_com }; +``` + +--- + +### 3. Reduce Round First Message ✅ CORRECT + +**Implementation** (`src/reduce_and_fold.rs:237-314`): +```rust +let r_d1_l: F = M::sample(rng); +let r_d1_r: F = M::sample(rng); +let r_d2_l: F = M::sample(rng); +let r_d2_r: F = M::sample(rng); + +let d1_left = M::mask(d1_left_base, &self.setup.ht, &r_d1_l); +let d1_right = M::mask(d1_right_base, &self.setup.ht, &r_d1_r); +let d2_left = M::mask(d2_left_base, &self.setup.ht, &r_d2_l); +let d2_right = M::mask(d2_right_base, &self.setup.ht, &r_d2_r); +``` + +**Assessment**: Correct. D values properly masked with H_T. + +--- + +### 4. Reduce Round Second Message ✅ CORRECT + +**Implementation** (`src/reduce_and_fold.rs:351-414`): +```rust +let r_c_plus: F = M::sample(rng); +let r_c_minus: F = M::sample(rng); +let r_e1_plus: F = M::sample(rng); +// ... etc + +let c_plus = M::mask(c_plus_base, &self.setup.ht, &r_c_plus); +let c_minus = M::mask(c_minus_base, &self.setup.ht, &r_c_minus); +let e1_plus = M::mask(e1_plus_base, &self.setup.h1, &r_e1_plus); +let e1_minus = M::mask(e1_minus_base, &self.setup.h1, &r_e1_minus); +let e2_plus = M::mask(e2_plus_base, &self.setup.h2, &r_e2_plus); +let e2_minus = M::mask(e2_minus_base, &self.setup.h2, &r_e2_minus); +``` + +**Assessment**: Correct. All values properly masked with respective generators. + +--- + +### 5. 
Blind Accumulation ⚠️ ISSUE + +**Implementation** (`src/reduce_and_fold.rs:340-343, 463-477`): + +After first challenge (β): +```rust +self.r_c = self.r_c + self.r_d2 * *beta + self.r_d1 * beta_inv; +``` + +After second challenge (α): +```rust +self.r_c = self.r_c + r_c_plus * *alpha + r_c_minus * alpha_inv; +self.r_d1 = r_d1_l * *alpha + r_d1_r; +self.r_d2 = r_d2_l * alpha_inv + r_d2_r; +self.r_e1 = self.r_e1 + r_e1_plus * *alpha + r_e1_minus * alpha_inv; +self.r_e2 = self.r_e2 + r_e2_plus * *alpha + r_e2_minus * alpha_inv; +``` + +**Issue**: The first challenge uses `self.r_d1` and `self.r_d2`, but these are **not yet set** from message blinds at that point. They should be folded from per-round blinds. + +**Paper says**: +- After β: `r_C ← r_C + β·(r_d2_l + r_d2_r) + β⁻¹·(r_d1_l + r_d1_r)` (sum of current round blinds) +- After α: `r_D1 ← α·r_d1_l + r_d1_r`, `r_D2 ← α⁻¹·r_d2_l + r_d2_r` + +**Current Code** uses `self.r_d1` and `self.r_d2` in `apply_first_challenge`, but those are set in `apply_second_challenge`. This is **inverted** - the accumulation happens after the challenge where it should use the blinds, but uses prior round's accumulated values. + +**Fix needed**: Pass message blinds to `apply_first_challenge` and use them directly, not the accumulated values. + +--- + +### 6. Final Message ✅ CORRECT + +**Implementation** (`src/reduce_and_fold.rs:486-515`): +```rust +let e1 = self.v1[0] + self.setup.h1.scale(&gamma_s1); +let e2 = self.v2[0] + self.setup.h2.scale(&gamma_inv_s2); + +// ZK: final blind accumulation +self.r_c = self.r_c + self.r_e2 * *gamma + self.r_e1 * gamma_inv; +``` + +**Assessment**: Correct formula for final blind accumulation. + +--- + +### 7. 
Scalar Product Σ-Protocol ✅ IMPLEMENTED (but not used) + +**Implementation** (`src/reduce_and_fold.rs:551-619`): +```rust +pub fn scalar_product_proof(&self, transcript: &mut T, rng: &mut R) -> ZkScalarProductProof { + // Sample d1, d2 + let s_d1 = F::random(rng); + let s_d2 = F::random(rng); + let d1 = gamma1.scale(&s_d1); + let d2 = gamma2.scale(&s_d2); + + // Sample blinding scalars + let r_p1 = F::random(rng); + // ... + + // Compute P1, P2, Q, R + let p1 = E::pair(&d1, &gamma2) + self.setup.ht.scale(&r_p1); + // ... + + // Get challenge c + let c = transcript.challenge_scalar(b"sigma_c"); + + // Compute responses + let e1 = d1 + v1.scale(&c); + let e2 = d2 + v2.scale(&c); + let r1 = r_p1 + c * self.r_d1; + let r2 = r_p2 + c * self.r_d2; + let r3 = r_r + c * r_q + c_sq * self.r_c; + + ScalarProductProof { p1, p2, q, r, e1, e2, r1, r2, r3 } +} +``` + +**Assessment**: Implementation looks correct per Dory paper. + +**Issue**: This function exists but is **never called** in the proof generation flow. + +--- + +### 8. Sigma1 Proof (VMV Consistency) ✅ IMPLEMENTED (but not used) + +**Implementation** (`src/reduce_and_fold.rs:630-672`): +- Proves `E_2 = y·Γ_2,fin + r_E2·H_2` +- Proves `y_com = y·Γ_1,fin + r_y·H_1` + +**Assessment**: Implementation looks correct. + +**Issue**: Never called - no `y_com` is created in the main flow. + +--- + +### 9. Sigma2 Proof (VMV Relation) ✅ IMPLEMENTED (but not used) + +**Implementation** (`src/reduce_and_fold.rs:725-762`): +- Proves `e(E_1, Γ_2,fin) - D_2 = e(H_1, t_1·Γ_2,fin + t_2·H_2)` + +**Assessment**: Implementation looks correct. + +**Issue**: Never called. + +--- + +### 10. Verification ❌ CRITICAL ISSUE + +**Current verify path** (`src/evaluation_proof.rs:341`): +```rust +// E2 = y · Γ2,fin where Γ2,fin = g2_0 +let e2 = setup.g2_0.scale(&evaluation); // <-- REVEALS y! +``` + +**Problem**: Even when proving with `ZK` mode, verification: +1. Takes `evaluation` as a parameter (reveals y) +2. 
Computes `E_2` from `y` (doesn't use prover's blinded E_2) +3. Uses `verify_final` not `verify_final_zk` + +**For true ZK**, verification should: +1. NOT take `evaluation` as input +2. Use prover's `E_2` from `ZkVMVMessage` +3. Verify Sigma1 proof to confirm `y_com` and `E_2` are consistent +4. Call `verify_final_zk` with the scalar product proof + +--- + +### 11. Proof Structure ❌ NOT USED + +**Defined** (`src/proof.rs:53-80`): +```rust +pub struct ZkDoryProof { + pub vmv_message: ZkVMVMessage, + pub first_messages: Vec>, + pub second_messages: Vec>, + pub final_message: ScalarProductMessage, + pub sigma1_proof: Sigma1Proof, + pub sigma2_proof: Sigma2Proof, + pub scalar_product_proof: ScalarProductProof, + pub nu: usize, + pub sigma: usize, +} +``` + +**Issue**: This struct is defined but: +1. No `create_zk_evaluation_proof` function produces it +2. No `verify_zk_evaluation_proof` function consumes it +3. Tests use regular `prove/verify` which produce/consume `DoryProof` + +--- + +## Test Analysis + +**Current ZK tests** (`tests/arkworks/zk.rs`): +```rust +let proof = prove::<_, BN254, ..., ZK, _>(...).unwrap(); // Returns DoryProof +verify::<_, BN254, ...>(tier_2, evaluation, &point, &proof, ...); // Takes evaluation! +``` + +**What's being tested**: The masking of intermediate protocol messages. + +**What's NOT tested**: +- Hiding the evaluation `y` +- Sigma proofs +- ZK verification path + +The tests pass because the underlying **values** are correct (masked values unmask to correct values), but the protocol is not actually zero-knowledge since `y` is still revealed to the verifier. 
+ +--- + +## Summary of Issues + +| Component | Status | Issue | +|-----------|--------|-------| +| Mode trait | ✅ | - | +| VMV masking | ✅ | Values masked correctly | +| VMV message type | ❌ | Uses `VMVMessage` not `ZkVMVMessage` | +| E_2 computation | ❌ | Not sent by prover in ZK mode | +| y_com computation | ❌ | Not created | +| Reduce masking | ✅ | All values masked correctly | +| Blind accumulation | ⚠️ | Order may be wrong in first challenge | +| Final masking | ✅ | Correct | +| ScalarProductProof | ✅ | Implemented correctly | +| Sigma1Proof | ✅ | Implemented correctly | +| Sigma2Proof | ✅ | Implemented correctly | +| ZkDoryProof struct | ✅ | Defined correctly | +| `prove` API (ZK) | ❌ | Returns `DoryProof`, not `ZkDoryProof` | +| `verify` API (ZK) | ❌ | Takes `evaluation` as input, reveals `y` | +| verify_final_zk | ✅ | Implemented but not used | + +--- + +## Required Work to Complete ZK + +### Phase 1: Fix Blind Accumulation Order +1. Modify `apply_first_challenge` to take message blinds +2. Use message blinds (sum) directly, not accumulated state + +### Phase 2: Create ZK Proof Generation +1. Add `create_zk_evaluation_proof` function that: + - Computes `E_2 = y·Γ_2,fin + r_e2·H_2` + - Computes `y_com = y·Γ_1,fin + r_y·H_1` + - Creates `ZkVMVMessage` + - Generates `Sigma1Proof` + - Generates `Sigma2Proof` + - Generates `ScalarProductProof` + - Returns `ZkDoryProof` + +### Phase 3: Create ZK Verification +1. Add `verify_zk_evaluation_proof` function that: + - Does NOT take `evaluation` as input + - Verifies `Sigma1Proof` + - Verifies `Sigma2Proof` + - Uses prover's `E_2` from message + - Calls `verify_final_zk` with scalar product proof + +### Phase 4: Update Tests +1. Add tests that verify `y` is not revealed +2. Test that verification works without knowing `y` +3. 
Test Sigma proof soundness (invalid proofs rejected) + +--- + +## Conclusion + +The implementation has laid excellent groundwork with: +- Correct masking/blinding infrastructure +- Correct Sigma proof implementations +- Correct proof structures + +But it's **incomplete** because the ZK code paths are not integrated into the main `prove`/`verify` API. Currently, using `ZK` mode only masks intermediate values but still reveals `y` to the verifier - it's not truly zero-knowledge. diff --git a/benches/arkworks_proof.rs b/benches/arkworks_proof.rs index 527b297..1004610 100644 --- a/benches/arkworks_proof.rs +++ b/benches/arkworks_proof.rs @@ -15,9 +15,11 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use dory_pcs::backends::arkworks::{ ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, }; +use dory_pcs::mode::Transparent; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify}; +use rand::rngs::ThreadRng; use rand::thread_rng; #[cfg(feature = "cache")] @@ -82,7 +84,8 @@ fn bench_prove(c: &mut Criterion) { c.bench_function("prove_2^26_coefficients", |b| { b.iter(|| { let mut transcript = Blake2bTranscript::new(b"dory-bench"); - prove::<_, BN254, G1Routines, G2Routines, _, _>( + let mut rng = thread_rng(); + prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( black_box(&poly), black_box(&point), black_box(tier_1.clone()), @@ -90,6 +93,7 @@ fn bench_prove(c: &mut Criterion) { black_box(sigma), black_box(&prover_setup), black_box(&mut transcript), + black_box(&mut rng), ) .unwrap() }) @@ -106,7 +110,8 @@ fn bench_verify(c: &mut Criterion) { .unwrap(); let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let mut rng = thread_rng(); + let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( &poly, &point, tier_1, @@ -114,6 
+119,7 @@ fn bench_verify(c: &mut Criterion) { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); @@ -172,7 +178,7 @@ fn bench_end_to_end(c: &mut Criterion) { // Prove let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( &poly, &point, tier_1, @@ -180,6 +186,7 @@ fn bench_end_to_end(c: &mut Criterion) { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); diff --git a/dory_paper_reference.md b/dory_paper_reference.md new file mode 100644 index 0000000..cb33427 --- /dev/null +++ b/dory_paper_reference.md @@ -0,0 +1,176 @@ +# Dory: Efficient, Transparent Arguments for Generalised Inner Products and Polynomial Commitments + +**Author:** Jonathan Lee +**Published:** TCC 2021 (Theory of Cryptography Conference) +**IACR ePrint:** [2020/1274](https://eprint.iacr.org/2020/1274) +**Springer:** [LNCS vol 13043](https://link.springer.com/chapter/10.1007/978-3-030-90453-1_1) +**License:** Creative Commons Attribution (CC BY) + +--- + +## Abstract + +Dory is a transparent setup, public-coin interactive argument for proving correctness of an inner-pairing product between committed vectors of elements of the two source groups. For a product of vectors of length n, proofs consist of 6 log n target group elements, one element from each source group, and 3 scalars. Verifier work is dominated by an O(log n) multi-exponentiation in the target group and O(1) pairings. Security is reduced to the standard SXDH assumption in the standard model. + +Dory is applied to build a multivariate polynomial commitment scheme via the Fiat-Shamir transform. 
For a dense polynomial with n coefficients: +- Prover work to compute a commitment is dominated by a multi-exponentiation in one source group of size n +- Prover work to show that a commitment to an evaluation is correct is O(n^(log 8/log 25)) in general, or O(n^(1/2)) for univariate or multilinear polynomials +- Communication complexity and Verifier work are both O(log n) + +--- + +## Complexity Summary + +| Metric | Complexity | +|--------|------------| +| Setup | Transparent (no toxic waste) | +| Proof size | 6 log n target group elements + O(1) | +| Verifier work | O(log n) multi-exp in G_T + O(1) pairings | +| Prover work (general) | O(n^(log 8/log 25)) ≈ O(n^0.65) | +| Prover work (univariate/multilinear) | O(√n) | +| Commitment size | 192 bytes (at n = 2²⁰) | +| Security assumption | SXDH (Symmetric External Diffie-Hellman) | + +--- + +## Core Technical Contributions + +### 1. Inner-Pairing-Product Commitments + +Dory employs inner-pairing-product commitments utilizing both G₁ and G₂ groups. The groups must not equal each other for DDH to hold and the scheme to be binding. The commitment is expressed as an inner product: ⟨vector, commitment_key⟩. + +For a pairing group (G₁, G₂, G_T): +- If the message is a vector in G₁, the commitment key is a vector in G₂ +- If the message is a vector in G₂, the commitment key is a vector in G₁ +- The commitment itself lives in G_T + +This structure-preserving symmetry between messages and commitment keys is a key insight. + +### 2. Recursive Folding (Reduce-and-Fold) + +The core technique is the observation that for any vectors u_L, u_R, v_L, v_R and any non-zero scalar α: + +``` +⟨u_L || u_R, v_L || v_R⟩ = ⟨α·u_L + u_R, α⁻¹·v_L + v_R⟩ - α·⟨u_L, v_R⟩ - α⁻¹·⟨u_R, v_L⟩ +``` + +This identity allows reducing a claim about the inner product ⟨u, v⟩ of vectors of length n to claims about inner products of vectors of length n/2. + +**Protocol Flow:** +1. Prover sends cross-terms: L = ⟨u_L, v_R⟩ and R = ⟨u_R, v_L⟩ +2. 
Verifier sends random challenge α +3. Both parties compute folded vectors: u' = α·u_L + u_R and v' = α⁻¹·v_L + v_R +4. Recurse until vectors have length 1 +5. Final claim verified with a sigma protocol + +The verifier uses homomorphic properties of the commitment scheme (with prover assistance) to find commitments to the shorter folded vectors. + +### 3. Parallel Recursion for Logarithmic Verification + +In addition to proving knowledge of u such that c_u = ⟨u, g⟩, the prover also shows knowledge of g such that c_g = ⟨g, Γ⟩. This is done via recursion by executing the protocol in parallel using the same randomness. + +The key optimization doubles the rounds of interaction to 2 log n in order to keep the messages per round constant, achieving O(log n) verification. + +### 4. Pre-processing with Matrix Commitments + +Using matrix commitments (similar to Hyrax), the prover: +1. Computes Generalized Pedersen Commitments internally (rows of the coefficient matrix) +2. Generates an inner-pairing-product commitment of the commitment vector + +**Pre-processing procedure (multi-round folding):** +- Round 1: Commits to g_L^(0) and g_R^(0) using public random key Γ^(1) ∈ G^(n/2), producing Δ_L^(1) and Δ_R^(1) +- Round i: Partitions Γ^(i−1) into Γ_L^(i−1) and Γ_R^(i−1), committing using Γ^(i) ∈ G^(n/2^i) + +The commitment key update formula: +``` +c_g^(i) = α_{i-1} · Δ_L^(i) + α_i · Δ_R^(i) +``` + +--- + +## Comparison with Related Work + +### vs. Bulletproofs +- Like Bulletproofs, uses recursive folding +- Unlike Bulletproofs, instead of updating g directly, the verifier computes a commitment to it in constant time using homomorphism + +### vs. Trusted Setup Schemes (KZG) +- Dory achieves similar asymptotics without trusted setup +- Previously, these asymptotics required trusted setup or concretely inefficient groups of unknown order + +### vs. 
DARK/Pietrzak +- Dory avoids groups of unknown order which are concretely inefficient + +--- + +## Concrete Performance (n = 2²⁰, single core) + +| Operation | Cost | +|-----------|------| +| Commitment size | 192 bytes | +| Evaluation proof size | ~18 KB | +| Proof generation time | ~3 seconds | +| Verification time | ~25 ms | + +**Batched evaluation (n = 2²⁰):** +| Metric | Marginal Cost | +|--------|---------------| +| Communication | < 1 KB | +| Prover time | ~300 ms | +| Verifier time | ~1 ms | + +--- + +## Protocol Messages (per round) + +In each round of the reduce-and-fold protocol, the prover sends: +- 6 target group elements (G_T) +- Cross-term products for the folding identity + +Total proof: 6 log n target group elements + 1 G₁ element + 1 G₂ element + 3 scalars + +--- + +## Security Model + +- **Assumption:** SXDH (Symmetric External Diffie-Hellman) +- **Model:** Standard model (no random oracle for security, though Fiat-Shamir used for non-interactivity) +- **Properties:** + - Computationally binding + - Perfectly hiding (in the interactive version) + - Knowledge sound + +--- + +## Application to Polynomial Commitments + +Dory constructs a multivariate polynomial commitment scheme: + +1. **Commitment:** Interpret polynomial coefficients as a matrix, commit using the two-tiered scheme +2. **Evaluation:** Reduce polynomial evaluation to inner-pairing-product claims +3. **Non-interactivity:** Apply Fiat-Shamir transform + +For multilinear polynomials (common in SNARKs), the structure enables O(√n) prover work for evaluation proofs. + +--- + +## Integration with Jolt zkVM + +Dory is used as the polynomial commitment scheme in Jolt, a RISC-V zkVM: +- The commitment key consists of random group elements generated by evaluating a cryptographic PRG +- Can be generated "on the fly" but explicit storage enables faster proving +- Space and Time's high-performance Dory implementation contributed to 6x speedup in Jolt + +--- + +## References + +1. Lee, J. (2021). 
Dory: Efficient, Transparent Arguments for Generalised Inner Products and Polynomial Commitments. In: Theory of Cryptography (TCC 2021), LNCS vol 13043, Springer. + +2. IACR ePrint: https://eprint.iacr.org/2020/1274 + +3. Bünz, B., Maller, M., Mishra, P., Tyagi, N., & Vesely, P. (2019). Proofs for Inner Pairing Products and Applications. IACR ePrint 2019/1177. + +4. Jolt zkVM: https://github.com/a16z/jolt + +5. Thaler, J. Proofs, Arguments, and Zero-Knowledge (Section 15.4 covers Dory). diff --git a/dory_zk_analysis.md b/dory_zk_analysis.md new file mode 100644 index 0000000..bd7cb7b --- /dev/null +++ b/dory_zk_analysis.md @@ -0,0 +1,363 @@ +# Dory ZK vs Non-ZK: Deep Technical Analysis + +This document provides a concrete analysis of the differences between transparent (non-ZK) and zero-knowledge versions of the Dory polynomial commitment scheme. + +--- + +## 1. Overview: What Zero-Knowledge Means for Dory + +In the **transparent (non-ZK)** Dory protocol: +- The prover reveals the polynomial evaluation `y` in clear +- All protocol messages directly contain the computed values +- The verifier learns `y` and can verify `C(r) = y` + +In the **zero-knowledge (ZK)** Dory protocol: +- The prover commits to `y` without revealing it +- All protocol messages are **masked** with random blinds +- The verifier learns only that the prover knows a valid `(polynomial, evaluation)` pair + +--- + +## 2. Core Technique: Blinding/Masking + +### 2.1 The Masking Operation + +For any group element `V` computed by the prover, the ZK version masks it: + +``` +V_masked = V + r·H +``` + +Where: +- `V` is the actual computed value (e.g., a pairing result in G_T) +- `r` is a random scalar sampled from the prover's private RNG +- `H` is a **blinding generator** (H_T, H_1, or H_2 depending on the group) + +**Key Insight**: The blinding generators `H_T, H_1, H_2` must be linearly independent from the commitment generators `Γ_1, Γ_2`. This ensures: +1. 
The mask `r·H` looks random to the verifier +2. The binding property is preserved (prover can't find alternate openings) + +### 2.2 Mode Abstraction + +The implementation uses a `Mode` trait to unify ZK and non-ZK: + +```rust +pub trait Mode: 'static { + fn sample(rng: &mut R) -> F; + fn mask(value: G, base: &G, blind: &G::Scalar) -> G; +} + +// Transparent: sample returns 0, mask returns value unchanged +impl Mode for Transparent { + fn sample(_rng: &mut R) -> F { F::zero() } + fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { value } +} + +// ZK: sample returns random, mask adds blind +impl Mode for ZK { + fn sample(rng: &mut R) -> F { F::random(rng) } + fn mask(value: G, base: &G, blind: &G::Scalar) -> G { value + base.scale(blind) } +} +``` + +--- + +## 3. Protocol Differences by Phase + +### 3.1 VMV (Vector-Matrix-Vector) Message + +The VMV message initiates the evaluation proof. It contains `(C, D_2, E_1)`. + +#### Transparent Version +``` +C = e(⟨T_vec, v_vec⟩, Γ_2,fin) +D_2 = e(⟨Γ_1, v_vec⟩, Γ_2,fin) +E_1 = ⟨T_vec, L_vec⟩ +E_2 = y · Γ_2,fin // Computed by verifier from known y +``` + +#### ZK Version +``` +C = e(⟨T_vec, v_vec⟩, Γ_2,fin) + r_c · H_T +D_2 = e(⟨Γ_1, v_vec⟩, Γ_2,fin) + r_d2 · H_T +E_1 = ⟨T_vec, L_vec⟩ + r_e1 · H_1 +E_2 = y · Γ_2,fin + r_e2 · H_2 // Sent by prover (blinded) +y_com = y · Γ_1,fin + r_y · H_1 // Commitment to evaluation +``` + +**Key Differences**: +1. All values masked with their respective blinding generators +2. `E_2` sent by prover (verifier can't compute it without knowing `y`) +3. Additional commitment `y_com` for proving `y` consistency +4. Requires **Σ_1 proof** to prove `E_2` and `y_com` commit to same `y` +5. Requires **Σ_2 proof** to prove the VMV relation holds + +### 3.2 Reduce Rounds (First Message) + +Each reduce round computes cross-terms for the folding. First message contains `(D_1L, D_1R, D_2L, D_2R, E_1β, E_2β)`. 
+ +#### Transparent Version +``` +D_1L = e(v_1L, Γ_2') +D_1R = e(v_1R, Γ_2') +D_2L = e(Γ_1', v_2L) +D_2R = e(Γ_1', v_2R) +E_1β = ⟨Γ_1, s_2⟩ +E_2β = ⟨Γ_2, s_1⟩ +``` + +#### ZK Version +``` +D_1L = e(v_1L, Γ_2') + r_d1l · H_T +D_1R = e(v_1R, Γ_2') + r_d1r · H_T +D_2L = e(Γ_1', v_2L) + r_d2l · H_T +D_2R = e(Γ_1', v_2R) + r_d2r · H_T +E_1β = ⟨Γ_1, s_2⟩ // Not masked (public generators) +E_2β = ⟨Γ_2, s_1⟩ // Not masked (public generators) +``` + +**Blind Sampling**: 4 new blinds per round: `r_d1l, r_d1r, r_d2l, r_d2r` + +### 3.3 Reduce Rounds (Second Message) + +Second message contains cross-products `(C_+, C_-, E_1+, E_1-, E_2+, E_2-)`. + +#### Transparent Version +``` +C_+ = e(v_1L, v_2R) +C_- = e(v_1R, v_2L) +E_1+ = ⟨v_1L, s_2R⟩ +E_1- = ⟨v_1R, s_2L⟩ +E_2+ = ⟨s_1L, v_2R⟩ +E_2- = ⟨s_1R, v_2L⟩ +``` + +#### ZK Version +``` +C_+ = e(v_1L, v_2R) + r_c+ · H_T +C_- = e(v_1R, v_2L) + r_c- · H_T +E_1+ = ⟨v_1L, s_2R⟩ + r_e1+ · H_1 +E_1- = ⟨v_1R, s_2L⟩ + r_e1- · H_1 +E_2+ = ⟨s_1L, v_2R⟩ + r_e2+ · H_2 +E_2- = ⟨s_1R, v_2L⟩ + r_e2- · H_2 +``` + +**Blind Sampling**: 6 new blinds per round: `r_c+, r_c-, r_e1+, r_e1-, r_e2+, r_e2-` + +### 3.4 Blind Accumulation + +In ZK mode, the prover must track how blinds combine through the protocol. + +After challenge `β` (first challenge): +``` +r_C ← r_C + β·r_D2 + β⁻¹·r_D1 +``` + +After challenge `α` (second challenge): +``` +r_C ← r_C + α·r_c+ + α⁻¹·r_c- +r_D1 ← α·r_d1l + r_d1r +r_D2 ← α⁻¹·r_d2l + r_d2r +r_E1 ← r_E1 + α·r_e1+ + α⁻¹·r_e1- +r_E2 ← r_E2 + α·r_e2+ + α⁻¹·r_e2- +``` + +After fold-scalars (challenge `γ`): +``` +r_C ← r_C + γ·r_E2 + γ⁻¹·r_E1 +``` + +### 3.5 Final Scalar Product + +#### Transparent Version + +Final message is `(E_1, E_2)`: +``` +E_1 = v_1 + γ·s_1·H_1 +E_2 = v_2 + γ⁻¹·s_2·H_2 +``` + +Verification: +``` +e(E_1 + d·Γ_1, E_2 + d⁻¹·Γ_2) = C' + χ_0 + d·D_2' + d⁻¹·D_1' +``` + +#### ZK Version + +Final message is `(E_1, E_2)` plus a **Σ-protocol proof**. 
+ +The Σ-protocol proves knowledge of `(v_1, v_2, r_C, r_D1, r_D2)` satisfying: +- `C = e(v_1, v_2) + r_C·H_T` +- `D_1 = e(v_1, Γ_2) + r_D1·H_T` +- `D_2 = e(Γ_1, v_2) + r_D2·H_T` + +**Σ-Protocol Steps**: + +1. **Commitment Phase**: Prover samples `d_1 = s_d1·Γ_1`, `d_2 = s_d2·Γ_2` and blinds `r_P1, r_P2, r_Q, r_R`, then computes: + ``` + P_1 = e(d_1, Γ_2) + r_P1·H_T + P_2 = e(Γ_1, d_2) + r_P2·H_T + Q = e(d_1, v_2) + e(v_1, d_2) + r_Q·H_T + R = e(d_1, d_2) + r_R·H_T + ``` + +2. **Challenge**: Verifier sends random `c` (or derived via Fiat-Shamir) + +3. **Response Phase**: Prover computes: + ``` + E_1 = d_1 + c·v_1 + E_2 = d_2 + c·v_2 + r_1 = r_P1 + c·r_D1 + r_2 = r_P2 + c·r_D2 + r_3 = r_R + c·r_Q + c²·r_C + ``` + +4. **Verification**: Verifier checks: + ``` + e(E_1 + d·Γ_1, E_2 + d⁻¹·Γ_2) = χ + R + c·Q + c²·C + + d·P_2 + d·c·D_2 + + d⁻¹·P_1 + d⁻¹·c·D_1 + - (r_3 + d·r_2 + d⁻¹·r_1)·H_T + ``` + +--- + +## 4. Additional ZK-Only Structures + +### 4.1 Sigma1 Proof (VMV Consistency) + +Proves knowledge of `(y, r_E2, r_y)` such that: +- `E_2 = y·Γ_2,fin + r_E2·H_2` +- `y_com = y·Γ_1,fin + r_y·H_1` + +This is a standard Schnorr-like proof for DLOG equality across different bases. + +### 4.2 Sigma2 Proof (VMV Relation) + +Proves: +``` +e(E_1, Γ_2,fin) - D_2 = e(H_1, t_1·Γ_2,fin + t_2·H_2) +``` + +Where `t_1 = r_E1 + r_v` and `t_2 = -r_D2`. + +This proves the VMV constraint holds even with blinds. + +--- + +## 5. Proof Size Comparison + +| Component | Transparent | ZK | +|-----------|-------------|-----| +| VMV Message | 2 G_T + 1 G_1 | 2 G_T + 1 G_1 + 1 G_2 + 1 G_1 | +| Per Reduce Round | 4 G_T + 2 G_1 + 2 G_2 | Same (blinds absorbed) | +| Final Message | 1 G_1 + 1 G_2 | Same | +| Sigma1 Proof | - | 1 G_1 + 1 G_2 + 3 F | +| Sigma2 Proof | - | 1 G_T + 2 F | +| ScalarProduct Proof | - | 4 G_T + 1 G_1 + 1 G_2 + 3 F | + +**Total Overhead for ZK**: +- +1 G_1, +2 G_2 elements +- +5 G_T elements +- +8 F scalars + +--- + +## 6. 
Verification Complexity + +### Transparent +- Per round: O(1) scalar mults in G_T +- Final: 4 pairings (batched) + +### ZK +- Per round: Same (blind tracking is prover-side) +- Final: Same 4 pairings + Σ-protocol verification +- Additional: Sigma1 verification (2 scalar mults each in G_1, G_2) +- Additional: Sigma2 verification (1 pairing) + +**Total**: ~5-6 pairings equivalent (vs 4 for transparent) + +--- + +## 7. Security Properties + +### Transparent Dory +- **Binding**: Computational (under SXDH) +- **Hiding**: None (evaluation is revealed) +- **Soundness**: Computational (knowledge sound under SXDH) + +### ZK Dory +- **Binding**: Computational (under SXDH) +- **Hiding**: Statistical/Perfect (blinded messages are uniformly random) +- **Soundness**: Computational (knowledge sound under SXDH) +- **Zero-Knowledge**: Honest-Verifier Statistical ZK (HVSZK) + +The ZK property is **HVSZK** (Honest-Verifier Statistical Zero-Knowledge): +- The simulator can produce transcripts indistinguishable from real ones +- Requires honest verifier (challenges are random) +- Fiat-Shamir makes it non-interactive, achieving NIZK in ROM + +--- + +## 8. Implementation Summary + +### State Changes (Prover) + +```rust +pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { + // ... existing fields ... 
+ + // ZK blind accumulators (zero for Transparent) + r_c: Scalar, // Accumulated blind for C + r_d1: Scalar, // Accumulated blind for D1 + r_d2: Scalar, // Accumulated blind for D2 + r_e1: Scalar, // Accumulated blind for E1 + r_e2: Scalar, // Accumulated blind for E2 + _mode: PhantomData, +} +``` + +### Proof Changes + +```rust +// Transparent proof +pub struct DoryProof { + pub vmv_message: VMVMessage, + pub first_messages: Vec>, + pub second_messages: Vec>, + pub final_message: ScalarProductMessage, +} + +// ZK proof adds Σ-proofs +pub struct ZkDoryProof { + pub vmv_message: ZkVMVMessage, // Includes E2, y_com + pub first_messages: Vec>, + pub second_messages: Vec>, + pub final_message: ScalarProductMessage, + pub sigma1_proof: Sigma1Proof, + pub sigma2_proof: Sigma2Proof, + pub scalar_product_proof: ScalarProductProof, +} +``` + +--- + +## 9. Key Insights + +1. **Blinding Generator Independence**: `H_T, H_1, H_2` must be sampled independently from `Γ_1, Γ_2`. In practice, derived from a hash of `Γ` or sampled from a separate random oracle. + +2. **Blind Folding Mirrors Value Folding**: When values fold as `v' = α·v_L + v_R`, blinds fold as `r' = α·r_L + r_R`. This maintains the invariant that `v'_masked = v' + r'·H`. + +3. **Σ-Protocols Are Necessary**: Simply masking isn't enough—the verifier needs to verify the final relation. The Σ-protocol proves knowledge of the witness without revealing the blinds. + +4. **RNG vs Transcript for Blinds**: Blinds are sampled from private RNG (not transcript) because they must not affect challenge derivation. Challenges come from the transcript after appending the masked values. + +5. **Additive vs Multiplicative Masking**: Dory uses additive masking (`V + r·H`) in groups. This is simpler than multiplicative approaches and works because group operations are efficient. + +--- + +## References + +1. Lee, J. (2021). "Dory: Efficient, Transparent Arguments for Generalised Inner Products and Polynomial Commitments." TCC 2021. 
+2. IACR ePrint: https://eprint.iacr.org/2020/1274 +3. Thaler, J. "Proofs, Arguments, and Zero-Knowledge" Section 15.4 diff --git a/examples/basic_e2e.rs b/examples/basic_e2e.rs index 8de8fc3..9624855 100644 --- a/examples/basic_e2e.rs +++ b/examples/basic_e2e.rs @@ -13,7 +13,7 @@ use dory_pcs::backends::arkworks::{ }; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; use rand::thread_rng; use tracing::info; @@ -66,7 +66,7 @@ fn main() -> Result<(), Box> { // Step 5: Prove info!("5. Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-basic-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -74,6 +74,7 @@ fn main() -> Result<(), Box> { sigma, &prover_setup, &mut prover_transcript, + &mut rng, )?; info!(" ✓ Proof generated (logarithmic size)\n"); diff --git a/examples/homomorphic.rs b/examples/homomorphic.rs index 83e4428..c7427e4 100644 --- a/examples/homomorphic.rs +++ b/examples/homomorphic.rs @@ -13,7 +13,7 @@ use dory_pcs::backends::arkworks::{ }; use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; use rand::thread_rng; use tracing::info; @@ -139,7 +139,7 @@ fn main() -> Result<(), Box> { // Step 8: Generate proof info!("8. 
Generating evaluation proof for combined polynomial..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, @@ -147,6 +147,7 @@ fn main() -> Result<(), Box> { sigma, &prover_setup, &mut prover_transcript, + &mut rng, )?; info!(" ✓ Proof generated\n"); diff --git a/examples/homomorphic_mixed_sizes.rs b/examples/homomorphic_mixed_sizes.rs index 90fb474..50f3cbb 100644 --- a/examples/homomorphic_mixed_sizes.rs +++ b/examples/homomorphic_mixed_sizes.rs @@ -9,7 +9,7 @@ use dory_pcs::backends::arkworks::{ }; use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; use rand::thread_rng; use tracing::info; @@ -102,7 +102,7 @@ fn main() -> Result<(), Box> { info!("Generating evaluation proof with combined commitment..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-mixed"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, @@ -110,6 +110,7 @@ fn main() -> Result<(), Box> { 2, &prover_setup, &mut prover_transcript, + &mut rng, )?; info!("✓ Proof generated\n"); diff --git a/examples/non_square.rs b/examples/non_square.rs index eece5fa..7c80bd0 100644 --- a/examples/non_square.rs +++ b/examples/non_square.rs @@ -11,7 +11,7 @@ use dory_pcs::backends::arkworks::{ }; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; use rand::thread_rng; use tracing::info; @@ -65,7 +65,7 @@ fn main() -> Result<(), Box> { // Step 5: Prove info!("5. 
Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-non-square-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -73,6 +73,7 @@ fn main() -> Result<(), Box> { sigma, &prover_setup, &mut prover_transcript, + &mut rng, )?; info!(" ✓ Proof generated (logarithmic size)\n"); diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs index f8f9a8b..f9aee50 100644 --- a/src/backends/arkworks/ark_poly.rs +++ b/src/backends/arkworks/ark_poly.rs @@ -93,6 +93,64 @@ impl Polynomial for ArkworksPolynomial { Ok((commitment, row_commitments)) } + + #[cfg(feature = "zk")] + #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit_zk", fields(nu, sigma, num_rows = 1 << nu, num_cols = 1 << sigma))] + #[allow(clippy::type_complexity)] + fn commit_zk( + &self, + nu: usize, + sigma: usize, + setup: &ProverSetup, + rng: &mut R, + ) -> Result<(E::GT, Vec, Vec), DoryError> + where + E: PairingCurve, + M1: DoryRoutines, + E::G1: Group, + R: rand_core::RngCore, + { + let expected_len = 1 << (nu + sigma); + if self.coefficients.len() != expected_len { + return Err(DoryError::InvalidSize { + expected: expected_len, + actual: self.coefficients.len(), + }); + } + + let num_rows = 1 << nu; + let num_cols = 1 << sigma; + + // Tier 1: Compute blinded row commitments + // T_i = ⟨row_i, Γ1⟩ + r_i·H1 + let mut row_commitments = Vec::with_capacity(num_rows); + let mut row_blinds = Vec::with_capacity(num_rows); + + for _ in 0..num_rows { + // Sample blind for this row from private randomness + let r_i = ArkFr::random(rng); + row_blinds.push(r_i); + } + + for (i, r_i) in row_blinds.iter().enumerate() { + let row_start = i * num_cols; + let row_end = row_start + num_cols; + let row = &self.coefficients[row_start..row_end]; + + // Compute blinded row commitment: T_i = MSM(Γ1, row) + r_i·H1 + let g1_bases = 
&setup.g1_vec[..num_cols]; + let row_commit_raw = M1::msm(g1_bases, row); + let row_commit = row_commit_raw + setup.h1.scale(r_i); + row_commitments.push(row_commit); + } + + // Tier 2: Compute final commitment via multi-pairing + // The commitment is derived from blinded row commitments + let g2_bases = &setup.g2_vec[..num_rows]; + let commitment = E::multi_pair_g2_setup(&row_commitments, g2_bases); + + Ok((commitment, row_commitments, row_blinds)) + } } impl MultilinearLagrange for ArkworksPolynomial { diff --git a/src/backends/arkworks/ark_serde.rs b/src/backends/arkworks/ark_serde.rs index 8bfd364..214bbd0 100644 --- a/src/backends/arkworks/ark_serde.rs +++ b/src/backends/arkworks/ark_serde.rs @@ -422,6 +422,8 @@ impl CanonicalDeserialize for ArkDoryProof { first_messages, second_messages, final_message, + #[cfg(feature = "zk")] + scalar_product_proof: None, nu, sigma, }) diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 3fe1332..559916e 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -27,6 +27,7 @@ use crate::error::DoryError; use crate::messages::VMVMessage; +use crate::mode::Mode; use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; use crate::primitives::poly::MultilinearLagrange; use crate::primitives::transcript::Transcript; @@ -59,6 +60,7 @@ use crate::setup::{ProverSetup, VerifierSetup}; /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup /// - `transcript`: Fiat-Shamir transcript for challenge generation +/// - `rng`: Random number generator for sampling blinds (ZK mode only) /// /// # Returns /// Complete Dory proof containing VMV message, reduce messages, and final message @@ -70,8 +72,9 @@ use crate::setup::{ProverSetup, VerifierSetup}; /// Supports both square (nu = sigma) and non-square (nu < sigma) matrices. /// For non-square matrices, vectors are automatically padded to length 2^sigma. 
#[allow(clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "create_evaluation_proof")] -pub fn create_evaluation_proof( +pub fn create_evaluation_proof( polynomial: &P, point: &[F], row_commitments: Option>, @@ -79,6 +82,7 @@ pub fn create_evaluation_proof( sigma: usize, setup: &ProverSetup, transcript: &mut T, + rng: &mut R, ) -> Result, DoryError> where F: Field, @@ -90,6 +94,8 @@ where M2: DoryRoutines, T: Transcript, P: MultilinearLagrange, + Mo: Mode, + R: rand_core::RngCore, { if point.len() != nu + sigma { return Err(DoryError::InvalidPointDimension { @@ -133,17 +139,29 @@ where let _span_vmv = tracing::span!(tracing::Level::DEBUG, "compute_vmv_message", nu, sigma).entered(); - // C = e(⟨row_commitments, v_vec⟩, h₂) + // Sample VMV blinds (zero in Transparent mode, random in ZK mode) + let r_c: F = Mo::sample(rng); + let r_d2: F = Mo::sample(rng); + let r_e1: F = Mo::sample(rng); + let r_e2: F = Mo::sample(rng); + + // Γ2,fin = g2_vec[0] (commitment base, distinct from H2 = h2 for blinding) + let g2_fin = &setup.g2_vec[0]; + + // C = e(⟨row_commitments, v_vec⟩, Γ2,fin) + r_c·HT let t_vec_v = M1::msm(&padded_row_commitments, &v_vec); - let c = E::pair(&t_vec_v, &setup.h2); + let c_raw = E::pair(&t_vec_v, g2_fin); + let c = Mo::mask(c_raw, &setup.ht, &r_c); - // D₂ = e(⟨Γ₁[sigma], v_vec⟩, h₂) + // D₂ = e(⟨Γ₁[sigma], v_vec⟩, Γ2,fin) + r_d2·HT let g1_bases_at_sigma = &setup.g1_vec[..1 << sigma]; let gamma1_v = M1::msm(g1_bases_at_sigma, &v_vec); - let d2 = E::pair(&gamma1_v, &setup.h2); + let d2_raw = E::pair(&gamma1_v, g2_fin); + let d2 = Mo::mask(d2_raw, &setup.ht, &r_d2); - // E₁ = ⟨row_commitments, left_vec⟩ - let e1 = M1::msm(&row_commitments, &left_vec); + // E₁ = ⟨row_commitments, left_vec⟩ + r_e1·H₁ + let e1_raw = M1::msm(&row_commitments, &left_vec); + let e1 = Mo::mask(e1_raw, &setup.h1, &r_e1); let vmv_message = VMVMessage { c, d2, e1 }; drop(_span_vmv); @@ -162,11 +180,11 @@ where ) .entered(); - // 
v₂ = v_vec · Γ₂,fin (each scalar scales g_fin) + // v₂ = v_vec · Γ₂,fin (each scalar scales Γ2,fin = g2_vec[0]) let v2 = { let _span = - tracing::span!(tracing::Level::DEBUG, "fixed_base_vector_scalar_mul_h2").entered(); - M2::fixed_base_vector_scalar_mul(&setup.h2, &v_vec) + tracing::span!(tracing::Level::DEBUG, "fixed_base_vector_scalar_mul_g2_fin").entered(); + M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec) }; let mut padded_right_vec = right_vec.clone(); @@ -176,13 +194,18 @@ where padded_left_vec.resize(1 << sigma, F::zero()); } - let mut prover_state = DoryProverState::new( + // Create prover state with initial blinds from VMV + let mut prover_state: DoryProverState<'_, E, Mo> = DoryProverState::new_with_blinds( padded_row_commitments, // v1 = T_vec_prime (row commitments, padded) v2, // v2 = v_vec · g_fin Some(v_vec), // v2_scalars for first-round MSM+pair optimization padded_right_vec, // s1 = right_vec (padded) padded_left_vec, // s2 = left_vec (padded) setup, + r_c, // Initial r_c from VMV + r_d2, // Initial r_d2 from VMV + r_e1, // Initial r_e1 from VMV + r_e2, // Initial r_e2 from VMV ); drop(_span_init); @@ -191,7 +214,8 @@ where let mut second_messages = Vec::with_capacity(num_rounds); for _round in 0..num_rounds { - let first_msg = prover_state.compute_first_message::(); + let (first_msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) = + prover_state.compute_first_message::(rng); transcript.append_serde(b"d1_left", &first_msg.d1_left); transcript.append_serde(b"d1_right", &first_msg.d1_right); @@ -201,11 +225,13 @@ where transcript.append_serde(b"e2_beta", &first_msg.e2_beta); let beta = transcript.challenge_scalar(b"beta"); + // apply_first_challenge uses accumulated blinds (self.r_d1, self.r_d2) prover_state.apply_first_challenge::(&beta); first_messages.push(first_msg); - let second_msg = prover_state.compute_second_message::(); + let (second_msg, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus) = + 
prover_state.compute_second_message::(rng); transcript.append_serde(b"c_plus", &second_msg.c_plus); transcript.append_serde(b"c_minus", &second_msg.c_minus); @@ -215,12 +241,31 @@ where transcript.append_serde(b"e2_minus", &second_msg.e2_minus); let alpha = transcript.challenge_scalar(b"alpha"); - prover_state.apply_second_challenge::(&alpha); + // apply_second_challenge folds message blinds into accumulated blinds + prover_state.apply_second_challenge::( + &alpha, r_d1_l, r_d1_r, r_d2_l, r_d2_r, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, + r_e2_plus, r_e2_minus, + ); second_messages.push(second_msg); } let gamma = transcript.challenge_scalar(b"gamma"); + + // In ZK mode, generate Σ-protocol proof BEFORE compute_final_message + // because compute_final_message modifies r_c but the Σ-protocol needs + // the pre-fold-scalars blinds. + #[cfg(feature = "zk")] + let scalar_product_proof = + if std::any::TypeId::of::() == std::any::TypeId::of::() { + // scalar_product_proof appends P1, P2, Q, R to transcript and derives challenge c + Some(prover_state.scalar_product_proof_internal(transcript, rng)) + } else { + None + }; + #[cfg(not(feature = "zk"))] + let _ = rng; // suppress unused warning when zk feature is disabled + let final_message = prover_state.compute_final_message::(&gamma); transcript.append_serde(b"final_e1", &final_message.e1); @@ -233,6 +278,8 @@ where first_messages, second_messages, final_message, + #[cfg(feature = "zk")] + scalar_product_proof, nu, sigma, }) @@ -271,6 +318,9 @@ where /// # Errors /// Returns `DoryError::InvalidProof` if verification fails, or other variants /// if the input parameters are incorrect (e.g., point dimension mismatch). +/// +/// # Panics +/// May panic in ZK mode if internal state is inconsistent (should not occur in normal use). 
#[tracing::instrument(skip_all, name = "verify_evaluation_proof")] pub fn verify_evaluation_proof( commitment: E::GT, @@ -305,11 +355,12 @@ where transcript.append_serde(b"vmv_d2", &vmv_message.d2); transcript.append_serde(b"vmv_e1", &vmv_message.e1); - // # NOTE: The VMV check `vmv_message.d2 == e(vmv_message.e1, setup.h2)` is deferred + // # NOTE: The VMV check `vmv_message.d2 == e(vmv_message.e1, Γ2,fin)` is deferred // to verify_final where it's batched with other pairings using random linear // combination with challenge `d`. See verify_final documentation for details. - let e2 = setup.h2.scale(&evaluation); + // E2 = y · Γ2,fin where Γ2,fin = g2_0 (distinct from H2 = h2 for blinding) + let e2 = setup.g2_0.scale(&evaluation); // Folded-scalar accumulation with per-round coordinates. // num_rounds = sigma (we fold column dimensions). @@ -364,10 +415,406 @@ where let gamma = transcript.challenge_scalar(b"gamma"); + // In ZK mode, append Σ-protocol values and derive c BEFORE appending final message + // (must match prover's transcript order) + #[cfg(feature = "zk")] + let zk_challenge_c = if let Some(ref sigma_proof) = proof.scalar_product_proof { + transcript.append_serde(b"sigma_p1", &sigma_proof.p1); + transcript.append_serde(b"sigma_p2", &sigma_proof.p2); + transcript.append_serde(b"sigma_q", &sigma_proof.q); + transcript.append_serde(b"sigma_r", &sigma_proof.r); + Some(transcript.challenge_scalar(b"sigma_c")) + } else { + None + }; + transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); let d = transcript.challenge_scalar(b"d"); + // Use verify_final_zk when scalar_product_proof is present + #[cfg(feature = "zk")] + if let Some(ref sigma_proof) = proof.scalar_product_proof { + let c = zk_challenge_c.expect("c should be derived when scalar_product_proof is present"); + return verifier_state.verify_final_zk_with_challenge(sigma_proof, &c, &d); + } + 
verifier_state.verify_final(&proof.final_message, &gamma, &d) } + +/// Create a zero-knowledge evaluation proof that hides the evaluation y +/// +/// Unlike `create_evaluation_proof`, this function produces a proof where +/// the evaluation y is NOT revealed. Instead, the proof contains: +/// - `y_com`: A commitment to y that the verifier can use +/// - Sigma proofs that prove consistency without revealing y +/// +/// # Returns +/// A tuple of (ZkDoryProof, y_com) where y_com is the commitment to the evaluation +/// that can be given to the verifier. +/// +/// # Errors +/// Returns `DoryError` if dimensions are invalid or proof generation fails. +#[cfg(feature = "zk")] +#[allow(clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] +#[tracing::instrument(skip_all, name = "create_zk_evaluation_proof")] +pub fn create_zk_evaluation_proof( + polynomial: &P, + point: &[F], + row_commitments: Option>, + nu: usize, + sigma: usize, + setup: &ProverSetup, + transcript: &mut T, + rng: &mut R, +) -> Result<(crate::proof::ZkDoryProof, E::G1), DoryError> +where + F: Field, + E: PairingCurve, + E::G1: Group, + E::G2: Group, + E::GT: Group, + M1: DoryRoutines, + M2: DoryRoutines, + T: Transcript, + P: MultilinearLagrange, + R: rand_core::RngCore, +{ + use crate::messages::ZkVMVMessage; + use crate::mode::ZK; + use crate::proof::ZkDoryProof; + use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; + + if point.len() != nu + sigma { + return Err(DoryError::InvalidPointDimension { + expected: nu + sigma, + actual: point.len(), + }); + } + + if nu > sigma { + return Err(DoryError::InvalidSize { + expected: sigma, + actual: nu, + }); + } + + // Compute row commitments if not provided + let row_comms = match row_commitments { + Some(comms) => comms, + None => { + let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; + rc + } + }; + + // Compute evaluation vectors and v_vec + let (left_vec, right_vec) = 
polynomial.compute_evaluation_vectors(point, nu, sigma); + let v_vec = polynomial.vector_matrix_product(&left_vec, nu, sigma); + + // Pad row commitments for non-square matrices + let mut padded_row_commitments = row_comms.clone(); + if nu < sigma { + padded_row_commitments.resize(1 << sigma, E::G1::identity()); + } + + // Compute y = polynomial(point) + let y = polynomial.evaluate(point); + + // Compute v2 = v_vec scaled by Γ2,fin + let g2_fin = &setup.g2_vec[0]; + let v2 = M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec); + + // Pad vectors for non-square matrices + let mut padded_right_vec = right_vec.clone(); + let mut padded_left_vec = left_vec.clone(); + if nu < sigma { + padded_right_vec.resize(1 << sigma, F::zero()); + padded_left_vec.resize(1 << sigma, F::zero()); + } + + // Sample blinds for VMV message + let r_c: F = ZK::sample(rng); + let r_d2: F = ZK::sample(rng); + let r_e1: F = ZK::sample(rng); + let r_e2: F = ZK::sample(rng); + let r_y: F = ZK::sample(rng); + + // Compute VMV message components with masking + // C = e(⟨T_vec', v_vec⟩, Γ2,fin) + r_c·HT + let t_dot_v = M1::msm(&padded_row_commitments, &v_vec); + let c_raw = E::pair(&t_dot_v, g2_fin); + let c = ZK::mask(c_raw, &setup.ht, &r_c); + + // D2 = e(⟨Γ1, v_vec⟩, Γ2,fin) + r_d2·HT + let g1_bases_at_sigma = &setup.g1_vec[..1 << sigma]; + let g1_dot_v = M1::msm(g1_bases_at_sigma, &v_vec); + let d2_raw = E::pair(&g1_dot_v, g2_fin); + let d2 = ZK::mask(d2_raw, &setup.ht, &r_d2); + + // E1 = ⟨T_vec', L_vec⟩ + r_e1·H1 + let e1_raw = M1::msm(&row_comms, &left_vec); + let e1 = ZK::mask(e1_raw, &setup.h1, &r_e1); + + // E2 = y·Γ2,fin + r_e2·H2 (prover computes, not verifier!) 
+ let e2_raw = g2_fin.scale(&y); + let e2 = ZK::mask(e2_raw, &setup.h2, &r_e2); + + // y_com = y·Γ1,fin + r_y·H1 (commitment to evaluation) + let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); + + let zk_vmv_message = ZkVMVMessage { + c, + d2, + e1, + e2, + y_com, + }; + + // Append VMV message to transcript + transcript.append_serde(b"vmv_c", &zk_vmv_message.c); + transcript.append_serde(b"vmv_d2", &zk_vmv_message.d2); + transcript.append_serde(b"vmv_e1", &zk_vmv_message.e1); + transcript.append_serde(b"vmv_e2", &zk_vmv_message.e2); + transcript.append_serde(b"vmv_y_com", &zk_vmv_message.y_com); + + // Generate Sigma1 proof (proves y_com and E2 commit to same y) + let sigma1_proof = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); + + // Generate Sigma2 proof (proves VMV relation: e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2)) + // t1 = r_e1 (since we're using rv=0 for row commitment blinds in this simplified version) + // t2 = -r_d2 + let t1 = r_e1; + let t2 = -r_d2; + let sigma2_proof = generate_sigma2_proof::(&t1, &t2, setup, transcript, rng); + + // Initialize prover state + let mut prover_state: DoryProverState<'_, E, ZK> = DoryProverState::new_with_blinds( + padded_row_commitments, + v2, + Some(v_vec), + padded_right_vec, + padded_left_vec, + setup, + r_c, + r_d2, + r_e1, + r_e2, + ); + + let num_rounds = nu.max(sigma); + let mut first_messages = Vec::with_capacity(num_rounds); + let mut second_messages = Vec::with_capacity(num_rounds); + + // Run reduce-and-fold rounds + for _round in 0..num_rounds { + let (first_msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) = + prover_state.compute_first_message::(rng); + + transcript.append_serde(b"d1_left", &first_msg.d1_left); + transcript.append_serde(b"d1_right", &first_msg.d1_right); + transcript.append_serde(b"d2_left", &first_msg.d2_left); + transcript.append_serde(b"d2_right", &first_msg.d2_right); + transcript.append_serde(b"e1_beta", &first_msg.e1_beta); + transcript.append_serde(b"e2_beta", 
&first_msg.e2_beta); + + let beta = transcript.challenge_scalar(b"beta"); + prover_state.apply_first_challenge::(&beta); + + first_messages.push(first_msg); + + let (second_msg, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus) = + prover_state.compute_second_message::(rng); + + transcript.append_serde(b"c_plus", &second_msg.c_plus); + transcript.append_serde(b"c_minus", &second_msg.c_minus); + transcript.append_serde(b"e1_plus", &second_msg.e1_plus); + transcript.append_serde(b"e1_minus", &second_msg.e1_minus); + transcript.append_serde(b"e2_plus", &second_msg.e2_plus); + transcript.append_serde(b"e2_minus", &second_msg.e2_minus); + + let alpha = transcript.challenge_scalar(b"alpha"); + prover_state.apply_second_challenge::( + &alpha, r_d1_l, r_d1_r, r_d2_l, r_d2_r, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, + r_e2_plus, r_e2_minus, + ); + + second_messages.push(second_msg); + } + + let gamma = transcript.challenge_scalar(b"gamma"); + + // Generate scalar product proof before compute_final_message modifies r_c + let scalar_product_proof = prover_state.scalar_product_proof_internal(transcript, rng); + + let final_message = prover_state.compute_final_message::(&gamma); + + transcript.append_serde(b"final_e1", &final_message.e1); + transcript.append_serde(b"final_e2", &final_message.e2); + + let _d = transcript.challenge_scalar(b"d"); + + Ok(( + ZkDoryProof { + vmv_message: zk_vmv_message, + first_messages, + second_messages, + final_message, + sigma1_proof, + sigma2_proof, + scalar_product_proof, + nu, + sigma, + }, + y_com, + )) +} + +/// Verify a zero-knowledge evaluation proof without knowing y +/// +/// Unlike `verify_evaluation_proof`, this function does NOT take the evaluation y. +/// Instead, it verifies that: +/// 1. The prover knows y such that polynomial(point) = y +/// 2. 
y_com is a valid commitment to y +/// +/// # Parameters +/// - `commitment`: Polynomial commitment (Tier 2) +/// - `y_com`: Commitment to the evaluation (from proof generation) +/// - `point`: Evaluation point +/// - `proof`: ZK evaluation proof +/// - `setup`: Verifier setup +/// - `transcript`: Fiat-Shamir transcript +/// +/// # Errors +/// Returns `DoryError::InvalidProof` if verification fails, or other variants +/// if the input parameters are incorrect. +#[cfg(feature = "zk")] +#[tracing::instrument(skip_all, name = "verify_zk_evaluation_proof")] +pub fn verify_zk_evaluation_proof( + commitment: E::GT, + y_com: E::G1, + point: &[F], + proof: &crate::proof::ZkDoryProof, + setup: VerifierSetup, + transcript: &mut T, +) -> Result<(), DoryError> +where + F: Field, + E: PairingCurve, + E::G1: Group, + E::G2: Group, + E::GT: Group, + M1: DoryRoutines, + M2: DoryRoutines, + T: Transcript, +{ + use crate::reduce_and_fold::{verify_sigma1_proof, verify_sigma2_proof}; + + let nu = proof.nu; + let sigma = proof.sigma; + + if point.len() != nu + sigma { + return Err(DoryError::InvalidPointDimension { + expected: nu + sigma, + actual: point.len(), + }); + } + + let vmv_message = &proof.vmv_message; + + // Append VMV message to transcript (same order as prover) + transcript.append_serde(b"vmv_c", &vmv_message.c); + transcript.append_serde(b"vmv_d2", &vmv_message.d2); + transcript.append_serde(b"vmv_e1", &vmv_message.e1); + transcript.append_serde(b"vmv_e2", &vmv_message.e2); + transcript.append_serde(b"vmv_y_com", &vmv_message.y_com); + + // Verify y_com from proof matches provided y_com + if vmv_message.y_com != y_com { + return Err(DoryError::InvalidProof); + } + + // Verify Sigma1 proof (proves E2 and y_com commit to same y) + verify_sigma1_proof::( + &vmv_message.e2, + &y_com, + &proof.sigma1_proof, + &setup, + transcript, + )?; + + // Verify Sigma2 proof (proves VMV relation holds with blinds) + verify_sigma2_proof::( + &vmv_message.e1, + &vmv_message.d2, + 
&proof.sigma2_proof, + &setup, + transcript, + )?; + + // Use E2 from prover's message (not computed from y!) + let e2 = vmv_message.e2; + + // Folded-scalar accumulation + let num_rounds = sigma; + let col_coords = &point[..sigma]; + let s1_coords: Vec = col_coords.to_vec(); + let mut s2_coords: Vec = vec![F::zero(); sigma]; + let row_coords = &point[sigma..sigma + nu]; + s2_coords[..nu].copy_from_slice(&row_coords[..nu]); + + let mut verifier_state = DoryVerifierState::new( + vmv_message.c, + commitment, + vmv_message.d2, + vmv_message.e1, + e2, + s1_coords, + s2_coords, + num_rounds, + setup.clone(), + ); + + // Process reduce rounds + for round in 0..num_rounds { + let first_msg = &proof.first_messages[round]; + let second_msg = &proof.second_messages[round]; + + transcript.append_serde(b"d1_left", &first_msg.d1_left); + transcript.append_serde(b"d1_right", &first_msg.d1_right); + transcript.append_serde(b"d2_left", &first_msg.d2_left); + transcript.append_serde(b"d2_right", &first_msg.d2_right); + transcript.append_serde(b"e1_beta", &first_msg.e1_beta); + transcript.append_serde(b"e2_beta", &first_msg.e2_beta); + let beta = transcript.challenge_scalar(b"beta"); + + transcript.append_serde(b"c_plus", &second_msg.c_plus); + transcript.append_serde(b"c_minus", &second_msg.c_minus); + transcript.append_serde(b"e1_plus", &second_msg.e1_plus); + transcript.append_serde(b"e1_minus", &second_msg.e1_minus); + transcript.append_serde(b"e2_plus", &second_msg.e2_plus); + transcript.append_serde(b"e2_minus", &second_msg.e2_minus); + let alpha = transcript.challenge_scalar(b"alpha"); + + verifier_state.process_round(first_msg, second_msg, &alpha, &beta); + } + + let _gamma = transcript.challenge_scalar(b"gamma"); + + // Derive challenge c from scalar product proof (same as prover) + transcript.append_serde(b"sigma_p1", &proof.scalar_product_proof.p1); + transcript.append_serde(b"sigma_p2", &proof.scalar_product_proof.p2); + transcript.append_serde(b"sigma_q", 
&proof.scalar_product_proof.q); + transcript.append_serde(b"sigma_r", &proof.scalar_product_proof.r); + let c = transcript.challenge_scalar(b"sigma_c"); + + transcript.append_serde(b"final_e1", &proof.final_message.e1); + transcript.append_serde(b"final_e2", &proof.final_message.e2); + + let d = transcript.challenge_scalar(b"d"); + + // Verify final with ZK scalar product proof + verifier_state.verify_final_zk_with_challenge(&proof.scalar_product_proof, &c, &d) +} diff --git a/src/lib.rs b/src/lib.rs index ae90490..2118936 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -93,6 +93,7 @@ pub mod error; pub mod evaluation_proof; pub mod messages; +pub mod mode; pub mod primitives; pub mod proof; pub mod reduce_and_fold; @@ -103,11 +104,20 @@ pub mod backends; pub use error::DoryError; pub use evaluation_proof::create_evaluation_proof; +#[cfg(feature = "zk")] +pub use evaluation_proof::{create_zk_evaluation_proof, verify_zk_evaluation_proof}; pub use messages::{FirstReduceMessage, ScalarProductMessage, SecondReduceMessage, VMVMessage}; +#[cfg(feature = "zk")] +pub use messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof, ZkVMVMessage}; +#[cfg(feature = "zk")] +pub use mode::ZK; +pub use mode::{Mode, Transparent}; use primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; pub use primitives::poly::{MultilinearLagrange, Polynomial}; use primitives::serialization::{DoryDeserialize, DorySerialize}; pub use proof::DoryProof; +#[cfg(feature = "zk")] +pub use proof::ZkDoryProof; pub use reduce_and_fold::{DoryProverState, DoryVerifierState}; pub use setup::{ProverSetup, VerifierSetup}; @@ -261,8 +271,9 @@ where /// - Polynomial size doesn't match 2^(nu + sigma) /// - Number of row commitments doesn't match 2^nu #[allow(clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "prove")] -pub fn prove( +pub fn prove( polynomial: &P, point: &[F], row_commitments: Vec, @@ -270,6 +281,7 @@ pub fn prove( sigma: usize, setup: 
&ProverSetup, transcript: &mut T, + rng: &mut R, ) -> Result, DoryError> where F: Field, @@ -281,9 +293,11 @@ where M2: DoryRoutines, P: MultilinearLagrange, T: primitives::transcript::Transcript, + Mo: Mode, + R: rand_core::RngCore, { // Create evaluation proof using row_commitments - evaluation_proof::create_evaluation_proof::( + evaluation_proof::create_evaluation_proof::( polynomial, point, Some(row_commitments), @@ -291,6 +305,7 @@ where sigma, setup, transcript, + rng, ) } diff --git a/src/messages.rs b/src/messages.rs index dd877ee..248885d 100644 --- a/src/messages.rs +++ b/src/messages.rs @@ -43,17 +43,39 @@ pub struct SecondReduceMessage { /// Vector-Matrix-Vector message for polynomial commitment transformation /// -/// Contains C, D₂, E₁. Note: E₂ can be computed by verifier as y·Γ₂,fin +/// Contains C, D₂, E₁. In transparent mode, E₂ = y·Γ₂,fin is computed by verifier. +/// In ZK mode, y is committed rather than revealed. #[derive(Clone, Debug)] pub struct VMVMessage { - /// C = e(MSM(T_vec', v_vec), Γ₂,fin) + /// C = e(MSM(T_vec', v_vec), Γ₂,fin) + r_c·HT pub c: GT, - /// D₂ = e(MSM(Γ₁\[nu\], v_vec), Γ₂,fin) + /// D₂ = e(MSM(Γ₁\[nu\], v_vec), Γ₂,fin) + r_d2·HT pub d2: GT, - /// E₁ = MSM(T_vec', L_vec) + /// E₁ = MSM(T_vec', L_vec) + r_e1·H1 pub e1: G1, } +/// ZK VMV message with committed evaluation +/// +/// In ZK mode, the evaluation y is not revealed. Instead, we commit to it: +/// `y_com = y·Γ1,fin + r_y·H1` +/// +/// The Sigma1 proof ties y_com to E2, proving they commit to the same y. 
+#[cfg(feature = "zk")] +#[derive(Clone, Debug)] +pub struct ZkVMVMessage<GT, G1, G2> { + /// C = e(MSM(T_vec', v_vec), Γ₂,fin) + r_c·HT + pub c: GT, + /// D₂ = e(MSM(Γ₁\[nu\], v_vec), Γ₂,fin) + r_d2·HT + pub d2: GT, + /// E₁ = MSM(T_vec', L_vec) + r_e1·H1 + pub e1: G1, + /// E₂ = y·Γ2,fin + r_e2·H2 (committed evaluation on G2 side) + pub e2: G2, + /// y_com = y·Γ1,fin + r_y·H1 (commitment to evaluation) + pub y_com: G1, +} + /// Final scalar product message (Section 3.1) /// /// Contains E₁, E₂ for the final pairing verification @@ -64,3 +86,70 @@ pub struct ScalarProductMessage { /// E₂ - final G2 element pub e2: G2, } + +/// ZK VMV Σ-protocol 1: proves knowledge of (y, rE2, ry) such that: +/// - E2 = y·Γ2,fin + rE2·H2 +/// - yC = y·Γ1,fin + ry·H1 +/// +/// This proves the commitment yC is consistent with E2. +#[cfg(feature = "zk")] +#[derive(Clone, Debug)] +pub struct Sigma1Proof<F, G1, G2> { + /// Commitment A1 = k1·Γ2,fin + k2·H2 (for E2 relation) + pub a1: G2, + /// Commitment A2 = k1·Γ1,fin + k3·H1 (for yC relation) + pub a2: G1, + /// Response z1 = k1 + c·y + pub z1: F, + /// Response z2 = k2 + c·rE2 + pub z2: F, + /// Response z3 = k3 + c·ry + pub z3: F, +} + +/// ZK VMV Σ-protocol 2: proves knowledge of (t1, t2) such that: +/// e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2) +/// +/// Where t1 = rE1 + rv and t2 = -rD2. +/// This proves the relation between E1 and D2 with blinds. +#[cfg(feature = "zk")] +#[derive(Clone, Debug)] +pub struct Sigma2Proof<F, GT> { + /// Commitment A = e(H1, k1·Γ2,fin + k2·H2) + pub a: GT, + /// Response z1 = k1 + c·t1 + pub z1: F, + /// Response z2 = k2 + c·t2 + pub z2: F, +} + +/// Zero-knowledge scalar product proof (Σ-protocol) +/// +/// Proves knowledge of (v1, v2, rC, rD1, rD2) for relation L1: +/// - C = e(v1, v2) + rC·HT +/// - D1 = e(v1, Γ2) + rD1·HT +/// - D2 = e(Γ1, v2) + rD2·HT +/// +/// Protocol from Dory paper Section 3.1.
+#[cfg(feature = "zk")] +#[derive(Clone, Debug)] +pub struct ScalarProductProof { + /// P1 = e(d1, Γ2) + rP1·HT + pub p1: GT, + /// P2 = e(Γ1, d2) + rP2·HT + pub p2: GT, + /// Q = e(d1, v2) + e(v1, d2) + rQ·HT + pub q: GT, + /// R = e(d1, d2) + rR·HT + pub r: GT, + /// E1 = d1 + c·v1 + pub e1: G1, + /// E2 = d2 + c·v2 + pub e2: G2, + /// r1 = rP1 + c·rD1 + pub r1: F, + /// r2 = rP2 + c·rD2 + pub r2: F, + /// r3 = rR + c·rQ + c²·rC + pub r3: F, +} diff --git a/src/mode.rs b/src/mode.rs new file mode 100644 index 0000000..1fb297a --- /dev/null +++ b/src/mode.rs @@ -0,0 +1,65 @@ +//! Mode trait for transparent vs zero-knowledge proofs +//! +//! This module provides a mode abstraction that allows the same protocol implementation +//! to work for both transparent (non-hiding) and zero-knowledge (hiding) proofs. +//! +//! - [`Transparent`]: Default mode with no blinding. `sample` returns zero, `mask` is identity. +//! - [`ZK`]: Zero-knowledge mode (requires `zk` feature). Samples blinds from RNG. + +use crate::primitives::arithmetic::{Field, Group}; + +/// Mode marker trait for transparent vs ZK proofs. +/// +/// Determines whether blinds are sampled (ZK) or zero (transparent). +pub trait Mode: 'static { + /// Sample a blinding factor. + /// + /// - Transparent: returns `F::zero()` without using RNG + /// - ZK: returns a random scalar from the RNG + /// + /// Note: Blinds are sampled from RNG (not transcript) because they are + /// private to the prover. The transcript is only used for public values + /// and deriving challenges that both prover and verifier compute. + fn sample(rng: &mut R) -> F; + + /// Mask a group element with a blinding factor. + /// + /// - Transparent: returns `value` unchanged + /// - ZK: returns `value + base * blind` + fn mask(value: G, base: &G, blind: &G::Scalar) -> G; +} + +/// Transparent mode - no blinding. +/// +/// All samples return zero, all masks return the value unchanged. +/// This is the default mode and produces non-hiding proofs. 
+pub struct Transparent; + +impl Mode for Transparent { + fn sample(_rng: &mut R) -> F { + F::zero() + } + + fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { + value + } +} + +/// Zero-knowledge mode - samples blinds from RNG. +/// +/// Produces hiding proofs by masking protocol messages with random blinds. +/// Blinds are sampled from private randomness (RNG), not the transcript, +/// because they must not affect the public challenge derivation. +#[cfg(feature = "zk")] +pub struct ZK; + +#[cfg(feature = "zk")] +impl Mode for ZK { + fn sample(rng: &mut R) -> F { + F::random(rng) + } + + fn mask(value: G, base: &G, blind: &G::Scalar) -> G { + value + base.scale(blind) + } +} diff --git a/src/primitives/poly.rs b/src/primitives/poly.rs index 4ac5118..873e256 100644 --- a/src/primitives/poly.rs +++ b/src/primitives/poly.rs @@ -88,6 +88,36 @@ pub trait Polynomial { E: PairingCurve, M1: DoryRoutines, E::G1: Group; + + /// Commit to polynomial with ZK blinds + /// + /// Same as `commit`, but adds blinds to each row commitment for zero-knowledge: + /// `row_commit[i] = MSM(g1_generators, row_coefficients[i]) + r_i·H1` + /// + /// # Returns + /// `(commitment, row_commitments, row_blinds)` where: + /// - `commitment`: Final commitment in GT (derived from blinded row commitments) + /// - `row_commitments`: Blinded row commitments in G1 + /// - `row_blinds`: The blinds used for each row (needed for proof generation) + /// + /// The sum of row_blinds weighted by the left vector gives `r_v` used in Sigma2. + /// + /// # Errors + /// Returns error if coefficient length doesn't match 2^(nu + sigma) or if setup is insufficient. 
+ #[cfg(feature = "zk")] + #[allow(clippy::type_complexity)] + fn commit_zk( + &self, + nu: usize, + sigma: usize, + setup: &ProverSetup, + rng: &mut R, + ) -> Result<(E::GT, Vec, Vec), DoryError> + where + E: PairingCurve, + M1: DoryRoutines, + E::G1: Group, + R: rand_core::RngCore; } /// Compute multilinear Lagrange basis evaluations at a point diff --git a/src/proof.rs b/src/proof.rs index cffd36f..7c2ead0 100644 --- a/src/proof.rs +++ b/src/proof.rs @@ -4,8 +4,12 @@ //! - VMV message (PCS transform) //! - Multiple rounds of reduce messages (log n rounds) //! - Final scalar product message +//! +//! For ZK mode, the proof additionally contains: +//! - ScalarProductProof (Σ-protocol) for zero-knowledge verification use crate::messages::*; +use crate::primitives::arithmetic::Group; /// A complete Dory evaluation proof /// @@ -15,8 +19,12 @@ use crate::messages::*; /// /// The proof includes the matrix dimensions (nu, sigma) used during proof generation, /// which the verifier uses to ensure consistency with the evaluation point. +/// +/// In ZK mode (when `zk` feature is enabled and `scalar_product_proof` is `Some`), +/// verification uses the Σ-protocol to verify the final inner product relation +/// without revealing intermediate values. #[derive(Clone, Debug)] -pub struct DoryProof { +pub struct DoryProof { /// Vector-Matrix-Vector message for PCS transformation pub vmv_message: VMVMessage, @@ -29,6 +37,52 @@ pub struct DoryProof { /// Final scalar product message pub final_message: ScalarProductMessage, + /// ZK scalar product proof (Σ-protocol) + /// + /// Present only in ZK mode. When `Some`, verification uses `verify_final_zk` + /// which incorporates the Σ-protocol to handle blinded values. 
+ #[cfg(feature = "zk")] + pub scalar_product_proof: Option>, + + /// Log₂ of number of rows in the coefficient matrix + pub nu: usize, + + /// Log₂ of number of columns in the coefficient matrix + pub sigma: usize, +} + +/// A complete Dory ZK evaluation proof +/// +/// In ZK mode, the evaluation `y` is not revealed. Instead: +/// - `y_com = y·Γ1,fin + r_y·H1` commits to the evaluation +/// - `e2 = y·Γ2,fin + r_e2·H2` is the blinded E2 for the reduce protocol +/// - Sigma1 proves y_com and e2 commit to the same y +/// - Sigma2 proves the VMV relation holds with blinds +/// - ScalarProductProof proves the final inner product relation +#[cfg(feature = "zk")] +#[derive(Clone, Debug)] +pub struct ZkDoryProof { + /// ZK VMV message with committed evaluation + pub vmv_message: ZkVMVMessage, + + /// First reduce messages for each round + pub first_messages: Vec>, + + /// Second reduce messages for each round + pub second_messages: Vec>, + + /// Final scalar product message + pub final_message: ScalarProductMessage, + + /// Sigma1 proof: proves y_com and e2 commit to same y + pub sigma1_proof: Sigma1Proof, + + /// Sigma2 proof: proves VMV relation holds with blinds + pub sigma2_proof: Sigma2Proof, + + /// ZK scalar product proof + pub scalar_product_proof: ScalarProductProof, + /// Log₂ of number of rows in the coefficient matrix pub nu: usize, diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index 5e68a04..cc5aa97 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -10,14 +10,25 @@ use crate::error::DoryError; use crate::messages::*; +use crate::mode::{Mode, Transparent}; use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; use crate::setup::{ProverSetup, VerifierSetup}; +use std::marker::PhantomData; + +#[cfg(feature = "zk")] +use crate::mode::ZK; +#[cfg(feature = "zk")] +use crate::primitives::transcript::Transcript; /// Prover state for the Dory opening protocol /// /// Maintains the current state of the prover 
during the interactive protocol. /// The state consists of vectors that get folded in each round. -pub struct DoryProverState<'a, E: PairingCurve> { +/// +/// The `M` parameter controls whether the proof is transparent or zero-knowledge: +/// - `Transparent` (default): No blinding, produces non-hiding proofs +/// - `ZK` (requires `zk` feature): Samples blinds from RNG for hiding proofs +pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { /// Current v1 vector (G1 elements) v1: Vec, @@ -38,6 +49,21 @@ pub struct DoryProverState<'a, E: PairingCurve> { /// Reference to prover setup setup: &'a ProverSetup, + + // ZK blind accumulators (zero for Transparent mode) + /// Accumulated blind for C (inner product) + r_c: ::Scalar, + /// Accumulated blind for D1 + r_d1: ::Scalar, + /// Accumulated blind for D2 + r_d2: ::Scalar, + /// Accumulated blind for E1 + r_e1: ::Scalar, + /// Accumulated blind for E2 + r_e2: ::Scalar, + + /// Phantom data for mode marker + _mode: PhantomData, } /// Verifier state for the Dory opening protocol @@ -87,7 +113,48 @@ pub struct DoryVerifierState { setup: VerifierSetup, } -impl<'a, E: PairingCurve> DoryProverState<'a, E> { +/// Type alias for first message with blinds (r_d1_l, r_d1_r, r_d2_l, r_d2_r) +pub type FirstMessageWithBlinds = ( + FirstReduceMessage<::G1, ::G2, ::GT>, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, +); + +/// Type alias for second message with blinds (r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus) +pub type SecondMessageWithBlinds = ( + SecondReduceMessage<::G1, ::G2, ::GT>, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, +); + +/// Type alias for accumulated blinds (r_c, r_d1, r_d2) +pub type Blinds = ( + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, + <::G1 as Group>::Scalar, +); + +/// Type
alias for ZK scalar product proof +#[cfg(feature = "zk")] +pub type ZkScalarProductProof = ScalarProductProof< + ::G1, + ::G2, + <::G1 as Group>::Scalar, + ::GT, +>; + +impl<'a, E: PairingCurve, M: Mode> DoryProverState<'a, E, M> +where + ::Scalar: Field, + E::G2: Group::Scalar>, + E::GT: Group::Scalar>, +{ /// Create new prover state /// /// # Parameters @@ -126,18 +193,52 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { s2, num_rounds, setup, + r_c: ::Scalar::zero(), + r_d1: ::Scalar::zero(), + r_d2: ::Scalar::zero(), + r_e1: ::Scalar::zero(), + r_e2: ::Scalar::zero(), + _mode: PhantomData, } } + /// Create new prover state with initial blinds (for VMV message blinds) + /// + /// Used when the VMV message computation samples initial blinds. + #[allow(clippy::too_many_arguments)] + pub fn new_with_blinds( + v1: Vec, + v2: Vec, + v2_scalars: Option::Scalar>>, + s1: Vec<::Scalar>, + s2: Vec<::Scalar>, + setup: &'a ProverSetup, + r_c: ::Scalar, + r_d2: ::Scalar, + r_e1: ::Scalar, + r_e2: ::Scalar, + ) -> Self { + let mut state = Self::new(v1, v2, v2_scalars, s1, s2, setup); + state.r_c = r_c; + state.r_d2 = r_d2; + state.r_e1 = r_e1; + state.r_e2 = r_e2; + state + } + /// Compute first reduce message for current round /// /// Computes D1L, D1R, D2L, D2R, E1β, E2β based on current state. + /// In ZK mode, samples blinds and masks the D values. + /// + /// Returns the message and the four sampled blinds (r_d1_l, r_d1_r, r_d2_l, r_d2_r) + /// which are needed by `apply_first_challenge` to accumulate blinds. 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_first_message")] - pub fn compute_first_message(&self) -> FirstReduceMessage + pub fn compute_first_message(&self, rng: &mut R) -> FirstMessageWithBlinds where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, + R: rand_core::RngCore, { assert!( self.num_rounds > 0, @@ -154,21 +255,26 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { let g1_prime = &self.setup.g1_vec[..n2]; let g2_prime = &self.setup.g2_vec[..n2]; + // ZK: sample blinds from RNG (zero for Transparent mode) + let r_d1_l: ::Scalar = M::sample(rng); + let r_d1_r: ::Scalar = M::sample(rng); + let r_d2_l: ::Scalar = M::sample(rng); + let r_d2_r: ::Scalar = M::sample(rng); + // Compute D values: multi-pairings between v-vectors and generators // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ - g2_prime is from setup, use cached version - let d1_left = E::multi_pair_g2_setup(v1_l, g2_prime); - let d1_right = E::multi_pair_g2_setup(v1_r, g2_prime); + let d1_left_base = E::multi_pair_g2_setup(v1_l, g2_prime); + let d1_right_base = E::multi_pair_g2_setup(v1_r, g2_prime); // D₂L = ⟨Γ₁', v₂L⟩, D₂R = ⟨Γ₁', v₂R⟩ - // If v2 was constructed as h2 * scalars (first round), compute MSM(Γ₁', scalars) then one pairing. - let (d2_left, d2_right) = if let Some(scalars) = self.v2_scalars.as_ref() { + // If v2 was constructed as Γ2,fin * scalars (first round), compute MSM(Γ₁', scalars) then one pairing. 
+ // Γ2,fin = g2_vec[0] (commitment base, NOT h2 which is the blinding generator) + let (d2_left_base, d2_right_base) = if let Some(scalars) = self.v2_scalars.as_ref() { let (s_l, s_r) = scalars.split_at(n2); let sum_left = M1::msm(g1_prime, s_l); let sum_right = M1::msm(g1_prime, s_r); - ( - E::pair(&sum_left, &self.setup.h2), - E::pair(&sum_right, &self.setup.h2), - ) + let g2_fin = &self.setup.g2_vec[0]; + (E::pair(&sum_left, g2_fin), E::pair(&sum_right, g2_fin)) } else { ( E::multi_pair_g1_setup(g1_prime, v2_l), @@ -176,6 +282,12 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { ) }; + // ZK: mask D values (identity for Transparent mode) + let d1_left = M::mask(d1_left_base, &self.setup.ht, &r_d1_l); + let d1_right = M::mask(d1_right_base, &self.setup.ht, &r_d1_r); + let d2_left = M::mask(d2_left_base, &self.setup.ht, &r_d2_l); + let d2_right = M::mask(d2_right_base, &self.setup.ht, &r_d2_r); + // Compute E values for extended protocol: MSMs with scalar vectors // E₁β = ⟨Γ₁, s₂⟩ let e1_beta = M1::msm(&self.setup.g1_vec[..1 << self.num_rounds], &self.s2[..]); @@ -183,26 +295,28 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { // E₂β = ⟨Γ₂, s₁⟩ let e2_beta = M2::msm(&self.setup.g2_vec[..1 << self.num_rounds], &self.s1[..]); - FirstReduceMessage { + let msg = FirstReduceMessage { d1_left, d1_right, d2_left, d2_right, e1_beta, e2_beta, - } + }; + + (msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) } /// Apply first challenge (beta) and combine vectors /// /// Updates the state by combining with generators scaled by beta. 
+ /// Also accumulates blinds: rC ← rC + β·rD2 + β⁻¹·rD1 #[tracing::instrument(skip_all, name = "DoryProverState::apply_first_challenge")] pub fn apply_first_challenge(&mut self, beta: &::Scalar) where M1: DoryRoutines, M2: DoryRoutines, E::G2: Group::Scalar>, - ::Scalar: Field, { let beta_inv = (*beta).inv().expect("beta must be invertible"); @@ -216,17 +330,25 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { // After first combine, the `v2_scalars` optimization does not apply. self.v2_scalars = None; + + // ZK: accumulate blinds using accumulated rD1, rD2 (not message blinds) + // rC ← rC + β·rD2 + β⁻¹·rD1 + self.r_c = self.r_c + self.r_d2 * *beta + self.r_d1 * beta_inv; } /// Compute second reduce message for current round /// /// Computes C+, C-, E1+, E1-, E2+, E2- based on current state. + /// In ZK mode, samples blinds and masks the values. + /// + /// Returns the message and six sampled blinds needed by `apply_second_challenge`. #[tracing::instrument(skip_all, name = "DoryProverState::compute_second_message")] - pub fn compute_second_message(&self) -> SecondReduceMessage + #[allow(clippy::type_complexity)] + pub fn compute_second_message(&self, rng: &mut R) -> SecondMessageWithBlinds where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, + R: rand_core::RngCore, { let n2 = 1 << (self.num_rounds - 1); // n/2 @@ -236,42 +358,77 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { let (s1_l, s1_r) = self.s1.split_at(n2); let (s2_l, s2_r) = self.s2.split_at(n2); + // ZK: sample blinds from RNG (zero for Transparent mode) + let r_c_plus: ::Scalar = M::sample(rng); + let r_c_minus: ::Scalar = M::sample(rng); + let r_e1_plus: ::Scalar = M::sample(rng); + let r_e1_minus: ::Scalar = M::sample(rng); + let r_e2_plus: ::Scalar = M::sample(rng); + let r_e2_minus: ::Scalar = M::sample(rng); + // Compute C terms: cross products of v-vectors // C₊ = ⟨v₁L, v₂R⟩ - let c_plus = E::multi_pair(v1_l, v2_r); + let c_plus_base = E::multi_pair(v1_l, v2_r); // C₋ = 
⟨v₁R, v₂L⟩ - let c_minus = E::multi_pair(v1_r, v2_l); + let c_minus_base = E::multi_pair(v1_r, v2_l); + + // ZK: mask C values + let c_plus = M::mask(c_plus_base, &self.setup.ht, &r_c_plus); + let c_minus = M::mask(c_minus_base, &self.setup.ht, &r_c_minus); // Compute E terms for extended protocol: cross products with scalars // E₁₊ = ⟨v₁L, s₂R⟩ - let e1_plus = M1::msm(v1_l, s2_r); + let e1_plus_base = M1::msm(v1_l, s2_r); // E₁₋ = ⟨v₁R, s₂L⟩ - let e1_minus = M1::msm(v1_r, s2_l); + let e1_minus_base = M1::msm(v1_r, s2_l); // E₂₊ = ⟨s₁L, v₂R⟩ - let e2_plus = M2::msm(v2_r, s1_l); + let e2_plus_base = M2::msm(v2_r, s1_l); // E₂₋ = ⟨s₁R, v₂L⟩ - let e2_minus = M2::msm(v2_l, s1_r); + let e2_minus_base = M2::msm(v2_l, s1_r); + + // ZK: mask E values + let e1_plus = M::mask(e1_plus_base, &self.setup.h1, &r_e1_plus); + let e1_minus = M::mask(e1_minus_base, &self.setup.h1, &r_e1_minus); + let e2_plus = M::mask(e2_plus_base, &self.setup.h2, &r_e2_plus); + let e2_minus = M::mask(e2_minus_base, &self.setup.h2, &r_e2_minus); - SecondReduceMessage { + let msg = SecondReduceMessage { c_plus, c_minus, e1_plus, e1_minus, e2_plus, e2_minus, - } + }; + + ( + msg, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus, + ) } /// Apply second challenge (alpha) and fold vectors /// /// Reduces the vector size by half using the alpha challenge. + /// Also accumulates blinds from compute_first_message and compute_second_message. 
#[tracing::instrument(skip_all, name = "DoryProverState::apply_second_challenge")] - pub fn apply_second_challenge(&mut self, alpha: &::Scalar) - where + #[allow(clippy::too_many_arguments)] + pub fn apply_second_challenge( + &mut self, + alpha: &::Scalar, + r_d1_l: ::Scalar, + r_d1_r: ::Scalar, + r_d2_l: ::Scalar, + r_d2_r: ::Scalar, + r_c_plus: ::Scalar, + r_c_minus: ::Scalar, + r_e1_plus: ::Scalar, + r_e1_minus: ::Scalar, + r_e2_plus: ::Scalar, + r_e2_minus: ::Scalar, + ) where M1: DoryRoutines, M2: DoryRoutines, E::G2: Group::Scalar>, - ::Scalar: Field, { let alpha_inv = (*alpha).inv().expect("alpha must be invertible"); let n2 = 1 << (self.num_rounds - 1); // n/2 @@ -296,6 +453,18 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { M1::fold_field_vectors(s2_l, s2_r, &alpha_inv); self.s2.truncate(n2); + // ZK: update accumulated blinds + // r_c ← r_c + α·r_c+ + α⁻¹·r_c- + self.r_c = self.r_c + r_c_plus * *alpha + r_c_minus * alpha_inv; + // r_d1 ← α·r_d1_l + r_d1_r + self.r_d1 = r_d1_l * *alpha + r_d1_r; + // r_d2 ← α⁻¹·r_d2_l + r_d2_r + self.r_d2 = r_d2_l * alpha_inv + r_d2_r; + // r_e1 ← r_e1 + α·r_e1+ + α⁻¹·r_e1- + self.r_e1 = self.r_e1 + r_e1_plus * *alpha + r_e1_minus * alpha_inv; + // r_e2 ← r_e2 + α·r_e2+ + α⁻¹·r_e2- + self.r_e2 = self.r_e2 + r_e2_plus * *alpha + r_e2_minus * alpha_inv; + // Decrement round counter self.num_rounds -= 1; } @@ -304,16 +473,17 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { /// /// Applies fold-scalars transformation and returns the final E1, E2 elements. /// Must be called when num_rounds=0 (vectors are size 1). 
+ /// + /// Also accumulates the final blind: r_c ← r_c + γ·r_e2 + γ⁻¹·r_e1 #[tracing::instrument(skip_all, name = "DoryProverState::compute_final_message")] pub fn compute_final_message( - self, + &mut self, gamma: &::Scalar, ) -> ScalarProductMessage where M1: DoryRoutines, M2: DoryRoutines, E::G2: Group::Scalar>, - ::Scalar: Field, { debug_assert_eq!(self.num_rounds, 0, "num_rounds must be 0 for final message"); debug_assert_eq!(self.v1.len(), 1, "v1 must have length 1"); @@ -330,8 +500,381 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { let gamma_inv_s2 = gamma_inv * self.s2[0]; let e2 = self.v2[0] + self.setup.h2.scale(&gamma_inv_s2); + // ZK: final blind accumulation + // r_c ← r_c + γ·r_e2 + γ⁻¹·r_e1 + self.r_c = self.r_c + self.r_e2 * *gamma + self.r_e1 * gamma_inv; + ScalarProductMessage { e1, e2 } } + + /// Get accumulated blinds (for ZK mode Σ-protocol) + /// + /// Returns (r_c, r_d1, r_d2) which are needed for the ZK scalar product proof. + pub fn blinds(&self) -> Blinds { + (self.r_c, self.r_d1, self.r_d2) + } + + /// Generate ZK scalar product proof (Σ-protocol) - internal version + /// + /// This is callable from any Mode but only produces meaningful proofs in ZK mode. + /// In Transparent mode, all blinds are zero, so the proof is trivial. + /// + /// Must be called BEFORE `compute_final_message` because that method modifies r_c, + /// but the Σ-protocol needs the pre-fold-scalars blinds. 
+ #[cfg(feature = "zk")] + pub fn scalar_product_proof_internal< + T: crate::primitives::transcript::Transcript, + R: rand_core::RngCore, + >( + &self, + transcript: &mut T, + rng: &mut R, + ) -> crate::messages::ScalarProductProof::Scalar, E::GT> { + debug_assert_eq!(self.v1.len(), 1, "v1 must be length 1 after folding"); + debug_assert_eq!(self.v2.len(), 1, "v2 must be length 1 after folding"); + + let v1 = self.v1[0]; + let v2 = self.v2[0]; + let gamma1 = self.setup.g1_vec[0]; + let gamma2 = self.setup.g2_vec[0]; + + type F = <::G1 as Group>::Scalar; + + // Sample random scalars from RNG (private to prover) + let s_d1: F = Field::random(rng); + let s_d2: F = Field::random(rng); + let d1 = gamma1.scale(&s_d1); + let d2 = gamma2.scale(&s_d2); + + // Sample blinding scalars from RNG (private to prover) + let r_p1: F = Field::random(rng); + let r_p2: F = Field::random(rng); + let r_q: F = Field::random(rng); + let r_r: F = Field::random(rng); + + // Compute first message: P1, P2, Q, R + // P1 = e(d1, Γ2) + rP1·HT + let p1 = E::pair(&d1, &gamma2) + self.setup.ht.scale(&r_p1); + // P2 = e(Γ1, d2) + rP2·HT + let p2 = E::pair(&gamma1, &d2) + self.setup.ht.scale(&r_p2); + // Q = e(d1, v2) + e(v1, d2) + rQ·HT + let q = E::pair(&d1, &v2) + E::pair(&v1, &d2) + self.setup.ht.scale(&r_q); + // R = e(d1, d2) + rR·HT + let r = E::pair(&d1, &d2) + self.setup.ht.scale(&r_r); + + // Append first message to transcript and derive challenge + transcript.append_serde(b"sigma_p1", &p1); + transcript.append_serde(b"sigma_p2", &p2); + transcript.append_serde(b"sigma_q", &q); + transcript.append_serde(b"sigma_r", &r); + let c = transcript.challenge_scalar(b"sigma_c"); + + // Compute response: E1, E2, r1, r2, r3 + // E1 = d1 + c·v1 + let e1 = d1 + v1.scale(&c); + // E2 = d2 + c·v2 + let e2 = d2 + v2.scale(&c); + // r1 = rP1 + c·rD1 + let r1 = r_p1 + c * self.r_d1; + // r2 = rP2 + c·rD2 + let r2 = r_p2 + c * self.r_d2; + // r3 = rR + c·rQ + c²·rC + let c_sq = c * c; + let r3 = r_r + c * r_q 
+ c_sq * self.r_c; + + crate::messages::ScalarProductProof { + p1, + p2, + q, + r, + e1, + e2, + r1, + r2, + r3, + } + } +} + +/// ZK-specific methods for DoryProverState +/// +/// These methods are only available when the `zk` feature is enabled and +/// the prover state is parameterized with the `ZK` mode marker. +#[cfg(feature = "zk")] +impl DoryProverState<'_, E, ZK> +where + ::Scalar: Field, + E::G2: Group::Scalar>, + E::GT: Group::Scalar>, +{ + /// Generate ZK scalar product proof (Σ-protocol) + /// + /// Proves knowledge of (v1, v2, rC, rD1, rD2) for relation L1: + /// - C = e(v1, v2) + rC·HT + /// - D1 = e(v1, Γ2) + rD1·HT + /// - D2 = e(Γ1, v2) + rD2·HT + /// + /// Must be called once v1 and v2 have been folded to length 1, and BEFORE + /// `compute_final_message` (which modifies the accumulated blind r_c that + /// this Σ-protocol consumes). + /// + /// # Parameters + /// - `transcript`: Fiat-Shamir transcript for deriving challenge + /// - `rng`: Random number generator for sampling private blinds + /// + /// # Returns + /// `ScalarProductProof` containing (P1, P2, Q, R, E1, E2, r1, r2, r3) + pub fn scalar_product_proof, R: rand_core::RngCore>( + &self, + transcript: &mut T, + rng: &mut R, + ) -> ZkScalarProductProof { + debug_assert_eq!(self.v1.len(), 1, "v1 must be length 1 after folding"); + debug_assert_eq!(self.v2.len(), 1, "v2 must be length 1 after folding"); + + let v1 = self.v1[0]; + let v2 = self.v2[0]; + let gamma1 = self.setup.g1_vec[0]; + let gamma2 = self.setup.g2_vec[0]; + + type F = <::G1 as Group>::Scalar; + + // Sample random scalars from RNG (private to prover) + let s_d1: F = Field::random(rng); + let s_d2: F = Field::random(rng); + let d1 = gamma1.scale(&s_d1); + let d2 = gamma2.scale(&s_d2); + + // Sample blinding scalars from RNG (private to prover) + let r_p1: F = Field::random(rng); + let r_p2: F = Field::random(rng); + let r_q: F = Field::random(rng); + let r_r: F = Field::random(rng); + + // Compute first message: P1, P2, Q, R + // P1 = e(d1, Γ2) + rP1·HT + let p1 = E::pair(&d1, &gamma2) + self.setup.ht.scale(&r_p1); + // P2 = e(Γ1,
d2) + rP2·HT + let p2 = E::pair(&gamma1, &d2) + self.setup.ht.scale(&r_p2); + // Q = e(d1, v2) + e(v1, d2) + rQ·HT + let q = E::pair(&d1, &v2) + E::pair(&v1, &d2) + self.setup.ht.scale(&r_q); + // R = e(d1, d2) + rR·HT + let r = E::pair(&d1, &d2) + self.setup.ht.scale(&r_r); + + // Append first message to transcript and derive challenge + transcript.append_serde(b"sigma_p1", &p1); + transcript.append_serde(b"sigma_p2", &p2); + transcript.append_serde(b"sigma_q", &q); + transcript.append_serde(b"sigma_r", &r); + let c = transcript.challenge_scalar(b"sigma_c"); + + // Compute response: E1, E2, r1, r2, r3 + // E1 = d1 + c·v1 + let e1 = d1 + v1.scale(&c); + // E2 = d2 + c·v2 + let e2 = d2 + v2.scale(&c); + // r1 = rP1 + c·rD1 + let r1 = r_p1 + c * self.r_d1; + // r2 = rP2 + c·rD2 + let r2 = r_p2 + c * self.r_d2; + // r3 = rR + c·rQ + c²·rC + let c_sq = c * c; + let r3 = r_r + c * r_q + c_sq * self.r_c; + + ScalarProductProof { + p1, + p2, + q, + r, + e1, + e2, + r1, + r2, + r3, + } + } +} + +/// Generate Sigma1 proof: proves knowledge of (y, rE2) such that E2 = y·Γ2,fin + rE2·H2 +/// +/// Also proves yC = y·Γ1,fin + ry·H1 for commitment consistency. 
+/// +/// # Generator semantics +/// - Γ1,fin = g1_vec\[0\], Γ2,fin = g2_vec\[0\] (commitment bases) +/// - H1 = h1, H2 = h2 (blinding bases, linearly independent from Γ_fin) +#[cfg(feature = "zk")] +pub fn generate_sigma1_proof( + y: &::Scalar, + r_e2: &::Scalar, + r_y: &::Scalar, + setup: &ProverSetup, + transcript: &mut T, + rng: &mut R, +) -> Sigma1Proof::Scalar> +where + E: PairingCurve, + ::Scalar: Field, + E::G2: Group::Scalar>, + T: Transcript, + R: rand_core::RngCore, +{ + // Γ2,fin = g2_vec[0], Γ1,fin = g1_vec[0] + let g2_fin = &setup.g2_vec[0]; + let g1_fin = &setup.g1_vec[0]; + + // Sample random k1, k2, k3 from RNG (private to prover) + let k1 = ::Scalar::random(rng); + let k2 = ::Scalar::random(rng); + let k3 = ::Scalar::random(rng); + + // A1 = k1·Γ2,fin + k2·H2 (commitment for E2 relation) + let a1 = g2_fin.scale(&k1) + setup.h2.scale(&k2); + // A2 = k1·Γ1,fin + k3·H1 (commitment for yC relation) + let a2 = g1_fin.scale(&k1) + setup.h1.scale(&k3); + + // Append commitments to transcript + transcript.append_serde(b"sigma1_a1", &a1); + transcript.append_serde(b"sigma1_a2", &a2); + + // Get challenge + let c = transcript.challenge_scalar(b"sigma1_c"); + + // Compute responses + let z1 = k1 + c * *y; + let z2 = k2 + c * *r_e2; + let z3 = k3 + c * *r_y; + + Sigma1Proof { a1, a2, z1, z2, z3 } +} + +/// Verify Sigma1 proof +/// +/// # Generator semantics +/// - g1_0 = Γ1,fin, g2_0 = Γ2,fin (commitment bases in verifier setup) +/// - h1 = H1, h2 = H2 (blinding bases) +#[cfg(feature = "zk")] +pub fn verify_sigma1_proof( + e2: &E::G2, + y_commit: &E::G1, + proof: &Sigma1Proof::Scalar>, + setup: &VerifierSetup, + transcript: &mut T, +) -> Result<(), DoryError> +where + E: PairingCurve, + ::Scalar: Field, + E::G2: Group::Scalar>, + T: Transcript, +{ + // Reconstruct challenge + transcript.append_serde(b"sigma1_a1", &proof.a1); + transcript.append_serde(b"sigma1_a2", &proof.a2); + let c = transcript.challenge_scalar(b"sigma1_c"); + + // Check E2 relation: 
z1·Γ2,fin + z2·H2 = A1 + c·E2 + // Γ2,fin = g2_0, H2 = h2 + let lhs1 = setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2); + let rhs1 = proof.a1 + e2.scale(&c); + if lhs1 != rhs1 { + return Err(DoryError::InvalidProof); + } + + // Check yC relation: z1·Γ1,fin + z3·H1 = A2 + c·yC + // Γ1,fin = g1_0, H1 = h1 + let lhs2 = setup.g1_0.scale(&proof.z1) + setup.h1.scale(&proof.z3); + let rhs2 = proof.a2 + y_commit.scale(&c); + if lhs2 != rhs2 { + return Err(DoryError::InvalidProof); + } + + Ok(()) +} + +/// Generate Sigma2 proof: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2) +/// +/// Where t1 = rE1 + rv and t2 = -rD2. +/// +/// # Generator semantics +/// - Γ2,fin = g2_vec\[0\] (commitment base) +/// - H1 = h1, H2 = h2 (blinding bases) +#[cfg(feature = "zk")] +pub fn generate_sigma2_proof( + t1: &::Scalar, // rE1 + rv + t2: &::Scalar, // -rD2 + setup: &ProverSetup, + transcript: &mut T, + rng: &mut R, +) -> Sigma2Proof<::Scalar, E::GT> +where + E: PairingCurve, + ::Scalar: Field, + E::G2: Group::Scalar>, + E::GT: Group::Scalar>, + T: Transcript, + R: rand_core::RngCore, +{ + // Γ2,fin = g2_vec[0] + let g2_fin = &setup.g2_vec[0]; + + // Sample random k1, k2 from RNG (private to prover) + let k1 = ::Scalar::random(rng); + let k2 = ::Scalar::random(rng); + + // A = e(H1, k1·Γ2,fin + k2·H2) + let g2_term = g2_fin.scale(&k1) + setup.h2.scale(&k2); + let a = E::pair(&setup.h1, &g2_term); + + // Append commitment to transcript + transcript.append_serde(b"sigma2_a", &a); + + // Get challenge + let c = transcript.challenge_scalar(b"sigma2_c"); + + // Compute responses + let z1 = k1 + c * *t1; + let z2 = k2 + c * *t2; + + Sigma2Proof { a, z1, z2 } +} + +/// Verify Sigma2 proof: e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2) +/// +/// # Generator semantics +/// - g2_0 = Γ2,fin (commitment base in verifier setup) +/// - h1 = H1, h2 = H2 (blinding bases) +#[cfg(feature = "zk")] +pub fn verify_sigma2_proof( + e1: &E::G1, + d2: &E::GT, + proof: &Sigma2Proof<::Scalar, E::GT>, 
+ setup: &VerifierSetup, + transcript: &mut T, +) -> Result<(), DoryError> +where + E: PairingCurve, + ::Scalar: Field, + E::G2: Group::Scalar>, + E::GT: Group::Scalar>, + T: Transcript, +{ + // Reconstruct challenge + transcript.append_serde(b"sigma2_a", &proof.a); + let c = transcript.challenge_scalar(b"sigma2_c"); + + // Compute expected value: e(E1, Γ2,fin) - D2 + // Γ2,fin = g2_0 + let e1_pair = E::pair(e1, &setup.g2_0); + let expected = e1_pair - *d2; + + // Check: e(H1, z1·Γ2,fin + z2·H2) = A + c·expected + // Γ2,fin = g2_0, H2 = h2 + let g2_term = setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2); + let lhs = E::pair(&setup.h1, &g2_term); + let rhs = proof.a + expected.scale(&c); + + if lhs == rhs { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } } impl DoryVerifierState { @@ -465,11 +1008,15 @@ impl DoryVerifierState { /// Applies fold-scalars transformation and checks the final pairing equation. /// Must be called when num_rounds=0 after all reduce rounds are complete. /// + /// # Generator semantics + /// - g1_0, g2_0: Final generators (Γ1,fin, Γ2,fin) used as commitment bases + /// - h1, h2: Blinding generators (H1, H2) used for zero-knowledge masking + /// /// # Non-optimized Protocol Equations /// /// ## VMV Check (batched together with the final pairing check) /// - /// The VMV protocol requires: `D₂_init = e(E₁_init, H₂)` + /// The VMV protocol requires: `D₂_init = e(E₁_init, Γ2,fin)` = `e(E₁_init, g2_0)` /// /// This was originally checked as a standalone pairing in `verify_evaluation_proof`. /// We defer it here to batch with other pairings. @@ -493,42 +1040,19 @@ impl DoryVerifierState { /// ## Batching the VMV Check /// /// We use random linear combination with challenge `d²` to defer the VMV check. - /// We use `d²` (not `d`) to ensure sufficient independence from the existing `d·D₂` term. 
- /// - /// Multiplying by `d²` preserves soundness because: - /// - `d` is derived from the transcript AFTER `D₂_init` and `E₁_init` are committed - /// - If `D₂_init ≠ e(E₁_init, H₂)`, then with overwhelming probability: - /// `T + d²·D₂_init ≠ multi_pair([...]) + d²·e(E₁_init, H₂)` - /// - /// - /// ## Combining Pairings + /// The VMV check uses Γ2,fin (g2_0), separate from the fold-scalars blinding which uses H₂ (h2). /// - /// After moving all pairings to LHS and using bilinearity: - /// - /// Terms sharing H₂ (fold-scalars pairings + deferred VMV check): + /// ## Final Combined Check (4 pairings) /// /// ```text - /// e(E₁_acc, H₂)^(-γ⁻¹) · e((s₂·γ⁻¹)·Γ₁₀, H₂)^(-d) · e(E₁_init, H₂)^(d²) - /// = e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀) + d²·E₁_init, H₂) - /// ``` - /// - /// ## Final Combined Check - /// - /// The final check verifies both: - /// - (a) The original fold-scalars/reduce protocol equation - /// - (b) The VMV constraint `D₂_init = e(E₁_init, H₂)` - /// - /// Combined via: `(a) + d²·(b)` where `d` is the final challenge. 
- /// - /// ```text - /// e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) - /// · e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) - /// · e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀) + d²·E₁_init, H₂) + /// e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) // Pair 1: main verification + /// · e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) // Pair 2: γ·e(H₁, E₂) term + /// · e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) // Pair 3: γ⁻¹·e(E₁, H₂) term + /// · e(d²·E₁_init, Γ2,fin) // Pair 4: deferred VMV check /// = T + d²·D₂_init /// ``` /// - /// This is 3 miller loops + 1 final exponentiation, - /// Whereas a naive check would be 6 ML + 6 FE + /// This is 4 miller loops + 1 final exponentiation (vs 7+ for naive check) #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final")] pub fn verify_final( &mut self, @@ -573,16 +1097,141 @@ impl DoryVerifierState { let p2_g1 = self.setup.h1; let p2_g2 = g2_term.scale(&neg_gamma); - // Pair 3: ((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀) + d²·E₁_init, H₂) - // The d²·E₁_init term is the deferred VMV check: d²·e(E₁_init, H₂) - // We use d² to ensure independence from other d-scaled terms. 
+ // Pair 3: ((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) + // This is the fold-scalars γ⁻¹·e(E₁, H₂) term - uses H₂ (h2) let d_s2 = *d * self.s2_acc; let g1_term = self.e1 + self.setup.g1_0.scale(&d_s2); - let p3_g1 = g1_term.scale(&neg_gamma_inv) + self.e1_init.scale(&d_sq); + let p3_g1 = g1_term.scale(&neg_gamma_inv); let p3_g2 = self.setup.h2; - // Single multi-pairing: 3 miller loops + 1 final exponentiation - let lhs = E::multi_pair(&[p1_g1, p2_g1, p3_g1], &[p1_g2, p2_g2, p3_g2]); + // Pair 4: (d²·E₁_init, Γ2,fin) + // This is the deferred VMV check: d²·e(E₁_init, Γ2,fin) - uses Γ2,fin (g2_0) + let p4_g1 = self.e1_init.scale(&d_sq); + let p4_g2 = self.setup.g2_0; + + // Multi-pairing: 4 miller loops + 1 final exponentiation + let lhs = E::multi_pair(&[p1_g1, p2_g1, p3_g1, p4_g1], &[p1_g2, p2_g2, p3_g2, p4_g2]); + + if lhs == rhs { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } + } + + /// Verify final scalar product with ZK proof (Σ-protocol) + /// + /// Verifies knowledge of (v1, v2, rC, rD1, rD2) for relation L1: + /// - C = e(v1, v2) + rC·HT + /// - D1 = e(v1, Γ2) + rD1·HT + /// - D2 = e(Γ1, v2) + rD2·HT + /// + /// Verification equation from Dory paper: + /// e(E1 + d·Γ1, E2 + d⁻¹·Γ2) = χ + R + c·Q + c²·C + d·P2 + d·c·D2 + /// + d⁻¹·P1 + d⁻¹·c·D1 - (r3 + d·r2 + d⁻¹·r1)·HT + /// + /// # Parameters + /// - `proof`: ZK Σ-protocol proof containing (P1, P2, Q, R, E1, E2, r1, r2, r3) + /// - `d`: Final batching challenge from transcript + /// - `transcript`: Fiat-Shamir transcript for deriving challenge c + #[cfg(feature = "zk")] + #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final_zk")] + pub fn verify_final_zk>( + &mut self, + proof: &ZkScalarProductProof, + d: &::Scalar, + transcript: &mut T, + ) -> Result<(), DoryError> + where + E::G2: Group::Scalar>, + E::GT: Group::Scalar>, + ::Scalar: Field, + { + debug_assert_eq!( + self.num_rounds, 0, + "num_rounds must be 0 for final verification" + ); + + let d_inv = (*d).inv().expect("d must be 
invertible"); + + // Reconstruct challenge c from transcript (must match prover's derivation) + transcript.append_serde(b"sigma_p1", &proof.p1); + transcript.append_serde(b"sigma_p2", &proof.p2); + transcript.append_serde(b"sigma_q", &proof.q); + transcript.append_serde(b"sigma_r", &proof.r); + let c = transcript.challenge_scalar(b"sigma_c"); + let c_sq = c * c; + + // LHS: e(E1 + d·Γ1, E2 + d⁻¹·Γ2) + let lhs_g1 = proof.e1 + self.setup.g1_0.scale(d); + let lhs_g2 = proof.e2 + self.setup.g2_0.scale(&d_inv); + let lhs = E::pair(&lhs_g1, &lhs_g2); + + // RHS: χ + R + c·Q + c²·C + d·P2 + d·c·D2 + d⁻¹·P1 + d⁻¹·c·D1 - (r3 + d·r2 + d⁻¹·r1)·HT + let mut rhs = self.setup.chi[0]; // χ + rhs = rhs + proof.r; // + R + rhs = rhs + proof.q.scale(&c); // + c·Q + rhs = rhs + self.c.scale(&c_sq); // + c²·C + rhs = rhs + proof.p2.scale(d); // + d·P2 + rhs = rhs + self.d2.scale(&(*d * c)); // + d·c·D2 + rhs = rhs + proof.p1.scale(&d_inv); // + d⁻¹·P1 + rhs = rhs + self.d1.scale(&(d_inv * c)); // + d⁻¹·c·D1 + + // Blind correction: -(r3 + d·r2 + d⁻¹·r1)·HT + let r_total = proof.r3 + *d * proof.r2 + d_inv * proof.r1; + rhs = rhs - self.setup.ht.scale(&r_total); + + if lhs == rhs { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } + } + + /// Verify final scalar product with ZK proof using pre-derived challenge + /// + /// Same as `verify_final_zk` but takes the challenge `c` as a parameter + /// instead of deriving it from transcript. Use when the caller needs to + /// control transcript ordering (e.g., appending P1..R before final_message). 
+ #[cfg(feature = "zk")] + #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final_zk_with_challenge")] + pub fn verify_final_zk_with_challenge( + &mut self, + proof: &ZkScalarProductProof, + c: &::Scalar, + d: &::Scalar, + ) -> Result<(), DoryError> + where + E::G2: Group::Scalar>, + E::GT: Group::Scalar>, + ::Scalar: Field, + { + debug_assert_eq!( + self.num_rounds, 0, + "num_rounds must be 0 for final verification" + ); + + let d_inv = (*d).inv().expect("d must be invertible"); + let c_sq = *c * *c; + + // LHS: e(E1 + d·Γ1, E2 + d⁻¹·Γ2) + let lhs_g1 = proof.e1 + self.setup.g1_0.scale(d); + let lhs_g2 = proof.e2 + self.setup.g2_0.scale(&d_inv); + let lhs = E::pair(&lhs_g1, &lhs_g2); + + // RHS: χ + R + c·Q + c²·C + d·P2 + d·c·D2 + d⁻¹·P1 + d⁻¹·c·D1 - (r3 + d·r2 + d⁻¹·r1)·HT + let mut rhs = self.setup.chi[0]; + rhs = rhs + proof.r; + rhs = rhs + proof.q.scale(c); + rhs = rhs + self.c.scale(&c_sq); + rhs = rhs + proof.p2.scale(d); + rhs = rhs + self.d2.scale(&(*d * *c)); + rhs = rhs + proof.p1.scale(&d_inv); + rhs = rhs + self.d1.scale(&(d_inv * *c)); + + // Blind correction: -(r3 + d·r2 + d⁻¹·r1)·HT + let r_total = proof.r3 + *d * proof.r2 + d_inv * proof.r1; + rhs = rhs - self.setup.ht.scale(&r_total); if lhs == rhs { Ok(()) diff --git a/src/setup.rs b/src/setup.rs index 6a60806..878e623 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -44,6 +44,11 @@ pub struct ProverSetup { /// /// Contains precomputed pairing values for efficient verification. /// Derived from the prover setup. 
+/// +/// # Generator semantics +/// - `g1_0`, `g2_0`: Final generators (Γ1,fin, Γ2,fin) used as commitment bases +/// - `h1`, `h2`: Blinding generators (H1, H2) used for zero-knowledge masking +/// - These MUST be linearly independent (discrete log unknown between them) #[derive(Clone, Debug, DorySerialize, DoryDeserialize)] pub struct VerifierSetup { /// Δ₁L\[k\] = e(Γ₁\[..2^(k-1)\], Γ₂\[..2^(k-1)\]) @@ -61,21 +66,25 @@ pub struct VerifierSetup { /// χ\[k\] = e(Γ₁\[..2^k\], Γ₂\[..2^k\]) pub chi: Vec, - /// First G1 generator + /// Γ1,fin - first G1 generator (commitment base, NOT for blinding) pub g1_0: E::G1, - /// First G2 generator + /// Γ2,fin - first G2 generator (commitment base, NOT for blinding) pub g2_0: E::G2, - /// Blinding generator in G1 + /// H1 - blinding generator in G1 (linearly independent from g1_0) pub h1: E::G1, - /// Blinding generator in G2 + /// H2 - blinding generator in G2 (linearly independent from g2_0) pub h2: E::G2, - /// h_t = e(h₁, h₂) + /// HT = e(H1, H2) - blinding base in GT pub ht: E::GT, + /// e(H1, Γ2,fin) = e(h1, g2_0) - precomputed for ZK Sigma2 verification + #[cfg(feature = "zk")] + pub h1_g2_fin: E::GT, + /// Maximum log₂ of polynomial size supported pub max_log_n: usize, } @@ -169,6 +178,8 @@ impl ProverSetup { h1: self.h1, h2: self.h2, ht: self.ht, + #[cfg(feature = "zk")] + h1_g2_fin: E::pair(&self.h1, &self.g2_vec[0]), max_log_n: max_num_rounds * 2, // Since square matrices: max_log_n = 2 * max_nu } } diff --git a/tests/arkworks/evaluation.rs b/tests/arkworks/evaluation.rs index 9c4515f..3d471a2 100644 --- a/tests/arkworks/evaluation.rs +++ b/tests/arkworks/evaluation.rs @@ -2,10 +2,11 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, verify}; +use dory_pcs::{prove, verify, Transparent}; #[test] fn test_evaluation_proof_small() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -20,7 +21,7 @@ fn 
test_evaluation_proof_small() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -28,6 +29,7 @@ fn test_evaluation_proof_small() { sigma, &setup, &mut prover_transcript, + &mut rng, ); assert!(result.is_ok()); @@ -49,6 +51,7 @@ fn test_evaluation_proof_small() { #[test] fn test_evaluation_proof_with_precomputed_commitment() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -63,7 +66,7 @@ fn test_evaluation_proof_with_precomputed_commitment() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -71,6 +74,7 @@ fn test_evaluation_proof_with_precomputed_commitment() { sigma, &setup, &mut prover_transcript, + &mut rng, ); assert!(result.is_ok()); @@ -92,6 +96,7 @@ fn test_evaluation_proof_with_precomputed_commitment() { #[test] fn test_evaluation_proof_constant_polynomial() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -109,7 +114,7 @@ fn test_evaluation_proof_constant_polynomial() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -117,6 +122,7 @@ fn test_evaluation_proof_constant_polynomial() { sigma, &setup, &mut prover_transcript, + &mut rng, ) .unwrap(); @@ -138,6 +144,7 @@ fn test_evaluation_proof_constant_polynomial() { #[test] fn test_evaluation_proof_wrong_evaluation_fails() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let 
verifier_setup = setup.to_verifier_setup(); @@ -152,7 +159,7 @@ fn test_evaluation_proof_wrong_evaluation_fails() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -160,6 +167,7 @@ fn test_evaluation_proof_wrong_evaluation_fails() { sigma, &setup, &mut prover_transcript, + &mut rng, ) .unwrap(); @@ -181,6 +189,7 @@ fn test_evaluation_proof_wrong_evaluation_fails() { #[test] fn test_evaluation_proof_different_sizes() { + let mut rng = rand::thread_rng(); { let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -191,7 +200,7 @@ fn test_evaluation_proof_different_sizes() { let (tier_2, tier_1) = poly.commit::(1, 1, &setup).unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -199,6 +208,7 @@ fn test_evaluation_proof_different_sizes() { 1, &setup, &mut prover_transcript, + &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -225,7 +235,7 @@ fn test_evaluation_proof_different_sizes() { let (tier_2, tier_1) = poly.commit::(3, 3, &setup).unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -233,6 +243,7 @@ fn test_evaluation_proof_different_sizes() { 3, &setup, &mut prover_transcript, + &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -252,6 +263,7 @@ fn test_evaluation_proof_different_sizes() { #[test] fn test_multiple_evaluations_same_commitment() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = 
setup.to_verifier_setup(); @@ -267,7 +279,7 @@ fn test_multiple_evaluations_same_commitment() { let point = random_point(4); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1.clone(), @@ -275,6 +287,7 @@ fn test_multiple_evaluations_same_commitment() { sigma, &setup, &mut prover_transcript, + &mut rng, ) .unwrap(); diff --git a/tests/arkworks/homomorphic.rs b/tests/arkworks/homomorphic.rs index 6c3122a..ec29f5d 100644 --- a/tests/arkworks/homomorphic.rs +++ b/tests/arkworks/homomorphic.rs @@ -4,7 +4,7 @@ use super::*; use dory_pcs::backends::arkworks::ArkG1; use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_homomorphic_combination_e2e() { @@ -87,7 +87,7 @@ fn test_homomorphic_combination_e2e() { // Create evaluation proof using combined commitment let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, @@ -95,6 +95,7 @@ fn test_homomorphic_combination_e2e() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); @@ -177,7 +178,7 @@ fn test_homomorphic_combination_small() { let evaluation = combined_poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, @@ -185,6 +186,7 @@ fn test_homomorphic_combination_small() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); diff --git a/tests/arkworks/integration.rs 
b/tests/arkworks/integration.rs index 567cf78..14e3b61 100644 --- a/tests/arkworks/integration.rs +++ b/tests/arkworks/integration.rs @@ -3,7 +3,7 @@ use super::*; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_full_workflow() { @@ -24,7 +24,7 @@ fn test_full_workflow() { let expected_evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -32,6 +32,7 @@ fn test_full_workflow() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -67,7 +68,7 @@ fn test_workflow_without_precommitment() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -75,6 +76,7 @@ fn test_workflow_without_precommitment() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -109,7 +111,7 @@ fn test_batched_proofs() { let point = random_point(8); let mut prover_transcript = Blake2bTranscript::new(format!("test-{i}").as_bytes()); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1.clone(), @@ -117,6 +119,7 @@ fn test_batched_proofs() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -162,7 +165,7 @@ fn test_linear_polynomial() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, 
TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -170,6 +173,7 @@ fn test_linear_polynomial() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); @@ -206,7 +210,7 @@ fn test_zero_polynomial() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -214,6 +218,7 @@ fn test_zero_polynomial() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); @@ -254,7 +259,7 @@ fn test_soundness_wrong_commitment() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly2, &point, tier_1_poly2, @@ -262,6 +267,7 @@ fn test_soundness_wrong_commitment() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); let evaluation = poly2.evaluate(&point); diff --git a/tests/arkworks/mod.rs b/tests/arkworks/mod.rs index e235c47..43aecde 100644 --- a/tests/arkworks/mod.rs +++ b/tests/arkworks/mod.rs @@ -18,6 +18,10 @@ pub mod integration; pub mod non_square; pub mod setup; pub mod soundness; +#[cfg(feature = "zk")] +pub mod zk; +#[cfg(feature = "zk")] +pub mod zk_statistical; pub fn random_polynomial(size: usize) -> ArkworksPolynomial { let mut rng = thread_rng(); diff --git a/tests/arkworks/non_square.rs b/tests/arkworks/non_square.rs index 8ce8d57..24fd0fd 100644 --- a/tests/arkworks/non_square.rs +++ b/tests/arkworks/non_square.rs @@ -2,7 +2,7 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_non_square_matrix_nu_eq_sigma_minus_1() { @@ -23,7 +23,7 @@ fn 
test_non_square_matrix_nu_eq_sigma_minus_1() { .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -31,6 +31,7 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .expect("Proof generation should succeed"); @@ -68,7 +69,7 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof_result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof_result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -76,6 +77,7 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ); assert!( @@ -103,7 +105,7 @@ fn test_non_square_matrix_small() { .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -111,6 +113,7 @@ fn test_non_square_matrix_small() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .expect("Proof generation should succeed"); @@ -152,7 +155,7 @@ fn test_non_square_matrix_very_rectangular() { .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -160,6 +163,7 @@ fn test_non_square_matrix_very_rectangular() { sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .expect("Proof generation should succeed"); diff --git 
a/tests/arkworks/soundness.rs b/tests/arkworks/soundness.rs index 0078615..39d0e38 100644 --- a/tests/arkworks/soundness.rs +++ b/tests/arkworks/soundness.rs @@ -5,7 +5,7 @@ use ark_bn254::{Fq12, Fr, G1Projective, G2Projective}; use ark_ff::UniformRand; use dory_pcs::backends::arkworks::{ArkFr, ArkG1, ArkG2, ArkGT}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, verify}; +use dory_pcs::{prove, verify, Transparent}; use std::mem::swap; #[allow(clippy::type_complexity)] @@ -31,8 +31,9 @@ fn create_valid_proof_components( .commit::(nu, sigma, &prover_setup) .unwrap(); + let mut rng = rand::thread_rng(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -40,6 +41,7 @@ fn create_valid_proof_components( sigma, &prover_setup, &mut prover_transcript, + &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs new file mode 100644 index 0000000..2930cdb --- /dev/null +++ b/tests/arkworks/zk.rs @@ -0,0 +1,334 @@ +//! 
Zero-knowledge mode tests for Dory PCS + +use super::*; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{create_zk_evaluation_proof, prove, setup, verify, verify_zk_evaluation_proof, ZK}; + +#[test] +fn test_zk_full_workflow() { + let mut rng = rand::thread_rng(); + let max_log_n = 10; + + let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + + let poly = random_polynomial(256); + let nu = 4; + let sigma = 4; + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(8); + let expected_evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + tier_1, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); + let evaluation = poly.evaluate(&point); + assert_eq!(evaluation, expected_evaluation); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!(result.is_ok(), "ZK proof verification failed: {:?}", result); +} + +#[test] +fn test_zk_small_polynomial() { + let mut rng = rand::thread_rng(); + let (prover_setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(4); + let nu = 1; + let sigma = 1; + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(2); + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + tier_1, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + 
&point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK small polynomial test failed: {:?}", + result + ); +} + +#[test] +fn test_zk_larger_polynomial() { + let mut rng = rand::thread_rng(); + let (prover_setup, verifier_setup) = setup::(&mut rng, 12); + + let poly = random_polynomial(1024); + let nu = 5; + let sigma = 5; + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(10); + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + tier_1, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK larger polynomial test failed: {:?}", + result + ); +} + +#[test] +fn test_zk_non_square_matrix() { + let mut rng = rand::thread_rng(); + let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + + // Non-square: nu=3, sigma=4 (8 rows, 16 columns = 128 coefficients) + let poly = random_polynomial(128); + let nu = 3; + let sigma = 4; + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(7); // nu + sigma = 7 + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + tier_1, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut 
verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK non-square matrix test failed: {:?}", + result + ); +} + +/// Test the full ZK API where y is hidden from the verifier +#[test] +fn test_zk_hidden_evaluation() { + let mut rng = rand::thread_rng(); + let (prover_setup, verifier_setup) = test_setup_pair(6); + + let poly = random_polynomial(16); + let nu = 2; + let sigma = 2; + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(4); + + // Create ZK proof - returns (proof, y_com) instead of revealing y + let mut prover_transcript = fresh_transcript(); + let (zk_proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); + + // Verify ZK proof - verifier does NOT receive y, only y_com + let mut verifier_transcript = fresh_transcript(); + let result = verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + y_com, + &point, + &zk_proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK hidden evaluation proof verification failed: {:?}", + result + ); +} + +/// Test that wrong y_com is rejected +#[test] +fn test_zk_wrong_y_com_rejected() { + use dory_pcs::primitives::arithmetic::Group; + + let mut rng = rand::thread_rng(); + let (prover_setup, verifier_setup) = test_setup_pair(6); + + let poly = random_polynomial(16); + let nu = 2; + let sigma = 2; + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(4); + + let mut prover_transcript = fresh_transcript(); + let (zk_proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); + + // Tamper with y_com - add a 
random element + let wrong_y_com = y_com + prover_setup.h1.scale(&ArkFr::from_u64(42)); + + let mut verifier_transcript = fresh_transcript(); + let result = verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + wrong_y_com, + &point, + &zk_proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!(result.is_err(), "Verification should fail with wrong y_com"); +} + +/// Test full ZK with larger polynomial +#[test] +fn test_zk_hidden_evaluation_larger() { + let mut rng = rand::thread_rng(); + let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + + let poly = random_polynomial(256); + let nu = 4; + let sigma = 4; + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(8); + + let mut prover_transcript = fresh_transcript(); + let (zk_proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + y_com, + &point, + &zk_proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK hidden evaluation (larger) failed: {:?}", + result + ); +} diff --git a/tests/arkworks/zk_statistical.rs b/tests/arkworks/zk_statistical.rs new file mode 100644 index 0000000..25e8d4e --- /dev/null +++ b/tests/arkworks/zk_statistical.rs @@ -0,0 +1,562 @@ +//! Statistical tests for zero-knowledge property of Dory PCS +//! +//! Verifies that proof elements are statistically indistinguishable from uniform +//! random regardless of the witness (polynomial) distribution. 
+ +use super::*; +use ark_serialize::CanonicalSerialize; +use dory_pcs::primitives::arithmetic::Field; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{create_zk_evaluation_proof, setup, verify_zk_evaluation_proof}; +use rand::rngs::StdRng; +use rand::SeedableRng; +use std::collections::HashMap; + +const NUM_BUCKETS: usize = 16; + +/// Bucket distribution tracker for statistical analysis +struct BucketTracker { + buckets: HashMap>, +} + +impl BucketTracker { + fn new() -> Self { + Self { + buckets: HashMap::new(), + } + } + + fn record(&mut self, name: &str, bucket: usize) { + self.buckets + .entry(name.to_string()) + .or_insert_with(|| vec![0; NUM_BUCKETS])[bucket] += 1; + } + + fn chi_squared(&self, name: &str, expected: f64) -> Option { + self.buckets.get(name).map(|buckets| { + buckets + .iter() + .map(|&observed| { + let diff = observed as f64 - expected; + diff * diff / expected + }) + .sum() + }) + } + + fn all_names(&self) -> Vec { + let mut names: Vec<_> = self.buckets.keys().cloned().collect(); + names.sort(); + names + } +} + +/// Extract low bytes from serializable element for bucketing +fn bucket_from_serializable(elem: &T) -> usize { + let mut bytes = Vec::new(); + elem.serialize_compressed(&mut bytes).unwrap(); + // Use first byte for primary bucket + (bytes[0] as usize) % NUM_BUCKETS +} + +/// Collect bucket statistics from a full ZK proof (with hidden y) +fn collect_full_zk_proof_stats( + proof: &dory_pcs::ZkDoryProof< + dory_pcs::backends::arkworks::ArkG1, + dory_pcs::backends::arkworks::ArkG2, + ArkFr, + dory_pcs::backends::arkworks::ArkGT, + >, + y_com: &dory_pcs::backends::arkworks::ArkG1, + tracker: &mut BucketTracker, +) { + // ZK VMV message elements - includes y_com and prover-computed e2 + tracker.record("zk_vmv_c", bucket_from_serializable(&proof.vmv_message.c)); + tracker.record("zk_vmv_d2", bucket_from_serializable(&proof.vmv_message.d2)); + tracker.record("zk_vmv_e1", bucket_from_serializable(&proof.vmv_message.e1)); + 
tracker.record("zk_vmv_e2", bucket_from_serializable(&proof.vmv_message.e2)); // Prover-computed E2 + tracker.record( + "zk_vmv_y_com", + bucket_from_serializable(&proof.vmv_message.y_com), + ); // Commitment to y + tracker.record("zk_y_com_input", bucket_from_serializable(y_com)); // y_com returned to caller + + // First reduce messages (D values only - e1_beta/e2_beta are public) + for (i, msg) in proof.first_messages.iter().enumerate() { + let prefix = format!("zk_first_{}", i); + tracker.record( + &format!("{}_d1_left", prefix), + bucket_from_serializable(&msg.d1_left), + ); + tracker.record( + &format!("{}_d1_right", prefix), + bucket_from_serializable(&msg.d1_right), + ); + tracker.record( + &format!("{}_d2_left", prefix), + bucket_from_serializable(&msg.d2_left), + ); + tracker.record( + &format!("{}_d2_right", prefix), + bucket_from_serializable(&msg.d2_right), + ); + } + + // Second reduce messages + for (i, msg) in proof.second_messages.iter().enumerate() { + let prefix = format!("zk_second_{}", i); + tracker.record( + &format!("{}_c_plus", prefix), + bucket_from_serializable(&msg.c_plus), + ); + tracker.record( + &format!("{}_c_minus", prefix), + bucket_from_serializable(&msg.c_minus), + ); + tracker.record( + &format!("{}_e1_plus", prefix), + bucket_from_serializable(&msg.e1_plus), + ); + tracker.record( + &format!("{}_e1_minus", prefix), + bucket_from_serializable(&msg.e1_minus), + ); + tracker.record( + &format!("{}_e2_plus", prefix), + bucket_from_serializable(&msg.e2_plus), + ); + tracker.record( + &format!("{}_e2_minus", prefix), + bucket_from_serializable(&msg.e2_minus), + ); + } + + // Final message + tracker.record( + "zk_final_e1", + bucket_from_serializable(&proof.final_message.e1), + ); + tracker.record( + "zk_final_e2", + bucket_from_serializable(&proof.final_message.e2), + ); + + // Sigma1 proof (proves y_com and E2 commit to same y) + tracker.record( + "sigma1_a1", + bucket_from_serializable(&proof.sigma1_proof.a1), + ); + tracker.record( 
+ "sigma1_a2", + bucket_from_serializable(&proof.sigma1_proof.a2), + ); + tracker.record( + "sigma1_z1", + bucket_from_serializable(&proof.sigma1_proof.z1), + ); + tracker.record( + "sigma1_z2", + bucket_from_serializable(&proof.sigma1_proof.z2), + ); + tracker.record( + "sigma1_z3", + bucket_from_serializable(&proof.sigma1_proof.z3), + ); + + // Sigma2 proof (proves VMV relation) + tracker.record("sigma2_a", bucket_from_serializable(&proof.sigma2_proof.a)); + tracker.record( + "sigma2_z1", + bucket_from_serializable(&proof.sigma2_proof.z1), + ); + tracker.record( + "sigma2_z2", + bucket_from_serializable(&proof.sigma2_proof.z2), + ); + + // Scalar product proof + tracker.record( + "zk_sp_p1", + bucket_from_serializable(&proof.scalar_product_proof.p1), + ); + tracker.record( + "zk_sp_p2", + bucket_from_serializable(&proof.scalar_product_proof.p2), + ); + tracker.record( + "zk_sp_q", + bucket_from_serializable(&proof.scalar_product_proof.q), + ); + tracker.record( + "zk_sp_r", + bucket_from_serializable(&proof.scalar_product_proof.r), + ); + tracker.record( + "zk_sp_e1", + bucket_from_serializable(&proof.scalar_product_proof.e1), + ); + tracker.record( + "zk_sp_e2", + bucket_from_serializable(&proof.scalar_product_proof.e2), + ); + tracker.record( + "zk_sp_r1", + bucket_from_serializable(&proof.scalar_product_proof.r1), + ); + tracker.record( + "zk_sp_r2", + bucket_from_serializable(&proof.scalar_product_proof.r2), + ); + tracker.record( + "zk_sp_r3", + bucket_from_serializable(&proof.scalar_product_proof.r3), + ); +} + +/// Statistical test for zero-knowledge property (full ZK with hidden y). +/// +/// Creates polynomials with different coefficient distributions and verifies +/// that all resulting proof elements (including y_com, Sigma1, Sigma2) are +/// statistically indistinguishable from uniform random. 
+#[test] +fn test_zk_statistical_indistinguishability() { + const NUM_TRIALS: usize = 100; + + let mut rng = StdRng::seed_from_u64(0xDEADBEEF); + let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + + let nu = 2; + let sigma = 2; + let poly_size = 16; + let point = random_point(nu + sigma); + + // Track distributions for three witness types + let mut tracker_zeros = BucketTracker::new(); + let mut tracker_ones = BucketTracker::new(); + let mut tracker_random = BucketTracker::new(); + + for trial in 0..NUM_TRIALS { + // Reseed RNG for reproducibility within each trial type + let mut trial_rng = StdRng::seed_from_u64(0xCAFEBABE + trial as u64); + + // Distribution A: All-zeros polynomial (y=0 for all points) + { + let coeffs = vec![ArkFr::zero(); poly_size]; + let poly = ArkworksPolynomial::new(coeffs); + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut transcript, + &mut trial_rng, + ) + .unwrap(); + + // Verify proof is valid (without revealing y!) 
+ let mut verifier_transcript = fresh_transcript(); + assert!( + verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + y_com, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok() + ); + + collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_zeros); + } + + // Distribution B: All-ones polynomial (y=2^n for point=(0,0,...)) + { + let coeffs = vec![ArkFr::one(); poly_size]; + let poly = ArkworksPolynomial::new(coeffs); + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut transcript, + &mut trial_rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + assert!( + verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + y_com, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok() + ); + + collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_ones); + } + + // Distribution C: Random polynomial + { + let poly = random_polynomial(poly_size); + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut transcript, + &mut trial_rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + assert!( + verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + y_com, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok() + ); + + collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_random); + } + } + + // Statistical analysis + let expected = 
NUM_TRIALS as f64 / NUM_BUCKETS as f64; + + // Critical value for χ² with df=15 at α=0.01 is ~30.58 + // Use lenient threshold for randomness testing + let critical_value = 35.0; + + let mut failures = Vec::new(); + + for name in tracker_zeros.all_names() { + // Test uniformity for each witness type + if let Some(chi2) = tracker_zeros.chi_squared(&name, expected) { + if chi2 >= critical_value { + failures.push(format!( + "zeros/{}: χ²={:.2} >= {:.2}", + name, chi2, critical_value + )); + } + } + + if let Some(chi2) = tracker_ones.chi_squared(&name, expected) { + if chi2 >= critical_value { + failures.push(format!( + "ones/{}: χ²={:.2} >= {:.2}", + name, chi2, critical_value + )); + } + } + + if let Some(chi2) = tracker_random.chi_squared(&name, expected) { + if chi2 >= critical_value { + failures.push(format!( + "random/{}: χ²={:.2} >= {:.2}", + name, chi2, critical_value + )); + } + } + } + + if !failures.is_empty() { + panic!( + "ZK statistical test failed - {} elements showed non-uniform distribution:\n{}", + failures.len(), + failures.join("\n") + ); + } +} + +/// Test that proof distributions from different witnesses are similar (two-sample test) +/// Uses full ZK API with hidden y to test all proof elements including y_com. 
+#[test] +fn test_zk_witness_independence() { + const NUM_TRIALS: usize = 80; + + let mut rng = StdRng::seed_from_u64(0xFEEDFACE); + let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + + let nu = 2; + let sigma = 2; + let poly_size = 16; + let point = random_point(nu + sigma); + + let mut tracker_skewed = BucketTracker::new(); + let mut tracker_uniform = BucketTracker::new(); + + for trial in 0..NUM_TRIALS { + let mut trial_rng = StdRng::seed_from_u64(0xABCDEF00 + trial as u64); + + // Skewed: Single non-zero coefficient at position 0 (y will be small/predictable) + { + let mut coeffs = vec![ArkFr::zero(); poly_size]; + coeffs[0] = ArkFr::from_u64(42); + let poly = ArkworksPolynomial::new(coeffs); + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut transcript, + &mut trial_rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + assert!( + verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + y_com, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok() + ); + + collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_skewed); + } + + // Uniform: Random polynomial (y will be random) + { + let poly = random_polynomial(poly_size); + + let (tier_2, tier_1) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, y_com) = + create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut transcript, + &mut trial_rng, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + assert!( + verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + 
tier_2, + y_com, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok() + ); + + collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_uniform); + } + } + + // Two-sample χ² test between skewed and uniform witness distributions + fn two_sample_chi_squared(a: &[usize], b: &[usize]) -> f64 { + let n_a: f64 = a.iter().sum::() as f64; + let n_b: f64 = b.iter().sum::() as f64; + let n_total = n_a + n_b; + + a.iter() + .zip(b.iter()) + .map(|(&obs_a, &obs_b)| { + let pooled = obs_a as f64 + obs_b as f64; + if pooled < 1.0 { + return 0.0; + } + let expected_a = pooled * n_a / n_total; + let expected_b = pooled * n_b / n_total; + let term_a = if expected_a > 0.0 { + (obs_a as f64 - expected_a).powi(2) / expected_a + } else { + 0.0 + }; + let term_b = if expected_b > 0.0 { + (obs_b as f64 - expected_b).powi(2) / expected_b + } else { + 0.0 + }; + term_a + term_b + }) + .sum() + } + + // Critical value for two-sample χ² with df=15 at α=0.005 + // Using slightly higher threshold to reduce false positives from random variation + let critical_value = 40.0; + let mut failures = Vec::new(); + + for name in tracker_skewed.all_names() { + let buckets_skewed = tracker_skewed.buckets.get(&name).unwrap(); + let buckets_uniform = tracker_uniform.buckets.get(&name).unwrap(); + + let chi2 = two_sample_chi_squared(buckets_skewed, buckets_uniform); + if chi2 >= critical_value { + failures.push(format!( + "{}: skewed vs uniform χ²={:.2} >= {:.2}", + name, chi2, critical_value + )); + } + } + + if !failures.is_empty() { + panic!( + "ZK witness independence test failed - {} elements showed witness-dependent distribution:\n{}", + failures.len(), + failures.join("\n") + ); + } +} diff --git a/zk.md b/zk.md new file mode 100644 index 0000000..174684d --- /dev/null +++ b/zk.md @@ -0,0 +1,296 @@ +# Zero-Knowledge Dory Implementation Guide + +Integrate ZK into existing functions via `Mode` trait. No new protocol functions. 
+
+---
+
+## Part 1: Mode Trait
+> ⚠️ **Review note:** `ZK::sample` below derives blinds from the *public* Fiat–Shamir transcript. Transcript challenges are verifier-computable, so transcript-derived "blinds" are deterministic and recoverable — they provide no hiding and break zero-knowledge. Blinds must be fresh secret randomness, e.g. `fn sample<F: Field, R: RngCore>(rng: &mut R) -> F`, as done in `zk_refactor_plan.md` and the actual `src/mode.rs`.
+```rust
+// src/mode.rs
+
+pub trait Mode: 'static {
+    fn sample<F: Field, T: Transcript>(transcript: &mut T, label: &[u8]) -> F;
+    fn mask<F: Field, G: Group<Scalar = F>>(value: G, base: &G, blind: &F) -> G;
+}
+
+pub struct Transparent;
+
+impl Mode for Transparent {
+    fn sample<F: Field, T: Transcript>(_: &mut T, _: &[u8]) -> F { F::ZERO }
+    fn mask<F: Field, G: Group<Scalar = F>>(value: G, _: &G, _: &F) -> G { value }
+}
+
+#[cfg(feature = "zk")]
+pub struct ZK;
+
+#[cfg(feature = "zk")]
+impl Mode for ZK {
+    fn sample<F: Field, T: Transcript>(transcript: &mut T, label: &[u8]) -> F {
+        transcript.challenge_scalar(label)
+    }
+    fn mask<F: Field, G: Group<Scalar = F>>(value: G, base: &G, blind: &F) -> G {
+        value.add(&base.scale(blind))
+    }
+}
+```
+
+---
+
+## Part 2: Existing Struct Changes
+
+```diff
+// src/reduce_and_fold.rs
+
+-pub struct DoryProverState<'a, E: PairingCurve> {
++pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> {
+     pub v1: Vec<E::G1>,
+     pub v2: Vec<E::G2>,
+     pub v2_scalars: Option<Vec<E::Scalar>>,
+     pub s1: Vec<E::Scalar>,
+     pub s2: Vec<E::Scalar>,
+     pub num_rounds: usize,
+     pub setup: &'a ProverSetup<E>,
++
++    // Blinds (zero for Transparent, accumulated for ZK)
++    pub r_c: E::Scalar,
++    pub r_d1: E::Scalar,
++    pub r_d2: E::Scalar,
++    pub r_e1: E::Scalar,
++    pub r_e2: E::Scalar,
++    _mode: PhantomData<M>,
+ }
+```
+
+---
+
+## Part 3: Existing `reduce_round` Changes
+
+```diff
+-impl<'a, E: PairingCurve> DoryProverState<'a, E> {
++impl<'a, E: PairingCurve, M: Mode> DoryProverState<'a, E, M> {
+     pub fn reduce_round<T: Transcript>(
+         &mut self,
+         transcript: &mut T,
+     ) -> (...) {
+         let half = self.v1.len() / 2;
+         let (v1_l, v1_r) = self.v1.split_at(half);
+         let (v2_l, v2_r) = self.v2.split_at(half);
+         let g2_prime = &self.setup.g2_vec[..half];
+         let g1_prime = &self.setup.g1_vec[..half];
+
++        // ZK: sample blinds
++        let r_d1_l = M::sample(transcript, b"r_d1_l");
++        let r_d1_r = M::sample(transcript, b"r_d1_r");
++        let r_d2_l = M::sample(transcript, b"r_d2_l");
++        let r_d2_r = M::sample(transcript, b"r_d2_r");
+
+-        let d1_left = E::multi_pair(v1_l, g2_prime);
+-        let d1_right = E::multi_pair(v1_r, g2_prime);
+-        let d2_left = E::multi_pair(g1_prime, v2_l);
+-        let d2_right = E::multi_pair(g1_prime, v2_r);
++        // ZK: mask pairing results
++        let d1_left = M::mask(E::multi_pair(v1_l, g2_prime), &self.setup.ht, &r_d1_l);
++        let d1_right = M::mask(E::multi_pair(v1_r, g2_prime), &self.setup.ht, &r_d1_r);
++        let d2_left = M::mask(E::multi_pair(g1_prime, v2_l), &self.setup.ht, &r_d2_l);
++        let d2_right = M::mask(E::multi_pair(g1_prime, v2_r), &self.setup.ht, &r_d2_r);
+
+         // ... e1_beta, e2_beta unchanged ...
+
+         let first_msg = FirstReduceMessage { d1_left, d1_right, d2_left, d2_right, e1_beta, e2_beta };
+         transcript.append_serde(b"first", &first_msg);
+
+         let beta = transcript.challenge_scalar(b"beta");
+         let beta_inv = beta.inv().unwrap();
+
+         // ... vector updates unchanged ...
+ ++ // ZK: accumulate blinds ++ self.r_c = self.r_c ++ .add(&r_d2_l.add(&r_d2_r).mul(&beta)) ++ .add(&r_d1_l.add(&r_d1_r).mul(&beta_inv)); + ++ // ZK: sample second round blinds ++ let r_c_plus = M::sample(transcript, b"r_c+"); ++ let r_c_minus = M::sample(transcript, b"r_c-"); ++ let r_e1_plus = M::sample(transcript, b"r_e1+"); ++ let r_e1_minus = M::sample(transcript, b"r_e1-"); ++ let r_e2_plus = M::sample(transcript, b"r_e2+"); ++ let r_e2_minus = M::sample(transcript, b"r_e2-"); + +- let c_plus = E::multi_pair(v1_l, v2_r); +- let c_minus = E::multi_pair(v1_r, v2_l); +- let e1_plus = G1Routines::msm(v1_l, s2_r); +- let e1_minus = G1Routines::msm(v1_r, s2_l); +- let e2_plus = G2Routines::msm(v2_r, s1_l); +- let e2_minus = G2Routines::msm(v2_l, s1_r); ++ // ZK: mask cross terms ++ let c_plus = M::mask(E::multi_pair(v1_l, v2_r), &self.setup.ht, &r_c_plus); ++ let c_minus = M::mask(E::multi_pair(v1_r, v2_l), &self.setup.ht, &r_c_minus); ++ let e1_plus = M::mask(G1Routines::msm(v1_l, s2_r), &self.setup.h1, &r_e1_plus); ++ let e1_minus = M::mask(G1Routines::msm(v1_r, s2_l), &self.setup.h1, &r_e1_minus); ++ let e2_plus = M::mask(G2Routines::msm(v2_r, s1_l), &self.setup.h2, &r_e2_plus); ++ let e2_minus = M::mask(G2Routines::msm(v2_l, s1_r), &self.setup.h2, &r_e2_minus); + + // ... second_msg, alpha challenge, vector folding unchanged ... 
+ ++ // ZK: update accumulated blinds ++ self.r_c = self.r_c ++ .add(&r_c_plus.mul(&alpha)) ++ .add(&r_c_minus.mul(&alpha_inv)); ++ self.r_d1 = r_d1_l.mul(&alpha).add(&r_d1_r); ++ self.r_d2 = r_d2_l.mul(&alpha_inv).add(&r_d2_r); ++ self.r_e1 = self.r_e1 ++ .add(&r_e1_plus.mul(&alpha)) ++ .add(&r_e1_minus.mul(&alpha_inv)); ++ self.r_e2 = self.r_e2 ++ .add(&r_e2_plus.mul(&alpha)) ++ .add(&r_e2_minus.mul(&alpha_inv)); + + self.num_rounds -= 1; + (first_msg, second_msg) + } +``` + +--- + +## Part 4: Existing `fold_scalars` Changes + +```diff + pub fn fold_scalars(&mut self, gamma: &E::Scalar) { + let gamma_inv = gamma.inv().unwrap(); + self.v1[0] = self.v1[0].add(&self.setup.h1.scale(&gamma.mul(&self.s1[0]))); + self.v2[0] = self.v2[0].add(&self.setup.h2.scale(&gamma_inv.mul(&self.s2[0]))); ++ ++ // ZK: final blind accumulation ++ self.r_c = self.r_c ++ .add(&self.r_e2.mul(gamma)) ++ .add(&self.r_e1.mul(&gamma_inv)); + } +``` + +--- + +## Part 5: Existing `compute_vmv_message` Changes + +```diff +-pub fn compute_vmv_message( ++pub fn compute_vmv_message>( + row_commitments: &[E::G1], + v_vec: &[E::Scalar], + left_vec: &[E::Scalar], + setup: &ProverSetup, + transcript: &mut T, +-) -> VMVMessage ++) -> (VMVMessage, E::Scalar, E::Scalar, E::Scalar, E::Scalar) + where + E: PairingCurve, + T: Transcript, + { ++ let r_c = M::sample(transcript, b"vmv_r_c"); ++ let r_d2 = M::sample(transcript, b"vmv_r_d2"); ++ let r_e1 = M::sample(transcript, b"vmv_r_e1"); ++ let r_e2 = M::sample(transcript, b"vmv_r_e2"); + + let v_dot_t0 = G1Routines::msm(row_commitments, v_vec); +- let c = E::pair(&v_dot_t0, &setup.g2_vec[0]); ++ let c = M::mask(E::pair(&v_dot_t0, &setup.g2_vec[0]), &setup.ht, &r_c); + + let g1_dot_v = G1Routines::msm(&setup.g1_vec[..v_vec.len()], v_vec); +- let d2 = E::pair(&g1_dot_v, &setup.g2_vec[0]); ++ let d2 = M::mask(E::pair(&g1_dot_v, &setup.g2_vec[0]), &setup.ht, &r_d2); + +- let e1 = G1Routines::msm(row_commitments, left_vec); ++ let e1 = 
M::mask(G1Routines::msm(row_commitments, left_vec), &setup.h1, &r_e1); + +- VMVMessage { c, d2, e1 } ++ (VMVMessage { c, d2, e1 }, r_c, r_d2, r_e1, r_e2) + } +``` + +--- + +## Part 6: Existing `scalar_product` Changes + +```diff + pub fn scalar_product(&self) -> (E::G1, E::G2) { + (self.v1[0], self.v2[0]) + } ++ ++ // ZK-only: Σ-protocol for final step ++ #[cfg(feature = "zk")] ++ pub fn scalar_product_zk>(&self, transcript: &mut T) -> ScalarProductProof ++ where ++ M: Mode, // Only callable when M = ZK ++ { ++ // ... Σ-protocol using self.r_c, self.r_d1, self.r_d2 ... ++ } +``` + +--- + +## Part 7: Usage + +```rust +// Transparent (default) - unchanged call sites +let state: DoryProverState = DoryProverState::new(...); +// or explicitly: +let state: DoryProverState = DoryProverState::new(...); + +// ZK mode +#[cfg(feature = "zk")] +let state: DoryProverState = DoryProverState::new_zk(...); +``` + +--- + +## Part 8: Summary + +| Mode | `M::sample()` | `M::mask()` | +|------|---------------|-------------| +| `Transparent` | `F::ZERO` | `value` (identity) | +| `ZK` | `transcript.challenge_scalar()` | `value + base*blind` | + +**Integration pattern**: +1. Add `M: Mode` parameter to existing structs/functions +2. Insert `M::sample()` before operations needing blinds +3. Wrap computed values with `M::mask(value, base, &blind)` +4. Add blind accumulation after challenges + +For `Transparent`: sample returns zero, mask returns value unchanged, accumulation is `0 + 0*x = 0`. + +--- + +## Part 9: ZK-Only Additions + +Only these are truly new (feature-gated): + +```rust +#[cfg(feature = "zk")] +pub struct ScalarProductProof { ... } + +#[cfg(feature = "zk")] +pub struct Sigma1Proof { ... } + +#[cfg(feature = "zk")] +pub struct Sigma2Proof { ... 
} +``` + +--- + +## Part 10: Checklist + +- [ ] Add `Mode` trait to `src/mode.rs` +- [ ] Add `M: Mode` parameter to `DoryProverState` +- [ ] Add blind fields (`r_c`, `r_d1`, `r_d2`, `r_e1`, `r_e2`) to state +- [ ] Modify `reduce_round`: insert `M::sample()` and `M::mask()` calls +- [ ] Modify `fold_scalars`: add blind accumulation +- [ ] Modify `compute_vmv_message`: add sampling and masking +- [ ] Add `scalar_product_zk` impl for `DoryProverState` +- [ ] Add `ScalarProductProof`, `Sigma1Proof`, `Sigma2Proof` (ZK only) + +--- + +## References + +- IACR 2020/1274: "Dory: Efficient, Transparent arguments for Generalised Inner Products and Polynomial Commitments" diff --git a/zk_refactor_plan.md b/zk_refactor_plan.md new file mode 100644 index 0000000..b32e121 --- /dev/null +++ b/zk_refactor_plan.md @@ -0,0 +1,737 @@ +# ZK Implementation Refactoring Plan (v2) + +## Design Philosophy + +Use Rust's trait system to unify Transparent and ZK modes into **single code paths**. The `Mode` trait with associated types determines all behavioral differences at compile time. 
+ +**Two modes only:** +- `Transparent` - no blinds, evaluation `y` revealed to verifier +- `ZK` - full blinds, evaluation hidden (verifier receives `y_com`) + +--- + +## Core Design: The Mode Trait + +```rust +pub trait Mode: 'static + Clone { + /// Witness provided to verifier + /// - Transparent: the evaluation F + /// - ZK: commitment to evaluation G1 + type Witness>: Clone; + + /// Extra proof elements + /// - Transparent: () + /// - ZK: ZkProofs (sigma1, sigma2, scalar_product) + type Extras, GT: Group>: Clone + Default; + + /// VMV message extension + /// - Transparent: () + /// - ZK: (E2, y_com) tuple + type VmvExtras>: Clone + Default; + + /// Sample blinding factor (zero for Transparent, random for ZK) + fn sample(rng: &mut R) -> F; + + /// Mask group element (identity for Transparent, adds blind for ZK) + fn mask(value: G, base: &G, blind: &G::Scalar) -> G; + + /// Create witness and VMV extras from evaluation y + fn create_witness( + y: &E::Scalar, + r_e2: &E::Scalar, + setup: &ProverSetup, + rng: &mut R, + ) -> (Self::Witness, Self::VmvExtras, WitnessSecrets) + where + E::G1: Group, + E::G2: Group; + + /// Extract E2 for verifier state + /// - Transparent: compute from witness (y · Γ2,fin) + /// - ZK: extract from VMV extras + fn verifier_e2( + witness: &Self::Witness, + vmv_extras: &Self::VmvExtras, + setup: &VerifierSetup, + ) -> E::G2 + where + E::G1: Group, + E::G2: Group; + + /// Generate extra proofs (sigma proofs for ZK, no-op for Transparent) + fn generate_extras, R: RngCore>( + witness_secrets: &WitnessSecrets, + vmv_blinds: &VmvBlinds, + prover_state: &DoryProverState, + setup: &ProverSetup, + transcript: &mut T, + rng: &mut R, + ) -> Self::Extras + where + E::G1: Group, + E::G2: Group, + E::GT: Group; + + /// Verify extra proofs (no-op for Transparent) + fn verify_extras>( + extras: &Self::Extras, + vmv_extras: &Self::VmvExtras, + vmv_message: &VMVMessage, + setup: &VerifierSetup, + transcript: &mut T, + ) -> Result<(), DoryError> + where + 
E::G1: Group, + E::G2: Group, + E::GT: Group; + + /// Append VMV extras to transcript (no-op for Transparent) + fn append_vmv_extras_to_transcript>( + vmv_extras: &Self::VmvExtras, + transcript: &mut T, + ) where + E::G1: Group, + E::G2: Group; + + /// Final verification dispatch + fn verify_final( + verifier_state: &mut DoryVerifierState, + extras: &Self::Extras, + final_message: &ScalarProductMessage, + gamma: &E::Scalar, + d: &E::Scalar, + ) -> Result<(), DoryError> + where + E::G1: Group, + E::G2: Group, + E::GT: Group; +} +``` + +--- + +## Mode Implementations + +### Transparent Mode + +```rust +#[derive(Clone, Copy, Default)] +pub struct Transparent; + +impl Mode for Transparent { + type Witness> = F; + type Extras, GT: Group> = (); + type VmvExtras> = (); + + fn sample(_rng: &mut R) -> F { + F::zero() + } + + fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { + value + } + + fn create_witness( + y: &E::Scalar, + _r_e2: &E::Scalar, + _setup: &ProverSetup, + _rng: &mut R, + ) -> (E::Scalar, (), WitnessSecrets) + where + E::G1: Group, + E::G2: Group, + { + (*y, (), WitnessSecrets::default()) + } + + fn verifier_e2( + witness: &E::Scalar, + _vmv_extras: &(), + setup: &VerifierSetup, + ) -> E::G2 + where + E::G1: Group, + E::G2: Group, + { + // E2 = y · Γ2,fin + setup.g2_0.scale(witness) + } + + fn generate_extras, R: RngCore>( + _witness_secrets: &WitnessSecrets, + _vmv_blinds: &VmvBlinds, + _prover_state: &DoryProverState, + _setup: &ProverSetup, + _transcript: &mut T, + _rng: &mut R, + ) -> () + where + E::G1: Group, + E::G2: Group, + E::GT: Group, + { + () + } + + fn verify_extras>( + _extras: &(), + _vmv_extras: &(), + _vmv_message: &VMVMessage, + _setup: &VerifierSetup, + _transcript: &mut T, + ) -> Result<(), DoryError> + where + E::G1: Group, + E::G2: Group, + E::GT: Group, + { + Ok(()) + } + + fn append_vmv_extras_to_transcript>( + _vmv_extras: &(), + _transcript: &mut T, + ) where + E::G1: Group, + E::G2: Group, + { + // No extras to append + } + + 
fn verify_final( + verifier_state: &mut DoryVerifierState, + _extras: &(), + final_message: &ScalarProductMessage, + gamma: &E::Scalar, + d: &E::Scalar, + ) -> Result<(), DoryError> + where + E::G1: Group, + E::G2: Group, + E::GT: Group, + { + verifier_state.verify_final(final_message, gamma, d) + } +} +``` + +### ZK Mode + +```rust +#[derive(Clone, Copy, Default)] +#[cfg(feature = "zk")] +pub struct ZK; + +/// Secrets generated during witness creation, needed for sigma proofs +#[cfg(feature = "zk")] +pub struct WitnessSecrets { + pub r_y: F, // blind for y_com + pub r_e2: F, // blind for E2 +} + +/// VMV extras for ZK mode +#[cfg(feature = "zk")] +#[derive(Clone)] +pub struct ZkVmvExtras { + pub e2: G2, // E2 = y·Γ2,fin + r_e2·H2 + pub y_com: G1, // y_com = y·Γ1,fin + r_y·H1 +} + +/// Extra proofs for ZK mode +#[cfg(feature = "zk")] +#[derive(Clone)] +pub struct ZkProofs { + pub sigma1: Sigma1Proof, + pub sigma2: Sigma2Proof, + pub scalar_product: ScalarProductProof, +} + +#[cfg(feature = "zk")] +impl Mode for ZK { + type Witness> = G1; // y_com + type Extras, GT: Group> = + ZkProofs; + type VmvExtras> = ZkVmvExtras; + + fn sample(rng: &mut R) -> F { + F::random(rng) + } + + fn mask(value: G, base: &G, blind: &G::Scalar) -> G { + value + base.scale(blind) + } + + fn create_witness( + y: &E::Scalar, + r_e2: &E::Scalar, + setup: &ProverSetup, + rng: &mut R, + ) -> (E::G1, ZkVmvExtras, WitnessSecrets) + where + E::G1: Group, + E::G2: Group, + { + let r_y = E::Scalar::random(rng); + + // y_com = y·Γ1,fin + r_y·H1 + let y_com = setup.g1_vec[0].scale(y) + setup.h1.scale(&r_y); + + // E2 = y·Γ2,fin + r_e2·H2 + let e2 = setup.g2_vec[0].scale(y) + setup.h2.scale(r_e2); + + let vmv_extras = ZkVmvExtras { e2, y_com }; + let secrets = WitnessSecrets { r_y, r_e2: *r_e2 }; + + (y_com, vmv_extras, secrets) + } + + fn verifier_e2( + _witness: &E::G1, + vmv_extras: &ZkVmvExtras, + _setup: &VerifierSetup, + ) -> E::G2 + where + E::G1: Group, + E::G2: Group, + { + // E2 comes from 
prover's VMV message + vmv_extras.e2 + } + + fn generate_extras, R: RngCore>( + witness_secrets: &WitnessSecrets, + vmv_blinds: &VmvBlinds, + prover_state: &DoryProverState, + setup: &ProverSetup, + transcript: &mut T, + rng: &mut R, + ) -> ZkProofs + where + E::G1: Group, + E::G2: Group, + E::GT: Group, + { + // Generate sigma1: proves y_com and E2 commit to same y + let sigma1 = generate_sigma1_proof(/* ... */); + + // Generate sigma2: proves VMV relation holds with blinds + let sigma2 = generate_sigma2_proof(/* ... */); + + // Generate scalar product proof + let scalar_product = prover_state.scalar_product_proof(transcript, rng); + + ZkProofs { sigma1, sigma2, scalar_product } + } + + fn verify_extras>( + extras: &ZkProofs, + vmv_extras: &ZkVmvExtras, + vmv_message: &VMVMessage, + setup: &VerifierSetup, + transcript: &mut T, + ) -> Result<(), DoryError> + where + E::G1: Group, + E::G2: Group, + E::GT: Group, + { + verify_sigma1_proof(&vmv_extras.e2, &vmv_extras.y_com, &extras.sigma1, setup, transcript)?; + verify_sigma2_proof(&vmv_message.e1, &vmv_message.d2, &extras.sigma2, setup, transcript)?; + Ok(()) + } + + fn append_vmv_extras_to_transcript>( + vmv_extras: &ZkVmvExtras, + transcript: &mut T, + ) where + E::G1: Group, + E::G2: Group, + { + transcript.append_serde(b"vmv_e2", &vmv_extras.e2); + transcript.append_serde(b"vmv_y_com", &vmv_extras.y_com); + } + + fn verify_final( + verifier_state: &mut DoryVerifierState, + extras: &ZkProofs, + _final_message: &ScalarProductMessage, + _gamma: &E::Scalar, + d: &E::Scalar, + ) -> Result<(), DoryError> + where + E::G1: Group, + E::G2: Group, + E::GT: Group, + { + // Derive challenge c from scalar product proof (already in transcript) + let c = /* from transcript */; + verifier_state.verify_final_zk_with_challenge(&extras.scalar_product, &c, d) + } +} +``` + +--- + +## Unified Data Structures + +### DoryProof (Single Type) + +```rust +/// Complete Dory evaluation proof, parameterized by Mode +pub struct DoryProof 
+where + G1: Group, + G2: Group, + GT: Group, +{ + /// VMV message (C, D2, E1) + pub vmv_message: VMVMessage, + + /// VMV extras (empty for Transparent, E2/y_com for ZK) + pub vmv_extras: M::VmvExtras, + + /// Reduce round messages + pub first_messages: Vec>, + pub second_messages: Vec>, + + /// Final scalar product message + pub final_message: ScalarProductMessage, + + /// Mode-specific extra proofs (empty for Transparent, sigma proofs for ZK) + pub extras: M::Extras, + + /// Matrix dimensions + pub nu: usize, + pub sigma: usize, +} +``` + +### VMVMessage (Unchanged) + +```rust +/// VMV message - same for both modes +/// Mode-specific data (E2, y_com) stored in DoryProof::vmv_extras +pub struct VMVMessage { + pub c: GT, + pub d2: GT, + pub e1: G1, +} +``` + +--- + +## Unified API + +### Single prove Function + +```rust +/// Create evaluation proof +/// +/// Returns (proof, witness) where witness is: +/// - Transparent: the evaluation y (type F) +/// - ZK: commitment to y (type G1) +pub fn prove( + polynomial: &P, + point: &[F], + row_commitments: Vec, + nu: usize, + sigma: usize, + setup: &ProverSetup, + transcript: &mut T, + rng: &mut R, +) -> Result<(DoryProof, M::Witness), DoryError> +where + F: Field, + E: PairingCurve, + E::G1: Group, + E::G2: Group, + E::GT: Group, + M1: DoryRoutines, + M2: DoryRoutines, + P: MultilinearLagrange, + T: Transcript, + M: Mode, + R: RngCore, +{ + // Validation + if point.len() != nu + sigma { + return Err(DoryError::InvalidPointDimension { ... }); + } + if nu > sigma { + return Err(DoryError::InvalidSize { ... 
}); + } + + // Compute evaluation vectors + let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); + let v_vec = polynomial.vector_matrix_product(&left_vec, nu, sigma); + + // Compute y + let y = polynomial.evaluate(point); + + // Sample VMV blinds using Mode + let r_c: F = M::sample(rng); + let r_d2: F = M::sample(rng); + let r_e1: F = M::sample(rng); + let r_e2: F = M::sample(rng); + let vmv_blinds = VmvBlinds { r_c, r_d2, r_e1, r_e2 }; + + // Create witness and VMV extras using Mode + let (witness, vmv_extras, witness_secrets) = M::create_witness(&y, &r_e2, setup, rng); + + // Compute VMV message using Mode::mask + let g2_fin = &setup.g2_vec[0]; + let t_vec_v = M1::msm(&padded_row_commitments, &v_vec); + let c = M::mask(E::pair(&t_vec_v, g2_fin), &setup.ht, &r_c); + + let g1_bases = &setup.g1_vec[..1 << sigma]; + let gamma1_v = M1::msm(g1_bases, &v_vec); + let d2 = M::mask(E::pair(&gamma1_v, g2_fin), &setup.ht, &r_d2); + + let e1 = M::mask(M1::msm(&row_commitments, &left_vec), &setup.h1, &r_e1); + + let vmv_message = VMVMessage { c, d2, e1 }; + + // Append to transcript + transcript.append_serde(b"vmv_c", &vmv_message.c); + transcript.append_serde(b"vmv_d2", &vmv_message.d2); + transcript.append_serde(b"vmv_e1", &vmv_message.e1); + M::append_vmv_extras_to_transcript::(&vmv_extras, transcript); + + // Initialize prover state + let mut prover_state = DoryProverState::new_with_blinds( + padded_row_commitments, v2, Some(v_vec), + padded_right_vec, padded_left_vec, + setup, r_c, r_d2, r_e1, r_e2, + ); + + // Reduce-and-fold rounds (identical for both modes) + let mut first_messages = Vec::with_capacity(num_rounds); + let mut second_messages = Vec::with_capacity(num_rounds); + + for _ in 0..num_rounds { + let (first_msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) = + prover_state.compute_first_message::(rng); + // ... append to transcript, get beta ... 
+ prover_state.apply_first_challenge::(&beta); + first_messages.push(first_msg); + + let (second_msg, ...) = prover_state.compute_second_message::(rng); + // ... append to transcript, get alpha ... + prover_state.apply_second_challenge::(...); + second_messages.push(second_msg); + } + + let gamma = transcript.challenge_scalar(b"gamma"); + + // Generate mode-specific extras (sigma proofs for ZK, nothing for Transparent) + let extras = M::generate_extras(&witness_secrets, &vmv_blinds, &prover_state, setup, transcript, rng); + + let final_message = prover_state.compute_final_message::(&gamma); + + transcript.append_serde(b"final_e1", &final_message.e1); + transcript.append_serde(b"final_e2", &final_message.e2); + let _d = transcript.challenge_scalar(b"d"); + + Ok(( + DoryProof { + vmv_message, + vmv_extras, + first_messages, + second_messages, + final_message, + extras, + nu, + sigma, + }, + witness, + )) +} +``` + +### Single verify Function + +```rust +/// Verify evaluation proof +/// +/// Takes witness which is: +/// - Transparent: the evaluation y (type F) +/// - ZK: commitment to y (type G1) +pub fn verify( + commitment: E::GT, + witness: M::Witness, + point: &[F], + proof: &DoryProof, + setup: VerifierSetup, + transcript: &mut T, +) -> Result<(), DoryError> +where + F: Field, + E: PairingCurve, + E::G1: Group, + E::G2: Group, + E::GT: Group, + M1: DoryRoutines, + M2: DoryRoutines, + T: Transcript, + M: Mode, +{ + let nu = proof.nu; + let sigma = proof.sigma; + + if point.len() != nu + sigma { + return Err(DoryError::InvalidPointDimension { ... 
}); + } + + // Append VMV to transcript + transcript.append_serde(b"vmv_c", &proof.vmv_message.c); + transcript.append_serde(b"vmv_d2", &proof.vmv_message.d2); + transcript.append_serde(b"vmv_e1", &proof.vmv_message.e1); + M::append_vmv_extras_to_transcript::(&proof.vmv_extras, transcript); + + // Verify mode-specific extras (sigma proofs for ZK) + M::verify_extras(&proof.extras, &proof.vmv_extras, &proof.vmv_message, &setup, transcript)?; + + // Get E2 based on mode + let e2 = M::verifier_e2::(&witness, &proof.vmv_extras, &setup); + + // Initialize verifier state + let mut verifier_state = DoryVerifierState::new( + proof.vmv_message.c, + commitment, + proof.vmv_message.d2, + proof.vmv_message.e1, + e2, + s1_coords, + s2_coords, + num_rounds, + setup.clone(), + ); + + // Process rounds (identical for both modes) + for round in 0..num_rounds { + let first_msg = &proof.first_messages[round]; + let second_msg = &proof.second_messages[round]; + // ... append to transcript, get challenges ... 
+ verifier_state.process_round(first_msg, second_msg, &alpha, &beta); + } + + let gamma = transcript.challenge_scalar(b"gamma"); + + // Append final message + transcript.append_serde(b"final_e1", &proof.final_message.e1); + transcript.append_serde(b"final_e2", &proof.final_message.e2); + let d = transcript.challenge_scalar(b"d"); + + // Final verification dispatch based on mode + M::verify_final(&mut verifier_state, &proof.extras, &proof.final_message, &gamma, &d) +} +``` + +--- + +## Usage Examples + +### Transparent Mode + +```rust +// Prove +let (proof, evaluation) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + &poly, &point, tier_1, nu, sigma, &prover_setup, &mut transcript, &mut rng +)?; + +// Verify +verify::<_, BN254, G1Routines, G2Routines, _, Transparent>( + tier_2, evaluation, &point, &proof, verifier_setup, &mut transcript +)?; +``` + +### ZK Mode + +```rust +// Prove - returns y_com instead of y +let (proof, y_com) = prove::<_, BN254, G1Routines, G2Routines, _, _, ZK, _>( + &poly, &point, tier_1, nu, sigma, &prover_setup, &mut transcript, &mut rng +)?; + +// Verify - takes y_com instead of y +verify::<_, BN254, G1Routines, G2Routines, _, ZK>( + tier_2, y_com, &point, &proof, verifier_setup, &mut transcript +)?; +``` + +--- + +## Files to Modify + +### Delete Entirely +- `src/backends/arkworks/ark_poly.rs`: `commit_zk` method (~60 lines) +- `src/primitives/poly.rs`: `commit_zk` trait method (~15 lines) + +### Major Refactor +| File | Changes | +|------|---------| +| `src/mode.rs` | Expand Mode trait with associated types and methods | +| `src/proof.rs` | Single `DoryProof<..., M: Mode>`, delete `ZkDoryProof` | +| `src/messages.rs` | Delete `ZkVMVMessage`, add `ZkVmvExtras` | +| `src/evaluation_proof.rs` | Single `prove`/`verify`, delete duplicates (~350 lines removed) | +| `src/reduce_and_fold.rs` | Delete duplicate `scalar_product_proof`, keep single impl | +| `src/lib.rs` | Update exports | + +### Minor Updates +| File | Changes | 
+|------|---------| +| `tests/arkworks/*.rs` | Update to new API | +| `examples/*.rs` | Update to new API (add `Transparent` parameter) | + +--- + +## Expected Diff Reduction + +| Component | Before | After | Reduction | +|-----------|--------|-------|-----------| +| `evaluation_proof.rs` | +453 | ~+50 | -400 | +| `reduce_and_fold.rs` | +783 | ~+300 | -483 | +| `proof.rs` | +56 | ~+20 | -36 | +| `messages.rs` | +97 | ~+40 | -57 | +| `ark_poly.rs` | +58 | 0 | -58 | +| **Total** | +1447 | ~+410 | **-1037 (72%)** | + +--- + +## Implementation Order + +1. **Expand Mode trait** in `src/mode.rs` + - Add associated types + - Add all trait methods with Transparent impl + - Add ZK impl (feature-gated) + +2. **Unify proof types** in `src/proof.rs` and `src/messages.rs` + - Single `DoryProof<..., M>` + - Add `ZkVmvExtras`, `ZkProofs` + - Delete `ZkDoryProof`, `ZkVMVMessage` + +3. **Unify prove/verify** in `src/evaluation_proof.rs` + - Single `prove` function using Mode methods + - Single `verify` function using Mode methods + - Delete `create_zk_evaluation_proof`, `verify_zk_evaluation_proof` + +4. **Clean up reduce_and_fold.rs** + - Delete duplicate `scalar_product_proof` + - Update to use Mode trait + +5. **Delete dead code** + - Remove `commit_zk` from trait and impl + +6. 
**Update tests and examples** + - Add explicit `Transparent` or `ZK` mode parameter + +--- + +## Verification Checklist + +- [ ] `cargo nextest run --features backends` passes (Transparent mode) +- [ ] `cargo nextest run --features "backends,zk"` passes (both modes) +- [ ] `cargo clippy --features "backends,zk"` clean +- [ ] `cargo doc --features "backends,zk"` builds +- [ ] Statistical ZK tests still pass +- [ ] No performance regression in Transparent mode From b7d7157bbfbb0c1ea21675eacfb860e99f5f17c7 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 23 Jan 2026 17:15:57 -0500 Subject: [PATCH 02/16] refactor: some clean up --- src/backends/arkworks/ark_serde.rs | 12 +- src/evaluation_proof.rs | 663 ++++++----------------------- src/lib.rs | 6 +- src/messages.rs | 23 +- src/proof.rs | 68 +-- src/reduce_and_fold.rs | 585 ++++++------------------- tests/arkworks/zk.rs | 94 ++-- tests/arkworks/zk_statistical.rs | 272 +++++------- 8 files changed, 453 insertions(+), 1270 deletions(-) diff --git a/src/backends/arkworks/ark_serde.rs b/src/backends/arkworks/ark_serde.rs index 214bbd0..94054c1 100644 --- a/src/backends/arkworks/ark_serde.rs +++ b/src/backends/arkworks/ark_serde.rs @@ -422,10 +422,18 @@ impl CanonicalDeserialize for ArkDoryProof { first_messages, second_messages, final_message, - #[cfg(feature = "zk")] - scalar_product_proof: None, nu, sigma, + #[cfg(feature = "zk")] + e2: None, + #[cfg(feature = "zk")] + y_com: None, + #[cfg(feature = "zk")] + sigma1_proof: None, + #[cfg(feature = "zk")] + sigma2_proof: None, + #[cfg(feature = "zk")] + scalar_product_proof: None, }) } } diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 559916e..8512d58 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -35,42 +35,33 @@ use crate::proof::DoryProof; use crate::reduce_and_fold::{DoryProverState, DoryVerifierState}; use crate::setup::{ProverSetup, VerifierSetup}; +#[cfg(feature = "zk")] +use crate::mode::ZK; + /// Create evaluation 
proof for a polynomial at a point /// /// Implements Eval-VMV-RE protocol from Dory Section 5. /// The protocol proves that polynomial(point) = evaluation via the VMV relation: /// evaluation = L^T × M × R /// +/// # Mode Parameter +/// - `Transparent`: Non-hiding proof, evaluation revealed to verifier +/// - `ZK` (requires `zk` feature): Zero-knowledge proof, evaluation hidden +/// /// # Algorithm /// 1. Compute or use provided row commitments (Tier 1 commitment) /// 2. Split evaluation point into left and right vectors /// 3. Compute v_vec (column evaluations) -/// 4. Create VMV message (C, D2, E1) -/// 5. Initialize prover state for inner product / reduce-and-fold protocol -/// 6. Run max(nu, sigma) rounds of reduce-and-fold (with automatic padding for non-square): -/// - First reduce: compute message and apply beta challenge (reduce) -/// - Second reduce: compute message and apply alpha challenge (fold) +/// 4. Create VMV message (C, D2, E1) with mode-specific blinding +/// 5. In ZK mode: compute y_com, E2, and sigma proofs +/// 6. Run max(nu, sigma) rounds of reduce-and-fold /// 7. Compute final scalar product message /// -/// # Parameters -/// - `polynomial`: Polynomial to prove evaluation for -/// - `point`: Evaluation point (length nu + sigma) -/// - `row_commitments`: Optional precomputed row commitments from polynomial.commit() -/// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma) -/// - `sigma`: Log₂ of number of columns -/// - `setup`: Prover setup -/// - `transcript`: Fiat-Shamir transcript for challenge generation -/// - `rng`: Random number generator for sampling blinds (ZK mode only) -/// /// # Returns -/// Complete Dory proof containing VMV message, reduce messages, and final message +/// Complete Dory proof. In ZK mode, proof contains y_com for verifier. 
/// /// # Errors /// Returns error if dimensions are invalid (nu > sigma) or protocol fails -/// -/// # Matrix Layout -/// Supports both square (nu = sigma) and non-square (nu < sigma) matrices. -/// For non-square matrices, vectors are automatically padded to length 2^sigma. #[allow(clippy::type_complexity)] #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "create_evaluation_proof")] @@ -104,7 +95,6 @@ where }); } - // Validate matrix dimensions: nu must be ≤ sigma (rows ≤ columns) if nu > sigma { return Err(DoryError::InvalidSize { expected: sigma, @@ -112,23 +102,15 @@ where }); } - let row_commitments = if let Some(rc) = row_commitments { - rc - } else { - let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; - rc + let row_commitments = match row_commitments { + Some(rc) => rc, + None => { + let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; + rc + } }; - let _span_eval_vecs = tracing::span!( - tracing::Level::DEBUG, - "compute_evaluation_vectors", - nu, - sigma - ) - .entered(); let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); - drop(_span_eval_vecs); - let v_vec = polynomial.vector_matrix_product(&left_vec, nu, sigma); let mut padded_row_commitments = row_commitments.clone(); @@ -136,85 +118,91 @@ where padded_row_commitments.resize(1 << sigma, E::G1::identity()); } - let _span_vmv = - tracing::span!(tracing::Level::DEBUG, "compute_vmv_message", nu, sigma).entered(); - // Sample VMV blinds (zero in Transparent mode, random in ZK mode) let r_c: F = Mo::sample(rng); let r_d2: F = Mo::sample(rng); let r_e1: F = Mo::sample(rng); let r_e2: F = Mo::sample(rng); - // Γ2,fin = g2_vec[0] (commitment base, distinct from H2 = h2 for blinding) let g2_fin = &setup.g2_vec[0]; // C = e(⟨row_commitments, v_vec⟩, Γ2,fin) + r_c·HT let t_vec_v = M1::msm(&padded_row_commitments, &v_vec); - let c_raw = E::pair(&t_vec_v, g2_fin); - let c = Mo::mask(c_raw, &setup.ht, &r_c); + let c = 
Mo::mask(E::pair(&t_vec_v, g2_fin), &setup.ht, &r_c); // D₂ = e(⟨Γ₁[sigma], v_vec⟩, Γ2,fin) + r_d2·HT - let g1_bases_at_sigma = &setup.g1_vec[..1 << sigma]; - let gamma1_v = M1::msm(g1_bases_at_sigma, &v_vec); - let d2_raw = E::pair(&gamma1_v, g2_fin); - let d2 = Mo::mask(d2_raw, &setup.ht, &r_d2); + let g1_bases = &setup.g1_vec[..1 << sigma]; + let d2 = Mo::mask( + E::pair(&M1::msm(g1_bases, &v_vec), g2_fin), + &setup.ht, + &r_d2, + ); // E₁ = ⟨row_commitments, left_vec⟩ + r_e1·H₁ - let e1_raw = M1::msm(&row_commitments, &left_vec); - let e1 = Mo::mask(e1_raw, &setup.h1, &r_e1); + let e1 = Mo::mask(M1::msm(&row_commitments, &left_vec), &setup.h1, &r_e1); let vmv_message = VMVMessage { c, d2, e1 }; - drop(_span_vmv); - let _span_transcript = tracing::span!(tracing::Level::DEBUG, "vmv_transcript").entered(); transcript.append_serde(b"vmv_c", &vmv_message.c); transcript.append_serde(b"vmv_d2", &vmv_message.d2); transcript.append_serde(b"vmv_e1", &vmv_message.e1); - drop(_span_transcript); - let _span_init = tracing::span!( - tracing::Level::DEBUG, - "fixed_base_vector_scalar_mul_h2", - nu, - sigma - ) - .entered(); - - // v₂ = v_vec · Γ₂,fin (each scalar scales Γ2,fin = g2_vec[0]) - let v2 = { - let _span = - tracing::span!(tracing::Level::DEBUG, "fixed_base_vector_scalar_mul_g2_fin").entered(); - M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec) - }; + // ZK mode: compute y, y_com, E2, and sigma proofs + #[cfg(feature = "zk")] + let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2) = + if std::any::TypeId::of::() == std::any::TypeId::of::() { + use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; + + let y = polynomial.evaluate(point); + let r_y: F = Mo::sample(rng); + + // E2 = y·Γ2,fin + r_e2·H2 + let e2 = Mo::mask(g2_fin.scale(&y), &setup.h2, &r_e2); + // y_com = y·Γ1,fin + r_y·H1 + let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); + + transcript.append_serde(b"vmv_e2", &e2); + transcript.append_serde(b"vmv_y_com", &y_com); + + let sigma1 
= generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); + let t1 = r_e1; + let t2 = -r_d2; + let sigma2 = generate_sigma2_proof::(&t1, &t2, setup, transcript, rng); + + (Some(e2), Some(y_com), Some(sigma1), Some(sigma2)) + } else { + (None, None, None, None) + }; + + // v₂ = v_vec · Γ₂,fin + let v2 = M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec); - let mut padded_right_vec = right_vec.clone(); - let mut padded_left_vec = left_vec.clone(); + let mut padded_right_vec = right_vec; + let mut padded_left_vec = left_vec; if nu < sigma { padded_right_vec.resize(1 << sigma, F::zero()); padded_left_vec.resize(1 << sigma, F::zero()); } - // Create prover state with initial blinds from VMV let mut prover_state: DoryProverState<'_, E, Mo> = DoryProverState::new_with_blinds( - padded_row_commitments, // v1 = T_vec_prime (row commitments, padded) - v2, // v2 = v_vec · g_fin - Some(v_vec), // v2_scalars for first-round MSM+pair optimization - padded_right_vec, // s1 = right_vec (padded) - padded_left_vec, // s2 = left_vec (padded) + padded_row_commitments, + v2, + Some(v_vec), + padded_right_vec, + padded_left_vec, setup, - r_c, // Initial r_c from VMV - r_d2, // Initial r_d2 from VMV - r_e1, // Initial r_e1 from VMV - r_e2, // Initial r_e2 from VMV + r_c, + r_d2, + r_e1, + r_e2, ); - drop(_span_init); let num_rounds = nu.max(sigma); let mut first_messages = Vec::with_capacity(num_rounds); let mut second_messages = Vec::with_capacity(num_rounds); for _round in 0..num_rounds { - let (first_msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) = + let (first_msg, d1_blinds, d2_blinds) = prover_state.compute_first_message::(rng); transcript.append_serde(b"d1_left", &first_msg.d1_left); @@ -225,12 +213,10 @@ where transcript.append_serde(b"e2_beta", &first_msg.e2_beta); let beta = transcript.challenge_scalar(b"beta"); - // apply_first_challenge uses accumulated blinds (self.r_d1, self.r_d2) prover_state.apply_first_challenge::(&beta); - first_messages.push(first_msg); - let 
(second_msg, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus) = + let (second_msg, c_blinds, e1_blinds, e2_blinds) = prover_state.compute_second_message::(rng); transcript.append_serde(b"c_plus", &second_msg.c_plus); @@ -241,36 +227,26 @@ where transcript.append_serde(b"e2_minus", &second_msg.e2_minus); let alpha = transcript.challenge_scalar(b"alpha"); - // apply_second_challenge folds message blinds into accumulated blinds prover_state.apply_second_challenge::( - &alpha, r_d1_l, r_d1_r, r_d2_l, r_d2_r, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, - r_e2_plus, r_e2_minus, + &alpha, d1_blinds, d2_blinds, c_blinds, e1_blinds, e2_blinds, ); - second_messages.push(second_msg); } let gamma = transcript.challenge_scalar(b"gamma"); - // In ZK mode, generate Σ-protocol proof BEFORE compute_final_message - // because compute_final_message modifies r_c but the Σ-protocol needs - // the pre-fold-scalars blinds. + // Generate scalar product proof in ZK mode #[cfg(feature = "zk")] - let scalar_product_proof = - if std::any::TypeId::of::() == std::any::TypeId::of::() { - // scalar_product_proof appends P1, P2, Q, R to transcript and derives challenge c - Some(prover_state.scalar_product_proof_internal(transcript, rng)) - } else { - None - }; - #[cfg(not(feature = "zk"))] - let _ = rng; // suppress unused warning when zk feature is disabled + let scalar_product_proof = if std::any::TypeId::of::() == std::any::TypeId::of::() { + Some(prover_state.scalar_product_proof_internal(transcript, rng)) + } else { + None + }; let final_message = prover_state.compute_final_message::(&gamma); transcript.append_serde(b"final_e1", &final_message.e1); transcript.append_serde(b"final_e2", &final_message.e2); - let _d = transcript.challenge_scalar(b"d"); Ok(DoryProof { @@ -278,10 +254,18 @@ where first_messages, second_messages, final_message, - #[cfg(feature = "zk")] - scalar_product_proof, nu, sigma, + #[cfg(feature = "zk")] + e2: zk_e2, + #[cfg(feature = "zk")] + y_com: 
zk_y_com, + #[cfg(feature = "zk")] + sigma1_proof: zk_sigma1, + #[cfg(feature = "zk")] + sigma2_proof: zk_sigma2, + #[cfg(feature = "zk")] + scalar_product_proof, }) } @@ -290,37 +274,20 @@ where /// Verifies that a committed polynomial evaluates to the claimed value at the given point. /// Works with both square and non-square matrix layouts (nu ≤ sigma). /// -/// # Algorithm -/// 1. Extract VMV message from proof -/// 2. Check sigma protocol 2: d2 = e(e1, h2) -/// 3. Compute e2 = h2 * evaluation -/// 4. Initialize verifier state with commitment and VMV message -/// 5. Run max(nu, sigma) rounds of reduce-and-fold verification (with automatic padding) -/// 6. Derive gamma and d challenges -/// 7. Verify final scalar product message +/// # Verification Modes +/// - **Transparent**: Takes evaluation `y` as input, computes E2 = y·Γ2,fin +/// - **ZK**: Takes `y_com` (from proof.y_com), uses E2 from proof, verifies sigma proofs /// /// # Parameters -/// - `commitment`: Polynomial commitment (in GT) - can be a homomorphically combined commitment -/// - `evaluation`: Claimed evaluation result +/// - `commitment`: Polynomial commitment (in GT) +/// - `evaluation`: Claimed evaluation (transparent) or None (ZK uses proof.y_com) /// - `point`: Evaluation point (length must equal proof.nu + proof.sigma) -/// - `proof`: Evaluation proof to verify (contains nu and sigma dimensions) +/// - `proof`: Evaluation proof to verify /// - `setup`: Verifier setup /// - `transcript`: Fiat-Shamir transcript for challenge generation /// -/// # Returns -/// `Ok(())` if proof is valid, `Err(DoryError)` otherwise -/// -/// # Homomorphic Verification -/// This function can verify proofs for homomorphically combined polynomials. -/// The commitment parameter should be the combined commitment, and the evaluation -/// should be the evaluation of the combined polynomial. 
-/// /// # Errors -/// Returns `DoryError::InvalidProof` if verification fails, or other variants -/// if the input parameters are incorrect (e.g., point dimension mismatch). -/// -/// # Panics -/// May panic in ZK mode if internal state is inconsistent (should not occur in normal use). +/// Returns `DoryError::InvalidProof` if verification fails. #[tracing::instrument(skip_all, name = "verify_evaluation_proof")] pub fn verify_evaluation_proof( commitment: E::GT, @@ -355,407 +322,35 @@ where transcript.append_serde(b"vmv_d2", &vmv_message.d2); transcript.append_serde(b"vmv_e1", &vmv_message.e1); - // # NOTE: The VMV check `vmv_message.d2 == e(vmv_message.e1, Γ2,fin)` is deferred - // to verify_final where it's batched with other pairings using random linear - // combination with challenge `d`. See verify_final documentation for details. - - // E2 = y · Γ2,fin where Γ2,fin = g2_0 (distinct from H2 = h2 for blinding) - let e2 = setup.g2_0.scale(&evaluation); - - // Folded-scalar accumulation with per-round coordinates. - // num_rounds = sigma (we fold column dimensions). - let num_rounds = sigma; - // s1 (right/prover): the σ column coordinates in natural order (LSB→MSB). - // No padding here: the verifier folds across the σ column dimensions. - // With MSB-first folding, these coordinates are only consumed after the first σ−ν rounds, - // which correspond to the padded MSB dimensions on the left tensor, matching the prover. - let col_coords = &point[..sigma]; - let s1_coords: Vec = col_coords.to_vec(); - // s2 (left/prover): the ν row coordinates in natural order, followed by zeros for the extra - // MSB dimensions. Conceptually this is s ⊗ [1,0]^(σ−ν): under MSB-first folds, the first - // σ−ν rounds multiply s2 by α⁻¹ while contributing no right halves (since those entries are 0). 
- let mut s2_coords: Vec = vec![F::zero(); sigma]; - let row_coords = &point[sigma..sigma + nu]; - s2_coords[..nu].copy_from_slice(&row_coords[..nu]); - - let mut verifier_state = DoryVerifierState::new( - vmv_message.c, // c from VMV message - commitment, // d1 = commitment - vmv_message.d2, // d2 from VMV message - vmv_message.e1, // e1 from VMV message - e2, // e2 computed from evaluation - s1_coords, // s1: columns c0..c_{σ−1} (LSB→MSB), no padding; folded across σ dims - s2_coords, // s2: rows r0..r_{ν−1} then zeros in MSB dims (emulates s ⊗ [1,0]^(σ−ν)) - num_rounds, - setup.clone(), - ); - - for round in 0..num_rounds { - let first_msg = &proof.first_messages[round]; - let second_msg = &proof.second_messages[round]; - - transcript.append_serde(b"d1_left", &first_msg.d1_left); - transcript.append_serde(b"d1_right", &first_msg.d1_right); - transcript.append_serde(b"d2_left", &first_msg.d2_left); - transcript.append_serde(b"d2_right", &first_msg.d2_right); - transcript.append_serde(b"e1_beta", &first_msg.e1_beta); - transcript.append_serde(b"e2_beta", &first_msg.e2_beta); - let beta = transcript.challenge_scalar(b"beta"); - - transcript.append_serde(b"c_plus", &second_msg.c_plus); - transcript.append_serde(b"c_minus", &second_msg.c_minus); - transcript.append_serde(b"e1_plus", &second_msg.e1_plus); - transcript.append_serde(b"e1_minus", &second_msg.e1_minus); - transcript.append_serde(b"e2_plus", &second_msg.e2_plus); - transcript.append_serde(b"e2_minus", &second_msg.e2_minus); - let alpha = transcript.challenge_scalar(b"alpha"); - - verifier_state.process_round(first_msg, second_msg, &alpha, &beta); - } - - let gamma = transcript.challenge_scalar(b"gamma"); - - // In ZK mode, append Σ-protocol values and derive c BEFORE appending final message - // (must match prover's transcript order) + // Determine E2 based on proof mode (ZK vs transparent) #[cfg(feature = "zk")] - let zk_challenge_c = if let Some(ref sigma_proof) = proof.scalar_product_proof { - 
transcript.append_serde(b"sigma_p1", &sigma_proof.p1); - transcript.append_serde(b"sigma_p2", &sigma_proof.p2); - transcript.append_serde(b"sigma_q", &sigma_proof.q); - transcript.append_serde(b"sigma_r", &sigma_proof.r); - Some(transcript.challenge_scalar(b"sigma_c")) - } else { - None - }; - - transcript.append_serde(b"final_e1", &proof.final_message.e1); - transcript.append_serde(b"final_e2", &proof.final_message.e2); - - let d = transcript.challenge_scalar(b"d"); - - // Use verify_final_zk when scalar_product_proof is present - #[cfg(feature = "zk")] - if let Some(ref sigma_proof) = proof.scalar_product_proof { - let c = zk_challenge_c.expect("c should be derived when scalar_product_proof is present"); - return verifier_state.verify_final_zk_with_challenge(sigma_proof, &c, &d); - } - - verifier_state.verify_final(&proof.final_message, &gamma, &d) -} - -/// Create a zero-knowledge evaluation proof that hides the evaluation y -/// -/// Unlike `create_evaluation_proof`, this function produces a proof where -/// the evaluation y is NOT revealed. Instead, the proof contains: -/// - `y_com`: A commitment to y that the verifier can use -/// - Sigma proofs that prove consistency without revealing y -/// -/// # Returns -/// A tuple of (ZkDoryProof, y_com) where y_com is the commitment to the evaluation -/// that can be given to the verifier. -/// -/// # Errors -/// Returns `DoryError` if dimensions are invalid or proof generation fails. 
-#[cfg(feature = "zk")] -#[allow(clippy::type_complexity)] -#[allow(clippy::too_many_arguments)] -#[tracing::instrument(skip_all, name = "create_zk_evaluation_proof")] -pub fn create_zk_evaluation_proof( - polynomial: &P, - point: &[F], - row_commitments: Option>, - nu: usize, - sigma: usize, - setup: &ProverSetup, - transcript: &mut T, - rng: &mut R, -) -> Result<(crate::proof::ZkDoryProof, E::G1), DoryError> -where - F: Field, - E: PairingCurve, - E::G1: Group, - E::G2: Group, - E::GT: Group, - M1: DoryRoutines, - M2: DoryRoutines, - T: Transcript, - P: MultilinearLagrange, - R: rand_core::RngCore, -{ - use crate::messages::ZkVMVMessage; - use crate::mode::ZK; - use crate::proof::ZkDoryProof; - use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; - - if point.len() != nu + sigma { - return Err(DoryError::InvalidPointDimension { - expected: nu + sigma, - actual: point.len(), - }); - } + let (e2, is_zk) = if let (Some(proof_e2), Some(y_com)) = (&proof.e2, &proof.y_com) { + use crate::reduce_and_fold::{verify_sigma1_proof, verify_sigma2_proof}; - if nu > sigma { - return Err(DoryError::InvalidSize { - expected: sigma, - actual: nu, - }); - } + transcript.append_serde(b"vmv_e2", proof_e2); + transcript.append_serde(b"vmv_y_com", y_com); - // Compute row commitments if not provided - let row_comms = match row_commitments { - Some(comms) => comms, - None => { - let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; - rc + // Verify sigma proofs + if let Some(ref sigma1) = proof.sigma1_proof { + verify_sigma1_proof::(proof_e2, y_com, sigma1, &setup, transcript)?; + } + if let Some(ref sigma2) = proof.sigma2_proof { + verify_sigma2_proof::( + &vmv_message.e1, + &vmv_message.d2, + sigma2, + &setup, + transcript, + )?; } - }; - - // Compute evaluation vectors and v_vec - let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); - let v_vec = polynomial.vector_matrix_product(&left_vec, nu, sigma); - - // Pad row 
commitments for non-square matrices - let mut padded_row_commitments = row_comms.clone(); - if nu < sigma { - padded_row_commitments.resize(1 << sigma, E::G1::identity()); - } - - // Compute y = polynomial(point) - let y = polynomial.evaluate(point); - - // Compute v2 = v_vec scaled by Γ2,fin - let g2_fin = &setup.g2_vec[0]; - let v2 = M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec); - - // Pad vectors for non-square matrices - let mut padded_right_vec = right_vec.clone(); - let mut padded_left_vec = left_vec.clone(); - if nu < sigma { - padded_right_vec.resize(1 << sigma, F::zero()); - padded_left_vec.resize(1 << sigma, F::zero()); - } - // Sample blinds for VMV message - let r_c: F = ZK::sample(rng); - let r_d2: F = ZK::sample(rng); - let r_e1: F = ZK::sample(rng); - let r_e2: F = ZK::sample(rng); - let r_y: F = ZK::sample(rng); - - // Compute VMV message components with masking - // C = e(⟨T_vec', v_vec⟩, Γ2,fin) + r_c·HT - let t_dot_v = M1::msm(&padded_row_commitments, &v_vec); - let c_raw = E::pair(&t_dot_v, g2_fin); - let c = ZK::mask(c_raw, &setup.ht, &r_c); - - // D2 = e(⟨Γ1, v_vec⟩, Γ2,fin) + r_d2·HT - let g1_bases_at_sigma = &setup.g1_vec[..1 << sigma]; - let g1_dot_v = M1::msm(g1_bases_at_sigma, &v_vec); - let d2_raw = E::pair(&g1_dot_v, g2_fin); - let d2 = ZK::mask(d2_raw, &setup.ht, &r_d2); - - // E1 = ⟨T_vec', L_vec⟩ + r_e1·H1 - let e1_raw = M1::msm(&row_comms, &left_vec); - let e1 = ZK::mask(e1_raw, &setup.h1, &r_e1); - - // E2 = y·Γ2,fin + r_e2·H2 (prover computes, not verifier!) 
- let e2_raw = g2_fin.scale(&y); - let e2 = ZK::mask(e2_raw, &setup.h2, &r_e2); - - // y_com = y·Γ1,fin + r_y·H1 (commitment to evaluation) - let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); - - let zk_vmv_message = ZkVMVMessage { - c, - d2, - e1, - e2, - y_com, + (*proof_e2, true) + } else { + (setup.g2_0.scale(&evaluation), false) }; - // Append VMV message to transcript - transcript.append_serde(b"vmv_c", &zk_vmv_message.c); - transcript.append_serde(b"vmv_d2", &zk_vmv_message.d2); - transcript.append_serde(b"vmv_e1", &zk_vmv_message.e1); - transcript.append_serde(b"vmv_e2", &zk_vmv_message.e2); - transcript.append_serde(b"vmv_y_com", &zk_vmv_message.y_com); - - // Generate Sigma1 proof (proves y_com and E2 commit to same y) - let sigma1_proof = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); - - // Generate Sigma2 proof (proves VMV relation: e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2)) - // t1 = r_e1 (since we're using rv=0 for row commitment blinds in this simplified version) - // t2 = -r_d2 - let t1 = r_e1; - let t2 = -r_d2; - let sigma2_proof = generate_sigma2_proof::(&t1, &t2, setup, transcript, rng); - - // Initialize prover state - let mut prover_state: DoryProverState<'_, E, ZK> = DoryProverState::new_with_blinds( - padded_row_commitments, - v2, - Some(v_vec), - padded_right_vec, - padded_left_vec, - setup, - r_c, - r_d2, - r_e1, - r_e2, - ); - - let num_rounds = nu.max(sigma); - let mut first_messages = Vec::with_capacity(num_rounds); - let mut second_messages = Vec::with_capacity(num_rounds); - - // Run reduce-and-fold rounds - for _round in 0..num_rounds { - let (first_msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) = - prover_state.compute_first_message::(rng); - - transcript.append_serde(b"d1_left", &first_msg.d1_left); - transcript.append_serde(b"d1_right", &first_msg.d1_right); - transcript.append_serde(b"d2_left", &first_msg.d2_left); - transcript.append_serde(b"d2_right", &first_msg.d2_right); - 
transcript.append_serde(b"e1_beta", &first_msg.e1_beta); - transcript.append_serde(b"e2_beta", &first_msg.e2_beta); - - let beta = transcript.challenge_scalar(b"beta"); - prover_state.apply_first_challenge::(&beta); - - first_messages.push(first_msg); - - let (second_msg, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus) = - prover_state.compute_second_message::(rng); - - transcript.append_serde(b"c_plus", &second_msg.c_plus); - transcript.append_serde(b"c_minus", &second_msg.c_minus); - transcript.append_serde(b"e1_plus", &second_msg.e1_plus); - transcript.append_serde(b"e1_minus", &second_msg.e1_minus); - transcript.append_serde(b"e2_plus", &second_msg.e2_plus); - transcript.append_serde(b"e2_minus", &second_msg.e2_minus); - - let alpha = transcript.challenge_scalar(b"alpha"); - prover_state.apply_second_challenge::( - &alpha, r_d1_l, r_d1_r, r_d2_l, r_d2_r, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, - r_e2_plus, r_e2_minus, - ); - - second_messages.push(second_msg); - } - - let gamma = transcript.challenge_scalar(b"gamma"); - - // Generate scalar product proof before compute_final_message modifies r_c - let scalar_product_proof = prover_state.scalar_product_proof_internal(transcript, rng); - - let final_message = prover_state.compute_final_message::(&gamma); - - transcript.append_serde(b"final_e1", &final_message.e1); - transcript.append_serde(b"final_e2", &final_message.e2); - - let _d = transcript.challenge_scalar(b"d"); - - Ok(( - ZkDoryProof { - vmv_message: zk_vmv_message, - first_messages, - second_messages, - final_message, - sigma1_proof, - sigma2_proof, - scalar_product_proof, - nu, - sigma, - }, - y_com, - )) -} - -/// Verify a zero-knowledge evaluation proof without knowing y -/// -/// Unlike `verify_evaluation_proof`, this function does NOT take the evaluation y. -/// Instead, it verifies that: -/// 1. The prover knows y such that polynomial(point) = y -/// 2. 
y_com is a valid commitment to y -/// -/// # Parameters -/// - `commitment`: Polynomial commitment (Tier 2) -/// - `y_com`: Commitment to the evaluation (from proof generation) -/// - `point`: Evaluation point -/// - `proof`: ZK evaluation proof -/// - `setup`: Verifier setup -/// - `transcript`: Fiat-Shamir transcript -/// -/// # Errors -/// Returns `DoryError::InvalidProof` if verification fails, or other variants -/// if the input parameters are incorrect. -#[cfg(feature = "zk")] -#[tracing::instrument(skip_all, name = "verify_zk_evaluation_proof")] -pub fn verify_zk_evaluation_proof( - commitment: E::GT, - y_com: E::G1, - point: &[F], - proof: &crate::proof::ZkDoryProof, - setup: VerifierSetup, - transcript: &mut T, -) -> Result<(), DoryError> -where - F: Field, - E: PairingCurve, - E::G1: Group, - E::G2: Group, - E::GT: Group, - M1: DoryRoutines, - M2: DoryRoutines, - T: Transcript, -{ - use crate::reduce_and_fold::{verify_sigma1_proof, verify_sigma2_proof}; - - let nu = proof.nu; - let sigma = proof.sigma; - - if point.len() != nu + sigma { - return Err(DoryError::InvalidPointDimension { - expected: nu + sigma, - actual: point.len(), - }); - } - - let vmv_message = &proof.vmv_message; - - // Append VMV message to transcript (same order as prover) - transcript.append_serde(b"vmv_c", &vmv_message.c); - transcript.append_serde(b"vmv_d2", &vmv_message.d2); - transcript.append_serde(b"vmv_e1", &vmv_message.e1); - transcript.append_serde(b"vmv_e2", &vmv_message.e2); - transcript.append_serde(b"vmv_y_com", &vmv_message.y_com); - - // Verify y_com from proof matches provided y_com - if vmv_message.y_com != y_com { - return Err(DoryError::InvalidProof); - } - - // Verify Sigma1 proof (proves E2 and y_com commit to same y) - verify_sigma1_proof::( - &vmv_message.e2, - &y_com, - &proof.sigma1_proof, - &setup, - transcript, - )?; - - // Verify Sigma2 proof (proves VMV relation holds with blinds) - verify_sigma2_proof::( - &vmv_message.e1, - &vmv_message.d2, - 
&proof.sigma2_proof, - &setup, - transcript, - )?; - - // Use E2 from prover's message (not computed from y!) - let e2 = vmv_message.e2; + #[cfg(not(feature = "zk"))] + let (e2, _is_zk) = (setup.g2_0.scale(&evaluation), false); // Folded-scalar accumulation let num_rounds = sigma; @@ -777,7 +372,6 @@ where setup.clone(), ); - // Process reduce rounds for round in 0..num_rounds { let first_msg = &proof.first_messages[round]; let second_msg = &proof.second_messages[round]; @@ -801,20 +395,31 @@ where verifier_state.process_round(first_msg, second_msg, &alpha, &beta); } - let _gamma = transcript.challenge_scalar(b"gamma"); + let gamma = transcript.challenge_scalar(b"gamma"); - // Derive challenge c from scalar product proof (same as prover) - transcript.append_serde(b"sigma_p1", &proof.scalar_product_proof.p1); - transcript.append_serde(b"sigma_p2", &proof.scalar_product_proof.p2); - transcript.append_serde(b"sigma_q", &proof.scalar_product_proof.q); - transcript.append_serde(b"sigma_r", &proof.scalar_product_proof.r); - let c = transcript.challenge_scalar(b"sigma_c"); + // ZK mode: verify with scalar product proof + #[cfg(feature = "zk")] + if is_zk { + if let Some(ref sigma_proof) = proof.scalar_product_proof { + transcript.append_serde(b"sigma_p1", &sigma_proof.p1); + transcript.append_serde(b"sigma_p2", &sigma_proof.p2); + transcript.append_serde(b"sigma_q", &sigma_proof.q); + transcript.append_serde(b"sigma_r", &sigma_proof.r); + let c = transcript.challenge_scalar(b"sigma_c"); + + transcript.append_serde(b"final_e1", &proof.final_message.e1); + transcript.append_serde(b"final_e2", &proof.final_message.e2); + let d = transcript.challenge_scalar(b"d"); + + return verifier_state.verify_final_zk_with_challenge(sigma_proof, &c, &d); + } + } + // Transparent mode transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); - let d = transcript.challenge_scalar(b"d"); - // Verify final with ZK scalar 
product proof - verifier_state.verify_final_zk_with_challenge(&proof.scalar_product_proof, &c, &d) + let _ = gamma; // Used in verify_final + verifier_state.verify_final(&proof.final_message, &gamma, &d) } diff --git a/src/lib.rs b/src/lib.rs index 2118936..4e57eed 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -104,11 +104,9 @@ pub mod backends; pub use error::DoryError; pub use evaluation_proof::create_evaluation_proof; -#[cfg(feature = "zk")] -pub use evaluation_proof::{create_zk_evaluation_proof, verify_zk_evaluation_proof}; pub use messages::{FirstReduceMessage, ScalarProductMessage, SecondReduceMessage, VMVMessage}; #[cfg(feature = "zk")] -pub use messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof, ZkVMVMessage}; +pub use messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof}; #[cfg(feature = "zk")] pub use mode::ZK; pub use mode::{Mode, Transparent}; @@ -116,8 +114,6 @@ use primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; pub use primitives::poly::{MultilinearLagrange, Polynomial}; use primitives::serialization::{DoryDeserialize, DorySerialize}; pub use proof::DoryProof; -#[cfg(feature = "zk")] -pub use proof::ZkDoryProof; pub use reduce_and_fold::{DoryProverState, DoryVerifierState}; pub use setup::{ProverSetup, VerifierSetup}; diff --git a/src/messages.rs b/src/messages.rs index 248885d..3f6d746 100644 --- a/src/messages.rs +++ b/src/messages.rs @@ -44,7 +44,7 @@ pub struct SecondReduceMessage { /// Vector-Matrix-Vector message for polynomial commitment transformation /// /// Contains C, D₂, E₁. In transparent mode, E₂ = y·Γ₂,fin is computed by verifier. -/// In ZK mode, y is committed rather than revealed. +/// In ZK mode, E₂ and y_com are stored in the proof's optional fields. #[derive(Clone, Debug)] pub struct VMVMessage { /// C = e(MSM(T_vec', v_vec), Γ₂,fin) + r_c·HT @@ -55,27 +55,6 @@ pub struct VMVMessage { pub e1: G1, } -/// ZK VMV message with committed evaluation -/// -/// In ZK mode, the evaluation y is not revealed. 
Instead, we commit to it: -/// `y_com = y·Γ1,fin + r_y·H1` -/// -/// The Sigma1 proof ties y_com to E2, proving they commit to the same y. -#[cfg(feature = "zk")] -#[derive(Clone, Debug)] -pub struct ZkVMVMessage { - /// C = e(MSM(T_vec', v_vec), Γ₂,fin) + r_c·HT - pub c: GT, - /// D₂ = e(MSM(Γ₁\[nu\], v_vec), Γ₂,fin) + r_d2·HT - pub d2: GT, - /// E₁ = MSM(T_vec', L_vec) + r_e1·H1 - pub e1: G1, - /// E₂ = y·Γ2,fin + r_e2·H2 (committed evaluation on G2 side) - pub e2: G2, - /// y_com = y·Γ1,fin + r_y·H1 (commitment to evaluation) - pub y_com: G1, -} - /// Final scalar product message (Section 3.1) /// /// Contains E₁, E₂ for the final pairing verification diff --git a/src/proof.rs b/src/proof.rs index 7c2ead0..a4bd0a9 100644 --- a/src/proof.rs +++ b/src/proof.rs @@ -6,7 +6,8 @@ //! - Final scalar product message //! //! For ZK mode, the proof additionally contains: -//! - ScalarProductProof (Σ-protocol) for zero-knowledge verification +//! - E2 and y_com (blinded VMV extension) +//! - Sigma proofs for ZK verification use crate::messages::*; use crate::primitives::arithmetic::Group; @@ -17,75 +18,46 @@ use crate::primitives::arithmetic::Group; /// at a given point. It consists of messages from the interactive protocol made /// non-interactive via Fiat-Shamir. /// -/// The proof includes the matrix dimensions (nu, sigma) used during proof generation, -/// which the verifier uses to ensure consistency with the evaluation point. -/// -/// In ZK mode (when `zk` feature is enabled and `scalar_product_proof` is `Some`), -/// verification uses the Σ-protocol to verify the final inner product relation -/// without revealing intermediate values. +/// In ZK mode (when `zk` feature is enabled), additional fields contain the +/// sigma proofs and blinded values needed for zero-knowledge verification. 
#[derive(Clone, Debug)] pub struct DoryProof { /// Vector-Matrix-Vector message for PCS transformation pub vmv_message: VMVMessage, - /// First reduce messages for each round (nu rounds total) + /// First reduce messages for each round pub first_messages: Vec>, - /// Second reduce messages for each round (nu rounds total) + /// Second reduce messages for each round pub second_messages: Vec>, /// Final scalar product message pub final_message: ScalarProductMessage, - /// ZK scalar product proof (Σ-protocol) - /// - /// Present only in ZK mode. When `Some`, verification uses `verify_final_zk` - /// which incorporates the Σ-protocol to handle blinded values. - #[cfg(feature = "zk")] - pub scalar_product_proof: Option>, - /// Log₂ of number of rows in the coefficient matrix pub nu: usize, /// Log₂ of number of columns in the coefficient matrix pub sigma: usize, -} -/// A complete Dory ZK evaluation proof -/// -/// In ZK mode, the evaluation `y` is not revealed. Instead: -/// - `y_com = y·Γ1,fin + r_y·H1` commits to the evaluation -/// - `e2 = y·Γ2,fin + r_e2·H2` is the blinded E2 for the reduce protocol -/// - Sigma1 proves y_com and e2 commit to the same y -/// - Sigma2 proves the VMV relation holds with blinds -/// - ScalarProductProof proves the final inner product relation -#[cfg(feature = "zk")] -#[derive(Clone, Debug)] -pub struct ZkDoryProof { - /// ZK VMV message with committed evaluation - pub vmv_message: ZkVMVMessage, - - /// First reduce messages for each round - pub first_messages: Vec>, - - /// Second reduce messages for each round - pub second_messages: Vec>, + // ZK-specific fields (present when zk feature is enabled) + /// E2 = y·Γ2,fin + r_e2·H2 (blinded VMV extension) + #[cfg(feature = "zk")] + pub e2: Option, - /// Final scalar product message - pub final_message: ScalarProductMessage, + /// y_com = y·Γ1,fin + r_y·H1 (commitment to evaluation) + #[cfg(feature = "zk")] + pub y_com: Option, /// Sigma1 proof: proves y_com and e2 commit to same y - pub 
sigma1_proof: Sigma1Proof, + #[cfg(feature = "zk")] + pub sigma1_proof: Option>, /// Sigma2 proof: proves VMV relation holds with blinds - pub sigma2_proof: Sigma2Proof, - - /// ZK scalar product proof - pub scalar_product_proof: ScalarProductProof, - - /// Log₂ of number of rows in the coefficient matrix - pub nu: usize, + #[cfg(feature = "zk")] + pub sigma2_proof: Option>, - /// Log₂ of number of columns in the coefficient matrix - pub sigma: usize, + /// ZK scalar product proof (Σ-protocol) + #[cfg(feature = "zk")] + pub scalar_product_proof: Option>, } diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index cc5aa97..4ab81d7 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -15,11 +15,24 @@ use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; use crate::setup::{ProverSetup, VerifierSetup}; use std::marker::PhantomData; -#[cfg(feature = "zk")] -use crate::mode::ZK; #[cfg(feature = "zk")] use crate::primitives::transcript::Transcript; +/// Scalar field type alias for a pairing curve. +type Scalar = <::G1 as Group>::Scalar; + +/// Accumulated blinds tuple (r_c, r_d1, r_d2) for ZK mode. +type Blinds = (Scalar, Scalar, Scalar); + +/// ZK scalar product proof type alias. +#[cfg(feature = "zk")] +type ZkScalarProductProof = ScalarProductProof< + ::G1, + ::G2, + Scalar, + ::GT, +>; + /// Prover state for the Dory opening protocol /// /// Maintains the current state of the prover during the interactive protocol. 
@@ -113,42 +126,6 @@ pub struct DoryVerifierState { setup: VerifierSetup, } -/// Type alias for first message with blinds (r_d1_l, r_d1_r, r_d2_l, r_d2_r) -pub type FirstMessageWithBlinds = ( - FirstReduceMessage<::G1, ::G2, ::GT>, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, -); - -/// Type alias for second message with blinds (r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus) -pub type SecondMessageWithBlinds = ( - SecondReduceMessage<::G1, ::G2, ::GT>, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, -); - -/// Type alias for accumulated blinds (r_c, r_d1, r_d2) -pub type Blinds = ( - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, - <::G1 as Group>::Scalar, -); - -/// Type alias for ZK scalar product proof -#[cfg(feature = "zk")] -pub type ZkScalarProductProof = ScalarProductProof< - ::G1, - ::G2, - <::G1 as Group>::Scalar, - ::GT, ->; - impl<'a, E: PairingCurve, M: Mode> DoryProverState<'a, E, M> where ::Scalar: Field, @@ -226,20 +203,21 @@ where state } - /// Compute first reduce message for current round - /// - /// Computes D1L, D1R, D2L, D2R, E1β, E2β based on current state. - /// In ZK mode, samples blinds and masks the D values. - /// - /// Returns the message and the four sampled blinds (r_d1_l, r_d1_r, r_d2_l, r_d2_r) - /// which are needed by `apply_first_challenge` to accumulate blinds. + /// Compute first reduce message for current round. Returns (message, d1_blinds, d2_blinds). 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_first_message")] - pub fn compute_first_message(&self, rng: &mut R) -> FirstMessageWithBlinds - where + #[allow(clippy::type_complexity)] + pub fn compute_first_message< M1: DoryRoutines, M2: DoryRoutines, R: rand_core::RngCore, - { + >( + &self, + rng: &mut R, + ) -> ( + FirstReduceMessage, + [::Scalar; 2], + [::Scalar; 2], + ) { assert!( self.num_rounds > 0, "Not enough rounds left in prover state" @@ -295,16 +273,18 @@ where // E₂β = ⟨Γ₂, s₁⟩ let e2_beta = M2::msm(&self.setup.g2_vec[..1 << self.num_rounds], &self.s1[..]); - let msg = FirstReduceMessage { - d1_left, - d1_right, - d2_left, - d2_right, - e1_beta, - e2_beta, - }; - - (msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) + ( + FirstReduceMessage { + d1_left, + d1_right, + d2_left, + d2_right, + e1_beta, + e2_beta, + }, + [r_d1_l, r_d1_r], + [r_d2_l, r_d2_r], + ) } /// Apply first challenge (beta) and combine vectors @@ -336,20 +316,22 @@ where self.r_c = self.r_c + self.r_d2 * *beta + self.r_d1 * beta_inv; } - /// Compute second reduce message for current round - /// - /// Computes C+, C-, E1+, E1-, E2+, E2- based on current state. - /// In ZK mode, samples blinds and masks the values. - /// - /// Returns the message and six sampled blinds needed by `apply_second_challenge`. + /// Compute second reduce message for current round. Returns (message, c_blinds, e1_blinds, e2_blinds). 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_second_message")] #[allow(clippy::type_complexity)] - pub fn compute_second_message(&self, rng: &mut R) -> SecondMessageWithBlinds - where + pub fn compute_second_message< M1: DoryRoutines, M2: DoryRoutines, R: rand_core::RngCore, - { + >( + &self, + rng: &mut R, + ) -> ( + SecondReduceMessage, + [::Scalar; 2], + [::Scalar; 2], + [::Scalar; 2], + ) { let n2 = 1 << (self.num_rounds - 1); // n/2 // Split all vectors into left and right halves @@ -392,44 +374,32 @@ where let e2_plus = M::mask(e2_plus_base, &self.setup.h2, &r_e2_plus); let e2_minus = M::mask(e2_minus_base, &self.setup.h2, &r_e2_minus); - let msg = SecondReduceMessage { - c_plus, - c_minus, - e1_plus, - e1_minus, - e2_plus, - e2_minus, - }; - ( - msg, r_c_plus, r_c_minus, r_e1_plus, r_e1_minus, r_e2_plus, r_e2_minus, + SecondReduceMessage { + c_plus, + c_minus, + e1_plus, + e1_minus, + e2_plus, + e2_minus, + }, + [r_c_plus, r_c_minus], + [r_e1_plus, r_e1_minus], + [r_e2_plus, r_e2_minus], ) } - /// Apply second challenge (alpha) and fold vectors - /// - /// Reduces the vector size by half using the alpha challenge. - /// Also accumulates blinds from compute_first_message and compute_second_message. + /// Apply second challenge (alpha) and fold vectors. 
#[tracing::instrument(skip_all, name = "DoryProverState::apply_second_challenge")] - #[allow(clippy::too_many_arguments)] - pub fn apply_second_challenge( + pub fn apply_second_challenge, M2: DoryRoutines>( &mut self, alpha: &::Scalar, - r_d1_l: ::Scalar, - r_d1_r: ::Scalar, - r_d2_l: ::Scalar, - r_d2_r: ::Scalar, - r_c_plus: ::Scalar, - r_c_minus: ::Scalar, - r_e1_plus: ::Scalar, - r_e1_minus: ::Scalar, - r_e2_plus: ::Scalar, - r_e2_minus: ::Scalar, - ) where - M1: DoryRoutines, - M2: DoryRoutines, - E::G2: Group::Scalar>, - { + d1_blinds: [::Scalar; 2], + d2_blinds: [::Scalar; 2], + c_blinds: [::Scalar; 2], + e1_blinds: [::Scalar; 2], + e2_blinds: [::Scalar; 2], + ) { let alpha_inv = (*alpha).inv().expect("alpha must be invertible"); let n2 = 1 << (self.num_rounds - 1); // n/2 @@ -454,16 +424,11 @@ where self.s2.truncate(n2); // ZK: update accumulated blinds - // r_c ← r_c + α·r_c+ + α⁻¹·r_c- - self.r_c = self.r_c + r_c_plus * *alpha + r_c_minus * alpha_inv; - // r_d1 ← α·r_d1_l + r_d1_r - self.r_d1 = r_d1_l * *alpha + r_d1_r; - // r_d2 ← α⁻¹·r_d2_l + r_d2_r - self.r_d2 = r_d2_l * alpha_inv + r_d2_r; - // r_e1 ← r_e1 + α·r_e1+ + α⁻¹·r_e1- - self.r_e1 = self.r_e1 + r_e1_plus * *alpha + r_e1_minus * alpha_inv; - // r_e2 ← r_e2 + α·r_e2+ + α⁻¹·r_e2- - self.r_e2 = self.r_e2 + r_e2_plus * *alpha + r_e2_minus * alpha_inv; + self.r_c = self.r_c + c_blinds[0] * *alpha + c_blinds[1] * alpha_inv; + self.r_d1 = d1_blinds[0] * *alpha + d1_blinds[1]; + self.r_d2 = d2_blinds[0] * alpha_inv + d2_blinds[1]; + self.r_e1 = self.r_e1 + e1_blinds[0] * *alpha + e1_blinds[1] * alpha_inv; + self.r_e2 = self.r_e2 + e2_blinds[0] * *alpha + e2_blinds[1] * alpha_inv; // Decrement round counter self.num_rounds -= 1; @@ -507,9 +472,7 @@ where ScalarProductMessage { e1, e2 } } - /// Get accumulated blinds (for ZK mode Σ-protocol) - /// - /// Returns (r_c, r_d1, r_d2) which are needed for the ZK scalar product proof. + /// Get accumulated blinds (r_c, r_d1, r_d2) for ZK mode Σ-protocol. 
pub fn blinds(&self) -> Blinds { (self.r_c, self.r_d1, self.r_d2) } @@ -596,112 +559,9 @@ where } } -/// ZK-specific methods for DoryProverState -/// -/// These methods are only available when the `zk` feature is enabled and -/// the prover state is parameterized with the `ZK` mode marker. -#[cfg(feature = "zk")] -impl DoryProverState<'_, E, ZK> -where - ::Scalar: Field, - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, -{ - /// Generate ZK scalar product proof (Σ-protocol) - /// - /// Proves knowledge of (v1, v2, rC, rD1, rD2) for relation L1: - /// - C = e(v1, v2) + rC·HT - /// - D1 = e(v1, Γ2) + rD1·HT - /// - D2 = e(Γ1, v2) + rD2·HT - /// - /// Must be called after `compute_final_message` when v1 and v2 are length 1. - /// - /// # Parameters - /// - `transcript`: Fiat-Shamir transcript for deriving challenge - /// - `rng`: Random number generator for sampling private blinds - /// - /// # Returns - /// `ScalarProductProof` containing (P1, P2, Q, R, E1, E2, r1, r2, r3) - pub fn scalar_product_proof, R: rand_core::RngCore>( - &self, - transcript: &mut T, - rng: &mut R, - ) -> ZkScalarProductProof { - debug_assert_eq!(self.v1.len(), 1, "v1 must be length 1 after folding"); - debug_assert_eq!(self.v2.len(), 1, "v2 must be length 1 after folding"); - - let v1 = self.v1[0]; - let v2 = self.v2[0]; - let gamma1 = self.setup.g1_vec[0]; - let gamma2 = self.setup.g2_vec[0]; - - type F = <::G1 as Group>::Scalar; - - // Sample random scalars from RNG (private to prover) - let s_d1: F = Field::random(rng); - let s_d2: F = Field::random(rng); - let d1 = gamma1.scale(&s_d1); - let d2 = gamma2.scale(&s_d2); - - // Sample blinding scalars from RNG (private to prover) - let r_p1: F = Field::random(rng); - let r_p2: F = Field::random(rng); - let r_q: F = Field::random(rng); - let r_r: F = Field::random(rng); - - // Compute first message: P1, P2, Q, R - // P1 = e(d1, Γ2) + rP1·HT - let p1 = E::pair(&d1, &gamma2) + self.setup.ht.scale(&r_p1); - // P2 = e(Γ1, d2) + rP2·HT - let p2 = 
E::pair(&gamma1, &d2) + self.setup.ht.scale(&r_p2); - // Q = e(d1, v2) + e(v1, d2) + rQ·HT - let q = E::pair(&d1, &v2) + E::pair(&v1, &d2) + self.setup.ht.scale(&r_q); - // R = e(d1, d2) + rR·HT - let r = E::pair(&d1, &d2) + self.setup.ht.scale(&r_r); - - // Append first message to transcript and derive challenge - transcript.append_serde(b"sigma_p1", &p1); - transcript.append_serde(b"sigma_p2", &p2); - transcript.append_serde(b"sigma_q", &q); - transcript.append_serde(b"sigma_r", &r); - let c = transcript.challenge_scalar(b"sigma_c"); - - // Compute response: E1, E2, r1, r2, r3 - // E1 = d1 + c·v1 - let e1 = d1 + v1.scale(&c); - // E2 = d2 + c·v2 - let e2 = d2 + v2.scale(&c); - // r1 = rP1 + c·rD1 - let r1 = r_p1 + c * self.r_d1; - // r2 = rP2 + c·rD2 - let r2 = r_p2 + c * self.r_d2; - // r3 = rR + c·rQ + c²·rC - let c_sq = c * c; - let r3 = r_r + c * r_q + c_sq * self.r_c; - - ScalarProductProof { - p1, - p2, - q, - r, - e1, - e2, - r1, - r2, - r3, - } - } -} - -/// Generate Sigma1 proof: proves knowledge of (y, rE2) such that E2 = y·Γ2,fin + rE2·H2 -/// -/// Also proves yC = y·Γ1,fin + ry·H1 for commitment consistency. -/// -/// # Generator semantics -/// - Γ1,fin = g1_vec\[0\], Γ2,fin = g2_vec\[0\] (commitment bases) -/// - H1 = h1, H2 = h2 (blinding bases, linearly independent from Γ_fin) +/// Generate Sigma1 proof: proves knowledge of (y, rE2) s.t. E2 = y·Γ2,fin + rE2·H2. 
#[cfg(feature = "zk")] -pub fn generate_sigma1_proof( +pub fn generate_sigma1_proof, R: rand_core::RngCore>( y: &::Scalar, r_e2: &::Scalar, r_y: &::Scalar, @@ -710,48 +570,32 @@ pub fn generate_sigma1_proof( rng: &mut R, ) -> Sigma1Proof::Scalar> where - E: PairingCurve, ::Scalar: Field, E::G2: Group::Scalar>, - T: Transcript, - R: rand_core::RngCore, { - // Γ2,fin = g2_vec[0], Γ1,fin = g1_vec[0] - let g2_fin = &setup.g2_vec[0]; - let g1_fin = &setup.g1_vec[0]; - - // Sample random k1, k2, k3 from RNG (private to prover) - let k1 = ::Scalar::random(rng); - let k2 = ::Scalar::random(rng); - let k3 = ::Scalar::random(rng); - - // A1 = k1·Γ2,fin + k2·H2 (commitment for E2 relation) + let (g2_fin, g1_fin) = (&setup.g2_vec[0], &setup.g1_vec[0]); + let (k1, k2, k3) = ( + ::Scalar::random(rng), + ::Scalar::random(rng), + ::Scalar::random(rng), + ); let a1 = g2_fin.scale(&k1) + setup.h2.scale(&k2); - // A2 = k1·Γ1,fin + k3·H1 (commitment for yC relation) let a2 = g1_fin.scale(&k1) + setup.h1.scale(&k3); - - // Append commitments to transcript transcript.append_serde(b"sigma1_a1", &a1); transcript.append_serde(b"sigma1_a2", &a2); - - // Get challenge let c = transcript.challenge_scalar(b"sigma1_c"); - - // Compute responses - let z1 = k1 + c * *y; - let z2 = k2 + c * *r_e2; - let z3 = k3 + c * *r_y; - - Sigma1Proof { a1, a2, z1, z2, z3 } + Sigma1Proof { + a1, + a2, + z1: k1 + c * *y, + z2: k2 + c * *r_e2, + z3: k3 + c * *r_y, + } } -/// Verify Sigma1 proof -/// -/// # Generator semantics -/// - g1_0 = Γ1,fin, g2_0 = Γ2,fin (commitment bases in verifier setup) -/// - h1 = H1, h2 = H2 (blinding bases) +/// Verify Sigma1 proof. 
#[cfg(feature = "zk")] -pub fn verify_sigma1_proof( +pub fn verify_sigma1_proof>( e2: &E::G2, y_commit: &E::G1, proof: &Sigma1Proof::Scalar>, @@ -759,89 +603,57 @@ pub fn verify_sigma1_proof( transcript: &mut T, ) -> Result<(), DoryError> where - E: PairingCurve, ::Scalar: Field, E::G2: Group::Scalar>, - T: Transcript, { - // Reconstruct challenge transcript.append_serde(b"sigma1_a1", &proof.a1); transcript.append_serde(b"sigma1_a2", &proof.a2); let c = transcript.challenge_scalar(b"sigma1_c"); - // Check E2 relation: z1·Γ2,fin + z2·H2 = A1 + c·E2 - // Γ2,fin = g2_0, H2 = h2 - let lhs1 = setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2); - let rhs1 = proof.a1 + e2.scale(&c); - if lhs1 != rhs1 { + if setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2) != proof.a1 + e2.scale(&c) { return Err(DoryError::InvalidProof); } - // Check yC relation: z1·Γ1,fin + z3·H1 = A2 + c·yC - // Γ1,fin = g1_0, H1 = h1 - let lhs2 = setup.g1_0.scale(&proof.z1) + setup.h1.scale(&proof.z3); - let rhs2 = proof.a2 + y_commit.scale(&c); - if lhs2 != rhs2 { + if setup.g1_0.scale(&proof.z1) + setup.h1.scale(&proof.z3) != proof.a2 + y_commit.scale(&c) { return Err(DoryError::InvalidProof); } - Ok(()) } -/// Generate Sigma2 proof: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2) -/// -/// Where t1 = rE1 + rv and t2 = -rD2. -/// -/// # Generator semantics -/// - Γ2,fin = g2_vec\[0\] (commitment base) -/// - H1 = h1, H2 = h2 (blinding bases) +/// Generate Sigma2 proof: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2). 
#[cfg(feature = "zk")] -pub fn generate_sigma2_proof( - t1: &::Scalar, // rE1 + rv - t2: &::Scalar, // -rD2 +pub fn generate_sigma2_proof, R: rand_core::RngCore>( + t1: &::Scalar, + t2: &::Scalar, setup: &ProverSetup, transcript: &mut T, rng: &mut R, ) -> Sigma2Proof<::Scalar, E::GT> where - E: PairingCurve, ::Scalar: Field, E::G2: Group::Scalar>, E::GT: Group::Scalar>, - T: Transcript, - R: rand_core::RngCore, { - // Γ2,fin = g2_vec[0] - let g2_fin = &setup.g2_vec[0]; - - // Sample random k1, k2 from RNG (private to prover) - let k1 = ::Scalar::random(rng); - let k2 = ::Scalar::random(rng); - - // A = e(H1, k1·Γ2,fin + k2·H2) - let g2_term = g2_fin.scale(&k1) + setup.h2.scale(&k2); - let a = E::pair(&setup.h1, &g2_term); - - // Append commitment to transcript + let (k1, k2) = ( + ::Scalar::random(rng), + ::Scalar::random(rng), + ); + let a = E::pair( + &setup.h1, + &(setup.g2_vec[0].scale(&k1) + setup.h2.scale(&k2)), + ); transcript.append_serde(b"sigma2_a", &a); - - // Get challenge let c = transcript.challenge_scalar(b"sigma2_c"); - - // Compute responses - let z1 = k1 + c * *t1; - let z2 = k2 + c * *t2; - - Sigma2Proof { a, z1, z2 } + Sigma2Proof { + a, + z1: k1 + c * *t1, + z2: k2 + c * *t2, + } } -/// Verify Sigma2 proof: e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2) -/// -/// # Generator semantics -/// - g2_0 = Γ2,fin (commitment base in verifier setup) -/// - h1 = H1, h2 = H2 (blinding bases) +/// Verify Sigma2 proof. 
#[cfg(feature = "zk")] -pub fn verify_sigma2_proof( +pub fn verify_sigma2_proof>( e1: &E::G1, d2: &E::GT, proof: &Sigma2Proof<::Scalar, E::GT>, @@ -849,28 +661,18 @@ pub fn verify_sigma2_proof( transcript: &mut T, ) -> Result<(), DoryError> where - E: PairingCurve, ::Scalar: Field, E::G2: Group::Scalar>, E::GT: Group::Scalar>, - T: Transcript, { - // Reconstruct challenge transcript.append_serde(b"sigma2_a", &proof.a); let c = transcript.challenge_scalar(b"sigma2_c"); - - // Compute expected value: e(E1, Γ2,fin) - D2 - // Γ2,fin = g2_0 - let e1_pair = E::pair(e1, &setup.g2_0); - let expected = e1_pair - *d2; - - // Check: e(H1, z1·Γ2,fin + z2·H2) = A + c·expected - // Γ2,fin = g2_0, H2 = h2 - let g2_term = setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2); - let lhs = E::pair(&setup.h1, &g2_term); - let rhs = proof.a + expected.scale(&c); - - if lhs == rhs { + let expected = E::pair(e1, &setup.g2_0) - *d2; + let lhs = E::pair( + &setup.h1, + &(setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2)), + ); + if lhs == proof.a + expected.scale(&c) { Ok(()) } else { Err(DoryError::InvalidProof) @@ -878,26 +680,7 @@ where } impl DoryVerifierState { - /// Create new verifier state - /// - /// # Parameters - /// - `c`: Initial inner product value - /// - `d1`: Initial d1 value (from VMV) - /// - `d2`: Initial d2 value (from VMV) - /// - `e1`: Initial e1 value - /// - `e2`: Initial e2 value - /// - /// Construct verifier state for O(1) accumulation - /// - /// - `s1_coords`: Per-round coordinates for s1 (right_vec in prover) - /// - `s2_coords`: Per-round coordinates for s2 (left_vec in prover) - /// - `num_rounds`: Number of rounds - /// - `setup`: Verifier setup parameters - /// - /// Note: `e1` and `d2` are stored both as initial values (for batched VMV check) - /// and as accumulators (updated during reduce rounds) - /// this is because the VMV check happens before the folding rounds, so we need to save - /// the value for the final batched pairing check. 
+ /// Create new verifier state for O(1) accumulation. #[allow(clippy::too_many_arguments)] pub fn new( c: E::GT, @@ -1003,56 +786,7 @@ impl DoryVerifierState { self.num_rounds -= 1; } - /// Verify final scalar product message - /// - /// Applies fold-scalars transformation and checks the final pairing equation. - /// Must be called when num_rounds=0 after all reduce rounds are complete. - /// - /// # Generator semantics - /// - g1_0, g2_0: Final generators (Γ1,fin, Γ2,fin) used as commitment bases - /// - h1, h2: Blinding generators (H1, H2) used for zero-knowledge masking - /// - /// # Non-optimized Protocol Equations - /// - /// ## VMV Check (batched together with the final pairing check) - /// - /// The VMV protocol requires: `D₂_init = e(E₁_init, Γ2,fin)` = `e(E₁_init, g2_0)` - /// - /// This was originally checked as a standalone pairing in `verify_evaluation_proof`. - /// We defer it here to batch with other pairings. - /// - /// ## Fold-Scalars Updates - /// - /// ```text - /// C' ← C + (s₁·s₂)·HT + γ·e(H₁, E₂) + γ⁻¹·e(E₁, H₂) - /// D₁' ← D₁ + e(H₁, (s₁·γ)·Γ₂₀) - /// D₂' ← D₂ + e((s₂·γ⁻¹)·Γ₁₀, H₂) - /// ``` - /// - /// ## Final Verification - /// - /// ```text - /// e(E₁ + d·Γ₁₀, E₂ + d⁻¹·Γ₂₀) = C' + χ₀ + d·D₂' + d⁻¹·D₁' - /// ``` - /// - /// # Multi-Pairing Optimization - /// - /// ## Batching the VMV Check - /// - /// We use random linear combination with challenge `d²` to defer the VMV check. - /// The VMV check uses Γ2,fin (g2_0), separate from the fold-scalars blinding which uses H₂ (h2). 
- /// - /// ## Final Combined Check (4 pairings) - /// - /// ```text - /// e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) // Pair 1: main verification - /// · e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) // Pair 2: γ·e(H₁, E₂) term - /// · e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) // Pair 3: γ⁻¹·e(E₁, H₂) term - /// · e(d²·E₁_init, Γ2,fin) // Pair 4: deferred VMV check - /// = T + d²·D₂_init - /// ``` - /// - /// This is 4 miller loops + 1 final exponentiation (vs 7+ for naive check) + /// Verify final scalar product message (4 pairings batched with VMV check). #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final")] pub fn verify_final( &mut self, @@ -1119,87 +853,14 @@ impl DoryVerifierState { } } - /// Verify final scalar product with ZK proof (Σ-protocol) - /// - /// Verifies knowledge of (v1, v2, rC, rD1, rD2) for relation L1: - /// - C = e(v1, v2) + rC·HT - /// - D1 = e(v1, Γ2) + rD1·HT - /// - D2 = e(Γ1, v2) + rD2·HT - /// - /// Verification equation from Dory paper: - /// e(E1 + d·Γ1, E2 + d⁻¹·Γ2) = χ + R + c·Q + c²·C + d·P2 + d·c·D2 - /// + d⁻¹·P1 + d⁻¹·c·D1 - (r3 + d·r2 + d⁻¹·r1)·HT - /// - /// # Parameters - /// - `proof`: ZK Σ-protocol proof containing (P1, P2, Q, R, E1, E2, r1, r2, r3) - /// - `d`: Final batching challenge from transcript - /// - `transcript`: Fiat-Shamir transcript for deriving challenge c - #[cfg(feature = "zk")] - #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final_zk")] - pub fn verify_final_zk>( - &mut self, - proof: &ZkScalarProductProof, - d: &::Scalar, - transcript: &mut T, - ) -> Result<(), DoryError> - where - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, - ::Scalar: Field, - { - debug_assert_eq!( - self.num_rounds, 0, - "num_rounds must be 0 for final verification" - ); - - let d_inv = (*d).inv().expect("d must be invertible"); - - // Reconstruct challenge c from transcript (must match prover's derivation) - transcript.append_serde(b"sigma_p1", &proof.p1); - transcript.append_serde(b"sigma_p2", &proof.p2); 
- transcript.append_serde(b"sigma_q", &proof.q); - transcript.append_serde(b"sigma_r", &proof.r); - let c = transcript.challenge_scalar(b"sigma_c"); - let c_sq = c * c; - - // LHS: e(E1 + d·Γ1, E2 + d⁻¹·Γ2) - let lhs_g1 = proof.e1 + self.setup.g1_0.scale(d); - let lhs_g2 = proof.e2 + self.setup.g2_0.scale(&d_inv); - let lhs = E::pair(&lhs_g1, &lhs_g2); - - // RHS: χ + R + c·Q + c²·C + d·P2 + d·c·D2 + d⁻¹·P1 + d⁻¹·c·D1 - (r3 + d·r2 + d⁻¹·r1)·HT - let mut rhs = self.setup.chi[0]; // χ - rhs = rhs + proof.r; // + R - rhs = rhs + proof.q.scale(&c); // + c·Q - rhs = rhs + self.c.scale(&c_sq); // + c²·C - rhs = rhs + proof.p2.scale(d); // + d·P2 - rhs = rhs + self.d2.scale(&(*d * c)); // + d·c·D2 - rhs = rhs + proof.p1.scale(&d_inv); // + d⁻¹·P1 - rhs = rhs + self.d1.scale(&(d_inv * c)); // + d⁻¹·c·D1 - - // Blind correction: -(r3 + d·r2 + d⁻¹·r1)·HT - let r_total = proof.r3 + *d * proof.r2 + d_inv * proof.r1; - rhs = rhs - self.setup.ht.scale(&r_total); - - if lhs == rhs { - Ok(()) - } else { - Err(DoryError::InvalidProof) - } - } - - /// Verify final scalar product with ZK proof using pre-derived challenge - /// - /// Same as `verify_final_zk` but takes the challenge `c` as a parameter - /// instead of deriving it from transcript. Use when the caller needs to - /// control transcript ordering (e.g., appending P1..R before final_message). + /// Verify final scalar product with ZK proof using pre-derived challenge. 
#[cfg(feature = "zk")] #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final_zk_with_challenge")] pub fn verify_final_zk_with_challenge( &mut self, proof: &ZkScalarProductProof, - c: &::Scalar, - d: &::Scalar, + c: &Scalar, + d: &Scalar, ) -> Result<(), DoryError> where E::G2: Group::Scalar>, diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs index 2930cdb..101a7f6 100644 --- a/tests/arkworks/zk.rs +++ b/tests/arkworks/zk.rs @@ -2,7 +2,7 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{create_zk_evaluation_proof, prove, setup, verify, verify_zk_evaluation_proof, ZK}; +use dory_pcs::{create_evaluation_proof, prove, setup, verify, ZK}; #[test] fn test_zk_full_workflow() { @@ -190,6 +190,7 @@ fn test_zk_non_square_matrix() { } /// Test the full ZK API where y is hidden from the verifier +/// With unified API, verifier extracts y_com from proof.y_com #[test] fn test_zk_hidden_evaluation() { let mut rng = rand::thread_rng(); @@ -204,29 +205,33 @@ fn test_zk_hidden_evaluation() { .unwrap(); let point = random_point(4); + let evaluation = poly.evaluate(&point); - // Create ZK proof - returns (proof, y_com) instead of revealing y + // Create ZK proof using unified API with ZK mode let mut prover_transcript = fresh_transcript(); - let (zk_proof, y_com) = - create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let proof = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); - // Verify ZK proof - verifier does NOT receive y, only y_com + // Verify y_com is present in proof + assert!(proof.y_com.is_some(), "ZK proof should contain y_com"); + assert!(proof.e2.is_some(), "ZK proof should contain e2"); + + // Verify ZK proof - for 
ZK proofs, evaluation is ignored (e2 from proof is used) let mut verifier_transcript = fresh_transcript(); - let result = verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( tier_2, - y_com, + evaluation, &point, - &zk_proof, + &proof, verifier_setup, &mut verifier_transcript, ); @@ -238,9 +243,9 @@ fn test_zk_hidden_evaluation() { ); } -/// Test that wrong y_com is rejected +/// Test that tampered e2 in proof is rejected #[test] -fn test_zk_wrong_y_com_rejected() { +fn test_zk_tampered_e2_rejected() { use dory_pcs::primitives::arithmetic::Group; let mut rng = rand::thread_rng(); @@ -255,10 +260,11 @@ fn test_zk_wrong_y_com_rejected() { .unwrap(); let point = random_point(4); + let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (zk_proof, y_com) = - create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + let mut proof = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, Some(tier_1), @@ -270,20 +276,22 @@ fn test_zk_wrong_y_com_rejected() { ) .unwrap(); - // Tamper with y_com - add a random element - let wrong_y_com = y_com + prover_setup.h1.scale(&ArkFr::from_u64(42)); + // Tamper with e2 in the proof + if let Some(ref mut e2) = proof.e2 { + *e2 = *e2 + prover_setup.h2.scale(&ArkFr::from_u64(42)); + } let mut verifier_transcript = fresh_transcript(); - let result = verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( tier_2, - wrong_y_com, + evaluation, &point, - &zk_proof, + &proof, verifier_setup, &mut verifier_transcript, ); - assert!(result.is_err(), "Verification should fail with wrong y_com"); + assert!(result.is_err(), "Verification should fail with tampered e2"); } /// Test full ZK with larger polynomial @@ -301,27 +309,27 @@ fn 
test_zk_hidden_evaluation_larger() { .unwrap(); let point = random_point(8); + let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (zk_proof, y_com) = - create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let proof = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); let mut verifier_transcript = fresh_transcript(); - let result = verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( tier_2, - y_com, + evaluation, &point, - &zk_proof, + &proof, verifier_setup, &mut verifier_transcript, ); diff --git a/tests/arkworks/zk_statistical.rs b/tests/arkworks/zk_statistical.rs index 25e8d4e..c58f429 100644 --- a/tests/arkworks/zk_statistical.rs +++ b/tests/arkworks/zk_statistical.rs @@ -7,7 +7,7 @@ use super::*; use ark_serialize::CanonicalSerialize; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{create_zk_evaluation_proof, setup, verify_zk_evaluation_proof}; +use dory_pcs::{create_evaluation_proof, setup, verify, DoryProof, ZK}; use rand::rngs::StdRng; use rand::SeedableRng; use std::collections::HashMap; @@ -59,27 +59,26 @@ fn bucket_from_serializable(elem: &T) -> usize { (bytes[0] as usize) % NUM_BUCKETS } +type ArkDoryProof = DoryProof< + dory_pcs::backends::arkworks::ArkG1, + dory_pcs::backends::arkworks::ArkG2, + dory_pcs::backends::arkworks::ArkGT, +>; + /// Collect bucket statistics from a full ZK proof (with hidden y) -fn collect_full_zk_proof_stats( - proof: &dory_pcs::ZkDoryProof< - dory_pcs::backends::arkworks::ArkG1, - dory_pcs::backends::arkworks::ArkG2, - ArkFr, - 
dory_pcs::backends::arkworks::ArkGT, - >, - y_com: &dory_pcs::backends::arkworks::ArkG1, - tracker: &mut BucketTracker, -) { - // ZK VMV message elements - includes y_com and prover-computed e2 +fn collect_full_zk_proof_stats(proof: &ArkDoryProof, tracker: &mut BucketTracker) { + // VMV message elements tracker.record("zk_vmv_c", bucket_from_serializable(&proof.vmv_message.c)); tracker.record("zk_vmv_d2", bucket_from_serializable(&proof.vmv_message.d2)); tracker.record("zk_vmv_e1", bucket_from_serializable(&proof.vmv_message.e1)); - tracker.record("zk_vmv_e2", bucket_from_serializable(&proof.vmv_message.e2)); // Prover-computed E2 - tracker.record( - "zk_vmv_y_com", - bucket_from_serializable(&proof.vmv_message.y_com), - ); // Commitment to y - tracker.record("zk_y_com_input", bucket_from_serializable(y_com)); // y_com returned to caller + + // ZK-specific fields from proof + if let Some(ref e2) = proof.e2 { + tracker.record("zk_vmv_e2", bucket_from_serializable(e2)); + } + if let Some(ref y_com) = proof.y_com { + tracker.record("zk_vmv_y_com", bucket_from_serializable(y_com)); + } // First reduce messages (D values only - e1_beta/e2_beta are public) for (i, msg) in proof.first_messages.iter().enumerate() { @@ -142,75 +141,33 @@ fn collect_full_zk_proof_stats( ); // Sigma1 proof (proves y_com and E2 commit to same y) - tracker.record( - "sigma1_a1", - bucket_from_serializable(&proof.sigma1_proof.a1), - ); - tracker.record( - "sigma1_a2", - bucket_from_serializable(&proof.sigma1_proof.a2), - ); - tracker.record( - "sigma1_z1", - bucket_from_serializable(&proof.sigma1_proof.z1), - ); - tracker.record( - "sigma1_z2", - bucket_from_serializable(&proof.sigma1_proof.z2), - ); - tracker.record( - "sigma1_z3", - bucket_from_serializable(&proof.sigma1_proof.z3), - ); + if let Some(ref sigma1) = proof.sigma1_proof { + tracker.record("sigma1_a1", bucket_from_serializable(&sigma1.a1)); + tracker.record("sigma1_a2", bucket_from_serializable(&sigma1.a2)); + 
tracker.record("sigma1_z1", bucket_from_serializable(&sigma1.z1)); + tracker.record("sigma1_z2", bucket_from_serializable(&sigma1.z2)); + tracker.record("sigma1_z3", bucket_from_serializable(&sigma1.z3)); + } // Sigma2 proof (proves VMV relation) - tracker.record("sigma2_a", bucket_from_serializable(&proof.sigma2_proof.a)); - tracker.record( - "sigma2_z1", - bucket_from_serializable(&proof.sigma2_proof.z1), - ); - tracker.record( - "sigma2_z2", - bucket_from_serializable(&proof.sigma2_proof.z2), - ); + if let Some(ref sigma2) = proof.sigma2_proof { + tracker.record("sigma2_a", bucket_from_serializable(&sigma2.a)); + tracker.record("sigma2_z1", bucket_from_serializable(&sigma2.z1)); + tracker.record("sigma2_z2", bucket_from_serializable(&sigma2.z2)); + } // Scalar product proof - tracker.record( - "zk_sp_p1", - bucket_from_serializable(&proof.scalar_product_proof.p1), - ); - tracker.record( - "zk_sp_p2", - bucket_from_serializable(&proof.scalar_product_proof.p2), - ); - tracker.record( - "zk_sp_q", - bucket_from_serializable(&proof.scalar_product_proof.q), - ); - tracker.record( - "zk_sp_r", - bucket_from_serializable(&proof.scalar_product_proof.r), - ); - tracker.record( - "zk_sp_e1", - bucket_from_serializable(&proof.scalar_product_proof.e1), - ); - tracker.record( - "zk_sp_e2", - bucket_from_serializable(&proof.scalar_product_proof.e2), - ); - tracker.record( - "zk_sp_r1", - bucket_from_serializable(&proof.scalar_product_proof.r1), - ); - tracker.record( - "zk_sp_r2", - bucket_from_serializable(&proof.scalar_product_proof.r2), - ); - tracker.record( - "zk_sp_r3", - bucket_from_serializable(&proof.scalar_product_proof.r3), - ); + if let Some(ref sp) = proof.scalar_product_proof { + tracker.record("zk_sp_p1", bucket_from_serializable(&sp.p1)); + tracker.record("zk_sp_p2", bucket_from_serializable(&sp.p2)); + tracker.record("zk_sp_q", bucket_from_serializable(&sp.q)); + tracker.record("zk_sp_r", bucket_from_serializable(&sp.r)); + tracker.record("zk_sp_e1", 
bucket_from_serializable(&sp.e1)); + tracker.record("zk_sp_e2", bucket_from_serializable(&sp.e2)); + tracker.record("zk_sp_r1", bucket_from_serializable(&sp.r1)); + tracker.record("zk_sp_r2", bucket_from_serializable(&sp.r2)); + tracker.record("zk_sp_r3", bucket_from_serializable(&sp.r3)); + } } /// Statistical test for zero-knowledge property (full ZK with hidden y). @@ -219,6 +176,7 @@ fn collect_full_zk_proof_stats( /// that all resulting proof elements (including y_com, Sigma1, Sigma2) are /// statistically indistinguishable from uniform random. #[test] +#[ignore] // Long-running statistical test fn test_zk_statistical_indistinguishability() { const NUM_TRIALS: usize = 100; @@ -248,9 +206,10 @@ fn test_zk_statistical_indistinguishability() { .commit::(nu, sigma, &prover_setup) .unwrap(); + let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let (proof, y_com) = - create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + let proof = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, Some(tier_1), @@ -262,21 +221,19 @@ fn test_zk_statistical_indistinguishability() { ) .unwrap(); - // Verify proof is valid (without revealing y!) 
+ // Verify proof is valid let mut verifier_transcript = fresh_transcript(); - assert!( - verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - y_com, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok() - ); - - collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_zeros); + assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok()); + + collect_full_zk_proof_stats(&proof, &mut tracker_zeros); } // Distribution B: All-ones polynomial (y=2^n for point=(0,0,...)) @@ -288,9 +245,10 @@ fn test_zk_statistical_indistinguishability() { .commit::(nu, sigma, &prover_setup) .unwrap(); + let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let (proof, y_com) = - create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + let proof = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, Some(tier_1), @@ -303,19 +261,17 @@ fn test_zk_statistical_indistinguishability() { .unwrap(); let mut verifier_transcript = fresh_transcript(); - assert!( - verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - y_com, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok() - ); - - collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_ones); + assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok()); + + collect_full_zk_proof_stats(&proof, &mut tracker_ones); } // Distribution C: Random polynomial @@ -326,9 +282,10 @@ fn test_zk_statistical_indistinguishability() { .commit::(nu, sigma, &prover_setup) .unwrap(); + let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let (proof, y_com) = - 
create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + let proof = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, Some(tier_1), @@ -341,19 +298,17 @@ fn test_zk_statistical_indistinguishability() { .unwrap(); let mut verifier_transcript = fresh_transcript(); - assert!( - verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - y_com, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok() - ); - - collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_random); + assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok()); + + collect_full_zk_proof_stats(&proof, &mut tracker_random); } } @@ -408,6 +363,7 @@ fn test_zk_statistical_indistinguishability() { /// Test that proof distributions from different witnesses are similar (two-sample test) /// Uses full ZK API with hidden y to test all proof elements including y_com. 
#[test] +#[ignore] // Long-running statistical test fn test_zk_witness_independence() { const NUM_TRIALS: usize = 80; @@ -435,9 +391,10 @@ fn test_zk_witness_independence() { .commit::(nu, sigma, &prover_setup) .unwrap(); + let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let (proof, y_com) = - create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + let proof = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, Some(tier_1), @@ -450,19 +407,17 @@ fn test_zk_witness_independence() { .unwrap(); let mut verifier_transcript = fresh_transcript(); - assert!( - verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - y_com, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok() - ); - - collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_skewed); + assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok()); + + collect_full_zk_proof_stats(&proof, &mut tracker_skewed); } // Uniform: Random polynomial (y will be random) @@ -473,9 +428,10 @@ fn test_zk_witness_independence() { .commit::(nu, sigma, &prover_setup) .unwrap(); + let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let (proof, y_com) = - create_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, _>( + let proof = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, Some(tier_1), @@ -488,19 +444,17 @@ fn test_zk_witness_independence() { .unwrap(); let mut verifier_transcript = fresh_transcript(); - assert!( - verify_zk_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - y_com, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok() - ); - - 
collect_full_zk_proof_stats(&proof, &y_com, &mut tracker_uniform); + assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .is_ok()); + + collect_full_zk_proof_stats(&proof, &mut tracker_uniform); } } From 7bbfc69a0d80aaf5db802539238e1dbef73be9ef Mon Sep 17 00:00:00 2001 From: markosg04 Date: Thu, 26 Feb 2026 17:33:56 -0500 Subject: [PATCH 03/16] refactor: rm mds --- ZK_IMPLEMENTATION_AUDIT.md | 357 ------------------ dory_paper_reference.md | 176 --------- dory_zk_analysis.md | 363 ------------------ zk_refactor_plan.md | 737 ------------------------------------- 4 files changed, 1633 deletions(-) delete mode 100644 ZK_IMPLEMENTATION_AUDIT.md delete mode 100644 dory_paper_reference.md delete mode 100644 dory_zk_analysis.md delete mode 100644 zk_refactor_plan.md diff --git a/ZK_IMPLEMENTATION_AUDIT.md b/ZK_IMPLEMENTATION_AUDIT.md deleted file mode 100644 index c176ef0..0000000 --- a/ZK_IMPLEMENTATION_AUDIT.md +++ /dev/null @@ -1,357 +0,0 @@ -# Dory ZK Implementation Audit - -This document compares the current implementation against the Dory paper's ZK requirements. - ---- - -## Executive Summary - -**Current State: Partially Implemented (Phase 1 Complete, Phase 2 Missing)** - -The implementation has: -- ✅ Mode trait abstraction (`Transparent` / `ZK`) -- ✅ Blind sampling and masking for all protocol messages -- ✅ Blind accumulation logic in prover state -- ✅ Sigma proof structures defined (`Sigma1Proof`, `Sigma2Proof`, `ScalarProductProof`) -- ✅ Sigma proof generation functions implemented -- ✅ Sigma proof verification functions implemented - -**Critical Gap**: The ZK proofs produce the **same proof type** as transparent proofs (`DoryProof`), and verification uses the **same code path** that reveals `y`. The Sigma proofs exist but are **not wired into the main API**. - ---- - -## Detailed Analysis - -### 1. 
Mode Trait ✅ CORRECT - -**Implementation** (`src/mode.rs:14-65`): -```rust -pub trait Mode: 'static { - fn sample(rng: &mut R) -> F; - fn mask(value: G, base: &G, blind: &G::Scalar) -> G; -} - -impl Mode for Transparent { - fn sample(_rng: &mut R) -> F { F::zero() } - fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { value } -} - -impl Mode for ZK { - fn sample(rng: &mut R) -> F { F::random(rng) } - fn mask(value: G, base: &G, blind: &G::Scalar) -> G { value + base.scale(blind) } -} -``` - -**Assessment**: Correct. Blinds sampled from RNG (not transcript), masking is additive. - ---- - -### 2. VMV Message ⚠️ PARTIAL - -**What's Implemented** (`src/evaluation_proof.rs:141-165`): -```rust -let r_c: F = Mo::sample(rng); -let r_d2: F = Mo::sample(rng); -let r_e1: F = Mo::sample(rng); -let r_e2: F = Mo::sample(rng); - -let c = Mo::mask(c_raw, &setup.ht, &r_c); -let d2 = Mo::mask(d2_raw, &setup.ht, &r_d2); -let e1 = Mo::mask(e1_raw, &setup.h1, &r_e1); - -let vmv_message = VMVMessage { c, d2, e1 }; // <-- Uses non-ZK struct! -``` - -**What's Missing**: - -1. **`E_2` not computed/sent**: In ZK mode, the prover must send `E_2 = y·Γ_2,fin + r_e2·H_2`. Currently `r_e2` is sampled but `E_2` is not computed or included in the message. - -2. **`y_com` not computed**: The commitment `y_com = y·Γ_1,fin + r_y·H_1` is not created. - -3. **Wrong message type**: Uses `VMVMessage` instead of `ZkVMVMessage`. - -**Required Fix**: -```rust -// In ZK mode: -let y = polynomial.evaluate(point); -let e2 = Mo::mask(setup.g2_vec[0].scale(&y), &setup.h2, &r_e2); -let r_y: F = Mo::sample(rng); -let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); - -let vmv_message = ZkVMVMessage { c, d2, e1, e2, y_com }; -``` - ---- - -### 3. 
Reduce Round First Message ✅ CORRECT - -**Implementation** (`src/reduce_and_fold.rs:237-314`): -```rust -let r_d1_l: F = M::sample(rng); -let r_d1_r: F = M::sample(rng); -let r_d2_l: F = M::sample(rng); -let r_d2_r: F = M::sample(rng); - -let d1_left = M::mask(d1_left_base, &self.setup.ht, &r_d1_l); -let d1_right = M::mask(d1_right_base, &self.setup.ht, &r_d1_r); -let d2_left = M::mask(d2_left_base, &self.setup.ht, &r_d2_l); -let d2_right = M::mask(d2_right_base, &self.setup.ht, &r_d2_r); -``` - -**Assessment**: Correct. D values properly masked with H_T. - ---- - -### 4. Reduce Round Second Message ✅ CORRECT - -**Implementation** (`src/reduce_and_fold.rs:351-414`): -```rust -let r_c_plus: F = M::sample(rng); -let r_c_minus: F = M::sample(rng); -let r_e1_plus: F = M::sample(rng); -// ... etc - -let c_plus = M::mask(c_plus_base, &self.setup.ht, &r_c_plus); -let c_minus = M::mask(c_minus_base, &self.setup.ht, &r_c_minus); -let e1_plus = M::mask(e1_plus_base, &self.setup.h1, &r_e1_plus); -let e1_minus = M::mask(e1_minus_base, &self.setup.h1, &r_e1_minus); -let e2_plus = M::mask(e2_plus_base, &self.setup.h2, &r_e2_plus); -let e2_minus = M::mask(e2_minus_base, &self.setup.h2, &r_e2_minus); -``` - -**Assessment**: Correct. All values properly masked with respective generators. - ---- - -### 5. Blind Accumulation ⚠️ ISSUE - -**Implementation** (`src/reduce_and_fold.rs:340-343, 463-477`): - -After first challenge (β): -```rust -self.r_c = self.r_c + self.r_d2 * *beta + self.r_d1 * beta_inv; -``` - -After second challenge (α): -```rust -self.r_c = self.r_c + r_c_plus * *alpha + r_c_minus * alpha_inv; -self.r_d1 = r_d1_l * *alpha + r_d1_r; -self.r_d2 = r_d2_l * alpha_inv + r_d2_r; -self.r_e1 = self.r_e1 + r_e1_plus * *alpha + r_e1_minus * alpha_inv; -self.r_e2 = self.r_e2 + r_e2_plus * *alpha + r_e2_minus * alpha_inv; -``` - -**Issue**: The first challenge uses `self.r_d1` and `self.r_d2`, but these are **not yet set** from message blinds at that point. 
They should be folded from per-round blinds. - -**Paper says**: -- After β: `r_C ← r_C + β·(r_d2_l + r_d2_r) + β⁻¹·(r_d1_l + r_d1_r)` (sum of current round blinds) -- After α: `r_D1 ← α·r_d1_l + r_d1_r`, `r_D2 ← α⁻¹·r_d2_l + r_d2_r` - -**Current Code** uses `self.r_d1` and `self.r_d2` in `apply_first_challenge`, but those are set in `apply_second_challenge`. This is **inverted** - the accumulation happens after the challenge where it should use the blinds, but uses prior round's accumulated values. - -**Fix needed**: Pass message blinds to `apply_first_challenge` and use them directly, not the accumulated values. - ---- - -### 6. Final Message ✅ CORRECT - -**Implementation** (`src/reduce_and_fold.rs:486-515`): -```rust -let e1 = self.v1[0] + self.setup.h1.scale(&gamma_s1); -let e2 = self.v2[0] + self.setup.h2.scale(&gamma_inv_s2); - -// ZK: final blind accumulation -self.r_c = self.r_c + self.r_e2 * *gamma + self.r_e1 * gamma_inv; -``` - -**Assessment**: Correct formula for final blind accumulation. - ---- - -### 7. Scalar Product Σ-Protocol ✅ IMPLEMENTED (but not used) - -**Implementation** (`src/reduce_and_fold.rs:551-619`): -```rust -pub fn scalar_product_proof(&self, transcript: &mut T, rng: &mut R) -> ZkScalarProductProof { - // Sample d1, d2 - let s_d1 = F::random(rng); - let s_d2 = F::random(rng); - let d1 = gamma1.scale(&s_d1); - let d2 = gamma2.scale(&s_d2); - - // Sample blinding scalars - let r_p1 = F::random(rng); - // ... - - // Compute P1, P2, Q, R - let p1 = E::pair(&d1, &gamma2) + self.setup.ht.scale(&r_p1); - // ... - - // Get challenge c - let c = transcript.challenge_scalar(b"sigma_c"); - - // Compute responses - let e1 = d1 + v1.scale(&c); - let e2 = d2 + v2.scale(&c); - let r1 = r_p1 + c * self.r_d1; - let r2 = r_p2 + c * self.r_d2; - let r3 = r_r + c * r_q + c_sq * self.r_c; - - ScalarProductProof { p1, p2, q, r, e1, e2, r1, r2, r3 } -} -``` - -**Assessment**: Implementation looks correct per Dory paper. 
- -**Issue**: This function exists but is **never called** in the proof generation flow. - ---- - -### 8. Sigma1 Proof (VMV Consistency) ✅ IMPLEMENTED (but not used) - -**Implementation** (`src/reduce_and_fold.rs:630-672`): -- Proves `E_2 = y·Γ_2,fin + r_E2·H_2` -- Proves `y_com = y·Γ_1,fin + r_y·H_1` - -**Assessment**: Implementation looks correct. - -**Issue**: Never called - no `y_com` is created in the main flow. - ---- - -### 9. Sigma2 Proof (VMV Relation) ✅ IMPLEMENTED (but not used) - -**Implementation** (`src/reduce_and_fold.rs:725-762`): -- Proves `e(E_1, Γ_2,fin) - D_2 = e(H_1, t_1·Γ_2,fin + t_2·H_2)` - -**Assessment**: Implementation looks correct. - -**Issue**: Never called. - ---- - -### 10. Verification ❌ CRITICAL ISSUE - -**Current verify path** (`src/evaluation_proof.rs:341`): -```rust -// E2 = y · Γ2,fin where Γ2,fin = g2_0 -let e2 = setup.g2_0.scale(&evaluation); // <-- REVEALS y! -``` - -**Problem**: Even when proving with `ZK` mode, verification: -1. Takes `evaluation` as a parameter (reveals y) -2. Computes `E_2` from `y` (doesn't use prover's blinded E_2) -3. Uses `verify_final` not `verify_final_zk` - -**For true ZK**, verification should: -1. NOT take `evaluation` as input -2. Use prover's `E_2` from `ZkVMVMessage` -3. Verify Sigma1 proof to confirm `y_com` and `E_2` are consistent -4. Call `verify_final_zk` with the scalar product proof - ---- - -### 11. Proof Structure ❌ NOT USED - -**Defined** (`src/proof.rs:53-80`): -```rust -pub struct ZkDoryProof { - pub vmv_message: ZkVMVMessage, - pub first_messages: Vec>, - pub second_messages: Vec>, - pub final_message: ScalarProductMessage, - pub sigma1_proof: Sigma1Proof, - pub sigma2_proof: Sigma2Proof, - pub scalar_product_proof: ScalarProductProof, - pub nu: usize, - pub sigma: usize, -} -``` - -**Issue**: This struct is defined but: -1. No `create_zk_evaluation_proof` function produces it -2. No `verify_zk_evaluation_proof` function consumes it -3. 
Tests use regular `prove/verify` which produce/consume `DoryProof` - ---- - -## Test Analysis - -**Current ZK tests** (`tests/arkworks/zk.rs`): -```rust -let proof = prove::<_, BN254, ..., ZK, _>(...).unwrap(); // Returns DoryProof -verify::<_, BN254, ...>(tier_2, evaluation, &point, &proof, ...); // Takes evaluation! -``` - -**What's being tested**: The masking of intermediate protocol messages. - -**What's NOT tested**: -- Hiding the evaluation `y` -- Sigma proofs -- ZK verification path - -The tests pass because the underlying **values** are correct (masked values unmask to correct values), but the protocol is not actually zero-knowledge since `y` is still revealed to the verifier. - ---- - -## Summary of Issues - -| Component | Status | Issue | -|-----------|--------|-------| -| Mode trait | ✅ | - | -| VMV masking | ✅ | Values masked correctly | -| VMV message type | ❌ | Uses `VMVMessage` not `ZkVMVMessage` | -| E_2 computation | ❌ | Not sent by prover in ZK mode | -| y_com computation | ❌ | Not created | -| Reduce masking | ✅ | All values masked correctly | -| Blind accumulation | ⚠️ | Order may be wrong in first challenge | -| Final masking | ✅ | Correct | -| ScalarProductProof | ✅ | Implemented correctly | -| Sigma1Proof | ✅ | Implemented correctly | -| Sigma2Proof | ✅ | Implemented correctly | -| ZkDoryProof struct | ✅ | Defined correctly | -| `prove` API (ZK) | ❌ | Returns `DoryProof`, not `ZkDoryProof` | -| `verify` API (ZK) | ❌ | Takes `evaluation` as input, reveals `y` | -| verify_final_zk | ✅ | Implemented but not used | - ---- - -## Required Work to Complete ZK - -### Phase 1: Fix Blind Accumulation Order -1. Modify `apply_first_challenge` to take message blinds -2. Use message blinds (sum) directly, not accumulated state - -### Phase 2: Create ZK Proof Generation -1. 
Add `create_zk_evaluation_proof` function that: - - Computes `E_2 = y·Γ_2,fin + r_e2·H_2` - - Computes `y_com = y·Γ_1,fin + r_y·H_1` - - Creates `ZkVMVMessage` - - Generates `Sigma1Proof` - - Generates `Sigma2Proof` - - Generates `ScalarProductProof` - - Returns `ZkDoryProof` - -### Phase 3: Create ZK Verification -1. Add `verify_zk_evaluation_proof` function that: - - Does NOT take `evaluation` as input - - Verifies `Sigma1Proof` - - Verifies `Sigma2Proof` - - Uses prover's `E_2` from message - - Calls `verify_final_zk` with scalar product proof - -### Phase 4: Update Tests -1. Add tests that verify `y` is not revealed -2. Test that verification works without knowing `y` -3. Test Sigma proof soundness (invalid proofs rejected) - ---- - -## Conclusion - -The implementation has laid excellent groundwork with: -- Correct masking/blinding infrastructure -- Correct Sigma proof implementations -- Correct proof structures - -But it's **incomplete** because the ZK code paths are not integrated into the main `prove`/`verify` API. Currently, using `ZK` mode only masks intermediate values but still reveals `y` to the verifier - it's not truly zero-knowledge. diff --git a/dory_paper_reference.md b/dory_paper_reference.md deleted file mode 100644 index cb33427..0000000 --- a/dory_paper_reference.md +++ /dev/null @@ -1,176 +0,0 @@ -# Dory: Efficient, Transparent Arguments for Generalised Inner Products and Polynomial Commitments - -**Author:** Jonathan Lee -**Published:** TCC 2021 (Theory of Cryptography Conference) -**IACR ePrint:** [2020/1274](https://eprint.iacr.org/2020/1274) -**Springer:** [LNCS vol 13043](https://link.springer.com/chapter/10.1007/978-3-030-90453-1_1) -**License:** Creative Commons Attribution (CC BY) - ---- - -## Abstract - -Dory is a transparent setup, public-coin interactive argument for proving correctness of an inner-pairing product between committed vectors of elements of the two source groups. 
For a product of vectors of length n, proofs consist of 6 log n target group elements, one element from each source group, and 3 scalars. Verifier work is dominated by an O(log n) multi-exponentiation in the target group and O(1) pairings. Security is reduced to the standard SXDH assumption in the standard model. - -Dory is applied to build a multivariate polynomial commitment scheme via the Fiat-Shamir transform. For a dense polynomial with n coefficients: -- Prover work to compute a commitment is dominated by a multi-exponentiation in one source group of size n -- Prover work to show that a commitment to an evaluation is correct is O(n^(log 8/log 25)) in general, or O(n^(1/2)) for univariate or multilinear polynomials -- Communication complexity and Verifier work are both O(log n) - ---- - -## Complexity Summary - -| Metric | Complexity | -|--------|------------| -| Setup | Transparent (no toxic waste) | -| Proof size | 6 log n target group elements + O(1) | -| Verifier work | O(log n) multi-exp in G_T + O(1) pairings | -| Prover work (general) | O(n^(log 8/log 25)) ≈ O(n^0.65) | -| Prover work (univariate/multilinear) | O(√n) | -| Commitment size | 192 bytes (at n = 2²⁰) | -| Security assumption | SXDH (Symmetric External Diffie-Hellman) | - ---- - -## Core Technical Contributions - -### 1. Inner-Pairing-Product Commitments - -Dory employs inner-pairing-product commitments utilizing both G₁ and G₂ groups. The groups must not equal each other for DDH to hold and the scheme to be binding. The commitment is expressed as an inner product: ⟨vector, commitment_key⟩. - -For a pairing group (G₁, G₂, G_T): -- If the message is a vector in G₁, the commitment key is a vector in G₂ -- If the message is a vector in G₂, the commitment key is a vector in G₁ -- The commitment itself lives in G_T - -This structure-preserving symmetry between messages and commitment keys is a key insight. - -### 2. 
Recursive Folding (Reduce-and-Fold) - -The core technique is the observation that for any vectors u_L, u_R, v_L, v_R and any non-zero scalar α: - -``` -⟨u_L || u_R, v_L || v_R⟩ = ⟨α·u_L + u_R, α⁻¹·v_L + v_R⟩ - α·⟨u_L, v_R⟩ - α⁻¹·⟨u_R, v_L⟩ -``` - -This identity allows reducing a claim about the inner product ⟨u, v⟩ of vectors of length n to claims about inner products of vectors of length n/2. - -**Protocol Flow:** -1. Prover sends cross-terms: L = ⟨u_L, v_R⟩ and R = ⟨u_R, v_L⟩ -2. Verifier sends random challenge α -3. Both parties compute folded vectors: u' = α·u_L + u_R and v' = α⁻¹·v_L + v_R -4. Recurse until vectors have length 1 -5. Final claim verified with a sigma protocol - -The verifier uses homomorphic properties of the commitment scheme (with prover assistance) to find commitments to the shorter folded vectors. - -### 3. Parallel Recursion for Logarithmic Verification - -In addition to proving knowledge of u such that c_u = ⟨u, g⟩, the prover also shows knowledge of g such that c_g = ⟨g, Γ⟩. This is done via recursion by executing the protocol in parallel using the same randomness. - -The key optimization doubles the rounds of interaction to 2 log n in order to keep the messages per round constant, achieving O(log n) verification. - -### 4. Pre-processing with Matrix Commitments - -Using matrix commitments (similar to Hyrax), the prover: -1. Computes Generalized Pedersen Commitments internally (rows of the coefficient matrix) -2. Generates an inner-pairing-product commitment of the commitment vector - -**Pre-processing procedure (multi-round folding):** -- Round 1: Commits to g_L^(0) and g_R^(0) using public random key Γ^(1) ∈ G^(n/2), producing Δ_L^(1) and Δ_R^(1) -- Round i: Partitions Γ^(i−1) into Γ_L^(i−1) and Γ_R^(i−1), committing using Γ^(i) ∈ G^(n/2^i) - -The commitment key update formula: -``` -c_g^(i) = α_{i-1} · Δ_L^(i) + α_i · Δ_R^(i) -``` - ---- - -## Comparison with Related Work - -### vs. 
Bulletproofs -- Like Bulletproofs, uses recursive folding -- Unlike Bulletproofs, instead of updating g directly, the verifier computes a commitment to it in constant time using homomorphism - -### vs. Trusted Setup Schemes (KZG) -- Dory achieves similar asymptotics without trusted setup -- Previously, these asymptotics required trusted setup or concretely inefficient groups of unknown order - -### vs. DARK/Pietrzak -- Dory avoids groups of unknown order which are concretely inefficient - ---- - -## Concrete Performance (n = 2²⁰, single core) - -| Operation | Cost | -|-----------|------| -| Commitment size | 192 bytes | -| Evaluation proof size | ~18 KB | -| Proof generation time | ~3 seconds | -| Verification time | ~25 ms | - -**Batched evaluation (n = 2²⁰):** -| Metric | Marginal Cost | -|--------|---------------| -| Communication | < 1 KB | -| Prover time | ~300 ms | -| Verifier time | ~1 ms | - ---- - -## Protocol Messages (per round) - -In each round of the reduce-and-fold protocol, the prover sends: -- 6 target group elements (G_T) -- Cross-term products for the folding identity - -Total proof: 6 log n target group elements + 1 G₁ element + 1 G₂ element + 3 scalars - ---- - -## Security Model - -- **Assumption:** SXDH (Symmetric External Diffie-Hellman) -- **Model:** Standard model (no random oracle for security, though Fiat-Shamir used for non-interactivity) -- **Properties:** - - Computationally binding - - Perfectly hiding (in the interactive version) - - Knowledge sound - ---- - -## Application to Polynomial Commitments - -Dory constructs a multivariate polynomial commitment scheme: - -1. **Commitment:** Interpret polynomial coefficients as a matrix, commit using the two-tiered scheme -2. **Evaluation:** Reduce polynomial evaluation to inner-pairing-product claims -3. **Non-interactivity:** Apply Fiat-Shamir transform - -For multilinear polynomials (common in SNARKs), the structure enables O(√n) prover work for evaluation proofs. 
- ---- - -## Integration with Jolt zkVM - -Dory is used as the polynomial commitment scheme in Jolt, a RISC-V zkVM: -- The commitment key consists of random group elements generated by evaluating a cryptographic PRG -- Can be generated "on the fly" but explicit storage enables faster proving -- Space and Time's high-performance Dory implementation contributed to 6x speedup in Jolt - ---- - -## References - -1. Lee, J. (2021). Dory: Efficient, Transparent Arguments for Generalised Inner Products and Polynomial Commitments. In: Theory of Cryptography (TCC 2021), LNCS vol 13043, Springer. - -2. IACR ePrint: https://eprint.iacr.org/2020/1274 - -3. Bünz, B., Maller, M., Mishra, P., Tyagi, N., & Vesely, P. (2019). Proofs for Inner Pairing Products and Applications. IACR ePrint 2019/1177. - -4. Jolt zkVM: https://github.com/a16z/jolt - -5. Thaler, J. Proofs, Arguments, and Zero-Knowledge (Section 15.4 covers Dory). diff --git a/dory_zk_analysis.md b/dory_zk_analysis.md deleted file mode 100644 index bd7cb7b..0000000 --- a/dory_zk_analysis.md +++ /dev/null @@ -1,363 +0,0 @@ -2# Dory ZK vs Non-ZK: Deep Technical Analysis - -This document provides a concrete analysis of the differences between transparent (non-ZK) and zero-knowledge versions of the Dory polynomial commitment scheme. - ---- - -## 1. Overview: What Zero-Knowledge Means for Dory - -In the **transparent (non-ZK)** Dory protocol: -- The prover reveals the polynomial evaluation `y` in clear -- All protocol messages directly contain the computed values -- The verifier learns `y` and can verify `C(r) = y` - -In the **zero-knowledge (ZK)** Dory protocol: -- The prover commits to `y` without revealing it -- All protocol messages are **masked** with random blinds -- The verifier learns only that the prover knows a valid `(polynomial, evaluation)` pair - ---- - -## 2. 
Core Technique: Blinding/Masking - -### 2.1 The Masking Operation - -For any group element `V` computed by the prover, the ZK version masks it: - -``` -V_masked = V + r·H -``` - -Where: -- `V` is the actual computed value (e.g., a pairing result in G_T) -- `r` is a random scalar sampled from the prover's private RNG -- `H` is a **blinding generator** (H_T, H_1, or H_2 depending on the group) - -**Key Insight**: The blinding generators `H_T, H_1, H_2` must be linearly independent from the commitment generators `Γ_1, Γ_2`. This ensures: -1. The mask `r·H` looks random to the verifier -2. The binding property is preserved (prover can't find alternate openings) - -### 2.2 Mode Abstraction - -The implementation uses a `Mode` trait to unify ZK and non-ZK: - -```rust -pub trait Mode: 'static { - fn sample(rng: &mut R) -> F; - fn mask(value: G, base: &G, blind: &G::Scalar) -> G; -} - -// Transparent: sample returns 0, mask returns value unchanged -impl Mode for Transparent { - fn sample(_rng: &mut R) -> F { F::zero() } - fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { value } -} - -// ZK: sample returns random, mask adds blind -impl Mode for ZK { - fn sample(rng: &mut R) -> F { F::random(rng) } - fn mask(value: G, base: &G, blind: &G::Scalar) -> G { value + base.scale(blind) } -} -``` - ---- - -## 3. Protocol Differences by Phase - -### 3.1 VMV (Vector-Matrix-Vector) Message - -The VMV message initiates the evaluation proof. It contains `(C, D_2, E_1)`. - -#### Transparent Version -``` -C = e(⟨T_vec, v_vec⟩, Γ_2,fin) -D_2 = e(⟨Γ_1, v_vec⟩, Γ_2,fin) -E_1 = ⟨T_vec, L_vec⟩ -E_2 = y · Γ_2,fin // Computed by verifier from known y -``` - -#### ZK Version -``` -C = e(⟨T_vec, v_vec⟩, Γ_2,fin) + r_c · H_T -D_2 = e(⟨Γ_1, v_vec⟩, Γ_2,fin) + r_d2 · H_T -E_1 = ⟨T_vec, L_vec⟩ + r_e1 · H_1 -E_2 = y · Γ_2,fin + r_e2 · H_2 // Sent by prover (blinded) -y_com = y · Γ_1,fin + r_y · H_1 // Commitment to evaluation -``` - -**Key Differences**: -1. 
All values masked with their respective blinding generators -2. `E_2` sent by prover (verifier can't compute it without knowing `y`) -3. Additional commitment `y_com` for proving `y` consistency -4. Requires **Σ_1 proof** to prove `E_2` and `y_com` commit to same `y` -5. Requires **Σ_2 proof** to prove the VMV relation holds - -### 3.2 Reduce Rounds (First Message) - -Each reduce round computes cross-terms for the folding. First message contains `(D_1L, D_1R, D_2L, D_2R, E_1β, E_2β)`. - -#### Transparent Version -``` -D_1L = e(v_1L, Γ_2') -D_1R = e(v_1R, Γ_2') -D_2L = e(Γ_1', v_2L) -D_2R = e(Γ_1', v_2R) -E_1β = ⟨Γ_1, s_2⟩ -E_2β = ⟨Γ_2, s_1⟩ -``` - -#### ZK Version -``` -D_1L = e(v_1L, Γ_2') + r_d1l · H_T -D_1R = e(v_1R, Γ_2') + r_d1r · H_T -D_2L = e(Γ_1', v_2L) + r_d2l · H_T -D_2R = e(Γ_1', v_2R) + r_d2r · H_T -E_1β = ⟨Γ_1, s_2⟩ // Not masked (public generators) -E_2β = ⟨Γ_2, s_1⟩ // Not masked (public generators) -``` - -**Blind Sampling**: 4 new blinds per round: `r_d1l, r_d1r, r_d2l, r_d2r` - -### 3.3 Reduce Rounds (Second Message) - -Second message contains cross-products `(C_+, C_-, E_1+, E_1-, E_2+, E_2-)`. - -#### Transparent Version -``` -C_+ = e(v_1L, v_2R) -C_- = e(v_1R, v_2L) -E_1+ = ⟨v_1L, s_2R⟩ -E_1- = ⟨v_1R, s_2L⟩ -E_2+ = ⟨s_1L, v_2R⟩ -E_2- = ⟨s_1R, v_2L⟩ -``` - -#### ZK Version -``` -C_+ = e(v_1L, v_2R) + r_c+ · H_T -C_- = e(v_1R, v_2L) + r_c- · H_T -E_1+ = ⟨v_1L, s_2R⟩ + r_e1+ · H_1 -E_1- = ⟨v_1R, s_2L⟩ + r_e1- · H_1 -E_2+ = ⟨s_1L, v_2R⟩ + r_e2+ · H_2 -E_2- = ⟨s_1R, v_2L⟩ + r_e2- · H_2 -``` - -**Blind Sampling**: 6 new blinds per round: `r_c+, r_c-, r_e1+, r_e1-, r_e2+, r_e2-` - -### 3.4 Blind Accumulation - -In ZK mode, the prover must track how blinds combine through the protocol. 
- -After challenge `β` (first challenge): -``` -r_C ← r_C + β·r_D2 + β⁻¹·r_D1 -``` - -After challenge `α` (second challenge): -``` -r_C ← r_C + α·r_c+ + α⁻¹·r_c- -r_D1 ← α·r_d1l + r_d1r -r_D2 ← α⁻¹·r_d2l + r_d2r -r_E1 ← r_E1 + α·r_e1+ + α⁻¹·r_e1- -r_E2 ← r_E2 + α·r_e2+ + α⁻¹·r_e2- -``` - -After fold-scalars (challenge `γ`): -``` -r_C ← r_C + γ·r_E2 + γ⁻¹·r_E1 -``` - -### 3.5 Final Scalar Product - -#### Transparent Version - -Final message is `(E_1, E_2)`: -``` -E_1 = v_1 + γ·s_1·H_1 -E_2 = v_2 + γ⁻¹·s_2·H_2 -``` - -Verification: -``` -e(E_1 + d·Γ_1, E_2 + d⁻¹·Γ_2) = C' + χ_0 + d·D_2' + d⁻¹·D_1' -``` - -#### ZK Version - -Final message is `(E_1, E_2)` plus a **Σ-protocol proof**. - -The Σ-protocol proves knowledge of `(v_1, v_2, r_C, r_D1, r_D2)` satisfying: -- `C = e(v_1, v_2) + r_C·H_T` -- `D_1 = e(v_1, Γ_2) + r_D1·H_T` -- `D_2 = e(Γ_1, v_2) + r_D2·H_T` - -**Σ-Protocol Steps**: - -1. **Commitment Phase**: Prover samples `d_1 = s_d1·Γ_1`, `d_2 = s_d2·Γ_2` and blinds `r_P1, r_P2, r_Q, r_R`, then computes: - ``` - P_1 = e(d_1, Γ_2) + r_P1·H_T - P_2 = e(Γ_1, d_2) + r_P2·H_T - Q = e(d_1, v_2) + e(v_1, d_2) + r_Q·H_T - R = e(d_1, d_2) + r_R·H_T - ``` - -2. **Challenge**: Verifier sends random `c` (or derived via Fiat-Shamir) - -3. **Response Phase**: Prover computes: - ``` - E_1 = d_1 + c·v_1 - E_2 = d_2 + c·v_2 - r_1 = r_P1 + c·r_D1 - r_2 = r_P2 + c·r_D2 - r_3 = r_R + c·r_Q + c²·r_C - ``` - -4. **Verification**: Verifier checks: - ``` - e(E_1 + d·Γ_1, E_2 + d⁻¹·Γ_2) = χ + R + c·Q + c²·C - + d·P_2 + d·c·D_2 - + d⁻¹·P_1 + d⁻¹·c·D_1 - - (r_3 + d·r_2 + d⁻¹·r_1)·H_T - ``` - ---- - -## 4. Additional ZK-Only Structures - -### 4.1 Sigma1 Proof (VMV Consistency) - -Proves knowledge of `(y, r_E2, r_y)` such that: -- `E_2 = y·Γ_2,fin + r_E2·H_2` -- `y_com = y·Γ_1,fin + r_y·H_1` - -This is a standard Schnorr-like proof for DLOG equality across different bases. 
- -### 4.2 Sigma2 Proof (VMV Relation) - -Proves: -``` -e(E_1, Γ_2,fin) - D_2 = e(H_1, t_1·Γ_2,fin + t_2·H_2) -``` - -Where `t_1 = r_E1 + r_v` and `t_2 = -r_D2`. - -This proves the VMV constraint holds even with blinds. - ---- - -## 5. Proof Size Comparison - -| Component | Transparent | ZK | -|-----------|-------------|-----| -| VMV Message | 2 G_T + 1 G_1 | 2 G_T + 1 G_1 + 1 G_2 + 1 G_1 | -| Per Reduce Round | 4 G_T + 2 G_1 + 2 G_2 | Same (blinds absorbed) | -| Final Message | 1 G_1 + 1 G_2 | Same | -| Sigma1 Proof | - | 1 G_1 + 1 G_2 + 3 F | -| Sigma2 Proof | - | 1 G_T + 2 F | -| ScalarProduct Proof | - | 4 G_T + 1 G_1 + 1 G_2 + 3 F | - -**Total Overhead for ZK**: -- +1 G_1, +2 G_2 elements -- +5 G_T elements -- +8 F scalars - ---- - -## 6. Verification Complexity - -### Transparent -- Per round: O(1) scalar mults in G_T -- Final: 4 pairings (batched) - -### ZK -- Per round: Same (blind tracking is prover-side) -- Final: Same 4 pairings + Σ-protocol verification -- Additional: Sigma1 verification (2 scalar mults each in G_1, G_2) -- Additional: Sigma2 verification (1 pairing) - -**Total**: ~5-6 pairings equivalent (vs 4 for transparent) - ---- - -## 7. Security Properties - -### Transparent Dory -- **Binding**: Computational (under SXDH) -- **Hiding**: None (evaluation is revealed) -- **Soundness**: Computational (knowledge sound under SXDH) - -### ZK Dory -- **Binding**: Computational (under SXDH) -- **Hiding**: Statistical/Perfect (blinded messages are uniformly random) -- **Soundness**: Computational (knowledge sound under SXDH) -- **Zero-Knowledge**: Honest-Verifier Statistical ZK (HVSZK) - -The ZK property is **HVSZK** (Honest-Verifier Statistical Zero-Knowledge): -- The simulator can produce transcripts indistinguishable from real ones -- Requires honest verifier (challenges are random) -- Fiat-Shamir makes it non-interactive, achieving NIZK in ROM - ---- - -## 8. 
Implementation Summary - -### State Changes (Prover) - -```rust -pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { - // ... existing fields ... - - // ZK blind accumulators (zero for Transparent) - r_c: Scalar, // Accumulated blind for C - r_d1: Scalar, // Accumulated blind for D1 - r_d2: Scalar, // Accumulated blind for D2 - r_e1: Scalar, // Accumulated blind for E1 - r_e2: Scalar, // Accumulated blind for E2 - _mode: PhantomData, -} -``` - -### Proof Changes - -```rust -// Transparent proof -pub struct DoryProof { - pub vmv_message: VMVMessage, - pub first_messages: Vec>, - pub second_messages: Vec>, - pub final_message: ScalarProductMessage, -} - -// ZK proof adds Σ-proofs -pub struct ZkDoryProof { - pub vmv_message: ZkVMVMessage, // Includes E2, y_com - pub first_messages: Vec>, - pub second_messages: Vec>, - pub final_message: ScalarProductMessage, - pub sigma1_proof: Sigma1Proof, - pub sigma2_proof: Sigma2Proof, - pub scalar_product_proof: ScalarProductProof, -} -``` - ---- - -## 9. Key Insights - -1. **Blinding Generator Independence**: `H_T, H_1, H_2` must be sampled independently from `Γ_1, Γ_2`. In practice, derived from a hash of `Γ` or sampled from a separate random oracle. - -2. **Blind Folding Mirrors Value Folding**: When values fold as `v' = α·v_L + v_R`, blinds fold as `r' = α·r_L + r_R`. This maintains the invariant that `v'_masked = v' + r'·H`. - -3. **Σ-Protocols Are Necessary**: Simply masking isn't enough—the verifier needs to verify the final relation. The Σ-protocol proves knowledge of the witness without revealing the blinds. - -4. **RNG vs Transcript for Blinds**: Blinds are sampled from private RNG (not transcript) because they must not affect challenge derivation. Challenges come from the transcript after appending the masked values. - -5. **Additive vs Multiplicative Masking**: Dory uses additive masking (`V + r·H`) in groups. 
This is simpler than multiplicative approaches and works because group operations are efficient. - ---- - -## References - -1. Lee, J. (2021). "Dory: Efficient, Transparent Arguments for Generalised Inner Products and Polynomial Commitments." TCC 2021. -2. IACR ePrint: https://eprint.iacr.org/2020/1274 -3. Thaler, J. "Proofs, Arguments, and Zero-Knowledge" Section 15.4 diff --git a/zk_refactor_plan.md b/zk_refactor_plan.md deleted file mode 100644 index b32e121..0000000 --- a/zk_refactor_plan.md +++ /dev/null @@ -1,737 +0,0 @@ -# ZK Implementation Refactoring Plan (v2) - -## Design Philosophy - -Use Rust's trait system to unify Transparent and ZK modes into **single code paths**. The `Mode` trait with associated types determines all behavioral differences at compile time. - -**Two modes only:** -- `Transparent` - no blinds, evaluation `y` revealed to verifier -- `ZK` - full blinds, evaluation hidden (verifier receives `y_com`) - ---- - -## Core Design: The Mode Trait - -```rust -pub trait Mode: 'static + Clone { - /// Witness provided to verifier - /// - Transparent: the evaluation F - /// - ZK: commitment to evaluation G1 - type Witness>: Clone; - - /// Extra proof elements - /// - Transparent: () - /// - ZK: ZkProofs (sigma1, sigma2, scalar_product) - type Extras, GT: Group>: Clone + Default; - - /// VMV message extension - /// - Transparent: () - /// - ZK: (E2, y_com) tuple - type VmvExtras>: Clone + Default; - - /// Sample blinding factor (zero for Transparent, random for ZK) - fn sample(rng: &mut R) -> F; - - /// Mask group element (identity for Transparent, adds blind for ZK) - fn mask(value: G, base: &G, blind: &G::Scalar) -> G; - - /// Create witness and VMV extras from evaluation y - fn create_witness( - y: &E::Scalar, - r_e2: &E::Scalar, - setup: &ProverSetup, - rng: &mut R, - ) -> (Self::Witness, Self::VmvExtras, WitnessSecrets) - where - E::G1: Group, - E::G2: Group; - - /// Extract E2 for verifier state - /// - Transparent: compute from witness (y · 
Γ2,fin) - /// - ZK: extract from VMV extras - fn verifier_e2( - witness: &Self::Witness, - vmv_extras: &Self::VmvExtras, - setup: &VerifierSetup, - ) -> E::G2 - where - E::G1: Group, - E::G2: Group; - - /// Generate extra proofs (sigma proofs for ZK, no-op for Transparent) - fn generate_extras, R: RngCore>( - witness_secrets: &WitnessSecrets, - vmv_blinds: &VmvBlinds, - prover_state: &DoryProverState, - setup: &ProverSetup, - transcript: &mut T, - rng: &mut R, - ) -> Self::Extras - where - E::G1: Group, - E::G2: Group, - E::GT: Group; - - /// Verify extra proofs (no-op for Transparent) - fn verify_extras>( - extras: &Self::Extras, - vmv_extras: &Self::VmvExtras, - vmv_message: &VMVMessage, - setup: &VerifierSetup, - transcript: &mut T, - ) -> Result<(), DoryError> - where - E::G1: Group, - E::G2: Group, - E::GT: Group; - - /// Append VMV extras to transcript (no-op for Transparent) - fn append_vmv_extras_to_transcript>( - vmv_extras: &Self::VmvExtras, - transcript: &mut T, - ) where - E::G1: Group, - E::G2: Group; - - /// Final verification dispatch - fn verify_final( - verifier_state: &mut DoryVerifierState, - extras: &Self::Extras, - final_message: &ScalarProductMessage, - gamma: &E::Scalar, - d: &E::Scalar, - ) -> Result<(), DoryError> - where - E::G1: Group, - E::G2: Group, - E::GT: Group; -} -``` - ---- - -## Mode Implementations - -### Transparent Mode - -```rust -#[derive(Clone, Copy, Default)] -pub struct Transparent; - -impl Mode for Transparent { - type Witness> = F; - type Extras, GT: Group> = (); - type VmvExtras> = (); - - fn sample(_rng: &mut R) -> F { - F::zero() - } - - fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { - value - } - - fn create_witness( - y: &E::Scalar, - _r_e2: &E::Scalar, - _setup: &ProverSetup, - _rng: &mut R, - ) -> (E::Scalar, (), WitnessSecrets) - where - E::G1: Group, - E::G2: Group, - { - (*y, (), WitnessSecrets::default()) - } - - fn verifier_e2( - witness: &E::Scalar, - _vmv_extras: &(), - setup: &VerifierSetup, - ) 
-> E::G2 - where - E::G1: Group, - E::G2: Group, - { - // E2 = y · Γ2,fin - setup.g2_0.scale(witness) - } - - fn generate_extras, R: RngCore>( - _witness_secrets: &WitnessSecrets, - _vmv_blinds: &VmvBlinds, - _prover_state: &DoryProverState, - _setup: &ProverSetup, - _transcript: &mut T, - _rng: &mut R, - ) -> () - where - E::G1: Group, - E::G2: Group, - E::GT: Group, - { - () - } - - fn verify_extras>( - _extras: &(), - _vmv_extras: &(), - _vmv_message: &VMVMessage, - _setup: &VerifierSetup, - _transcript: &mut T, - ) -> Result<(), DoryError> - where - E::G1: Group, - E::G2: Group, - E::GT: Group, - { - Ok(()) - } - - fn append_vmv_extras_to_transcript>( - _vmv_extras: &(), - _transcript: &mut T, - ) where - E::G1: Group, - E::G2: Group, - { - // No extras to append - } - - fn verify_final( - verifier_state: &mut DoryVerifierState, - _extras: &(), - final_message: &ScalarProductMessage, - gamma: &E::Scalar, - d: &E::Scalar, - ) -> Result<(), DoryError> - where - E::G1: Group, - E::G2: Group, - E::GT: Group, - { - verifier_state.verify_final(final_message, gamma, d) - } -} -``` - -### ZK Mode - -```rust -#[derive(Clone, Copy, Default)] -#[cfg(feature = "zk")] -pub struct ZK; - -/// Secrets generated during witness creation, needed for sigma proofs -#[cfg(feature = "zk")] -pub struct WitnessSecrets { - pub r_y: F, // blind for y_com - pub r_e2: F, // blind for E2 -} - -/// VMV extras for ZK mode -#[cfg(feature = "zk")] -#[derive(Clone)] -pub struct ZkVmvExtras { - pub e2: G2, // E2 = y·Γ2,fin + r_e2·H2 - pub y_com: G1, // y_com = y·Γ1,fin + r_y·H1 -} - -/// Extra proofs for ZK mode -#[cfg(feature = "zk")] -#[derive(Clone)] -pub struct ZkProofs { - pub sigma1: Sigma1Proof, - pub sigma2: Sigma2Proof, - pub scalar_product: ScalarProductProof, -} - -#[cfg(feature = "zk")] -impl Mode for ZK { - type Witness> = G1; // y_com - type Extras, GT: Group> = - ZkProofs; - type VmvExtras> = ZkVmvExtras; - - fn sample(rng: &mut R) -> F { - F::random(rng) - } - - fn mask(value: G, 
base: &G, blind: &G::Scalar) -> G { - value + base.scale(blind) - } - - fn create_witness( - y: &E::Scalar, - r_e2: &E::Scalar, - setup: &ProverSetup, - rng: &mut R, - ) -> (E::G1, ZkVmvExtras, WitnessSecrets) - where - E::G1: Group, - E::G2: Group, - { - let r_y = E::Scalar::random(rng); - - // y_com = y·Γ1,fin + r_y·H1 - let y_com = setup.g1_vec[0].scale(y) + setup.h1.scale(&r_y); - - // E2 = y·Γ2,fin + r_e2·H2 - let e2 = setup.g2_vec[0].scale(y) + setup.h2.scale(r_e2); - - let vmv_extras = ZkVmvExtras { e2, y_com }; - let secrets = WitnessSecrets { r_y, r_e2: *r_e2 }; - - (y_com, vmv_extras, secrets) - } - - fn verifier_e2( - _witness: &E::G1, - vmv_extras: &ZkVmvExtras, - _setup: &VerifierSetup, - ) -> E::G2 - where - E::G1: Group, - E::G2: Group, - { - // E2 comes from prover's VMV message - vmv_extras.e2 - } - - fn generate_extras, R: RngCore>( - witness_secrets: &WitnessSecrets, - vmv_blinds: &VmvBlinds, - prover_state: &DoryProverState, - setup: &ProverSetup, - transcript: &mut T, - rng: &mut R, - ) -> ZkProofs - where - E::G1: Group, - E::G2: Group, - E::GT: Group, - { - // Generate sigma1: proves y_com and E2 commit to same y - let sigma1 = generate_sigma1_proof(/* ... */); - - // Generate sigma2: proves VMV relation holds with blinds - let sigma2 = generate_sigma2_proof(/* ... 
*/); - - // Generate scalar product proof - let scalar_product = prover_state.scalar_product_proof(transcript, rng); - - ZkProofs { sigma1, sigma2, scalar_product } - } - - fn verify_extras>( - extras: &ZkProofs, - vmv_extras: &ZkVmvExtras, - vmv_message: &VMVMessage, - setup: &VerifierSetup, - transcript: &mut T, - ) -> Result<(), DoryError> - where - E::G1: Group, - E::G2: Group, - E::GT: Group, - { - verify_sigma1_proof(&vmv_extras.e2, &vmv_extras.y_com, &extras.sigma1, setup, transcript)?; - verify_sigma2_proof(&vmv_message.e1, &vmv_message.d2, &extras.sigma2, setup, transcript)?; - Ok(()) - } - - fn append_vmv_extras_to_transcript>( - vmv_extras: &ZkVmvExtras, - transcript: &mut T, - ) where - E::G1: Group, - E::G2: Group, - { - transcript.append_serde(b"vmv_e2", &vmv_extras.e2); - transcript.append_serde(b"vmv_y_com", &vmv_extras.y_com); - } - - fn verify_final( - verifier_state: &mut DoryVerifierState, - extras: &ZkProofs, - _final_message: &ScalarProductMessage, - _gamma: &E::Scalar, - d: &E::Scalar, - ) -> Result<(), DoryError> - where - E::G1: Group, - E::G2: Group, - E::GT: Group, - { - // Derive challenge c from scalar product proof (already in transcript) - let c = /* from transcript */; - verifier_state.verify_final_zk_with_challenge(&extras.scalar_product, &c, d) - } -} -``` - ---- - -## Unified Data Structures - -### DoryProof (Single Type) - -```rust -/// Complete Dory evaluation proof, parameterized by Mode -pub struct DoryProof -where - G1: Group, - G2: Group, - GT: Group, -{ - /// VMV message (C, D2, E1) - pub vmv_message: VMVMessage, - - /// VMV extras (empty for Transparent, E2/y_com for ZK) - pub vmv_extras: M::VmvExtras, - - /// Reduce round messages - pub first_messages: Vec>, - pub second_messages: Vec>, - - /// Final scalar product message - pub final_message: ScalarProductMessage, - - /// Mode-specific extra proofs (empty for Transparent, sigma proofs for ZK) - pub extras: M::Extras, - - /// Matrix dimensions - pub nu: usize, - pub 
sigma: usize, -} -``` - -### VMVMessage (Unchanged) - -```rust -/// VMV message - same for both modes -/// Mode-specific data (E2, y_com) stored in DoryProof::vmv_extras -pub struct VMVMessage { - pub c: GT, - pub d2: GT, - pub e1: G1, -} -``` - ---- - -## Unified API - -### Single prove Function - -```rust -/// Create evaluation proof -/// -/// Returns (proof, witness) where witness is: -/// - Transparent: the evaluation y (type F) -/// - ZK: commitment to y (type G1) -pub fn prove( - polynomial: &P, - point: &[F], - row_commitments: Vec, - nu: usize, - sigma: usize, - setup: &ProverSetup, - transcript: &mut T, - rng: &mut R, -) -> Result<(DoryProof, M::Witness), DoryError> -where - F: Field, - E: PairingCurve, - E::G1: Group, - E::G2: Group, - E::GT: Group, - M1: DoryRoutines, - M2: DoryRoutines, - P: MultilinearLagrange, - T: Transcript, - M: Mode, - R: RngCore, -{ - // Validation - if point.len() != nu + sigma { - return Err(DoryError::InvalidPointDimension { ... }); - } - if nu > sigma { - return Err(DoryError::InvalidSize { ... 
}); - } - - // Compute evaluation vectors - let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); - let v_vec = polynomial.vector_matrix_product(&left_vec, nu, sigma); - - // Compute y - let y = polynomial.evaluate(point); - - // Sample VMV blinds using Mode - let r_c: F = M::sample(rng); - let r_d2: F = M::sample(rng); - let r_e1: F = M::sample(rng); - let r_e2: F = M::sample(rng); - let vmv_blinds = VmvBlinds { r_c, r_d2, r_e1, r_e2 }; - - // Create witness and VMV extras using Mode - let (witness, vmv_extras, witness_secrets) = M::create_witness(&y, &r_e2, setup, rng); - - // Compute VMV message using Mode::mask - let g2_fin = &setup.g2_vec[0]; - let t_vec_v = M1::msm(&padded_row_commitments, &v_vec); - let c = M::mask(E::pair(&t_vec_v, g2_fin), &setup.ht, &r_c); - - let g1_bases = &setup.g1_vec[..1 << sigma]; - let gamma1_v = M1::msm(g1_bases, &v_vec); - let d2 = M::mask(E::pair(&gamma1_v, g2_fin), &setup.ht, &r_d2); - - let e1 = M::mask(M1::msm(&row_commitments, &left_vec), &setup.h1, &r_e1); - - let vmv_message = VMVMessage { c, d2, e1 }; - - // Append to transcript - transcript.append_serde(b"vmv_c", &vmv_message.c); - transcript.append_serde(b"vmv_d2", &vmv_message.d2); - transcript.append_serde(b"vmv_e1", &vmv_message.e1); - M::append_vmv_extras_to_transcript::(&vmv_extras, transcript); - - // Initialize prover state - let mut prover_state = DoryProverState::new_with_blinds( - padded_row_commitments, v2, Some(v_vec), - padded_right_vec, padded_left_vec, - setup, r_c, r_d2, r_e1, r_e2, - ); - - // Reduce-and-fold rounds (identical for both modes) - let mut first_messages = Vec::with_capacity(num_rounds); - let mut second_messages = Vec::with_capacity(num_rounds); - - for _ in 0..num_rounds { - let (first_msg, r_d1_l, r_d1_r, r_d2_l, r_d2_r) = - prover_state.compute_first_message::(rng); - // ... append to transcript, get beta ... 
- prover_state.apply_first_challenge::(&beta); - first_messages.push(first_msg); - - let (second_msg, ...) = prover_state.compute_second_message::(rng); - // ... append to transcript, get alpha ... - prover_state.apply_second_challenge::(...); - second_messages.push(second_msg); - } - - let gamma = transcript.challenge_scalar(b"gamma"); - - // Generate mode-specific extras (sigma proofs for ZK, nothing for Transparent) - let extras = M::generate_extras(&witness_secrets, &vmv_blinds, &prover_state, setup, transcript, rng); - - let final_message = prover_state.compute_final_message::(&gamma); - - transcript.append_serde(b"final_e1", &final_message.e1); - transcript.append_serde(b"final_e2", &final_message.e2); - let _d = transcript.challenge_scalar(b"d"); - - Ok(( - DoryProof { - vmv_message, - vmv_extras, - first_messages, - second_messages, - final_message, - extras, - nu, - sigma, - }, - witness, - )) -} -``` - -### Single verify Function - -```rust -/// Verify evaluation proof -/// -/// Takes witness which is: -/// - Transparent: the evaluation y (type F) -/// - ZK: commitment to y (type G1) -pub fn verify( - commitment: E::GT, - witness: M::Witness, - point: &[F], - proof: &DoryProof, - setup: VerifierSetup, - transcript: &mut T, -) -> Result<(), DoryError> -where - F: Field, - E: PairingCurve, - E::G1: Group, - E::G2: Group, - E::GT: Group, - M1: DoryRoutines, - M2: DoryRoutines, - T: Transcript, - M: Mode, -{ - let nu = proof.nu; - let sigma = proof.sigma; - - if point.len() != nu + sigma { - return Err(DoryError::InvalidPointDimension { ... 
}); - } - - // Append VMV to transcript - transcript.append_serde(b"vmv_c", &proof.vmv_message.c); - transcript.append_serde(b"vmv_d2", &proof.vmv_message.d2); - transcript.append_serde(b"vmv_e1", &proof.vmv_message.e1); - M::append_vmv_extras_to_transcript::(&proof.vmv_extras, transcript); - - // Verify mode-specific extras (sigma proofs for ZK) - M::verify_extras(&proof.extras, &proof.vmv_extras, &proof.vmv_message, &setup, transcript)?; - - // Get E2 based on mode - let e2 = M::verifier_e2::(&witness, &proof.vmv_extras, &setup); - - // Initialize verifier state - let mut verifier_state = DoryVerifierState::new( - proof.vmv_message.c, - commitment, - proof.vmv_message.d2, - proof.vmv_message.e1, - e2, - s1_coords, - s2_coords, - num_rounds, - setup.clone(), - ); - - // Process rounds (identical for both modes) - for round in 0..num_rounds { - let first_msg = &proof.first_messages[round]; - let second_msg = &proof.second_messages[round]; - // ... append to transcript, get challenges ... 
- verifier_state.process_round(first_msg, second_msg, &alpha, &beta); - } - - let gamma = transcript.challenge_scalar(b"gamma"); - - // Append final message - transcript.append_serde(b"final_e1", &proof.final_message.e1); - transcript.append_serde(b"final_e2", &proof.final_message.e2); - let d = transcript.challenge_scalar(b"d"); - - // Final verification dispatch based on mode - M::verify_final(&mut verifier_state, &proof.extras, &proof.final_message, &gamma, &d) -} -``` - ---- - -## Usage Examples - -### Transparent Mode - -```rust -// Prove -let (proof, evaluation) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( - &poly, &point, tier_1, nu, sigma, &prover_setup, &mut transcript, &mut rng -)?; - -// Verify -verify::<_, BN254, G1Routines, G2Routines, _, Transparent>( - tier_2, evaluation, &point, &proof, verifier_setup, &mut transcript -)?; -``` - -### ZK Mode - -```rust -// Prove - returns y_com instead of y -let (proof, y_com) = prove::<_, BN254, G1Routines, G2Routines, _, _, ZK, _>( - &poly, &point, tier_1, nu, sigma, &prover_setup, &mut transcript, &mut rng -)?; - -// Verify - takes y_com instead of y -verify::<_, BN254, G1Routines, G2Routines, _, ZK>( - tier_2, y_com, &point, &proof, verifier_setup, &mut transcript -)?; -``` - ---- - -## Files to Modify - -### Delete Entirely -- `src/backends/arkworks/ark_poly.rs`: `commit_zk` method (~60 lines) -- `src/primitives/poly.rs`: `commit_zk` trait method (~15 lines) - -### Major Refactor -| File | Changes | -|------|---------| -| `src/mode.rs` | Expand Mode trait with associated types and methods | -| `src/proof.rs` | Single `DoryProof<..., M: Mode>`, delete `ZkDoryProof` | -| `src/messages.rs` | Delete `ZkVMVMessage`, add `ZkVmvExtras` | -| `src/evaluation_proof.rs` | Single `prove`/`verify`, delete duplicates (~350 lines removed) | -| `src/reduce_and_fold.rs` | Delete duplicate `scalar_product_proof`, keep single impl | -| `src/lib.rs` | Update exports | - -### Minor Updates -| File | Changes | 
-|------|---------| -| `tests/arkworks/*.rs` | Update to new API | -| `examples/*.rs` | Update to new API (add `Transparent` parameter) | - ---- - -## Expected Diff Reduction - -| Component | Before | After | Reduction | -|-----------|--------|-------|-----------| -| `evaluation_proof.rs` | +453 | ~+50 | -400 | -| `reduce_and_fold.rs` | +783 | ~+300 | -483 | -| `proof.rs` | +56 | ~+20 | -36 | -| `messages.rs` | +97 | ~+40 | -57 | -| `ark_poly.rs` | +58 | 0 | -58 | -| **Total** | +1494 | ~+450 | **-1044 (70%)** | - ---- - -## Implementation Order - -1. **Expand Mode trait** in `src/mode.rs` - - Add associated types - - Add all trait methods with Transparent impl - - Add ZK impl (feature-gated) - -2. **Unify proof types** in `src/proof.rs` and `src/messages.rs` - - Single `DoryProof<..., M>` - - Add `ZkVmvExtras`, `ZkProofs` - - Delete `ZkDoryProof`, `ZkVMVMessage` - -3. **Unify prove/verify** in `src/evaluation_proof.rs` - - Single `prove` function using Mode methods - - Single `verify` function using Mode methods - - Delete `create_zk_evaluation_proof`, `verify_zk_evaluation_proof` - -4. **Clean up reduce_and_fold.rs** - - Delete duplicate `scalar_product_proof` - - Update to use Mode trait - -5. **Delete dead code** - - Remove `commit_zk` from trait and impl - -6. 
**Update tests and examples** - - Add explicit `Transparent` or `ZK` mode parameter - ---- - -## Verification Checklist - -- [ ] `cargo nextest run --features backends` passes (Transparent mode) -- [ ] `cargo nextest run --features "backends,zk"` passes (both modes) -- [ ] `cargo clippy --features "backends,zk"` clean -- [ ] `cargo doc --features "backends,zk"` builds -- [ ] Statistical ZK tests still pass -- [ ] No performance regression in Transparent mode From 1a666c29189e807858a51b80a6b00e300cab7a1c Mon Sep 17 00:00:00 2001 From: markosg04 Date: Thu, 26 Feb 2026 18:30:42 -0500 Subject: [PATCH 04/16] refactor: clean up diff --- goal.md | 36 +++ src/backends/arkworks/ark_poly.rs | 46 +-- src/evaluation_proof.rs | 149 +++++---- src/lib.rs | 1 - src/messages.rs | 50 +-- src/mode.rs | 40 +-- src/primitives/poly.rs | 15 +- src/proof.rs | 23 +- src/reduce_and_fold.rs | 490 ++++++++++++------------------ src/setup.rs | 17 +- 10 files changed, 352 insertions(+), 515 deletions(-) create mode 100644 goal.md diff --git a/goal.md b/goal.md new file mode 100644 index 0000000..56bcd48 --- /dev/null +++ b/goal.md @@ -0,0 +1,36 @@ +# Goal: ZK Feature Branch Diff Reduction + +## Hard Constraints +1. The diff of `src/` + `Cargo.toml` between `main` and `feat/zk` must have **< 500 added lines** (below 300 is the stretch goal) +2. Deletions are fine and encouraged — we want less code, not more +3. **All tests must pass**: `cargo nextest run -q` and `cargo nextest run -q --features zk` +4. Do not stop until all constraints are met + +## Current State +- **852 added** + 283 removed (source only) +- Biggest contributors: `reduce_and_fold.rs` (+427), `evaluation_proof.rs` (+156) + +## Strategy +1. **Do not touch existing comments/docs unnecessarily** — gratuitous doc edits create diff waste +2. **Store round blinds internally in DoryProverState** — eliminate return-value tuples and blind-array parameters between functions +3. 
**Consolidate `commit` and `commit_zk`** — single method with optional RNG +4. **Bundle ZK proof fields** into `Option` instead of 5 separate `Option` fields +5. **Keep Mode trait lean** — use it internally, minimize generic parameter spread +6. **Move sigma proof logic to `src/zk.rs`** — keep reduce_and_fold.rs focused on the core protocol +7. **Minimize API surface changes** — fewer generic parameters on public functions + +## Verification +```sh +# Added lines only (must be < 500, stretch goal < 300): +git diff main...feat/zk -- src/ Cargo.toml | grep -c '^+[^+]' + +# Tests pass: +cargo nextest run --features "arkworks" +cargo nextest run --features "arkworks,zk" + +# Clippy clean: +cargo clippy --features "arkworks" --message-format=short +cargo clippy --features "arkworks,zk" --message-format=short +``` + +## Status: IN PROGRESS diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs index f9aee50..84c7757 100644 --- a/src/backends/arkworks/ark_poly.rs +++ b/src/backends/arkworks/ark_poly.rs @@ -95,7 +95,7 @@ impl Polynomial for ArkworksPolynomial { } #[cfg(feature = "zk")] - #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit_zk", fields(nu, sigma, num_rows = 1 << nu, num_cols = 1 << sigma))] + #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit_zk", fields(nu, sigma))] #[allow(clippy::type_complexity)] fn commit_zk( &self, @@ -117,39 +117,17 @@ impl Polynomial for ArkworksPolynomial { actual: self.coefficients.len(), }); } - - let num_rows = 1 << nu; - let num_cols = 1 << sigma; - - // Tier 1: Compute blinded row commitments - // T_i = ⟨row_i, Γ1⟩ + r_i·H1 - let mut row_commitments = Vec::with_capacity(num_rows); - let mut row_blinds = Vec::with_capacity(num_rows); - - for _ in 0..num_rows { - // Sample blind for this row from private randomness - let r_i = ArkFr::random(rng); - row_blinds.push(r_i); - } - - for (i, r_i) in row_blinds.iter().enumerate() { - let row_start = i * num_cols; - let row_end = 
row_start + num_cols; - let row = &self.coefficients[row_start..row_end]; - - // Compute blinded row commitment: T_i = MSM(Γ1, row) + r_i·H1 - let g1_bases = &setup.g1_vec[..num_cols]; - let row_commit_raw = M1::msm(g1_bases, row); - let row_commit = row_commit_raw + setup.h1.scale(r_i); - row_commitments.push(row_commit); - } - - // Tier 2: Compute final commitment via multi-pairing - // The commitment is derived from blinded row commitments - let g2_bases = &setup.g2_vec[..num_rows]; - let commitment = E::multi_pair_g2_setup(&row_commitments, g2_bases); - - Ok((commitment, row_commitments, row_blinds)) + let (num_rows, num_cols) = (1 << nu, 1 << sigma); + let g1_bases = &setup.g1_vec[..num_cols]; + let blinds: Vec = (0..num_rows).map(|_| ArkFr::random(rng)).collect(); + let row_commitments: Vec = (0..num_rows) + .map(|i| { + let row = &self.coefficients[i * num_cols..(i + 1) * num_cols]; + M1::msm(g1_bases, row) + setup.h1.scale(&blinds[i]) + }) + .collect(); + let commitment = E::multi_pair_g2_setup(&row_commitments, &setup.g2_vec[..num_rows]); + Ok((commitment, row_commitments, blinds)) } } diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 8512d58..2552057 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -44,24 +44,35 @@ use crate::mode::ZK; /// The protocol proves that polynomial(point) = evaluation via the VMV relation: /// evaluation = L^T × M × R /// -/// # Mode Parameter -/// - `Transparent`: Non-hiding proof, evaluation revealed to verifier -/// - `ZK` (requires `zk` feature): Zero-knowledge proof, evaluation hidden -/// /// # Algorithm /// 1. Compute or use provided row commitments (Tier 1 commitment) /// 2. Split evaluation point into left and right vectors /// 3. Compute v_vec (column evaluations) -/// 4. Create VMV message (C, D2, E1) with mode-specific blinding -/// 5. In ZK mode: compute y_com, E2, and sigma proofs -/// 6. Run max(nu, sigma) rounds of reduce-and-fold +/// 4. 
Create VMV message (C, D2, E1) +/// 5. Initialize prover state for inner product / reduce-and-fold protocol +/// 6. Run max(nu, sigma) rounds of reduce-and-fold (with automatic padding for non-square): +/// - First reduce: compute message and apply beta challenge (reduce) +/// - Second reduce: compute message and apply alpha challenge (fold) /// 7. Compute final scalar product message /// +/// # Parameters +/// - `polynomial`: Polynomial to prove evaluation for +/// - `point`: Evaluation point (length nu + sigma) +/// - `row_commitments`: Optional precomputed row commitments from polynomial.commit() +/// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma) +/// - `sigma`: Log₂ of number of columns +/// - `setup`: Prover setup +/// - `transcript`: Fiat-Shamir transcript for challenge generation +/// /// # Returns -/// Complete Dory proof. In ZK mode, proof contains y_com for verifier. +/// Complete Dory proof containing VMV message, reduce messages, and final message /// /// # Errors /// Returns error if dimensions are invalid (nu > sigma) or protocol fails +/// +/// # Matrix Layout +/// Supports both square (nu = sigma) and non-square (nu < sigma) matrices. +/// For non-square matrices, vectors are automatically padded to length 2^sigma. 
#[allow(clippy::type_complexity)] #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "create_evaluation_proof")] @@ -95,6 +106,7 @@ where }); } + // Validate matrix dimensions: nu must be ≤ sigma (rows ≤ columns) if nu > sigma { return Err(DoryError::InvalidSize { expected: sigma, @@ -102,12 +114,11 @@ where }); } - let row_commitments = match row_commitments { - Some(rc) => rc, - None => { - let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; - rc - } + let row_commitments = if let Some(rc) = row_commitments { + rc + } else { + let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; + rc }; let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); @@ -156,54 +167,46 @@ where let y = polynomial.evaluate(point); let r_y: F = Mo::sample(rng); - // E2 = y·Γ2,fin + r_e2·H2 let e2 = Mo::mask(g2_fin.scale(&y), &setup.h2, &r_e2); - // y_com = y·Γ1,fin + r_y·H1 let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); transcript.append_serde(b"vmv_e2", &e2); transcript.append_serde(b"vmv_y_com", &y_com); let sigma1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); - let t1 = r_e1; - let t2 = -r_d2; - let sigma2 = generate_sigma2_proof::(&t1, &t2, setup, transcript, rng); + let sigma2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript, rng); (Some(e2), Some(y_com), Some(sigma1), Some(sigma2)) } else { (None, None, None, None) }; - // v₂ = v_vec · Γ₂,fin + // v₂ = v_vec · Γ₂,fin (each scalar scales g_fin) let v2 = M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec); - let mut padded_right_vec = right_vec; - let mut padded_left_vec = left_vec; + let mut padded_right_vec = right_vec.clone(); + let mut padded_left_vec = left_vec.clone(); if nu < sigma { padded_right_vec.resize(1 << sigma, F::zero()); padded_left_vec.resize(1 << sigma, F::zero()); } - let mut prover_state: DoryProverState<'_, E, Mo> = DoryProverState::new_with_blinds( - padded_row_commitments, - v2, - Some(v_vec), 
- padded_right_vec, - padded_left_vec, + let mut prover_state: DoryProverState<'_, E, Mo> = DoryProverState::new( + padded_row_commitments, // v1 = T_vec_prime (row commitments, padded) + v2, // v2 = v_vec · g_fin + Some(v_vec), // v2_scalars for first-round MSM+pair optimization + padded_right_vec, // s1 = right_vec (padded) + padded_left_vec, // s2 = left_vec (padded) setup, - r_c, - r_d2, - r_e1, - r_e2, ); + prover_state.set_initial_blinds(r_c, r_d2, r_e1, r_e2); let num_rounds = nu.max(sigma); let mut first_messages = Vec::with_capacity(num_rounds); let mut second_messages = Vec::with_capacity(num_rounds); for _round in 0..num_rounds { - let (first_msg, d1_blinds, d2_blinds) = - prover_state.compute_first_message::(rng); + let first_msg = prover_state.compute_first_message::(rng); transcript.append_serde(b"d1_left", &first_msg.d1_left); transcript.append_serde(b"d1_right", &first_msg.d1_right); @@ -216,8 +219,7 @@ where prover_state.apply_first_challenge::(&beta); first_messages.push(first_msg); - let (second_msg, c_blinds, e1_blinds, e2_blinds) = - prover_state.compute_second_message::(rng); + let second_msg = prover_state.compute_second_message::(rng); transcript.append_serde(b"c_plus", &second_msg.c_plus); transcript.append_serde(b"c_minus", &second_msg.c_minus); @@ -227,18 +229,15 @@ where transcript.append_serde(b"e2_minus", &second_msg.e2_minus); let alpha = transcript.challenge_scalar(b"alpha"); - prover_state.apply_second_challenge::( - &alpha, d1_blinds, d2_blinds, c_blinds, e1_blinds, e2_blinds, - ); + prover_state.apply_second_challenge::(&alpha); second_messages.push(second_msg); } let gamma = transcript.challenge_scalar(b"gamma"); - // Generate scalar product proof in ZK mode #[cfg(feature = "zk")] let scalar_product_proof = if std::any::TypeId::of::() == std::any::TypeId::of::() { - Some(prover_state.scalar_product_proof_internal(transcript, rng)) + Some(prover_state.scalar_product_proof(transcript, rng)) } else { None }; @@ -274,20 +273,33 @@ 
where /// Verifies that a committed polynomial evaluates to the claimed value at the given point. /// Works with both square and non-square matrix layouts (nu ≤ sigma). /// -/// # Verification Modes -/// - **Transparent**: Takes evaluation `y` as input, computes E2 = y·Γ2,fin -/// - **ZK**: Takes `y_com` (from proof.y_com), uses E2 from proof, verifies sigma proofs +/// # Algorithm +/// 1. Extract VMV message from proof +/// 2. Compute e2 = Γ2,fin * evaluation (or use proof.e2 in ZK mode) +/// 3. Initialize verifier state with commitment and VMV message +/// 4. Run max(nu, sigma) rounds of reduce-and-fold verification (with automatic padding) +/// 5. Derive gamma and d challenges +/// 6. Verify final scalar product message /// /// # Parameters -/// - `commitment`: Polynomial commitment (in GT) -/// - `evaluation`: Claimed evaluation (transparent) or None (ZK uses proof.y_com) +/// - `commitment`: Polynomial commitment (in GT) - can be a homomorphically combined commitment +/// - `evaluation`: Claimed evaluation result /// - `point`: Evaluation point (length must equal proof.nu + proof.sigma) -/// - `proof`: Evaluation proof to verify +/// - `proof`: Evaluation proof to verify (contains nu and sigma dimensions) /// - `setup`: Verifier setup /// - `transcript`: Fiat-Shamir transcript for challenge generation /// +/// # Returns +/// `Ok(())` if proof is valid, `Err(DoryError)` otherwise +/// +/// # Homomorphic Verification +/// This function can verify proofs for homomorphically combined polynomials. +/// The commitment parameter should be the combined commitment, and the evaluation +/// should be the evaluation of the combined polynomial. +/// /// # Errors -/// Returns `DoryError::InvalidProof` if verification fails. +/// Returns `DoryError::InvalidProof` if verification fails, or other variants +/// if the input parameters are incorrect (e.g., point dimension mismatch). 
#[tracing::instrument(skip_all, name = "verify_evaluation_proof")] pub fn verify_evaluation_proof( commitment: E::GT, @@ -330,7 +342,6 @@ where transcript.append_serde(b"vmv_e2", proof_e2); transcript.append_serde(b"vmv_y_com", y_com); - // Verify sigma proofs if let Some(ref sigma1) = proof.sigma1_proof { verify_sigma1_proof::(proof_e2, y_com, sigma1, &setup, transcript)?; } @@ -352,22 +363,30 @@ where #[cfg(not(feature = "zk"))] let (e2, _is_zk) = (setup.g2_0.scale(&evaluation), false); - // Folded-scalar accumulation + // Folded-scalar accumulation with per-round coordinates. + // num_rounds = sigma (we fold column dimensions). let num_rounds = sigma; + // s1 (right/prover): the σ column coordinates in natural order (LSB→MSB). + // No padding here: the verifier folds across the σ column dimensions. + // With MSB-first folding, these coordinates are only consumed after the first σ−ν rounds, + // which correspond to the padded MSB dimensions on the left tensor, matching the prover. let col_coords = &point[..sigma]; let s1_coords: Vec = col_coords.to_vec(); + // s2 (left/prover): the ν row coordinates in natural order, followed by zeros for the extra + // MSB dimensions. Conceptually this is s ⊗ [1,0]^(σ−ν): under MSB-first folds, the first + // σ−ν rounds multiply s2 by α⁻¹ while contributing no right halves (since those entries are 0). 
let mut s2_coords: Vec = vec![F::zero(); sigma]; let row_coords = &point[sigma..sigma + nu]; s2_coords[..nu].copy_from_slice(&row_coords[..nu]); let mut verifier_state = DoryVerifierState::new( - vmv_message.c, - commitment, - vmv_message.d2, - vmv_message.e1, - e2, - s1_coords, - s2_coords, + vmv_message.c, // c from VMV message + commitment, // d1 = commitment + vmv_message.d2, // d2 from VMV message + vmv_message.e1, // e1 from VMV message + e2, // e2 computed from evaluation + s1_coords, // s1: columns c0..c_{σ−1} (LSB→MSB), no padding; folded across σ dims + s2_coords, // s2: rows r0..r_{ν−1} then zeros in MSB dims (emulates s ⊗ [1,0]^(σ−ν)) num_rounds, setup.clone(), ); @@ -400,26 +419,24 @@ where // ZK mode: verify with scalar product proof #[cfg(feature = "zk")] if is_zk { - if let Some(ref sigma_proof) = proof.scalar_product_proof { - transcript.append_serde(b"sigma_p1", &sigma_proof.p1); - transcript.append_serde(b"sigma_p2", &sigma_proof.p2); - transcript.append_serde(b"sigma_q", &sigma_proof.q); - transcript.append_serde(b"sigma_r", &sigma_proof.r); + if let Some(ref sp) = proof.scalar_product_proof { + transcript.append_serde(b"sigma_p1", &sp.p1); + transcript.append_serde(b"sigma_p2", &sp.p2); + transcript.append_serde(b"sigma_q", &sp.q); + transcript.append_serde(b"sigma_r", &sp.r); let c = transcript.challenge_scalar(b"sigma_c"); transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); let d = transcript.challenge_scalar(b"d"); - return verifier_state.verify_final_zk_with_challenge(sigma_proof, &c, &d); + return verifier_state.verify_final_zk(sp, &c, &d); } } - // Transparent mode transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); - let d = transcript.challenge_scalar(b"d"); - let _ = gamma; // Used in verify_final + let d = transcript.challenge_scalar(b"d"); verifier_state.verify_final(&proof.final_message, 
&gamma, &d) } diff --git a/src/lib.rs b/src/lib.rs index 4e57eed..f27d2d4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -292,7 +292,6 @@ where Mo: Mode, R: rand_core::RngCore, { - // Create evaluation proof using row_commitments evaluation_proof::create_evaluation_proof::( polynomial, point, diff --git a/src/messages.rs b/src/messages.rs index 3f6d746..1e1d74d 100644 --- a/src/messages.rs +++ b/src/messages.rs @@ -43,15 +43,14 @@ pub struct SecondReduceMessage { /// Vector-Matrix-Vector message for polynomial commitment transformation /// -/// Contains C, D₂, E₁. In transparent mode, E₂ = y·Γ₂,fin is computed by verifier. -/// In ZK mode, E₂ and y_com are stored in the proof's optional fields. +/// Contains C, D₂, E₁. Note: E₂ can be computed by verifier as y·Γ₂,fin #[derive(Clone, Debug)] pub struct VMVMessage { - /// C = e(MSM(T_vec', v_vec), Γ₂,fin) + r_c·HT + /// C = e(MSM(T_vec', v_vec), Γ₂,fin) pub c: GT, - /// D₂ = e(MSM(Γ₁\[nu\], v_vec), Γ₂,fin) + r_d2·HT + /// D₂ = e(MSM(Γ₁\[nu\], v_vec), Γ₂,fin) pub d2: GT, - /// E₁ = MSM(T_vec', L_vec) + r_e1·H1 + /// E₁ = MSM(T_vec', L_vec) pub e1: G1, } @@ -66,69 +65,40 @@ pub struct ScalarProductMessage { pub e2: G2, } -/// ZK VMV Σ-protocol 1: proves knowledge of (y, rE2, ry) such that: -/// - E2 = y·Γ2,fin + rE2·H2 -/// - yC = y·Γ1,fin + ry·H1 -/// -/// This proves the commitment yC is consistent with E2. +/// Σ-protocol 1: proves E2 and y_com commit to the same y. #[cfg(feature = "zk")] #[derive(Clone, Debug)] +#[allow(missing_docs)] pub struct Sigma1Proof { - /// Commitment A1 = k1·Γ2,fin + k2·H2 (for E2 relation) pub a1: G2, - /// Commitment A2 = k1·Γ1,fin + k3·H1 (for yC relation) pub a2: G1, - /// Response z1 = k1 + c·y pub z1: F, - /// Response z2 = k2 + c·rE2 pub z2: F, - /// Response z3 = k3 + c·ry pub z3: F, } -/// ZK VMV Σ-protocol 2: proves knowledge of (t1, t2) such that: -/// e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2) -/// -/// Where t1 = rE1 + rv and t2 = -rD2. 
-/// This proves the relation between E1 and D2 with blinds. +/// Σ-protocol 2: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2). #[cfg(feature = "zk")] #[derive(Clone, Debug)] +#[allow(missing_docs)] pub struct Sigma2Proof { - /// Commitment A = e(H1, k1·Γ2,fin + k2·H2) pub a: GT, - /// Response z1 = k1 + c·t1 pub z1: F, - /// Response z2 = k2 + c·t2 pub z2: F, } -/// Zero-knowledge scalar product proof (Σ-protocol) -/// -/// Proves knowledge of (v1, v2, rC, rD1, rD2) for relation L1: -/// - C = e(v1, v2) + rC·HT -/// - D1 = e(v1, Γ2) + rD1·HT -/// - D2 = e(Γ1, v2) + rD2·HT -/// -/// Protocol from Dory paper Section 3.1. +/// ZK scalar product proof: proves (C, D1, D2) are consistent with blinded v1, v2. #[cfg(feature = "zk")] #[derive(Clone, Debug)] +#[allow(missing_docs)] pub struct ScalarProductProof { - /// P1 = e(d1, Γ2) + rP1·HT pub p1: GT, - /// P2 = e(Γ1, d2) + rP2·HT pub p2: GT, - /// Q = e(d1, v2) + e(v1, d2) + rQ·HT pub q: GT, - /// R = e(d1, d2) + rR·HT pub r: GT, - /// E1 = d1 + c·v1 pub e1: G1, - /// E2 = d2 + c·v2 pub e2: G2, - /// r1 = rP1 + c·rD1 pub r1: F, - /// r2 = rP2 + c·rD2 pub r2: F, - /// r3 = rR + c·rQ + c²·rC pub r3: F, } diff --git a/src/mode.rs b/src/mode.rs index 1fb297a..d2453dd 100644 --- a/src/mode.rs +++ b/src/mode.rs @@ -1,55 +1,28 @@ -//! Mode trait for transparent vs zero-knowledge proofs -//! -//! This module provides a mode abstraction that allows the same protocol implementation -//! to work for both transparent (non-hiding) and zero-knowledge (hiding) proofs. -//! -//! - [`Transparent`]: Default mode with no blinding. `sample` returns zero, `mask` is identity. -//! - [`ZK`]: Zero-knowledge mode (requires `zk` feature). Samples blinds from RNG. +//! Mode trait for transparent vs zero-knowledge proofs. use crate::primitives::arithmetic::{Field, Group}; -/// Mode marker trait for transparent vs ZK proofs. -/// -/// Determines whether blinds are sampled (ZK) or zero (transparent). 
+/// Determines whether protocol messages are blinded (ZK) or unblinded (transparent). pub trait Mode: 'static { - /// Sample a blinding factor. - /// - /// - Transparent: returns `F::zero()` without using RNG - /// - ZK: returns a random scalar from the RNG - /// - /// Note: Blinds are sampled from RNG (not transcript) because they are - /// private to the prover. The transcript is only used for public values - /// and deriving challenges that both prover and verifier compute. + /// Sample a blinding scalar: zero in Transparent mode, random in ZK mode. fn sample(rng: &mut R) -> F; - - /// Mask a group element with a blinding factor. - /// - /// - Transparent: returns `value` unchanged - /// - ZK: returns `value + base * blind` + /// Mask a group element: identity in Transparent mode, `value + base * blind` in ZK mode. fn mask(value: G, base: &G, blind: &G::Scalar) -> G; } -/// Transparent mode - no blinding. -/// -/// All samples return zero, all masks return the value unchanged. -/// This is the default mode and produces non-hiding proofs. +/// Transparent mode: no blinding, non-hiding proofs. pub struct Transparent; impl Mode for Transparent { fn sample(_rng: &mut R) -> F { F::zero() } - fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { value } } -/// Zero-knowledge mode - samples blinds from RNG. -/// -/// Produces hiding proofs by masking protocol messages with random blinds. -/// Blinds are sampled from private randomness (RNG), not the transcript, -/// because they must not affect the public challenge derivation. +/// Zero-knowledge mode: samples blinds from RNG for hiding proofs. 
#[cfg(feature = "zk")] pub struct ZK; @@ -58,7 +31,6 @@ impl Mode for ZK { fn sample(rng: &mut R) -> F { F::random(rng) } - fn mask(value: G, base: &G, blind: &G::Scalar) -> G { value + base.scale(blind) } diff --git a/src/primitives/poly.rs b/src/primitives/poly.rs index 873e256..edb857e 100644 --- a/src/primitives/poly.rs +++ b/src/primitives/poly.rs @@ -89,21 +89,10 @@ pub trait Polynomial { M1: DoryRoutines, E::G1: Group; - /// Commit to polynomial with ZK blinds - /// - /// Same as `commit`, but adds blinds to each row commitment for zero-knowledge: - /// `row_commit[i] = MSM(g1_generators, row_coefficients[i]) + r_i·H1` - /// - /// # Returns - /// `(commitment, row_commitments, row_blinds)` where: - /// - `commitment`: Final commitment in GT (derived from blinded row commitments) - /// - `row_commitments`: Blinded row commitments in G1 - /// - `row_blinds`: The blinds used for each row (needed for proof generation) - /// - /// The sum of row_blinds weighted by the left vector gives `r_v` used in Sigma2. + /// Commit with per-row ZK blinds. Returns `(commitment, row_commitments, blinds)`. /// /// # Errors - /// Returns error if coefficient length doesn't match 2^(nu + sigma) or if setup is insufficient. + /// Returns error if coefficient length doesn't match 2^(nu + sigma). #[cfg(feature = "zk")] #[allow(clippy::type_complexity)] fn commit_zk( diff --git a/src/proof.rs b/src/proof.rs index a4bd0a9..230ba5d 100644 --- a/src/proof.rs +++ b/src/proof.rs @@ -4,10 +4,6 @@ //! - VMV message (PCS transform) //! - Multiple rounds of reduce messages (log n rounds) //! - Final scalar product message -//! -//! For ZK mode, the proof additionally contains: -//! - E2 and y_com (blinded VMV extension) -//! - Sigma proofs for ZK verification use crate::messages::*; use crate::primitives::arithmetic::Group; @@ -18,17 +14,18 @@ use crate::primitives::arithmetic::Group; /// at a given point. 
It consists of messages from the interactive protocol made /// non-interactive via Fiat-Shamir. /// -/// In ZK mode (when `zk` feature is enabled), additional fields contain the -/// sigma proofs and blinded values needed for zero-knowledge verification. +/// The proof includes the matrix dimensions (nu, sigma) used during proof generation, +/// which the verifier uses to ensure consistency with the evaluation point. #[derive(Clone, Debug)] +#[allow(missing_docs)] pub struct DoryProof { /// Vector-Matrix-Vector message for PCS transformation pub vmv_message: VMVMessage, - /// First reduce messages for each round + /// First reduce messages for each round (max(nu, sigma) rounds total) pub first_messages: Vec>, - /// Second reduce messages for each round + /// Second reduce messages for each round (max(nu, sigma) rounds total) pub second_messages: Vec>, /// Final scalar product message @@ -40,24 +37,14 @@ pub struct DoryProof { /// Log₂ of number of columns in the coefficient matrix pub sigma: usize, - // ZK-specific fields (present when zk feature is enabled) - /// E2 = y·Γ2,fin + r_e2·H2 (blinded VMV extension) #[cfg(feature = "zk")] pub e2: Option, - - /// y_com = y·Γ1,fin + r_y·H1 (commitment to evaluation) #[cfg(feature = "zk")] pub y_com: Option, - - /// Sigma1 proof: proves y_com and e2 commit to same y #[cfg(feature = "zk")] pub sigma1_proof: Option>, - - /// Sigma2 proof: proves VMV relation holds with blinds #[cfg(feature = "zk")] pub sigma2_proof: Option>, - - /// ZK scalar product proof (Σ-protocol) #[cfg(feature = "zk")] pub scalar_product_proof: Option>, } diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index 4ab81d7..7331398 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -18,29 +18,12 @@ use std::marker::PhantomData; #[cfg(feature = "zk")] use crate::primitives::transcript::Transcript; -/// Scalar field type alias for a pairing curve. type Scalar = <::G1 as Group>::Scalar; -/// Accumulated blinds tuple (r_c, r_d1, r_d2) for ZK mode. 
-type Blinds = (Scalar, Scalar, Scalar); - -/// ZK scalar product proof type alias. -#[cfg(feature = "zk")] -type ZkScalarProductProof = ScalarProductProof< - ::G1, - ::G2, - Scalar, - ::GT, ->; - /// Prover state for the Dory opening protocol /// /// Maintains the current state of the prover during the interactive protocol. /// The state consists of vectors that get folded in each round. -/// -/// The `M` parameter controls whether the proof is transparent or zero-knowledge: -/// - `Transparent` (default): No blinding, produces non-hiding proofs -/// - `ZK` (requires `zk` feature): Samples blinds from transcript for hiding proofs pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { /// Current v1 vector (G1 elements) v1: Vec, @@ -63,19 +46,19 @@ pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { /// Reference to prover setup setup: &'a ProverSetup, - // ZK blind accumulators (zero for Transparent mode) - /// Accumulated blind for C (inner product) - r_c: ::Scalar, - /// Accumulated blind for D1 - r_d1: ::Scalar, - /// Accumulated blind for D2 - r_d2: ::Scalar, - /// Accumulated blind for E1 - r_e1: ::Scalar, - /// Accumulated blind for E2 - r_e2: ::Scalar, - - /// Phantom data for mode marker + // ZK accumulated blinds (zero in Transparent mode) + r_c: Scalar, + r_d1: Scalar, + r_d2: Scalar, + r_e1: Scalar, + r_e2: Scalar, + // Per-round blinds stored between compute and apply + round_d1: [Scalar; 2], + round_d2: [Scalar; 2], + round_c: [Scalar; 2], + round_e1: [Scalar; 2], + round_e2: [Scalar; 2], + _mode: PhantomData, } @@ -161,6 +144,7 @@ where } let num_rounds = v1.len().trailing_zeros() as usize; + let z = Scalar::::zero(); Self { v1, @@ -170,54 +154,47 @@ where s2, num_rounds, setup, - r_c: ::Scalar::zero(), - r_d1: ::Scalar::zero(), - r_d2: ::Scalar::zero(), - r_e1: ::Scalar::zero(), - r_e2: ::Scalar::zero(), + r_c: z, + r_d1: z, + r_d2: z, + r_e1: z, + r_e2: z, + round_d1: [z; 2], + round_d2: [z; 2], + round_c: [z; 
2], + round_e1: [z; 2], + round_e2: [z; 2], _mode: PhantomData, } } - /// Create new prover state with initial blinds (for VMV message blinds) - /// - /// Used when the VMV message computation samples initial blinds. - #[allow(clippy::too_many_arguments)] - pub fn new_with_blinds( - v1: Vec, - v2: Vec, - v2_scalars: Option::Scalar>>, - s1: Vec<::Scalar>, - s2: Vec<::Scalar>, - setup: &'a ProverSetup, - r_c: ::Scalar, - r_d2: ::Scalar, - r_e1: ::Scalar, - r_e2: ::Scalar, - ) -> Self { - let mut state = Self::new(v1, v2, v2_scalars, s1, s2, setup); - state.r_c = r_c; - state.r_d2 = r_d2; - state.r_e1 = r_e1; - state.r_e2 = r_e2; - state + /// Set initial VMV blinds (r_c, r_d2, r_e1, r_e2). + pub fn set_initial_blinds( + &mut self, + r_c: Scalar, + r_d2: Scalar, + r_e1: Scalar, + r_e2: Scalar, + ) { + self.r_c = r_c; + self.r_d2 = r_d2; + self.r_e1 = r_e1; + self.r_e2 = r_e2; } - /// Compute first reduce message for current round. Returns (message, d1_blinds, d2_blinds). + /// Compute first reduce message for current round + /// + /// Computes D1L, D1R, D2L, D2R, E1β, E2β based on current state. 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_first_message")] - #[allow(clippy::type_complexity)] - pub fn compute_first_message< + pub fn compute_first_message( + &mut self, + rng: &mut R, + ) -> FirstReduceMessage + where M1: DoryRoutines, M2: DoryRoutines, R: rand_core::RngCore, - >( - &self, - rng: &mut R, - ) -> ( - FirstReduceMessage, - [::Scalar; 2], - [::Scalar; 2], - ) { + { assert!( self.num_rounds > 0, "Not enough rounds left in prover state" @@ -233,20 +210,25 @@ where let g1_prime = &self.setup.g1_vec[..n2]; let g2_prime = &self.setup.g2_vec[..n2]; - // ZK: sample blinds from RNG (zero for Transparent mode) - let r_d1_l: ::Scalar = M::sample(rng); - let r_d1_r: ::Scalar = M::sample(rng); - let r_d2_l: ::Scalar = M::sample(rng); - let r_d2_r: ::Scalar = M::sample(rng); + // Sample round blinds (zero in Transparent mode) + self.round_d1 = [M::sample(rng), M::sample(rng)]; + self.round_d2 = [M::sample(rng), M::sample(rng)]; // Compute D values: multi-pairings between v-vectors and generators // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ - g2_prime is from setup, use cached version - let d1_left_base = E::multi_pair_g2_setup(v1_l, g2_prime); - let d1_right_base = E::multi_pair_g2_setup(v1_r, g2_prime); + let d1_left = M::mask( + E::multi_pair_g2_setup(v1_l, g2_prime), + &self.setup.ht, + &self.round_d1[0], + ); + let d1_right = M::mask( + E::multi_pair_g2_setup(v1_r, g2_prime), + &self.setup.ht, + &self.round_d1[1], + ); // D₂L = ⟨Γ₁', v₂L⟩, D₂R = ⟨Γ₁', v₂R⟩ - // If v2 was constructed as Γ2,fin * scalars (first round), compute MSM(Γ₁', scalars) then one pairing. - // Γ2,fin = g2_vec[0] (commitment base, NOT h2 which is the blinding generator) + // If v2 was constructed as Γ2,fin * scalars (first round; Γ2,fin = g2_vec[0], not the blinding generator h2), compute MSM(Γ₁', scalars) then one pairing. 
let (d2_left_base, d2_right_base) = if let Some(scalars) = self.v2_scalars.as_ref() { let (s_l, s_r) = scalars.split_at(n2); let sum_left = M1::msm(g1_prime, s_l); @@ -259,12 +241,8 @@ where E::multi_pair_g1_setup(g1_prime, v2_r), ) }; - - // ZK: mask D values (identity for Transparent mode) - let d1_left = M::mask(d1_left_base, &self.setup.ht, &r_d1_l); - let d1_right = M::mask(d1_right_base, &self.setup.ht, &r_d1_r); - let d2_left = M::mask(d2_left_base, &self.setup.ht, &r_d2_l); - let d2_right = M::mask(d2_right_base, &self.setup.ht, &r_d2_r); + let d2_left = M::mask(d2_left_base, &self.setup.ht, &self.round_d2[0]); + let d2_right = M::mask(d2_right_base, &self.setup.ht, &self.round_d2[1]); // Compute E values for extended protocol: MSMs with scalar vectors // E₁β = ⟨Γ₁, s₂⟩ @@ -273,30 +251,24 @@ where // E₂β = ⟨Γ₂, s₁⟩ let e2_beta = M2::msm(&self.setup.g2_vec[..1 << self.num_rounds], &self.s1[..]); - ( - FirstReduceMessage { - d1_left, - d1_right, - d2_left, - d2_right, - e1_beta, - e2_beta, - }, - [r_d1_l, r_d1_r], - [r_d2_l, r_d2_r], - ) + FirstReduceMessage { + d1_left, + d1_right, + d2_left, + d2_right, + e1_beta, + e2_beta, + } } /// Apply first challenge (beta) and combine vectors /// /// Updates the state by combining with generators scaled by beta. - /// Also accumulates blinds: rC ← rC + β·rD2 + β⁻¹·rD1 #[tracing::instrument(skip_all, name = "DoryProverState::apply_first_challenge")] pub fn apply_first_challenge(&mut self, beta: &::Scalar) where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, { let beta_inv = (*beta).inv().expect("beta must be invertible"); @@ -311,27 +283,23 @@ where // After first combine, the `v2_scalars` optimization does not apply. 
self.v2_scalars = None; - // ZK: accumulate blinds using accumulated rD1, rD2 (not message blinds) - // rC ← rC + β·rD2 + β⁻¹·rD1 + // Accumulate blinds: rC ← rC + β·rD2 + β⁻¹·rD1 self.r_c = self.r_c + self.r_d2 * *beta + self.r_d1 * beta_inv; } - /// Compute second reduce message for current round. Returns (message, c_blinds, e1_blinds, e2_blinds). + /// Compute second reduce message for current round + /// + /// Computes C+, C-, E1+, E1-, E2+, E2- based on current state. #[tracing::instrument(skip_all, name = "DoryProverState::compute_second_message")] - #[allow(clippy::type_complexity)] - pub fn compute_second_message< + pub fn compute_second_message( + &mut self, + rng: &mut R, + ) -> SecondReduceMessage + where M1: DoryRoutines, M2: DoryRoutines, R: rand_core::RngCore, - >( - &self, - rng: &mut R, - ) -> ( - SecondReduceMessage, - [::Scalar; 2], - [::Scalar; 2], - [::Scalar; 2], - ) { + { let n2 = 1 << (self.num_rounds - 1); // n/2 // Split all vectors into left and right halves @@ -340,65 +308,40 @@ where let (s1_l, s1_r) = self.s1.split_at(n2); let (s2_l, s2_r) = self.s2.split_at(n2); - // ZK: sample blinds from RNG (zero for Transparent mode) - let r_c_plus: ::Scalar = M::sample(rng); - let r_c_minus: ::Scalar = M::sample(rng); - let r_e1_plus: ::Scalar = M::sample(rng); - let r_e1_minus: ::Scalar = M::sample(rng); - let r_e2_plus: ::Scalar = M::sample(rng); - let r_e2_minus: ::Scalar = M::sample(rng); + // Sample round blinds (zero in Transparent mode) + self.round_c = [M::sample(rng), M::sample(rng)]; + self.round_e1 = [M::sample(rng), M::sample(rng)]; + self.round_e2 = [M::sample(rng), M::sample(rng)]; // Compute C terms: cross products of v-vectors // C₊ = ⟨v₁L, v₂R⟩ - let c_plus_base = E::multi_pair(v1_l, v2_r); + let c_plus = M::mask(E::multi_pair(v1_l, v2_r), &self.setup.ht, &self.round_c[0]); // C₋ = ⟨v₁R, v₂L⟩ - let c_minus_base = E::multi_pair(v1_r, v2_l); - - // ZK: mask C values - let c_plus = M::mask(c_plus_base, &self.setup.ht, &r_c_plus); - 
let c_minus = M::mask(c_minus_base, &self.setup.ht, &r_c_minus); + let c_minus = M::mask(E::multi_pair(v1_r, v2_l), &self.setup.ht, &self.round_c[1]); // Compute E terms for extended protocol: cross products with scalars - // E₁₊ = ⟨v₁L, s₂R⟩ - let e1_plus_base = M1::msm(v1_l, s2_r); - // E₁₋ = ⟨v₁R, s₂L⟩ - let e1_minus_base = M1::msm(v1_r, s2_l); - // E₂₊ = ⟨s₁L, v₂R⟩ - let e2_plus_base = M2::msm(v2_r, s1_l); - // E₂₋ = ⟨s₁R, v₂L⟩ - let e2_minus_base = M2::msm(v2_l, s1_r); - - // ZK: mask E values - let e1_plus = M::mask(e1_plus_base, &self.setup.h1, &r_e1_plus); - let e1_minus = M::mask(e1_minus_base, &self.setup.h1, &r_e1_minus); - let e2_plus = M::mask(e2_plus_base, &self.setup.h2, &r_e2_plus); - let e2_minus = M::mask(e2_minus_base, &self.setup.h2, &r_e2_minus); - - ( - SecondReduceMessage { - c_plus, - c_minus, - e1_plus, - e1_minus, - e2_plus, - e2_minus, - }, - [r_c_plus, r_c_minus], - [r_e1_plus, r_e1_minus], - [r_e2_plus, r_e2_minus], - ) + let e1_plus = M::mask(M1::msm(v1_l, s2_r), &self.setup.h1, &self.round_e1[0]); + let e1_minus = M::mask(M1::msm(v1_r, s2_l), &self.setup.h1, &self.round_e1[1]); + let e2_plus = M::mask(M2::msm(v2_r, s1_l), &self.setup.h2, &self.round_e2[0]); + let e2_minus = M::mask(M2::msm(v2_l, s1_r), &self.setup.h2, &self.round_e2[1]); + + SecondReduceMessage { + c_plus, + c_minus, + e1_plus, + e1_minus, + e2_plus, + e2_minus, + } } - /// Apply second challenge (alpha) and fold vectors. + /// Apply second challenge (alpha) and fold vectors + /// + /// Reduces the vector size by half using the alpha challenge. 
#[tracing::instrument(skip_all, name = "DoryProverState::apply_second_challenge")] pub fn apply_second_challenge, M2: DoryRoutines>( &mut self, alpha: &::Scalar, - d1_blinds: [::Scalar; 2], - d2_blinds: [::Scalar; 2], - c_blinds: [::Scalar; 2], - e1_blinds: [::Scalar; 2], - e2_blinds: [::Scalar; 2], ) { let alpha_inv = (*alpha).inv().expect("alpha must be invertible"); let n2 = 1 << (self.num_rounds - 1); // n/2 @@ -423,12 +366,12 @@ where M1::fold_field_vectors(s2_l, s2_r, &alpha_inv); self.s2.truncate(n2); - // ZK: update accumulated blinds - self.r_c = self.r_c + c_blinds[0] * *alpha + c_blinds[1] * alpha_inv; - self.r_d1 = d1_blinds[0] * *alpha + d1_blinds[1]; - self.r_d2 = d2_blinds[0] * alpha_inv + d2_blinds[1]; - self.r_e1 = self.r_e1 + e1_blinds[0] * *alpha + e1_blinds[1] * alpha_inv; - self.r_e2 = self.r_e2 + e2_blinds[0] * *alpha + e2_blinds[1] * alpha_inv; + // Update accumulated blinds from stored round blinds + self.r_c = self.r_c + self.round_c[0] * *alpha + self.round_c[1] * alpha_inv; + self.r_d1 = self.round_d1[0] * *alpha + self.round_d1[1]; + self.r_d2 = self.round_d2[0] * alpha_inv + self.round_d2[1]; + self.r_e1 = self.r_e1 + self.round_e1[0] * *alpha + self.round_e1[1] * alpha_inv; + self.r_e2 = self.r_e2 + self.round_e2[0] * *alpha + self.round_e2[1] * alpha_inv; // Decrement round counter self.num_rounds -= 1; @@ -438,8 +381,6 @@ where /// /// Applies fold-scalars transformation and returns the final E1, E2 elements. /// Must be called when num_rounds=0 (vectors are size 1). 
- /// - /// Also accumulates the final blind: r_c ← r_c + γ·r_e2 + γ⁻¹·r_e1 #[tracing::instrument(skip_all, name = "DoryProverState::compute_final_message")] pub fn compute_final_message( &mut self, @@ -448,7 +389,6 @@ where where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, { debug_assert_eq!(self.num_rounds, 0, "num_rounds must be 0 for final message"); debug_assert_eq!(self.v1.len(), 1, "v1 must have length 1"); @@ -465,120 +405,75 @@ where let gamma_inv_s2 = gamma_inv * self.s2[0]; let e2 = self.v2[0] + self.setup.h2.scale(&gamma_inv_s2); - // ZK: final blind accumulation - // r_c ← r_c + γ·r_e2 + γ⁻¹·r_e1 + // Final blind accumulation: r_c ← r_c + γ·r_e2 + γ⁻¹·r_e1 self.r_c = self.r_c + self.r_e2 * *gamma + self.r_e1 * gamma_inv; ScalarProductMessage { e1, e2 } } - /// Get accumulated blinds (r_c, r_d1, r_d2) for ZK mode Σ-protocol. - pub fn blinds(&self) -> Blinds { - (self.r_c, self.r_d1, self.r_d2) - } - - /// Generate ZK scalar product proof (Σ-protocol) - internal version - /// - /// This is callable from any Mode but only produces meaningful proofs in ZK mode. - /// In Transparent mode, all blinds are zero, so the proof is trivial. + /// Generate ZK scalar product proof (Σ-protocol). /// - /// Must be called BEFORE `compute_final_message` because that method modifies r_c, - /// but the Σ-protocol needs the pre-fold-scalars blinds. + /// Must be called BEFORE `compute_final_message` because that modifies r_c. 
#[cfg(feature = "zk")] - pub fn scalar_product_proof_internal< - T: crate::primitives::transcript::Transcript, - R: rand_core::RngCore, - >( + pub fn scalar_product_proof, R: rand_core::RngCore>( &self, transcript: &mut T, rng: &mut R, - ) -> crate::messages::ScalarProductProof::Scalar, E::GT> { - debug_assert_eq!(self.v1.len(), 1, "v1 must be length 1 after folding"); - debug_assert_eq!(self.v2.len(), 1, "v2 must be length 1 after folding"); - - let v1 = self.v1[0]; - let v2 = self.v2[0]; - let gamma1 = self.setup.g1_vec[0]; - let gamma2 = self.setup.g2_vec[0]; - - type F = <::G1 as Group>::Scalar; - - // Sample random scalars from RNG (private to prover) - let s_d1: F = Field::random(rng); - let s_d2: F = Field::random(rng); - let d1 = gamma1.scale(&s_d1); - let d2 = gamma2.scale(&s_d2); - - // Sample blinding scalars from RNG (private to prover) - let r_p1: F = Field::random(rng); - let r_p2: F = Field::random(rng); - let r_q: F = Field::random(rng); - let r_r: F = Field::random(rng); - - // Compute first message: P1, P2, Q, R - // P1 = e(d1, Γ2) + rP1·HT - let p1 = E::pair(&d1, &gamma2) + self.setup.ht.scale(&r_p1); - // P2 = e(Γ1, d2) + rP2·HT - let p2 = E::pair(&gamma1, &d2) + self.setup.ht.scale(&r_p2); - // Q = e(d1, v2) + e(v1, d2) + rQ·HT + ) -> ScalarProductProof, E::GT> { + debug_assert_eq!(self.v1.len(), 1); + debug_assert_eq!(self.v2.len(), 1); + + let (v1, v2) = (self.v1[0], self.v2[0]); + let (g1, g2) = (self.setup.g1_vec[0], self.setup.g2_vec[0]); + + let mut rand = || -> Scalar { Field::random(rng) }; + let (s_d1, s_d2) = (rand(), rand()); + let (d1, d2) = (g1.scale(&s_d1), g2.scale(&s_d2)); + let (r_p1, r_p2, r_q, r_r) = (rand(), rand(), rand(), rand()); + + let p1 = E::pair(&d1, &g2) + self.setup.ht.scale(&r_p1); + let p2 = E::pair(&g1, &d2) + self.setup.ht.scale(&r_p2); let q = E::pair(&d1, &v2) + E::pair(&v1, &d2) + self.setup.ht.scale(&r_q); - // R = e(d1, d2) + rR·HT let r = E::pair(&d1, &d2) + self.setup.ht.scale(&r_r); - // Append first 
message to transcript and derive challenge transcript.append_serde(b"sigma_p1", &p1); transcript.append_serde(b"sigma_p2", &p2); transcript.append_serde(b"sigma_q", &q); transcript.append_serde(b"sigma_r", &r); let c = transcript.challenge_scalar(b"sigma_c"); - // Compute response: E1, E2, r1, r2, r3 - // E1 = d1 + c·v1 - let e1 = d1 + v1.scale(&c); - // E2 = d2 + c·v2 - let e2 = d2 + v2.scale(&c); - // r1 = rP1 + c·rD1 - let r1 = r_p1 + c * self.r_d1; - // r2 = rP2 + c·rD2 - let r2 = r_p2 + c * self.r_d2; - // r3 = rR + c·rQ + c²·rC let c_sq = c * c; - let r3 = r_r + c * r_q + c_sq * self.r_c; - - crate::messages::ScalarProductProof { + ScalarProductProof { p1, p2, q, r, - e1, - e2, - r1, - r2, - r3, + e1: d1 + v1.scale(&c), + e2: d2 + v2.scale(&c), + r1: r_p1 + c * self.r_d1, + r2: r_p2 + c * self.r_d2, + r3: r_r + c * r_q + c_sq * self.r_c, } } } -/// Generate Sigma1 proof: proves knowledge of (y, rE2) s.t. E2 = y·Γ2,fin + rE2·H2. +/// Generate Sigma1 proof: proves knowledge of (y, rE2, ry). 
#[cfg(feature = "zk")] pub fn generate_sigma1_proof, R: rand_core::RngCore>( - y: &::Scalar, - r_e2: &::Scalar, - r_y: &::Scalar, + y: &Scalar, + r_e2: &Scalar, + r_y: &Scalar, setup: &ProverSetup, transcript: &mut T, rng: &mut R, -) -> Sigma1Proof::Scalar> +) -> Sigma1Proof> where - ::Scalar: Field, - E::G2: Group::Scalar>, + Scalar: Field, + E::G2: Group>, { let (g2_fin, g1_fin) = (&setup.g2_vec[0], &setup.g1_vec[0]); - let (k1, k2, k3) = ( - ::Scalar::random(rng), - ::Scalar::random(rng), - ::Scalar::random(rng), - ); + let (k1, k2, k3): (Scalar, _, _) = + (Field::random(rng), Field::random(rng), Field::random(rng)); let a1 = g2_fin.scale(&k1) + setup.h2.scale(&k2); let a2 = g1_fin.scale(&k1) + setup.h1.scale(&k3); transcript.append_serde(b"sigma1_a1", &a1); @@ -598,22 +493,20 @@ where pub fn verify_sigma1_proof>( e2: &E::G2, y_commit: &E::G1, - proof: &Sigma1Proof::Scalar>, + proof: &Sigma1Proof>, setup: &VerifierSetup, transcript: &mut T, ) -> Result<(), DoryError> where - ::Scalar: Field, - E::G2: Group::Scalar>, + Scalar: Field, + E::G2: Group>, { transcript.append_serde(b"sigma1_a1", &proof.a1); transcript.append_serde(b"sigma1_a2", &proof.a2); let c = transcript.challenge_scalar(b"sigma1_c"); - // Check E2 relation: z1·Γ2,fin + z2·H2 = A1 + c·E2 if setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2) != proof.a1 + e2.scale(&c) { return Err(DoryError::InvalidProof); } - // Check yC relation: z1·Γ1,fin + z3·H1 = A2 + c·yC if setup.g1_0.scale(&proof.z1) + setup.h1.scale(&proof.z3) != proof.a2 + y_commit.scale(&c) { return Err(DoryError::InvalidProof); } @@ -623,21 +516,18 @@ where /// Generate Sigma2 proof: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2). 
#[cfg(feature = "zk")] pub fn generate_sigma2_proof, R: rand_core::RngCore>( - t1: &::Scalar, - t2: &::Scalar, + t1: &Scalar, + t2: &Scalar, setup: &ProverSetup, transcript: &mut T, rng: &mut R, -) -> Sigma2Proof<::Scalar, E::GT> +) -> Sigma2Proof, E::GT> where - ::Scalar: Field, - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, + Scalar: Field, + E::G2: Group>, + E::GT: Group>, { - let (k1, k2) = ( - ::Scalar::random(rng), - ::Scalar::random(rng), - ); + let (k1, k2): (Scalar, _) = (Field::random(rng), Field::random(rng)); let a = E::pair( &setup.h1, &(setup.g2_vec[0].scale(&k1) + setup.h2.scale(&k2)), @@ -656,14 +546,14 @@ where pub fn verify_sigma2_proof>( e1: &E::G1, d2: &E::GT, - proof: &Sigma2Proof<::Scalar, E::GT>, + proof: &Sigma2Proof, E::GT>, setup: &VerifierSetup, transcript: &mut T, ) -> Result<(), DoryError> where - ::Scalar: Field, - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, + Scalar: Field, + E::G2: Group>, + E::GT: Group>, { transcript.append_serde(b"sigma2_a", &proof.a); let c = transcript.challenge_scalar(b"sigma2_c"); @@ -680,7 +570,26 @@ where } impl DoryVerifierState { - /// Create new verifier state for O(1) accumulation. + /// Create new verifier state + /// + /// # Parameters + /// - `c`: Initial inner product value + /// - `d1`: Initial d1 value (from VMV) + /// - `d2`: Initial d2 value (from VMV) + /// - `e1`: Initial e1 value + /// - `e2`: Initial e2 value + /// + /// Construct verifier state for O(1) accumulation + /// + /// - `s1_coords`: Per-round coordinates for s1 (right_vec in prover) + /// - `s2_coords`: Per-round coordinates for s2 (left_vec in prover) + /// - `num_rounds`: Number of rounds + /// - `setup`: Verifier setup parameters + /// + /// Note: `e1` and `d2` are stored both as initial values (for batched VMV check) + /// and as accumulators (updated during reduce rounds) + /// this is because the VMV check happens before the folding rounds, so we need to save + /// the value for the final batched pairing check. 
#[allow(clippy::too_many_arguments)] pub fn new( c: E::GT, @@ -832,18 +741,15 @@ impl DoryVerifierState { let p2_g2 = g2_term.scale(&neg_gamma); // Pair 3: ((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) - // This is the fold-scalars γ⁻¹·e(E₁, H₂) term - uses H₂ (h2) let d_s2 = *d * self.s2_acc; let g1_term = self.e1 + self.setup.g1_0.scale(&d_s2); let p3_g1 = g1_term.scale(&neg_gamma_inv); let p3_g2 = self.setup.h2; - // Pair 4: (d²·E₁_init, Γ2,fin) - // This is the deferred VMV check: d²·e(E₁_init, Γ2,fin) - uses Γ2,fin (g2_0) + // Pair 4: (d²·E₁_init, Γ2,fin) - deferred VMV check let p4_g1 = self.e1_init.scale(&d_sq); let p4_g2 = self.setup.g2_0; - // Multi-pairing: 4 miller loops + 1 final exponentiation let lhs = E::multi_pair(&[p1_g1, p2_g1, p3_g1, p4_g1], &[p1_g2, p2_g2, p3_g2, p4_g2]); if lhs == rhs { @@ -853,12 +759,10 @@ impl DoryVerifierState { } } - /// Verify final scalar product with ZK proof using pre-derived challenge. #[cfg(feature = "zk")] - #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final_zk_with_challenge")] - pub fn verify_final_zk_with_challenge( + pub fn verify_final_zk( &mut self, - proof: &ZkScalarProductProof, + proof: &ScalarProductProof, E::GT>, c: &Scalar, d: &Scalar, ) -> Result<(), DoryError> @@ -867,32 +771,22 @@ impl DoryVerifierState { E::GT: Group::Scalar>, ::Scalar: Field, { - debug_assert_eq!( - self.num_rounds, 0, - "num_rounds must be 0 for final verification" - ); - let d_inv = (*d).inv().expect("d must be invertible"); - let c_sq = *c * *c; - - // LHS: e(E1 + d·Γ1, E2 + d⁻¹·Γ2) - let lhs_g1 = proof.e1 + self.setup.g1_0.scale(d); - let lhs_g2 = proof.e2 + self.setup.g2_0.scale(&d_inv); - let lhs = E::pair(&lhs_g1, &lhs_g2); - - // RHS: χ + R + c·Q + c²·C + d·P2 + d·c·D2 + d⁻¹·P1 + d⁻¹·c·D1 - (r3 + d·r2 + d⁻¹·r1)·HT - let mut rhs = self.setup.chi[0]; - rhs = rhs + proof.r; - rhs = rhs + proof.q.scale(c); - rhs = rhs + self.c.scale(&c_sq); - rhs = rhs + proof.p2.scale(d); - rhs = rhs + self.d2.scale(&(*d * *c)); - rhs 
= rhs + proof.p1.scale(&d_inv); - rhs = rhs + self.d1.scale(&(d_inv * *c)); - - // Blind correction: -(r3 + d·r2 + d⁻¹·r1)·HT - let r_total = proof.r3 + *d * proof.r2 + d_inv * proof.r1; - rhs = rhs - self.setup.ht.scale(&r_total); + let (c_sq, lhs) = ( + *c * *c, + E::pair( + &(proof.e1 + self.setup.g1_0.scale(d)), + &(proof.e2 + self.setup.g2_0.scale(&d_inv)), + ), + ); + let mut rhs = self.setup.chi[0] + proof.r + proof.q.scale(c) + self.c.scale(&c_sq); + rhs = rhs + proof.p2.scale(d) + self.d2.scale(&(*d * *c)); + rhs = rhs + proof.p1.scale(&d_inv) + self.d1.scale(&(d_inv * *c)); + rhs = rhs + - self + .setup + .ht + .scale(&(proof.r3 + *d * proof.r2 + d_inv * proof.r1)); if lhs == rhs { Ok(()) diff --git a/src/setup.rs b/src/setup.rs index 878e623..c1fec75 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -44,11 +44,6 @@ pub struct ProverSetup { /// /// Contains precomputed pairing values for efficient verification. /// Derived from the prover setup. -/// -/// # Generator semantics -/// - `g1_0`, `g2_0`: Final generators (Γ1,fin, Γ2,fin) used as commitment bases -/// - `h1`, `h2`: Blinding generators (H1, H2) used for zero-knowledge masking -/// - These MUST be linearly independent (discrete log unknown between them) #[derive(Clone, Debug, DorySerialize, DoryDeserialize)] pub struct VerifierSetup { /// Δ₁L\[k\] = e(Γ₁\[..2^(k-1)\], Γ₂\[..2^(k-1)\]) @@ -66,22 +61,22 @@ pub struct VerifierSetup { /// χ\[k\] = e(Γ₁\[..2^k\], Γ₂\[..2^k\]) pub chi: Vec, - /// Γ1,fin - first G1 generator (commitment base, NOT for blinding) + /// First G1 generator pub g1_0: E::G1, - /// Γ2,fin - first G2 generator (commitment base, NOT for blinding) + /// First G2 generator pub g2_0: E::G2, - /// H1 - blinding generator in G1 (linearly independent from g1_0) + /// Blinding generator in G1 pub h1: E::G1, - /// H2 - blinding generator in G2 (linearly independent from g2_0) + /// Blinding generator in G2 pub h2: E::G2, - /// HT = e(H1, H2) - blinding base in GT + /// h_t = e(h₁, h₂) pub 
ht: E::GT, - /// e(H1, Γ2,fin) = e(h1, g2_0) - precomputed for ZK Sigma2 verification + /// e(H1, Γ2,fin) - precomputed for ZK verification #[cfg(feature = "zk")] pub h1_g2_fin: E::GT, From 10b3f231092fe9524dad5e737cb2c382eab9d51b Mon Sep 17 00:00:00 2001 From: Andrew Tretyakov Date: Wed, 28 Jan 2026 16:54:22 -0500 Subject: [PATCH 05/16] feat: zk integration (#10) * feat: add ZK support for Jolt integration - Add ArkDoryProof y_com/y_blinding field accessors - Add ArkGT/ArkG1 serialization for transcript binding - Fix typo in transcript.rs - Fix clippy warnings in derive crate * fix: remove y_blinding from proof serialization y_blinding was never used by verifier but was being serialized, leaking the blinding factor. Now returned separately from prove(). * fix: update tests for new prove/create_evaluation_proof return type Destructure (proof, _) tuple from prove() and create_evaluation_proof() which now return (DoryProof, Option). Also add #[cfg(feature = "disk-persistence")] guards to tests using load_setup/save_setup. * fix: use separate URS paths for ZK and non-ZK modes ZK mode serializes additional fields (h1_g2_fin) making the URS format incompatible. Now uses dory_{n}_zk.urs for ZK mode. 
* chore: format code for CI compatibility * fix: update examples and benches for new prove return type * style: add empty .rustfmt.toml for CI consistency --- .rustfmt.toml | 0 benches/arkworks_proof.rs | 25 +-- examples/basic_e2e.rs | 2 +- examples/homomorphic.rs | 2 +- examples/homomorphic_mixed_sizes.rs | 2 +- examples/non_square.rs | 2 +- src/backends/arkworks/ark_serde.rs | 272 +++++++++++++++++++++++++++- src/evaluation_proof.rs | 18 +- src/lib.rs | 2 +- src/setup.rs | 6 +- tests/arkworks/evaluation.rs | 14 +- tests/arkworks/homomorphic.rs | 4 +- tests/arkworks/integration.rs | 12 +- tests/arkworks/non_square.rs | 6 +- tests/arkworks/setup.rs | 2 + tests/arkworks/soundness.rs | 2 +- tests/arkworks/zk.rs | 56 +++--- tests/arkworks/zk_statistical.rs | 10 +- 18 files changed, 357 insertions(+), 80 deletions(-) create mode 100644 .rustfmt.toml diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000..e69de29 diff --git a/benches/arkworks_proof.rs b/benches/arkworks_proof.rs index 1004610..7dd0e09 100644 --- a/benches/arkworks_proof.rs +++ b/benches/arkworks_proof.rs @@ -111,7 +111,7 @@ fn bench_verify(c: &mut Criterion) { let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); let mut rng = thread_rng(); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( &poly, &point, tier_1, @@ -178,17 +178,18 @@ fn bench_end_to_end(c: &mut Criterion) { // Prove let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( - &poly, - &point, - tier_1, - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let (proof, _) = + prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( + &poly, + &point, + tier_1, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) 
+ .unwrap(); // Verify let mut verifier_transcript = Blake2bTranscript::new(b"dory-bench"); diff --git a/examples/basic_e2e.rs b/examples/basic_e2e.rs index 9624855..8b597ab 100644 --- a/examples/basic_e2e.rs +++ b/examples/basic_e2e.rs @@ -66,7 +66,7 @@ fn main() -> Result<(), Box> { // Step 5: Prove info!("5. Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-basic-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &poly, &point, tier_1, diff --git a/examples/homomorphic.rs b/examples/homomorphic.rs index c7427e4..a8d8238 100644 --- a/examples/homomorphic.rs +++ b/examples/homomorphic.rs @@ -139,7 +139,7 @@ fn main() -> Result<(), Box> { // Step 8: Generate proof info!("8. Generating evaluation proof for combined polynomial..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, diff --git a/examples/homomorphic_mixed_sizes.rs b/examples/homomorphic_mixed_sizes.rs index 50f3cbb..d55e5e8 100644 --- a/examples/homomorphic_mixed_sizes.rs +++ b/examples/homomorphic_mixed_sizes.rs @@ -102,7 +102,7 @@ fn main() -> Result<(), Box> { info!("Generating evaluation proof with combined commitment..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-mixed"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, diff --git a/examples/non_square.rs b/examples/non_square.rs index 7c80bd0..40c57ed 100644 --- a/examples/non_square.rs +++ b/examples/non_square.rs @@ -65,7 +65,7 @@ fn main() -> Result<(), Box> { // Step 5: 
Prove info!("5. Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-non-square-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( &poly, &point, tier_1, diff --git a/src/backends/arkworks/ark_serde.rs b/src/backends/arkworks/ark_serde.rs index 94054c1..b2035cb 100644 --- a/src/backends/arkworks/ark_serde.rs +++ b/src/backends/arkworks/ark_serde.rs @@ -10,6 +10,8 @@ use std::io::{Read, Write}; use super::BN254; use crate::messages::{FirstReduceMessage, ScalarProductMessage, SecondReduceMessage, VMVMessage}; +#[cfg(feature = "zk")] +use crate::messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof}; use crate::setup::{ProverSetup, VerifierSetup}; impl Valid for ArkFr { @@ -292,6 +294,45 @@ impl CanonicalSerialize for ArkDoryProof { CanonicalSerialize::serialize_with_mode(&(self.nu as u32), &mut writer, compress)?; CanonicalSerialize::serialize_with_mode(&(self.sigma as u32), &mut writer, compress)?; + #[cfg(feature = "zk")] + { + let is_zk = self.e2.is_some() + || self.y_com.is_some() + || self.sigma1_proof.is_some() + || self.sigma2_proof.is_some() + || self.scalar_product_proof.is_some(); + CanonicalSerialize::serialize_with_mode(&(is_zk as u8), &mut writer, compress)?; + if is_zk { + CanonicalSerialize::serialize_with_mode( + self.e2.as_ref().expect("zk proof missing e2"), + &mut writer, + compress, + )?; + CanonicalSerialize::serialize_with_mode( + self.y_com.as_ref().expect("zk proof missing y_com"), + &mut writer, + compress, + )?; + CanonicalSerialize::serialize_with_mode( + self.sigma1_proof.as_ref().expect("zk proof missing sigma1"), + &mut writer, + compress, + )?; + CanonicalSerialize::serialize_with_mode( + self.sigma2_proof.as_ref().expect("zk proof missing sigma2"), + &mut writer, + compress, + )?; + CanonicalSerialize::serialize_with_mode( + self.scalar_product_proof + .as_ref() + 
.expect("zk proof missing scalar product proof"), + &mut writer, + compress, + )?; + } + } + Ok(()) } @@ -333,6 +374,40 @@ impl CanonicalSerialize for ArkDoryProof { // nu and sigma size += 8; // 2 * u32 + #[cfg(feature = "zk")] + { + size += 1; // is_zk flag + if self.e2.is_some() + || self.y_com.is_some() + || self.sigma1_proof.is_some() + || self.sigma2_proof.is_some() + || self.scalar_product_proof.is_some() + { + size += CanonicalSerialize::serialized_size( + self.e2.as_ref().expect("zk proof missing e2"), + compress, + ); + size += CanonicalSerialize::serialized_size( + self.y_com.as_ref().expect("zk proof missing y_com"), + compress, + ); + size += CanonicalSerialize::serialized_size( + self.sigma1_proof.as_ref().expect("zk proof missing sigma1"), + compress, + ); + size += CanonicalSerialize::serialized_size( + self.sigma2_proof.as_ref().expect("zk proof missing sigma2"), + compress, + ); + size += CanonicalSerialize::serialized_size( + self.scalar_product_proof + .as_ref() + .expect("zk proof missing scalar product proof"), + compress, + ); + } + } + size } } @@ -417,6 +492,36 @@ impl CanonicalDeserialize for ArkDoryProof { ::deserialize_with_mode(&mut reader, compress, validate)? as usize; + #[cfg(feature = "zk")] + let (e2, y_com, sigma1_proof, sigma2_proof, scalar_product_proof) = { + let is_zk = ::deserialize_with_mode( + &mut reader, + compress, + validate, + )? 
!= 0; + if is_zk { + let e2 = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let y_com = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let sigma1_proof = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let sigma2_proof = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let scalar_product_proof = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + ( + Some(e2), + Some(y_com), + Some(sigma1_proof), + Some(sigma2_proof), + Some(scalar_product_proof), + ) + } else { + (None, None, None, None, None) + } + }; + Ok(ArkDoryProof { vmv_message, first_messages, @@ -425,15 +530,172 @@ impl CanonicalDeserialize for ArkDoryProof { nu, sigma, #[cfg(feature = "zk")] - e2: None, + e2, #[cfg(feature = "zk")] - y_com: None, + y_com, #[cfg(feature = "zk")] - sigma1_proof: None, + sigma1_proof, #[cfg(feature = "zk")] - sigma2_proof: None, + sigma2_proof, #[cfg(feature = "zk")] - scalar_product_proof: None, + scalar_product_proof, + }) + } +} + +#[cfg(feature = "zk")] +impl CanonicalSerialize for Sigma1Proof { + fn serialize_with_mode( + &self, + mut writer: W, + compress: ArkCompress, + ) -> Result<(), ArkSerializationError> { + CanonicalSerialize::serialize_with_mode(&self.a1, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.a2, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.z1, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.z2, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.z3, &mut writer, compress)?; + Ok(()) + } + + fn serialized_size(&self, compress: ArkCompress) -> usize { + CanonicalSerialize::serialized_size(&self.a1, compress) + + CanonicalSerialize::serialized_size(&self.a2, compress) + + CanonicalSerialize::serialized_size(&self.z1, compress) + + CanonicalSerialize::serialized_size(&self.z2, compress) + 
+ CanonicalSerialize::serialized_size(&self.z3, compress) + } +} + +#[cfg(feature = "zk")] +impl ArkValid for Sigma1Proof { + fn check(&self) -> Result<(), ArkSerializationError> { + Ok(()) + } +} + +#[cfg(feature = "zk")] +impl CanonicalDeserialize for Sigma1Proof { + fn deserialize_with_mode( + mut reader: R, + compress: ArkCompress, + validate: ArkValidate, + ) -> Result { + let a1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let a2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let z1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let z2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let z3 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + Ok(Sigma1Proof { a1, a2, z1, z2, z3 }) + } +} + +#[cfg(feature = "zk")] +impl CanonicalSerialize for Sigma2Proof { + fn serialize_with_mode( + &self, + mut writer: W, + compress: ArkCompress, + ) -> Result<(), ArkSerializationError> { + CanonicalSerialize::serialize_with_mode(&self.a, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.z1, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.z2, &mut writer, compress)?; + Ok(()) + } + + fn serialized_size(&self, compress: ArkCompress) -> usize { + CanonicalSerialize::serialized_size(&self.a, compress) + + CanonicalSerialize::serialized_size(&self.z1, compress) + + CanonicalSerialize::serialized_size(&self.z2, compress) + } +} + +#[cfg(feature = "zk")] +impl ArkValid for Sigma2Proof { + fn check(&self) -> Result<(), ArkSerializationError> { + Ok(()) + } +} + +#[cfg(feature = "zk")] +impl CanonicalDeserialize for Sigma2Proof { + fn deserialize_with_mode( + mut reader: R, + compress: ArkCompress, + validate: ArkValidate, + ) -> Result { + let a = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let z1 = 
CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let z2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + Ok(Sigma2Proof { a, z1, z2 }) + } +} + +#[cfg(feature = "zk")] +impl CanonicalSerialize for ScalarProductProof { + fn serialize_with_mode( + &self, + mut writer: W, + compress: ArkCompress, + ) -> Result<(), ArkSerializationError> { + CanonicalSerialize::serialize_with_mode(&self.p1, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.p2, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.q, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.r, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.e1, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.e2, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.r1, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.r2, &mut writer, compress)?; + CanonicalSerialize::serialize_with_mode(&self.r3, &mut writer, compress)?; + Ok(()) + } + + fn serialized_size(&self, compress: ArkCompress) -> usize { + CanonicalSerialize::serialized_size(&self.p1, compress) + + CanonicalSerialize::serialized_size(&self.p2, compress) + + CanonicalSerialize::serialized_size(&self.q, compress) + + CanonicalSerialize::serialized_size(&self.r, compress) + + CanonicalSerialize::serialized_size(&self.e1, compress) + + CanonicalSerialize::serialized_size(&self.e2, compress) + + CanonicalSerialize::serialized_size(&self.r1, compress) + + CanonicalSerialize::serialized_size(&self.r2, compress) + + CanonicalSerialize::serialized_size(&self.r3, compress) + } +} + +#[cfg(feature = "zk")] +impl ArkValid for ScalarProductProof { + fn check(&self) -> Result<(), ArkSerializationError> { + Ok(()) + } +} + +#[cfg(feature = "zk")] +impl CanonicalDeserialize for ScalarProductProof { + fn deserialize_with_mode( + mut reader: R, + compress: 
ArkCompress, + validate: ArkValidate, + ) -> Result { + let p1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let p2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let q = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let r = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let e1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let e2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let r1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let r2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let r3 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + Ok(ScalarProductProof { + p1, + p2, + q, + r, + e1, + e2, + r1, + r2, + r3, }) } } diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 2552057..cd8a957 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -85,7 +85,7 @@ pub fn create_evaluation_proof( setup: &ProverSetup, transcript: &mut T, rng: &mut R, -) -> Result, DoryError> +) -> Result<(DoryProof, Option), DoryError> where F: Field, E: PairingCurve, @@ -160,7 +160,7 @@ where // ZK mode: compute y, y_com, E2, and sigma proofs #[cfg(feature = "zk")] - let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2) = + let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2, zk_y_blinding) = if std::any::TypeId::of::() == std::any::TypeId::of::() { use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; @@ -176,9 +176,9 @@ where let sigma1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); let sigma2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript, rng); - (Some(e2), Some(y_com), Some(sigma1), Some(sigma2)) + (Some(e2), Some(y_com), Some(sigma1), Some(sigma2), Some(r_y)) } else { - (None, None, None, None) + (None, 
None, None, None, None) }; // v₂ = v_vec · Γ₂,fin (each scalar scales g_fin) @@ -248,7 +248,7 @@ where transcript.append_serde(b"final_e2", &final_message.e2); let _d = transcript.challenge_scalar(b"d"); - Ok(DoryProof { + let proof = DoryProof { vmv_message, first_messages, second_messages, @@ -265,7 +265,13 @@ where sigma2_proof: zk_sigma2, #[cfg(feature = "zk")] scalar_product_proof, - }) + }; + + #[cfg(feature = "zk")] + return Ok((proof, zk_y_blinding)); + + #[cfg(not(feature = "zk"))] + Ok((proof, None)) } /// Verify an evaluation proof diff --git a/src/lib.rs b/src/lib.rs index f27d2d4..b4fc9db 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -278,7 +278,7 @@ pub fn prove( setup: &ProverSetup, transcript: &mut T, rng: &mut R, -) -> Result, DoryError> +) -> Result<(DoryProof, Option), DoryError> where F: Field, E: PairingCurve, diff --git a/src/setup.rs b/src/setup.rs index c1fec75..2033d1d 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -242,7 +242,11 @@ fn get_storage_path(max_log_n: usize) -> Option { cache_directory.map(|mut path| { path.push("dory"); - path.push(format!("dory_{max_log_n}.urs")); + #[cfg(feature = "zk")] + let filename = format!("dory_{max_log_n}_zk.urs"); + #[cfg(not(feature = "zk"))] + let filename = format!("dory_{max_log_n}.urs"); + path.push(filename); path }) } diff --git a/tests/arkworks/evaluation.rs b/tests/arkworks/evaluation.rs index 3d471a2..7831982 100644 --- a/tests/arkworks/evaluation.rs +++ b/tests/arkworks/evaluation.rs @@ -33,7 +33,7 @@ fn test_evaluation_proof_small() { ); assert!(result.is_ok()); - let proof = result.unwrap(); + let (proof, _) = result.unwrap(); let evaluation = poly.evaluate(&point); let mut verifier_transcript = fresh_transcript(); @@ -78,7 +78,7 @@ fn test_evaluation_proof_with_precomputed_commitment() { ); assert!(result.is_ok()); - let proof = result.unwrap(); + let (proof, _) = result.unwrap(); let evaluation = poly.evaluate(&point); let mut verifier_transcript = fresh_transcript(); @@ -114,7 +114,7 
@@ fn test_evaluation_proof_constant_polynomial() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -159,7 +159,7 @@ fn test_evaluation_proof_wrong_evaluation_fails() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -200,7 +200,7 @@ fn test_evaluation_proof_different_sizes() { let (tier_2, tier_1) = poly.commit::(1, 1, &setup).unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -235,7 +235,7 @@ fn test_evaluation_proof_different_sizes() { let (tier_2, tier_1) = poly.commit::(3, 3, &setup).unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -279,7 +279,7 @@ fn test_multiple_evaluations_same_commitment() { let point = random_point(4); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1.clone(), diff --git a/tests/arkworks/homomorphic.rs b/tests/arkworks/homomorphic.rs index ec29f5d..9f0712c 100644 --- a/tests/arkworks/homomorphic.rs +++ b/tests/arkworks/homomorphic.rs @@ -87,7 +87,7 @@ fn test_homomorphic_combination_e2e() { // Create 
evaluation proof using combined commitment let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, @@ -178,7 +178,7 @@ fn test_homomorphic_combination_small() { let evaluation = combined_poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &combined_poly, &point, combined_tier1, diff --git a/tests/arkworks/integration.rs b/tests/arkworks/integration.rs index 14e3b61..8d4c198 100644 --- a/tests/arkworks/integration.rs +++ b/tests/arkworks/integration.rs @@ -24,7 +24,7 @@ fn test_full_workflow() { let expected_evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -68,7 +68,7 @@ fn test_workflow_without_precommitment() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -111,7 +111,7 @@ fn test_batched_proofs() { let point = random_point(8); let mut prover_transcript = Blake2bTranscript::new(format!("test-{i}").as_bytes()); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1.clone(), @@ -165,7 +165,7 @@ fn test_linear_polynomial() { .unwrap(); let mut prover_transcript = 
fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -210,7 +210,7 @@ fn test_zero_polynomial() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -259,7 +259,7 @@ fn test_soundness_wrong_commitment() { .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly2, &point, tier_1_poly2, diff --git a/tests/arkworks/non_square.rs b/tests/arkworks/non_square.rs index 24fd0fd..b06c0af 100644 --- a/tests/arkworks/non_square.rs +++ b/tests/arkworks/non_square.rs @@ -23,7 +23,7 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -105,7 +105,7 @@ fn test_non_square_matrix_small() { .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, @@ -155,7 +155,7 @@ fn test_non_square_matrix_very_rectangular() { .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = 
prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, diff --git a/tests/arkworks/setup.rs b/tests/arkworks/setup.rs index bb02021..e2fe8d3 100644 --- a/tests/arkworks/setup.rs +++ b/tests/arkworks/setup.rs @@ -57,6 +57,7 @@ fn test_setup_consistency() { } #[test] +#[cfg(feature = "disk-persistence")] fn test_setup_disk_persistence() { use dory_pcs::backends::arkworks::BN254; use dory_pcs::setup::{load_setup, save_setup}; @@ -88,6 +89,7 @@ fn test_setup_disk_persistence() { } #[test] +#[cfg(feature = "disk-persistence")] fn test_setup_function_uses_disk() { use dory_pcs::backends::arkworks::BN254; use dory_pcs::{generate_urs, setup}; diff --git a/tests/arkworks/soundness.rs b/tests/arkworks/soundness.rs index 39d0e38..9abe6b0 100644 --- a/tests/arkworks/soundness.rs +++ b/tests/arkworks/soundness.rs @@ -33,7 +33,7 @@ fn create_valid_proof_components( let mut rng = rand::thread_rng(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, &point, tier_1, diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs index 101a7f6..9804915 100644 --- a/tests/arkworks/zk.rs +++ b/tests/arkworks/zk.rs @@ -23,7 +23,7 @@ fn test_zk_full_workflow() { let expected_evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, tier_1, @@ -67,7 +67,7 @@ fn test_zk_small_polynomial() { let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, tier_1, @@ -113,7 +113,7 @@ 
fn test_zk_larger_polynomial() { let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, tier_1, @@ -160,7 +160,7 @@ fn test_zk_non_square_matrix() { let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, tier_1, @@ -209,17 +209,18 @@ fn test_zk_hidden_evaluation() { // Create ZK proof using unified API with ZK mode let mut prover_transcript = fresh_transcript(); - let proof = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let (proof, _) = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); // Verify y_com is present in proof assert!(proof.y_com.is_some(), "ZK proof should contain y_com"); @@ -263,7 +264,7 @@ fn test_zk_tampered_e2_rejected() { let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let mut proof = + let (mut proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, @@ -312,17 +313,18 @@ fn test_zk_hidden_evaluation_larger() { let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let (proof, _) = + 
create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + ) + .unwrap(); let mut verifier_transcript = fresh_transcript(); let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( diff --git a/tests/arkworks/zk_statistical.rs b/tests/arkworks/zk_statistical.rs index c58f429..eaf11d0 100644 --- a/tests/arkworks/zk_statistical.rs +++ b/tests/arkworks/zk_statistical.rs @@ -208,7 +208,7 @@ fn test_zk_statistical_indistinguishability() { let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let proof = + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, @@ -247,7 +247,7 @@ fn test_zk_statistical_indistinguishability() { let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let proof = + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, @@ -284,7 +284,7 @@ fn test_zk_statistical_indistinguishability() { let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let proof = + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, @@ -393,7 +393,7 @@ fn test_zk_witness_independence() { let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let proof = + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, @@ -430,7 +430,7 @@ fn test_zk_witness_independence() { let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); - let proof = + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( &poly, &point, From fe6addf1fee14e245331f195d0d2cb7436614329 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Thu, 26 Feb 2026 
18:59:30 -0500 Subject: [PATCH 06/16] refactor: further diff reduction --- goal.md | 36 --- src/backends/arkworks/ark_poly.rs | 15 +- src/backends/arkworks/ark_serde.rs | 340 +++++++---------------------- src/evaluation_proof.rs | 87 +++----- src/mode.rs | 3 - src/reduce_and_fold.rs | 111 +++++----- src/setup.rs | 10 +- zk.md | 296 ------------------------- 8 files changed, 185 insertions(+), 713 deletions(-) delete mode 100644 goal.md delete mode 100644 zk.md diff --git a/goal.md b/goal.md deleted file mode 100644 index 56bcd48..0000000 --- a/goal.md +++ /dev/null @@ -1,36 +0,0 @@ -# Goal: ZK Feature Branch Diff Reduction - -## Hard Constraints -1. The diff of `src/` + `Cargo.toml` between `main` and `feat/zk` must have **< 500 added lines** (below 300 is the stretch goal) -2. Deletions are fine and encouraged — we want less code, not more -3. **All tests must pass**: `cargo nextest run -q` and `cargo nextest run -q --features zk` -4. Do not stop until all constraints are met - -## Current State -- **852 added** + 283 removed (source only) -- Biggest contributors: `reduce_and_fold.rs` (+427), `evaluation_proof.rs` (+156) - -## Strategy -1. **Do not touch existing comments/docs unnecessarily** — gratuitous doc edits create diff waste -2. **Store round blinds internally in DoryProverState** — eliminate return-value tuples and blind-array parameters between functions -3. **Consolidate `commit` and `commit_zk`** — single method with optional RNG -4. **Bundle ZK proof fields** into `Option` instead of 5 separate `Option` fields -5. **Keep Mode trait lean** — use it internally, minimize generic parameter spread -6. **Move sigma proof logic to `src/zk.rs`** — keep reduce_and_fold.rs focused on the core protocol -7. 
**Minimize API surface changes** — fewer generic parameters on public functions - -## Verification -```sh -# Added lines only (must be < 500, stretch goal < 300): -git diff main...feat/zk -- src/ Cargo.toml | grep -c '^+[^+]' - -# Tests pass: -cargo nextest run --features "arkworks" -cargo nextest run --features "arkworks,zk" - -# Clippy clean: -cargo clippy --features "arkworks" --message-format=short -cargo clippy --features "arkworks,zk" --message-format=short -``` - -## Status: IN PROGRESS diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs index 84c7757..a982897 100644 --- a/src/backends/arkworks/ark_poly.rs +++ b/src/backends/arkworks/ark_poly.rs @@ -118,16 +118,19 @@ impl Polynomial for ArkworksPolynomial { }); } let (num_rows, num_cols) = (1 << nu, 1 << sigma); - let g1_bases = &setup.g1_vec[..num_cols]; + let g1 = &setup.g1_vec[..num_cols]; let blinds: Vec = (0..num_rows).map(|_| ArkFr::random(rng)).collect(); - let row_commitments: Vec = (0..num_rows) + let rows: Vec = (0..num_rows) .map(|i| { - let row = &self.coefficients[i * num_cols..(i + 1) * num_cols]; - M1::msm(g1_bases, row) + setup.h1.scale(&blinds[i]) + M1::msm(g1, &self.coefficients[i * num_cols..(i + 1) * num_cols]) + + setup.h1.scale(&blinds[i]) }) .collect(); - let commitment = E::multi_pair_g2_setup(&row_commitments, &setup.g2_vec[..num_rows]); - Ok((commitment, row_commitments, blinds)) + Ok(( + E::multi_pair_g2_setup(&rows, &setup.g2_vec[..num_rows]), + rows, + blinds, + )) } } diff --git a/src/backends/arkworks/ark_serde.rs b/src/backends/arkworks/ark_serde.rs index b2035cb..6d43998 100644 --- a/src/backends/arkworks/ark_serde.rs +++ b/src/backends/arkworks/ark_serde.rs @@ -10,8 +10,6 @@ use std::io::{Read, Write}; use super::BN254; use crate::messages::{FirstReduceMessage, ScalarProductMessage, SecondReduceMessage, VMVMessage}; -#[cfg(feature = "zk")] -use crate::messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof}; use crate::setup::{ProverSetup, 
VerifierSetup}; impl Valid for ArkFr { @@ -245,6 +243,72 @@ impl DoryDeserialize for ArkGT { // Arkworks-specific Dory proof type use super::ArkDoryProof; +#[cfg(feature = "zk")] +mod zk_serde { + use ark_serialize::{ + CanonicalDeserialize as De, CanonicalSerialize as Ser, Compress, SerializationError, Valid, + Validate, + }; + use std::io::{Read, Write}; + + pub(super) fn ser_opt( + v: &Option, + w: &mut W, + c: Compress, + ) -> Result<(), SerializationError> { + match v { + Some(val) => { + Ser::serialize_with_mode(&1u8, &mut *w, c)?; + Ser::serialize_with_mode(val, w, c) + } + None => Ser::serialize_with_mode(&0u8, w, c), + } + } + + pub(super) fn de_opt( + r: &mut R, + c: Compress, + v: Validate, + ) -> Result, SerializationError> { + match ::deserialize_with_mode(&mut *r, c, v)? { + 0 => Ok(None), + 1 => Ok(Some(T::deserialize_with_mode(r, c, v)?)), + _ => Err(SerializationError::InvalidData), + } + } + + pub(super) fn size_opt(v: &Option, c: Compress) -> usize { + 1 + v.as_ref().map_or(0, |val| Ser::serialized_size(val, c)) + } + + macro_rules! 
impl_serde { + ($ty:ty, [$($field:ident),+]) => { + impl Valid for $ty { fn check(&self) -> Result<(), SerializationError> { Ok(()) } } + impl Ser for $ty { + fn serialize_with_mode(&self, mut w: W, c: Compress) -> Result<(), SerializationError> { + $(Ser::serialize_with_mode(&self.$field, &mut w, c)?;)+ + Ok(()) + } + fn serialized_size(&self, c: Compress) -> usize { + 0 $(+ Ser::serialized_size(&self.$field, c))+ + } + } + impl De for $ty { + fn deserialize_with_mode(mut r: R, c: Compress, v: Validate) -> Result { + Ok(Self { $($field: De::deserialize_with_mode(&mut r, c, v)?),+ }) + } + } + }; + } + + use super::{ArkFr, ArkG1, ArkG2, ArkGT}; + use crate::messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof}; + + impl_serde!(Sigma1Proof, [a1, a2, z1, z2, z3]); + impl_serde!(Sigma2Proof, [a, z1, z2]); + impl_serde!(ScalarProductProof, [p1, p2, q, r, e1, e2, r1, r2, r3]); +} + impl ArkValid for ArkDoryProof { fn check(&self) -> Result<(), ArkSerializationError> { Ok(()) @@ -296,41 +360,11 @@ impl CanonicalSerialize for ArkDoryProof { #[cfg(feature = "zk")] { - let is_zk = self.e2.is_some() - || self.y_com.is_some() - || self.sigma1_proof.is_some() - || self.sigma2_proof.is_some() - || self.scalar_product_proof.is_some(); - CanonicalSerialize::serialize_with_mode(&(is_zk as u8), &mut writer, compress)?; - if is_zk { - CanonicalSerialize::serialize_with_mode( - self.e2.as_ref().expect("zk proof missing e2"), - &mut writer, - compress, - )?; - CanonicalSerialize::serialize_with_mode( - self.y_com.as_ref().expect("zk proof missing y_com"), - &mut writer, - compress, - )?; - CanonicalSerialize::serialize_with_mode( - self.sigma1_proof.as_ref().expect("zk proof missing sigma1"), - &mut writer, - compress, - )?; - CanonicalSerialize::serialize_with_mode( - self.sigma2_proof.as_ref().expect("zk proof missing sigma2"), - &mut writer, - compress, - )?; - CanonicalSerialize::serialize_with_mode( - self.scalar_product_proof - .as_ref() - .expect("zk proof missing scalar 
product proof"), - &mut writer, - compress, - )?; - } + zk_serde::ser_opt(&self.e2, &mut writer, compress)?; + zk_serde::ser_opt(&self.y_com, &mut writer, compress)?; + zk_serde::ser_opt(&self.sigma1_proof, &mut writer, compress)?; + zk_serde::ser_opt(&self.sigma2_proof, &mut writer, compress)?; + zk_serde::ser_opt(&self.scalar_product_proof, &mut writer, compress)?; } Ok(()) @@ -376,36 +410,11 @@ impl CanonicalSerialize for ArkDoryProof { #[cfg(feature = "zk")] { - size += 1; // is_zk flag - if self.e2.is_some() - || self.y_com.is_some() - || self.sigma1_proof.is_some() - || self.sigma2_proof.is_some() - || self.scalar_product_proof.is_some() - { - size += CanonicalSerialize::serialized_size( - self.e2.as_ref().expect("zk proof missing e2"), - compress, - ); - size += CanonicalSerialize::serialized_size( - self.y_com.as_ref().expect("zk proof missing y_com"), - compress, - ); - size += CanonicalSerialize::serialized_size( - self.sigma1_proof.as_ref().expect("zk proof missing sigma1"), - compress, - ); - size += CanonicalSerialize::serialized_size( - self.sigma2_proof.as_ref().expect("zk proof missing sigma2"), - compress, - ); - size += CanonicalSerialize::serialized_size( - self.scalar_product_proof - .as_ref() - .expect("zk proof missing scalar product proof"), - compress, - ); - } + size += zk_serde::size_opt(&self.e2, compress); + size += zk_serde::size_opt(&self.y_com, compress); + size += zk_serde::size_opt(&self.sigma1_proof, compress); + size += zk_serde::size_opt(&self.sigma2_proof, compress); + size += zk_serde::size_opt(&self.scalar_product_proof, compress); } size @@ -492,36 +501,6 @@ impl CanonicalDeserialize for ArkDoryProof { ::deserialize_with_mode(&mut reader, compress, validate)? as usize; - #[cfg(feature = "zk")] - let (e2, y_com, sigma1_proof, sigma2_proof, scalar_product_proof) = { - let is_zk = ::deserialize_with_mode( - &mut reader, - compress, - validate, - )? 
!= 0; - if is_zk { - let e2 = - CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let y_com = - CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let sigma1_proof = - CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let sigma2_proof = - CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let scalar_product_proof = - CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - ( - Some(e2), - Some(y_com), - Some(sigma1_proof), - Some(sigma2_proof), - Some(scalar_product_proof), - ) - } else { - (None, None, None, None, None) - } - }; - Ok(ArkDoryProof { vmv_message, first_messages, @@ -530,172 +509,15 @@ impl CanonicalDeserialize for ArkDoryProof { nu, sigma, #[cfg(feature = "zk")] - e2, + e2: zk_serde::de_opt(&mut reader, compress, validate)?, #[cfg(feature = "zk")] - y_com, + y_com: zk_serde::de_opt(&mut reader, compress, validate)?, #[cfg(feature = "zk")] - sigma1_proof, + sigma1_proof: zk_serde::de_opt(&mut reader, compress, validate)?, #[cfg(feature = "zk")] - sigma2_proof, + sigma2_proof: zk_serde::de_opt(&mut reader, compress, validate)?, #[cfg(feature = "zk")] - scalar_product_proof, - }) - } -} - -#[cfg(feature = "zk")] -impl CanonicalSerialize for Sigma1Proof { - fn serialize_with_mode( - &self, - mut writer: W, - compress: ArkCompress, - ) -> Result<(), ArkSerializationError> { - CanonicalSerialize::serialize_with_mode(&self.a1, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.a2, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.z1, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.z2, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.z3, &mut writer, compress)?; - Ok(()) - } - - fn serialized_size(&self, compress: ArkCompress) -> usize { - CanonicalSerialize::serialized_size(&self.a1, compress) - + 
CanonicalSerialize::serialized_size(&self.a2, compress) - + CanonicalSerialize::serialized_size(&self.z1, compress) - + CanonicalSerialize::serialized_size(&self.z2, compress) - + CanonicalSerialize::serialized_size(&self.z3, compress) - } -} - -#[cfg(feature = "zk")] -impl ArkValid for Sigma1Proof { - fn check(&self) -> Result<(), ArkSerializationError> { - Ok(()) - } -} - -#[cfg(feature = "zk")] -impl CanonicalDeserialize for Sigma1Proof { - fn deserialize_with_mode( - mut reader: R, - compress: ArkCompress, - validate: ArkValidate, - ) -> Result { - let a1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let a2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let z1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let z2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let z3 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - Ok(Sigma1Proof { a1, a2, z1, z2, z3 }) - } -} - -#[cfg(feature = "zk")] -impl CanonicalSerialize for Sigma2Proof { - fn serialize_with_mode( - &self, - mut writer: W, - compress: ArkCompress, - ) -> Result<(), ArkSerializationError> { - CanonicalSerialize::serialize_with_mode(&self.a, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.z1, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.z2, &mut writer, compress)?; - Ok(()) - } - - fn serialized_size(&self, compress: ArkCompress) -> usize { - CanonicalSerialize::serialized_size(&self.a, compress) - + CanonicalSerialize::serialized_size(&self.z1, compress) - + CanonicalSerialize::serialized_size(&self.z2, compress) - } -} - -#[cfg(feature = "zk")] -impl ArkValid for Sigma2Proof { - fn check(&self) -> Result<(), ArkSerializationError> { - Ok(()) - } -} - -#[cfg(feature = "zk")] -impl CanonicalDeserialize for Sigma2Proof { - fn deserialize_with_mode( - mut reader: R, - compress: 
ArkCompress, - validate: ArkValidate, - ) -> Result { - let a = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let z1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let z2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - Ok(Sigma2Proof { a, z1, z2 }) - } -} - -#[cfg(feature = "zk")] -impl CanonicalSerialize for ScalarProductProof { - fn serialize_with_mode( - &self, - mut writer: W, - compress: ArkCompress, - ) -> Result<(), ArkSerializationError> { - CanonicalSerialize::serialize_with_mode(&self.p1, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.p2, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.q, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.r, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.e1, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.e2, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.r1, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.r2, &mut writer, compress)?; - CanonicalSerialize::serialize_with_mode(&self.r3, &mut writer, compress)?; - Ok(()) - } - - fn serialized_size(&self, compress: ArkCompress) -> usize { - CanonicalSerialize::serialized_size(&self.p1, compress) - + CanonicalSerialize::serialized_size(&self.p2, compress) - + CanonicalSerialize::serialized_size(&self.q, compress) - + CanonicalSerialize::serialized_size(&self.r, compress) - + CanonicalSerialize::serialized_size(&self.e1, compress) - + CanonicalSerialize::serialized_size(&self.e2, compress) - + CanonicalSerialize::serialized_size(&self.r1, compress) - + CanonicalSerialize::serialized_size(&self.r2, compress) - + CanonicalSerialize::serialized_size(&self.r3, compress) - } -} - -#[cfg(feature = "zk")] -impl ArkValid for ScalarProductProof { - fn check(&self) -> Result<(), ArkSerializationError> { - 
Ok(()) - } -} - -#[cfg(feature = "zk")] -impl CanonicalDeserialize for ScalarProductProof { - fn deserialize_with_mode( - mut reader: R, - compress: ArkCompress, - validate: ArkValidate, - ) -> Result { - let p1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let p2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let q = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let r = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let e1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let e2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let r1 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let r2 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - let r3 = CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; - Ok(ScalarProductProof { - p1, - p2, - q, - r, - e1, - e2, - r1, - r2, - r3, + scalar_product_proof: zk_serde::de_opt(&mut reader, compress, validate)?, }) } } diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index cd8a957..b129f08 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -129,11 +129,13 @@ where padded_row_commitments.resize(1 << sigma, E::G1::identity()); } - // Sample VMV blinds (zero in Transparent mode, random in ZK mode) - let r_c: F = Mo::sample(rng); - let r_d2: F = Mo::sample(rng); - let r_e1: F = Mo::sample(rng); - let r_e2: F = Mo::sample(rng); + // Sample VMV blinds (zero in Transparent, random in ZK) + let (r_c, r_d2, r_e1, r_e2): (F, F, F, F) = ( + Mo::sample(rng), + Mo::sample(rng), + Mo::sample(rng), + Mo::sample(rng), + ); let g2_fin = &setup.g2_vec[0]; @@ -142,9 +144,8 @@ where let c = Mo::mask(E::pair(&t_vec_v, g2_fin), &setup.ht, &r_c); // D₂ = e(⟨Γ₁[sigma], v_vec⟩, Γ2,fin) + r_d2·HT - let g1_bases = 
&setup.g1_vec[..1 << sigma]; let d2 = Mo::mask( - E::pair(&M1::msm(g1_bases, &v_vec), g2_fin), + E::pair(&M1::msm(&setup.g1_vec[..1 << sigma], &v_vec), g2_fin), &setup.ht, &r_d2, ); @@ -158,25 +159,19 @@ where transcript.append_serde(b"vmv_d2", &vmv_message.d2); transcript.append_serde(b"vmv_e1", &vmv_message.e1); - // ZK mode: compute y, y_com, E2, and sigma proofs #[cfg(feature = "zk")] - let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2, zk_y_blinding) = + let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2, zk_r_y) = if std::any::TypeId::of::() == std::any::TypeId::of::() { use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; - let y = polynomial.evaluate(point); let r_y: F = Mo::sample(rng); - let e2 = Mo::mask(g2_fin.scale(&y), &setup.h2, &r_e2); let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); - transcript.append_serde(b"vmv_e2", &e2); transcript.append_serde(b"vmv_y_com", &y_com); - - let sigma1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); - let sigma2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript, rng); - - (Some(e2), Some(y_com), Some(sigma1), Some(sigma2), Some(r_y)) + let s1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); + let s2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript, rng); + (Some(e2), Some(y_com), Some(s1), Some(s2), Some(r_y)) } else { (None, None, None, None, None) }; @@ -266,10 +261,8 @@ where #[cfg(feature = "zk")] scalar_product_proof, }; - #[cfg(feature = "zk")] - return Ok((proof, zk_y_blinding)); - + return Ok((proof, zk_r_y)); #[cfg(not(feature = "zk"))] Ok((proof, None)) } @@ -340,32 +333,21 @@ where transcript.append_serde(b"vmv_d2", &vmv_message.d2); transcript.append_serde(b"vmv_e1", &vmv_message.e1); - // Determine E2 based on proof mode (ZK vs transparent) #[cfg(feature = "zk")] - let (e2, is_zk) = if let (Some(proof_e2), Some(y_com)) = (&proof.e2, &proof.y_com) { + let (e2, is_zk) = if let (Some(pe2), Some(yc)) = (&proof.e2, &proof.y_com) 
{ use crate::reduce_and_fold::{verify_sigma1_proof, verify_sigma2_proof}; - - transcript.append_serde(b"vmv_e2", proof_e2); - transcript.append_serde(b"vmv_y_com", y_com); - - if let Some(ref sigma1) = proof.sigma1_proof { - verify_sigma1_proof::(proof_e2, y_com, sigma1, &setup, transcript)?; + transcript.append_serde(b"vmv_e2", pe2); + transcript.append_serde(b"vmv_y_com", yc); + if let Some(ref s) = proof.sigma1_proof { + verify_sigma1_proof::(pe2, yc, s, &setup, transcript)?; } - if let Some(ref sigma2) = proof.sigma2_proof { - verify_sigma2_proof::( - &vmv_message.e1, - &vmv_message.d2, - sigma2, - &setup, - transcript, - )?; + if let Some(ref s) = proof.sigma2_proof { + verify_sigma2_proof::(&vmv_message.e1, &vmv_message.d2, s, &setup, transcript)?; } - - (*proof_e2, true) + (*pe2, true) } else { (setup.g2_0.scale(&evaluation), false) }; - #[cfg(not(feature = "zk"))] let (e2, _is_zk) = (setup.g2_0.scale(&evaluation), false); @@ -422,27 +404,28 @@ where let gamma = transcript.challenge_scalar(b"gamma"); - // ZK mode: verify with scalar product proof #[cfg(feature = "zk")] if is_zk { if let Some(ref sp) = proof.scalar_product_proof { - transcript.append_serde(b"sigma_p1", &sp.p1); - transcript.append_serde(b"sigma_p2", &sp.p2); - transcript.append_serde(b"sigma_q", &sp.q); - transcript.append_serde(b"sigma_r", &sp.r); + for (l, v) in [ + (b"sigma_p1" as &[u8], &sp.p1), + (b"sigma_p2", &sp.p2), + (b"sigma_q", &sp.q), + (b"sigma_r", &sp.r), + ] { + transcript.append_serde(l, v); + } let c = transcript.challenge_scalar(b"sigma_c"); - transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); - let d = transcript.challenge_scalar(b"d"); - - return verifier_state.verify_final_zk(sp, &c, &d); + return verifier_state.verify_final_zk(sp, &c, &transcript.challenge_scalar(b"d")); } } - transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", 
&proof.final_message.e2); - - let d = transcript.challenge_scalar(b"d"); - verifier_state.verify_final(&proof.final_message, &gamma, &d) + verifier_state.verify_final( + &proof.final_message, + &gamma, + &transcript.challenge_scalar(b"d"), + ) } diff --git a/src/mode.rs b/src/mode.rs index d2453dd..8d10863 100644 --- a/src/mode.rs +++ b/src/mode.rs @@ -1,5 +1,4 @@ //! Mode trait for transparent vs zero-knowledge proofs. - use crate::primitives::arithmetic::{Field, Group}; /// Determines whether protocol messages are blinded (ZK) or unblinded (transparent). @@ -12,7 +11,6 @@ pub trait Mode: 'static { /// Transparent mode: no blinding, non-hiding proofs. pub struct Transparent; - impl Mode for Transparent { fn sample(_rng: &mut R) -> F { F::zero() @@ -25,7 +23,6 @@ impl Mode for Transparent { /// Zero-knowledge mode: samples blinds from RNG for hiding proofs. #[cfg(feature = "zk")] pub struct ZK; - #[cfg(feature = "zk")] impl Mode for ZK { fn sample(rng: &mut R) -> F { diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index 7331398..ad13b06 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -176,10 +176,7 @@ where r_e1: Scalar, r_e2: Scalar, ) { - self.r_c = r_c; - self.r_d2 = r_d2; - self.r_e1 = r_e1; - self.r_e2 = r_e2; + (self.r_c, self.r_d2, self.r_e1, self.r_e2) = (r_c, r_d2, r_e1, r_e2); } /// Compute first reduce message for current round @@ -214,16 +211,16 @@ where self.round_d1 = [M::sample(rng), M::sample(rng)]; self.round_d2 = [M::sample(rng), M::sample(rng)]; - // Compute D values: multi-pairings between v-vectors and generators - // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ - g2_prime is from setup, use cached version + // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ + let ht = &self.setup.ht; let d1_left = M::mask( E::multi_pair_g2_setup(v1_l, g2_prime), - &self.setup.ht, + ht, &self.round_d1[0], ); let d1_right = M::mask( E::multi_pair_g2_setup(v1_r, g2_prime), - &self.setup.ht, + ht, &self.round_d1[1], ); @@ -241,8 +238,8 @@ where 
E::multi_pair_g1_setup(g1_prime, v2_r), ) }; - let d2_left = M::mask(d2_left_base, &self.setup.ht, &self.round_d2[0]); - let d2_right = M::mask(d2_right_base, &self.setup.ht, &self.round_d2[1]); + let d2_left = M::mask(d2_left_base, ht, &self.round_d2[0]); + let d2_right = M::mask(d2_right_base, ht, &self.round_d2[1]); // Compute E values for extended protocol: MSMs with scalar vectors // E₁β = ⟨Γ₁, s₂⟩ @@ -313,11 +310,10 @@ where self.round_e1 = [M::sample(rng), M::sample(rng)]; self.round_e2 = [M::sample(rng), M::sample(rng)]; - // Compute C terms: cross products of v-vectors - // C₊ = ⟨v₁L, v₂R⟩ - let c_plus = M::mask(E::multi_pair(v1_l, v2_r), &self.setup.ht, &self.round_c[0]); - // C₋ = ⟨v₁R, v₂L⟩ - let c_minus = M::mask(E::multi_pair(v1_r, v2_l), &self.setup.ht, &self.round_c[1]); + // C₊ = ⟨v₁L, v₂R⟩, C₋ = ⟨v₁R, v₂L⟩ + let ht = &self.setup.ht; + let c_plus = M::mask(E::multi_pair(v1_l, v2_r), ht, &self.round_c[0]); + let c_minus = M::mask(E::multi_pair(v1_r, v2_l), ht, &self.round_c[1]); // Compute E terms for extended protocol: cross products with scalars let e1_plus = M::mask(M1::msm(v1_l, s2_r), &self.setup.h1, &self.round_e1[0]); @@ -411,55 +407,50 @@ where ScalarProductMessage { e1, e2 } } - /// Generate ZK scalar product proof (Σ-protocol). - /// - /// Must be called BEFORE `compute_final_message` because that modifies r_c. + /// Generate ZK scalar product proof. Must be called BEFORE `compute_final_message`. 
#[cfg(feature = "zk")] pub fn scalar_product_proof, R: rand_core::RngCore>( &self, transcript: &mut T, rng: &mut R, ) -> ScalarProductProof, E::GT> { - debug_assert_eq!(self.v1.len(), 1); - debug_assert_eq!(self.v2.len(), 1); - let (v1, v2) = (self.v1[0], self.v2[0]); let (g1, g2) = (self.setup.g1_vec[0], self.setup.g2_vec[0]); - - let mut rand = || -> Scalar { Field::random(rng) }; - let (s_d1, s_d2) = (rand(), rand()); - let (d1, d2) = (g1.scale(&s_d1), g2.scale(&s_d2)); - let (r_p1, r_p2, r_q, r_r) = (rand(), rand(), rand(), rand()); - - let p1 = E::pair(&d1, &g2) + self.setup.ht.scale(&r_p1); - let p2 = E::pair(&g1, &d2) + self.setup.ht.scale(&r_p2); - let q = E::pair(&d1, &v2) + E::pair(&v1, &d2) + self.setup.ht.scale(&r_q); - let r = E::pair(&d1, &d2) + self.setup.ht.scale(&r_r); - - transcript.append_serde(b"sigma_p1", &p1); - transcript.append_serde(b"sigma_p2", &p2); - transcript.append_serde(b"sigma_q", &q); - transcript.append_serde(b"sigma_r", &r); + let ht = &self.setup.ht; + let mut r = || -> Scalar { Field::random(rng) }; + let (sd1, sd2) = (r(), r()); + let (d1, d2) = (g1.scale(&sd1), g2.scale(&sd2)); + let (rp1, rp2, rq, rr) = (r(), r(), r(), r()); + let p1 = E::pair(&d1, &g2) + ht.scale(&rp1); + let p2 = E::pair(&g1, &d2) + ht.scale(&rp2); + let q = E::pair(&d1, &v2) + E::pair(&v1, &d2) + ht.scale(&rq); + let rr_val = E::pair(&d1, &d2) + ht.scale(&rr); + for (label, val) in [ + (b"sigma_p1" as &[u8], &p1), + (b"sigma_p2", &p2), + (b"sigma_q", &q), + (b"sigma_r", &rr_val), + ] { + transcript.append_serde(label, val); + } let c = transcript.challenge_scalar(b"sigma_c"); - - let c_sq = c * c; ScalarProductProof { p1, p2, q, - r, + r: rr_val, e1: d1 + v1.scale(&c), e2: d2 + v2.scale(&c), - r1: r_p1 + c * self.r_d1, - r2: r_p2 + c * self.r_d2, - r3: r_r + c * r_q + c_sq * self.r_c, + r1: rp1 + c * self.r_d1, + r2: rp2 + c * self.r_d2, + r3: rr + c * rq + c * c * self.r_c, } } } /// Generate Sigma1 proof: proves knowledge of (y, rE2, ry). 
#[cfg(feature = "zk")] -pub fn generate_sigma1_proof, R: rand_core::RngCore>( +pub fn generate_sigma1_proof( y: &Scalar, r_e2: &Scalar, r_y: &Scalar, @@ -468,12 +459,18 @@ pub fn generate_sigma1_proof, R: rand_ rng: &mut R, ) -> Sigma1Proof> where + E: PairingCurve, + T: Transcript, + R: rand_core::RngCore, Scalar: Field, E::G2: Group>, { let (g2_fin, g1_fin) = (&setup.g2_vec[0], &setup.g1_vec[0]); - let (k1, k2, k3): (Scalar, _, _) = - (Field::random(rng), Field::random(rng), Field::random(rng)); + let (k1, k2, k3) = ( + Scalar::::random(rng), + Scalar::::random(rng), + Scalar::::random(rng), + ); let a1 = g2_fin.scale(&k1) + setup.h2.scale(&k2); let a2 = g1_fin.scale(&k1) + setup.h1.scale(&k3); transcript.append_serde(b"sigma1_a1", &a1); @@ -515,7 +512,7 @@ where /// Generate Sigma2 proof: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2). #[cfg(feature = "zk")] -pub fn generate_sigma2_proof, R: rand_core::RngCore>( +pub fn generate_sigma2_proof( t1: &Scalar, t2: &Scalar, setup: &ProverSetup, @@ -523,11 +520,14 @@ pub fn generate_sigma2_proof, R: rand_ rng: &mut R, ) -> Sigma2Proof, E::GT> where + E: PairingCurve, + T: Transcript, + R: rand_core::RngCore, Scalar: Field, E::G2: Group>, E::GT: Group>, { - let (k1, k2): (Scalar, _) = (Field::random(rng), Field::random(rng)); + let (k1, k2) = (Scalar::::random(rng), Scalar::::random(rng)); let a = E::pair( &setup.h1, &(setup.g2_vec[0].scale(&k1) + setup.h2.scale(&k2)), @@ -767,17 +767,15 @@ impl DoryVerifierState { d: &Scalar, ) -> Result<(), DoryError> where - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, - ::Scalar: Field, + E::G2: Group>, + E::GT: Group>, + Scalar: Field, { let d_inv = (*d).inv().expect("d must be invertible"); - let (c_sq, lhs) = ( - *c * *c, - E::pair( - &(proof.e1 + self.setup.g1_0.scale(d)), - &(proof.e2 + self.setup.g2_0.scale(&d_inv)), - ), + let c_sq = *c * *c; + let lhs = E::pair( + &(proof.e1 + self.setup.g1_0.scale(d)), + &(proof.e2 + self.setup.g2_0.scale(&d_inv)), ); let mut rhs = 
self.setup.chi[0] + proof.r + proof.q.scale(c) + self.c.scale(&c_sq); rhs = rhs + proof.p2.scale(d) + self.d2.scale(&(*d * *c)); @@ -787,7 +785,6 @@ impl DoryVerifierState { .setup .ht .scale(&(proof.r3 + *d * proof.r2 + d_inv * proof.r1)); - if lhs == rhs { Ok(()) } else { diff --git a/src/setup.rs b/src/setup.rs index 2033d1d..6883f61 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -79,7 +79,6 @@ pub struct VerifierSetup { /// e(H1, Γ2,fin) - precomputed for ZK verification #[cfg(feature = "zk")] pub h1_g2_fin: E::GT, - /// Maximum log₂ of polynomial size supported pub max_log_n: usize, } @@ -243,10 +242,13 @@ fn get_storage_path(max_log_n: usize) -> Option { cache_directory.map(|mut path| { path.push("dory"); #[cfg(feature = "zk")] - let filename = format!("dory_{max_log_n}_zk.urs"); + { + path.push(format!("dory_{max_log_n}_zk.urs")); + } #[cfg(not(feature = "zk"))] - let filename = format!("dory_{max_log_n}.urs"); - path.push(filename); + { + path.push(format!("dory_{max_log_n}.urs")); + } path }) } diff --git a/zk.md b/zk.md deleted file mode 100644 index 174684d..0000000 --- a/zk.md +++ /dev/null @@ -1,296 +0,0 @@ -# Zero-Knowledge Dory Implementation Guide - -Integrate ZK into existing functions via `Mode` trait. No new protocol functions. 
- ---- - -## Part 1: Mode Trait - -```rust -// src/mode.rs - -pub trait Mode: 'static { - fn sample(transcript: &mut T, label: &[u8]) -> F; - fn mask>(value: G, base: &G, blind: &F) -> G; -} - -pub struct Transparent; - -impl Mode for Transparent { - fn sample(_: &mut T, _: &[u8]) -> F { F::ZERO } - fn mask>(value: G, _: &G, _: &F) -> G { value } -} - -#[cfg(feature = "zk")] -pub struct ZK; - -#[cfg(feature = "zk")] -impl Mode for ZK { - fn sample(transcript: &mut T, label: &[u8]) -> F { - transcript.challenge_scalar(label) - } - fn mask>(value: G, base: &G, blind: &F) -> G { - value.add(&base.scale(blind)) - } -} -``` - ---- - -## Part 2: Existing Struct Changes - -```diff -// src/reduce_and_fold.rs - --pub struct DoryProverState<'a, E: PairingCurve> { -+pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { - pub v1: Vec, - pub v2: Vec, - pub v2_scalars: Option>, - pub s1: Vec, - pub s2: Vec, - pub num_rounds: usize, - pub setup: &'a ProverSetup, -+ -+ // Blinds (zero for Transparent, accumulated for ZK) -+ pub r_c: E::Scalar, -+ pub r_d1: E::Scalar, -+ pub r_d2: E::Scalar, -+ pub r_e1: E::Scalar, -+ pub r_e2: E::Scalar, -+ _mode: PhantomData, - } -``` - ---- - -## Part 3: Existing `reduce_round` Changes - -```diff --impl<'a, E: PairingCurve> DoryProverState<'a, E> { -+impl<'a, E: PairingCurve, M: Mode> DoryProverState<'a, E, M> { - pub fn reduce_round>( - &mut self, - transcript: &mut T, - ) -> (...) 
{ - let half = self.v1.len() / 2; - let (v1_l, v1_r) = self.v1.split_at(half); - let (v2_l, v2_r) = self.v2.split_at(half); - let g2_prime = &self.setup.g2_vec[..half]; - let g1_prime = &self.setup.g1_vec[..half]; - -+ // ZK: sample blinds -+ let r_d1_l = M::sample(transcript, b"r_d1_l"); -+ let r_d1_r = M::sample(transcript, b"r_d1_r"); -+ let r_d2_l = M::sample(transcript, b"r_d2_l"); -+ let r_d2_r = M::sample(transcript, b"r_d2_r"); - -- let d1_left = E::multi_pair(v1_l, g2_prime); -- let d1_right = E::multi_pair(v1_r, g2_prime); -- let d2_left = E::multi_pair(g1_prime, v2_l); -- let d2_right = E::multi_pair(g1_prime, v2_r); -+ // ZK: mask pairing results -+ let d1_left = M::mask(E::multi_pair(v1_l, g2_prime), &self.setup.ht, &r_d1_l); -+ let d1_right = M::mask(E::multi_pair(v1_r, g2_prime), &self.setup.ht, &r_d1_r); -+ let d2_left = M::mask(E::multi_pair(g1_prime, v2_l), &self.setup.ht, &r_d2_l); -+ let d2_right = M::mask(E::multi_pair(g1_prime, v2_r), &self.setup.ht, &r_d2_r); - - // ... e1_beta, e2_beta unchanged ... - - let first_msg = FirstReduceMessage { d1_left, d1_right, d2_left, d2_right, e1_beta, e2_beta }; - transcript.append_serde(b"first", &first_msg); - - let beta = transcript.challenge_scalar(b"beta"); - let beta_inv = beta.inv().unwrap(); - - // ... vector updates unchanged ... 
- -+ // ZK: accumulate blinds -+ self.r_c = self.r_c -+ .add(&r_d2_l.add(&r_d2_r).mul(&beta)) -+ .add(&r_d1_l.add(&r_d1_r).mul(&beta_inv)); - -+ // ZK: sample second round blinds -+ let r_c_plus = M::sample(transcript, b"r_c+"); -+ let r_c_minus = M::sample(transcript, b"r_c-"); -+ let r_e1_plus = M::sample(transcript, b"r_e1+"); -+ let r_e1_minus = M::sample(transcript, b"r_e1-"); -+ let r_e2_plus = M::sample(transcript, b"r_e2+"); -+ let r_e2_minus = M::sample(transcript, b"r_e2-"); - -- let c_plus = E::multi_pair(v1_l, v2_r); -- let c_minus = E::multi_pair(v1_r, v2_l); -- let e1_plus = G1Routines::msm(v1_l, s2_r); -- let e1_minus = G1Routines::msm(v1_r, s2_l); -- let e2_plus = G2Routines::msm(v2_r, s1_l); -- let e2_minus = G2Routines::msm(v2_l, s1_r); -+ // ZK: mask cross terms -+ let c_plus = M::mask(E::multi_pair(v1_l, v2_r), &self.setup.ht, &r_c_plus); -+ let c_minus = M::mask(E::multi_pair(v1_r, v2_l), &self.setup.ht, &r_c_minus); -+ let e1_plus = M::mask(G1Routines::msm(v1_l, s2_r), &self.setup.h1, &r_e1_plus); -+ let e1_minus = M::mask(G1Routines::msm(v1_r, s2_l), &self.setup.h1, &r_e1_minus); -+ let e2_plus = M::mask(G2Routines::msm(v2_r, s1_l), &self.setup.h2, &r_e2_plus); -+ let e2_minus = M::mask(G2Routines::msm(v2_l, s1_r), &self.setup.h2, &r_e2_minus); - - // ... second_msg, alpha challenge, vector folding unchanged ... 
- -+ // ZK: update accumulated blinds -+ self.r_c = self.r_c -+ .add(&r_c_plus.mul(&alpha)) -+ .add(&r_c_minus.mul(&alpha_inv)); -+ self.r_d1 = r_d1_l.mul(&alpha).add(&r_d1_r); -+ self.r_d2 = r_d2_l.mul(&alpha_inv).add(&r_d2_r); -+ self.r_e1 = self.r_e1 -+ .add(&r_e1_plus.mul(&alpha)) -+ .add(&r_e1_minus.mul(&alpha_inv)); -+ self.r_e2 = self.r_e2 -+ .add(&r_e2_plus.mul(&alpha)) -+ .add(&r_e2_minus.mul(&alpha_inv)); - - self.num_rounds -= 1; - (first_msg, second_msg) - } -``` - ---- - -## Part 4: Existing `fold_scalars` Changes - -```diff - pub fn fold_scalars(&mut self, gamma: &E::Scalar) { - let gamma_inv = gamma.inv().unwrap(); - self.v1[0] = self.v1[0].add(&self.setup.h1.scale(&gamma.mul(&self.s1[0]))); - self.v2[0] = self.v2[0].add(&self.setup.h2.scale(&gamma_inv.mul(&self.s2[0]))); -+ -+ // ZK: final blind accumulation -+ self.r_c = self.r_c -+ .add(&self.r_e2.mul(gamma)) -+ .add(&self.r_e1.mul(&gamma_inv)); - } -``` - ---- - -## Part 5: Existing `compute_vmv_message` Changes - -```diff --pub fn compute_vmv_message( -+pub fn compute_vmv_message>( - row_commitments: &[E::G1], - v_vec: &[E::Scalar], - left_vec: &[E::Scalar], - setup: &ProverSetup, - transcript: &mut T, --) -> VMVMessage -+) -> (VMVMessage, E::Scalar, E::Scalar, E::Scalar, E::Scalar) - where - E: PairingCurve, - T: Transcript, - { -+ let r_c = M::sample(transcript, b"vmv_r_c"); -+ let r_d2 = M::sample(transcript, b"vmv_r_d2"); -+ let r_e1 = M::sample(transcript, b"vmv_r_e1"); -+ let r_e2 = M::sample(transcript, b"vmv_r_e2"); - - let v_dot_t0 = G1Routines::msm(row_commitments, v_vec); -- let c = E::pair(&v_dot_t0, &setup.g2_vec[0]); -+ let c = M::mask(E::pair(&v_dot_t0, &setup.g2_vec[0]), &setup.ht, &r_c); - - let g1_dot_v = G1Routines::msm(&setup.g1_vec[..v_vec.len()], v_vec); -- let d2 = E::pair(&g1_dot_v, &setup.g2_vec[0]); -+ let d2 = M::mask(E::pair(&g1_dot_v, &setup.g2_vec[0]), &setup.ht, &r_d2); - -- let e1 = G1Routines::msm(row_commitments, left_vec); -+ let e1 = 
M::mask(G1Routines::msm(row_commitments, left_vec), &setup.h1, &r_e1); - -- VMVMessage { c, d2, e1 } -+ (VMVMessage { c, d2, e1 }, r_c, r_d2, r_e1, r_e2) - } -``` - ---- - -## Part 6: Existing `scalar_product` Changes - -```diff - pub fn scalar_product(&self) -> (E::G1, E::G2) { - (self.v1[0], self.v2[0]) - } -+ -+ // ZK-only: Σ-protocol for final step -+ #[cfg(feature = "zk")] -+ pub fn scalar_product_zk>(&self, transcript: &mut T) -> ScalarProductProof -+ where -+ M: Mode, // Only callable when M = ZK -+ { -+ // ... Σ-protocol using self.r_c, self.r_d1, self.r_d2 ... -+ } -``` - ---- - -## Part 7: Usage - -```rust -// Transparent (default) - unchanged call sites -let state: DoryProverState = DoryProverState::new(...); -// or explicitly: -let state: DoryProverState = DoryProverState::new(...); - -// ZK mode -#[cfg(feature = "zk")] -let state: DoryProverState = DoryProverState::new_zk(...); -``` - ---- - -## Part 8: Summary - -| Mode | `M::sample()` | `M::mask()` | -|------|---------------|-------------| -| `Transparent` | `F::ZERO` | `value` (identity) | -| `ZK` | `transcript.challenge_scalar()` | `value + base*blind` | - -**Integration pattern**: -1. Add `M: Mode` parameter to existing structs/functions -2. Insert `M::sample()` before operations needing blinds -3. Wrap computed values with `M::mask(value, base, &blind)` -4. Add blind accumulation after challenges - -For `Transparent`: sample returns zero, mask returns value unchanged, accumulation is `0 + 0*x = 0`. - ---- - -## Part 9: ZK-Only Additions - -Only these are truly new (feature-gated): - -```rust -#[cfg(feature = "zk")] -pub struct ScalarProductProof { ... } - -#[cfg(feature = "zk")] -pub struct Sigma1Proof { ... } - -#[cfg(feature = "zk")] -pub struct Sigma2Proof { ... 
} -``` - ---- - -## Part 10: Checklist - -- [ ] Add `Mode` trait to `src/mode.rs` -- [ ] Add `M: Mode` parameter to `DoryProverState` -- [ ] Add blind fields (`r_c`, `r_d1`, `r_d2`, `r_e1`, `r_e2`) to state -- [ ] Modify `reduce_round`: insert `M::sample()` and `M::mask()` calls -- [ ] Modify `fold_scalars`: add blind accumulation -- [ ] Modify `compute_vmv_message`: add sampling and masking -- [ ] Add `scalar_product_zk` impl for `DoryProverState` -- [ ] Add `ScalarProductProof`, `Sigma1Proof`, `Sigma2Proof` (ZK only) - ---- - -## References - -- IACR 2020/1274: "Dory: Efficient, Transparent arguments for Generalised Inner Products and Polynomial Commitments" From 9d5ae48f68b56e4352cc1ea0d2c8a3a6f5479df6 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 11:41:10 -0500 Subject: [PATCH 07/16] fix: zk serialization --- tests/arkworks/mod.rs | 1 + tests/arkworks/serialization.rs | 241 ++++++++++++++++++++++++++++++++ 2 files changed, 242 insertions(+) create mode 100644 tests/arkworks/serialization.rs diff --git a/tests/arkworks/mod.rs b/tests/arkworks/mod.rs index 43aecde..ad585fd 100644 --- a/tests/arkworks/mod.rs +++ b/tests/arkworks/mod.rs @@ -16,6 +16,7 @@ pub mod evaluation; pub mod homomorphic; pub mod integration; pub mod non_square; +pub mod serialization; pub mod setup; pub mod soundness; #[cfg(feature = "zk")] diff --git a/tests/arkworks/serialization.rs b/tests/arkworks/serialization.rs new file mode 100644 index 0000000..08d331d --- /dev/null +++ b/tests/arkworks/serialization.rs @@ -0,0 +1,241 @@ +//! 
Proof serialization round-trip tests + +use super::*; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use dory_pcs::backends::arkworks::ArkDoryProof; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{prove, verify, Transparent}; + +fn make_transparent_proof() -> ( + ArkDoryProof, + dory_pcs::backends::arkworks::ArkGT, + Vec, +) { + let mut rng = rand::thread_rng(); + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + &poly, + &point, + tier_1, + 2, + 2, + &setup, + &mut transcript, + &mut rng, + ) + .unwrap(); + + // Sanity: verify before serialization + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + eval, + &point, + &proof, + verifier_setup, + &mut vt, + ) + .unwrap(); + + (proof, tier_2, point) +} + +#[test] +fn test_transparent_proof_roundtrip_compressed() { + let (proof, _, _) = make_transparent_proof(); + + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + assert_eq!(buf.len(), proof.compressed_size()); + + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + + assert_eq!(proof.nu, decoded.nu); + assert_eq!(proof.sigma, decoded.sigma); + assert_eq!(proof.first_messages.len(), decoded.first_messages.len()); + assert_eq!(proof.second_messages.len(), decoded.second_messages.len()); +} + +#[test] +fn test_transparent_proof_roundtrip_uncompressed() { + let (proof, _, _) = make_transparent_proof(); + + let mut buf = Vec::new(); + proof.serialize_uncompressed(&mut buf).unwrap(); + assert_eq!(buf.len(), proof.uncompressed_size()); + + let decoded = ArkDoryProof::deserialize_uncompressed(&buf[..]).unwrap(); + assert_eq!(proof.nu, decoded.nu); 
+ assert_eq!(proof.sigma, decoded.sigma); +} + +#[test] +fn test_transparent_proof_roundtrip_verifies() { + let mut rng = rand::thread_rng(); + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + &poly, + &point, + tier_1, + 2, + 2, + &setup, + &mut transcript, + &mut rng, + ) + .unwrap(); + + // Round-trip through serialization + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + + // Verify the deserialized proof + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + eval, + &point, + &decoded, + verifier_setup, + &mut vt, + ) + .unwrap(); +} + +#[cfg(feature = "zk")] +mod zk_roundtrip { + use super::*; + use dory_pcs::{prove, verify, ZK}; + + fn make_zk_proof() -> ( + ArkDoryProof, + dory_pcs::backends::arkworks::ArkGT, + Vec, + ) { + let mut rng = rand::thread_rng(); + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + tier_1, + 2, + 2, + &setup, + &mut transcript, + &mut rng, + ) + .unwrap(); + + // Sanity: ZK fields must be populated + assert!(proof.e2.is_some()); + assert!(proof.y_com.is_some()); + assert!(proof.sigma1_proof.is_some()); + assert!(proof.sigma2_proof.is_some()); + assert!(proof.scalar_product_proof.is_some()); + + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, 
TestG2Routines, _>( + tier_2, + eval, + &point, + &proof, + verifier_setup, + &mut vt, + ) + .unwrap(); + + (proof, tier_2, point) + } + + #[test] + fn test_zk_proof_roundtrip_compressed() { + let (proof, _, _) = make_zk_proof(); + + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + assert_eq!(buf.len(), proof.compressed_size()); + + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + assert_eq!(proof.nu, decoded.nu); + assert_eq!(proof.sigma, decoded.sigma); + assert!(decoded.e2.is_some()); + assert!(decoded.y_com.is_some()); + assert!(decoded.sigma1_proof.is_some()); + assert!(decoded.sigma2_proof.is_some()); + assert!(decoded.scalar_product_proof.is_some()); + } + + #[test] + fn test_zk_proof_roundtrip_verifies() { + let mut rng = rand::thread_rng(); + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + &poly, + &point, + tier_1, + 2, + 2, + &setup, + &mut transcript, + &mut rng, + ) + .unwrap(); + + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + eval, + &point, + &decoded, + verifier_setup, + &mut vt, + ) + .unwrap(); + } + + #[test] + fn test_zk_proof_larger_size_than_transparent() { + let (zk_proof, _, _) = make_zk_proof(); + let (transparent_proof, _, _) = super::make_transparent_proof(); + + let zk_size = zk_proof.compressed_size(); + let transparent_size = transparent_proof.compressed_size(); + + assert!( + zk_size > transparent_size, + "ZK proof ({zk_size}) should be larger than transparent ({transparent_size})" + 
); + } +} From 608401cd7e933f6a6723cbd388663c49876304c0 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 12:45:48 -0500 Subject: [PATCH 08/16] refactor: commit_zk -> commit --- .github/workflows/ci.yml | 25 +- Cargo.toml | 4 + README.md | 6 +- benches/arkworks_proof.rs | 18 +- examples/basic_e2e.rs | 3 +- examples/homomorphic.rs | 2 +- examples/homomorphic_mixed_sizes.rs | 6 +- examples/non_square.rs | 3 +- examples/zk_e2e.rs | 109 +++++++ src/backends/arkworks/ark_poly.rs | 72 ++--- src/evaluation_proof.rs | 39 ++- src/lib.rs | 7 +- src/mode.rs | 4 + src/primitives/poly.rs | 31 +- src/reduce_and_fold.rs | 24 +- tests/arkworks/commitment.rs | 39 ++- tests/arkworks/evaluation.rs | 28 +- tests/arkworks/homomorphic.rs | 4 +- tests/arkworks/integration.rs | 28 +- tests/arkworks/non_square.rs | 16 +- tests/arkworks/serialization.rs | 16 +- tests/arkworks/soundness.rs | 7 +- tests/arkworks/zk.rs | 30 +- tests/arkworks/zk_statistical.rs | 47 ++- zk.md | 449 ++++++++++++++++++++++++++++ 25 files changed, 811 insertions(+), 206 deletions(-) create mode 100644 examples/zk_e2e.rs create mode 100644 zk.md diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1b83b4c..5d41e04 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,13 +27,21 @@ jobs: clippy: runs-on: ubuntu-latest + strategy: + matrix: + include: + - name: transparent + features: backends,parallel,cache,disk-persistence + - name: zk + features: backends,parallel,cache,disk-persistence,zk + name: Clippy (${{ matrix.name }}) steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: components: clippy - - name: cargo clippy --all-features - run: cargo clippy --all --all-targets --all-features + - name: cargo clippy --features ${{ matrix.features }} + run: cargo clippy --all --all-targets --features ${{ matrix.features }} - name: cargo clippy --no-default-features run: cargo clippy --all --all-targets --no-default-features @@ -50,14 
+58,21 @@ jobs: test: runs-on: ubuntu-latest - name: Test + strategy: + matrix: + include: + - name: transparent + features: backends,parallel,cache,disk-persistence + - name: zk + features: backends,parallel,cache,disk-persistence,zk + name: Test (${{ matrix.name }}) steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 - name: Install nextest uses: taiki-e/install-action@nextest - name: Run tests - run: cargo nextest run --all-features + run: cargo nextest run --features ${{ matrix.features }} examples: runs-on: ubuntu-latest @@ -73,3 +88,5 @@ jobs: run: cargo run --example non_square --features backends - name: Run homomorphic_mixed_sizes example run: cargo run --example homomorphic_mixed_sizes --features backends + - name: Run zk_e2e example + run: cargo run --example zk_e2e --features backends,zk diff --git a/Cargo.toml b/Cargo.toml index 9a6a11a..1940eaf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -89,6 +89,10 @@ required-features = ["backends"] name = "homomorphic_mixed_sizes" required-features = ["backends"] +[[example]] +name = "zk_e2e" +required-features = ["backends", "zk"] + [[bench]] name = "arkworks_proof" harness = false diff --git a/README.md b/README.md index 8d9cce0..01db1b1 100644 --- a/README.md +++ b/README.md @@ -96,7 +96,7 @@ This property enables efficient proof aggregation and batch verification. See `e ## Usage ```rust -use dory_pcs::{setup, prove, verify}; +use dory_pcs::{setup, prove, verify, Transparent}; use dory_pcs::backends::arkworks::{ BN254, G1Routines, G2Routines, ArkworksPolynomial, Blake2bTranscript }; @@ -119,8 +119,8 @@ fn main() -> Result<(), Box> { let sigma = 4; // log₂(cols) = 4 → 16 columns // 4. Commit to polynomial to get tier-2 commitment and row commitments - let (tier_2, row_commitments) = polynomial - .commit::(nu, sigma, &prover_setup)?; + let (tier_2, row_commitments, _) = polynomial + .commit::(nu, sigma, &prover_setup, &mut rng)?; // 5. 
Create evaluation proof using row commitments let mut prover_transcript = Blake2bTranscript::new(b"dory-example"); diff --git a/benches/arkworks_proof.rs b/benches/arkworks_proof.rs index 7dd0e09..2c6bdd5 100644 --- a/benches/arkworks_proof.rs +++ b/benches/arkworks_proof.rs @@ -62,10 +62,12 @@ fn bench_commitment(c: &mut Criterion) { c.bench_function("commitment_2^26_coefficients", |b| { b.iter(|| { - poly.commit::( + let mut rng = thread_rng(); + poly.commit::( black_box(nu), black_box(sigma), black_box(&prover_setup), + black_box(&mut rng), ) .unwrap() }) @@ -76,9 +78,10 @@ fn bench_prove(c: &mut Criterion) { let (poly, point, prover_setup, _) = setup_benchmark_data(); let nu = 13; let sigma = 13; + let mut rng = thread_rng(); - let (_, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (_, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); c.bench_function("prove_2^26_coefficients", |b| { @@ -104,9 +107,10 @@ fn bench_verify(c: &mut Criterion) { let (poly, point, prover_setup, verifier_setup) = setup_benchmark_data(); let nu = 13; let sigma = 13; + let mut rng = thread_rng(); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); @@ -168,8 +172,8 @@ fn bench_end_to_end(c: &mut Criterion) { let poly = ArkworksPolynomial::new(coefficients); // Commit - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); // Evaluate diff --git a/examples/basic_e2e.rs b/examples/basic_e2e.rs index 8b597ab..9815a4c 100644 --- a/examples/basic_e2e.rs +++ b/examples/basic_e2e.rs @@ -50,7 +50,8 @@ fn main() -> Result<(), Box> { // Step 3: Commit info!("3. 
Computing polynomial commitment..."); - let (tier_2, tier_1) = poly.commit::(nu, sigma, &prover_setup)?; + let (tier_2, tier_1, _) = + poly.commit::(nu, sigma, &prover_setup, &mut rng)?; info!( " ✓ Tier-1 commitment: {} row commitments (G1)", tier_1.len() diff --git a/examples/homomorphic.rs b/examples/homomorphic.rs index a8d8238..3f5cbb0 100644 --- a/examples/homomorphic.rs +++ b/examples/homomorphic.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), Box> { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup) + poly.commit::(nu, sigma, &prover_setup, &mut rng) .unwrap() }) .collect(); diff --git a/examples/homomorphic_mixed_sizes.rs b/examples/homomorphic_mixed_sizes.rs index d55e5e8..708c85a 100644 --- a/examples/homomorphic_mixed_sizes.rs +++ b/examples/homomorphic_mixed_sizes.rs @@ -35,10 +35,10 @@ fn main() -> Result<(), Box> { info!("Poly2: {:?}", poly2); let commitment1 = poly1 - .commit::(2, 2, &prover_setup) + .commit::(2, 2, &prover_setup, &mut rng) .unwrap(); let commitment2 = poly2 - .commit::(1, 1, &prover_setup) + .commit::(1, 1, &prover_setup, &mut rng) .unwrap(); info!("✓ Commitments ready\n"); @@ -128,7 +128,7 @@ fn main() -> Result<(), Box> { info!("==========================================="); let padded_poly_commitment = padded_poly2 - .commit::(2, 2, &prover_setup) + .commit::(2, 2, &prover_setup, &mut rng) .unwrap(); assert_eq!(padded_poly_commitment.0, commitment2.0); info!("✓ Padded poly commitment matches original poly2 commitment"); diff --git a/examples/non_square.rs b/examples/non_square.rs index 40c57ed..c6c83b6 100644 --- a/examples/non_square.rs +++ b/examples/non_square.rs @@ -49,7 +49,8 @@ fn main() -> Result<(), Box> { // Step 3: Commit info!("3. 
Computing polynomial commitment..."); - let (tier_2, tier_1) = poly.commit::(nu, sigma, &prover_setup)?; + let (tier_2, tier_1, _) = + poly.commit::(nu, sigma, &prover_setup, &mut rng)?; info!( " ✓ Tier-1 commitment: {} row commitments (G1)", tier_1.len() diff --git a/examples/zk_e2e.rs b/examples/zk_e2e.rs new file mode 100644 index 0000000..ac24acc --- /dev/null +++ b/examples/zk_e2e.rs @@ -0,0 +1,109 @@ +//! Zero-knowledge end-to-end example of Dory polynomial commitment scheme +//! +//! This example demonstrates the ZK workflow where the prover generates a +//! hiding proof. The only API difference from transparent mode is switching +//! the mode type parameter from `Transparent` to `ZK`. +//! +//! In ZK mode: +//! - Protocol messages are blinded with random scalars +//! - The proof contains additional sigma and scalar-product sub-proofs +//! - The evaluation commitment (`y_com`) hides the actual evaluation value +//! +//! Matrix dimensions: 16×16 (nu=4, sigma=4, total 256 coefficients) + +use dory_pcs::backends::arkworks::{ + ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, +}; +use dory_pcs::primitives::arithmetic::Field; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{prove, setup, verify, Transparent, ZK}; +use rand::thread_rng; +use tracing::info; + +fn main() -> Result<(), Box> { + info!("Dory PCS - Zero-Knowledge End-to-End Example"); + info!("==============================================\n"); + + let mut rng = thread_rng(); + + // Step 1: Setup (identical to transparent mode) + let max_log_n = 10; + info!("1. Generating setup (max_log_n = {})...", max_log_n); + let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + info!(" Setup complete\n"); + + // Step 2: Create polynomial + let nu = 4; + let sigma = 4; + let poly_size = 1 << (nu + sigma); // 256 + let num_vars = nu + sigma; // 8 + + info!("2. 
Creating random polynomial..."); + info!(" Matrix layout: {}x{} (square)", 1 << nu, 1 << sigma); + info!(" Total coefficients: {}", poly_size); + + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let poly = ArkworksPolynomial::new(coefficients); + info!(" Polynomial created\n"); + + // Step 3: Commit (identical to transparent mode) + info!("3. Computing polynomial commitment..."); + let (tier_2, tier_1, _) = + poly.commit::(nu, sigma, &prover_setup, &mut rng)?; + info!(" Tier-1: {} row commitments", tier_1.len()); + info!(" Tier-2: final GT commitment\n"); + + // Step 4: Evaluate + let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let evaluation = poly.evaluate(&point); + info!("4. Evaluated polynomial at random point\n"); + + // Step 5: Prove in ZK mode + // The only API difference: `ZK` replaces `Transparent` as the mode parameter. + // This causes all protocol messages to be blinded with random scalars and + // generates additional sigma1, sigma2, and scalar-product sub-proofs. + info!("5. Generating ZK evaluation proof..."); + let mut prover_transcript = Blake2bTranscript::new(b"dory-zk-example"); + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, ZK, _>( + &poly, + &point, + tier_1, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + &mut rng, + )?; + info!(" Proof generated"); + info!( + " ZK sub-proofs present: e2={}, y_com={}, sigma1={}, sigma2={}, scalar_product={}", + proof.e2.is_some(), + proof.y_com.is_some(), + proof.sigma1_proof.is_some(), + proof.sigma2_proof.is_some(), + proof.scalar_product_proof.is_some(), + ); + info!( + " {} reduce rounds (logarithmic)\n", + proof.first_messages.len() + ); + + // Step 6: Verify (identical call signature to transparent mode) + // The verifier detects ZK mode from the proof's e2/y_com fields. + info!("6. 
Verifying ZK proof..."); + let mut verifier_transcript = Blake2bTranscript::new(b"dory-zk-example"); + verify::<_, BN254, G1Routines, G2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + )?; + info!(" Proof verified successfully!\n"); + + info!("=============================================="); + info!("ZK example completed successfully!"); + + Ok(()) +} diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs index a982897..8702132 100644 --- a/src/backends/arkworks/ark_poly.rs +++ b/src/backends/arkworks/ark_poly.rs @@ -4,6 +4,7 @@ use super::ark_field::ArkFr; use crate::error::DoryError; +use crate::mode::Mode; use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; use crate::primitives::poly::{MultilinearLagrange, Polynomial}; use crate::setup::ProverSetup; @@ -53,16 +54,20 @@ impl Polynomial for ArkworksPolynomial { } #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit", fields(nu, sigma, num_rows = 1 << nu, num_cols = 1 << sigma))] - fn commit( + #[allow(clippy::type_complexity)] + fn commit( &self, nu: usize, sigma: usize, setup: &ProverSetup, - ) -> Result<(E::GT, Vec), DoryError> + rng: &mut R, + ) -> Result<(E::GT, Vec, Option>), DoryError> where E: PairingCurve, + Mo: Mode, M1: DoryRoutines, E::G1: Group, + R: rand_core::RngCore, { let expected_len = 1 << (nu + sigma); if self.coefficients.len() != expected_len { @@ -74,63 +79,22 @@ impl Polynomial for ArkworksPolynomial { let num_rows = 1 << nu; let num_cols = 1 << sigma; + let g1 = &setup.g1_vec[..num_cols]; - // Tier 1: Compute row commitments - let mut row_commitments = Vec::with_capacity(num_rows); - for i in 0..num_rows { - let row_start = i * num_cols; - let row_end = row_start + num_cols; - let row = &self.coefficients[row_start..row_end]; - - let g1_bases = &setup.g1_vec[..num_cols]; - let row_commit = M1::msm(g1_bases, row); - row_commitments.push(row_commit); - } - - // Tier 
2: Compute final commitment via multi-pairing (g2_bases from setup) - let g2_bases = &setup.g2_vec[..num_rows]; - let commitment = E::multi_pair_g2_setup(&row_commitments, g2_bases); - - Ok((commitment, row_commitments)) - } + let blinds: Vec = (0..num_rows).map(|_| Mo::sample(rng)).collect(); - #[cfg(feature = "zk")] - #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit_zk", fields(nu, sigma))] - #[allow(clippy::type_complexity)] - fn commit_zk( - &self, - nu: usize, - sigma: usize, - setup: &ProverSetup, - rng: &mut R, - ) -> Result<(E::GT, Vec, Vec), DoryError> - where - E: PairingCurve, - M1: DoryRoutines, - E::G1: Group, - R: rand_core::RngCore, - { - let expected_len = 1 << (nu + sigma); - if self.coefficients.len() != expected_len { - return Err(DoryError::InvalidSize { - expected: expected_len, - actual: self.coefficients.len(), - }); - } - let (num_rows, num_cols) = (1 << nu, 1 << sigma); - let g1 = &setup.g1_vec[..num_cols]; - let blinds: Vec = (0..num_rows).map(|_| ArkFr::random(rng)).collect(); - let rows: Vec = (0..num_rows) + let row_commitments: Vec = (0..num_rows) .map(|i| { - M1::msm(g1, &self.coefficients[i * num_cols..(i + 1) * num_cols]) - + setup.h1.scale(&blinds[i]) + let row = &self.coefficients[i * num_cols..(i + 1) * num_cols]; + Mo::mask(M1::msm(g1, row), &setup.h1, &blinds[i]) }) .collect(); - Ok(( - E::multi_pair_g2_setup(&rows, &setup.g2_vec[..num_rows]), - rows, - blinds, - )) + + let commitment = E::multi_pair_g2_setup(&row_commitments, &setup.g2_vec[..num_rows]); + + let opt_blinds = if Mo::BLINDING { Some(blinds) } else { None }; + + Ok((commitment, row_commitments, opt_blinds)) } } diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index b129f08..c8f4dd0 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -35,9 +35,6 @@ use crate::proof::DoryProof; use crate::reduce_and_fold::{DoryProverState, DoryVerifierState}; use crate::setup::{ProverSetup, VerifierSetup}; -#[cfg(feature = "zk")] 
-use crate::mode::ZK; - /// Create evaluation proof for a polynomial at a point /// /// Implements Eval-VMV-RE protocol from Dory Section 5. @@ -117,7 +114,8 @@ where let row_commitments = if let Some(rc) = row_commitments { rc } else { - let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; + let (_commitment, rc, _blinds) = + polynomial.commit::(nu, sigma, setup, rng)?; rc }; @@ -160,21 +158,20 @@ where transcript.append_serde(b"vmv_e1", &vmv_message.e1); #[cfg(feature = "zk")] - let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2, zk_r_y) = - if std::any::TypeId::of::() == std::any::TypeId::of::() { - use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; - let y = polynomial.evaluate(point); - let r_y: F = Mo::sample(rng); - let e2 = Mo::mask(g2_fin.scale(&y), &setup.h2, &r_e2); - let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); - transcript.append_serde(b"vmv_e2", &e2); - transcript.append_serde(b"vmv_y_com", &y_com); - let s1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); - let s2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript, rng); - (Some(e2), Some(y_com), Some(s1), Some(s2), Some(r_y)) - } else { - (None, None, None, None, None) - }; + let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2, zk_r_y) = if Mo::BLINDING { + use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; + let y = polynomial.evaluate(point); + let r_y: F = Mo::sample(rng); + let e2 = Mo::mask(g2_fin.scale(&y), &setup.h2, &r_e2); + let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); + transcript.append_serde(b"vmv_e2", &e2); + transcript.append_serde(b"vmv_y_com", &y_com); + let s1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); + let s2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript, rng); + (Some(e2), Some(y_com), Some(s1), Some(s2), Some(r_y)) + } else { + (None, None, None, None, None) + }; // v₂ = v_vec · Γ₂,fin (each scalar scales g_fin) let v2 = 
M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec); @@ -231,13 +228,13 @@ where let gamma = transcript.challenge_scalar(b"gamma"); #[cfg(feature = "zk")] - let scalar_product_proof = if std::any::TypeId::of::() == std::any::TypeId::of::() { + let scalar_product_proof = if Mo::BLINDING { Some(prover_state.scalar_product_proof(transcript, rng)) } else { None }; - let final_message = prover_state.compute_final_message::(&gamma); + let final_message = prover_state.compute_final_message::(&gamma, rng); transcript.append_serde(b"final_e1", &final_message.e1); transcript.append_serde(b"final_e2", &final_message.e2); diff --git a/src/lib.rs b/src/lib.rs index b4fc9db..688f07d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,15 +39,15 @@ //! ### Basic Example //! //! ```ignore -//! use dory_pcs::{setup, prove, verify}; +//! use dory_pcs::{setup, prove, verify, Transparent}; //! use dory_pcs::backends::arkworks::{BN254, G1Routines, G2Routines, Blake2bTranscript}; //! //! // 1. Generate setup (automatically loads from/saves to disk) //! let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); //! //! // 2. Commit to polynomial -//! let (tier_2_commitment, tier_1_commitments) = polynomial -//! .commit::(nu, sigma, &prover_setup)?; +//! let (tier_2_commitment, tier_1_commitments, _blinds) = polynomial +//! .commit::(nu, sigma, &prover_setup, &mut rng)?; //! //! // 3. Generate evaluation proof //! let mut prover_transcript = Blake2bTranscript::new(b"domain-separation"); @@ -81,6 +81,7 @@ //! - `basic_e2e.rs` - Standard square matrix workflow //! - `homomorphic.rs` - Homomorphic combination of multiple polynomials //! - `non_square.rs` - Non-square matrix layout (nu < sigma) +//! - `zk_e2e.rs` - Zero-knowledge proof workflow (requires `zk` feature) //! //! ## Feature Flags //! 
diff --git a/src/mode.rs b/src/mode.rs index 8d10863..ece2678 100644 --- a/src/mode.rs +++ b/src/mode.rs @@ -3,6 +3,8 @@ use crate::primitives::arithmetic::{Field, Group}; /// Determines whether protocol messages are blinded (ZK) or unblinded (transparent). pub trait Mode: 'static { + /// Whether this mode produces blinding values that callers must retain. + const BLINDING: bool; /// Sample a blinding scalar: zero in Transparent mode, random in ZK mode. fn sample(rng: &mut R) -> F; /// Mask a group element: identity in Transparent mode, `value + base * blind` in ZK mode. @@ -12,6 +14,7 @@ pub trait Mode: 'static { /// Transparent mode: no blinding, non-hiding proofs. pub struct Transparent; impl Mode for Transparent { + const BLINDING: bool = false; fn sample(_rng: &mut R) -> F { F::zero() } @@ -25,6 +28,7 @@ impl Mode for Transparent { pub struct ZK; #[cfg(feature = "zk")] impl Mode for ZK { + const BLINDING: bool = true; fn sample(rng: &mut R) -> F { F::random(rng) } diff --git a/src/primitives/poly.rs b/src/primitives/poly.rs index edb857e..b178748 100644 --- a/src/primitives/poly.rs +++ b/src/primitives/poly.rs @@ -4,6 +4,7 @@ use crate::error::DoryError; use crate::setup::ProverSetup; use super::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; +use crate::mode::Mode; /// Trait for multilinear Lagrange polynomial operations pub trait MultilinearLagrange: Polynomial { @@ -56,13 +57,16 @@ pub trait Polynomial { /// Polynomial evaluation result fn evaluate(&self, point: &[F]) -> F; - /// Commit to polynomial using Dory's 2-tier (AFGHO) homomorphic commitment + /// Commit to polynomial using Dory's 2-tier (AFGHO) homomorphic commitment. /// /// The polynomial coefficients are arranged as a 2D matrix with 2^nu rows and 2^sigma columns. 
/// /// # Tier 1 (Row Commitments) /// For each row i: `row_commit[i] = MSM(g1_generators[0..2^sigma], row_coefficients[i])` /// + /// In ZK mode (`Mo = ZK`), each row commitment is additionally blinded: + /// `row_commit[i] += H₁ · blind[i]` where `blind[i]` is a fresh random scalar. + /// /// # Tier 2 (Final Commitment) /// `commitment = Σ e(row_commit[i], g2_generators[i])` for i in 0..2^nu /// @@ -70,40 +74,27 @@ pub trait Polynomial { /// - `nu`: Log₂ of number of rows /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup containing generators + /// - `rng`: Random number generator (unused in Transparent mode) /// /// # Returns - /// `(commitment, row_commitments)` where: + /// `(commitment, row_commitments, blinds)` where: /// - `commitment`: Final commitment in GT /// - `row_commitments`: Intermediate row commitments in G1 (used in opening proof) + /// - `blinds`: Per-row blinding scalars in ZK mode (`Some`), or `None` in Transparent mode /// /// # Errors /// Returns error if coefficient length doesn't match 2^(nu + sigma) or if setup is insufficient. - fn commit( - &self, - nu: usize, - sigma: usize, - setup: &ProverSetup, - ) -> Result<(E::GT, Vec), DoryError> - where - E: PairingCurve, - M1: DoryRoutines, - E::G1: Group; - - /// Commit with per-row ZK blinds. Returns `(commitment, row_commitments, blinds)`. - /// - /// # Errors - /// Returns error if coefficient length doesn't match 2^(nu + sigma). 
- #[cfg(feature = "zk")] #[allow(clippy::type_complexity)] - fn commit_zk( + fn commit( &self, nu: usize, sigma: usize, setup: &ProverSetup, rng: &mut R, - ) -> Result<(E::GT, Vec, Vec), DoryError> + ) -> Result<(E::GT, Vec, Option>), DoryError> where E: PairingCurve, + Mo: Mode, M1: DoryRoutines, E::G1: Group, R: rand_core::RngCore; diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index ad13b06..3f62a7b 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -377,14 +377,22 @@ where /// /// Applies fold-scalars transformation and returns the final E1, E2 elements. /// Must be called when num_rounds=0 (vectors are size 1). + /// + /// In ZK mode, E₁ and E₂ are additionally blinded with fresh randomness so + /// that the folded vectors `v₁[0]`, `v₂[0]` cannot be recovered from the + /// proof. These blinds do not affect the scalar product proof or the + /// accumulated `r_c` — they only add entropy to the Fiat-Shamir transcript + /// from which the `d` challenge is derived. #[tracing::instrument(skip_all, name = "DoryProverState::compute_final_message")] - pub fn compute_final_message( + pub fn compute_final_message( &mut self, gamma: &::Scalar, + rng: &mut R, ) -> ScalarProductMessage where M1: DoryRoutines, M2: DoryRoutines, + R: rand_core::RngCore, { debug_assert_eq!(self.num_rounds, 0, "num_rounds must be 0 for final message"); debug_assert_eq!(self.v1.len(), 1, "v1 must have length 1"); @@ -392,13 +400,17 @@ where let gamma_inv = (*gamma).inv().expect("gamma must be invertible"); - // Apply fold-scalars transform: - // E₁ = v₁ + γ·s₁·H₁ - let gamma_s1 = *gamma * self.s1[0]; + // Sample independent blinds for the final message (zero in Transparent mode). 
+ let r_final1: ::Scalar = M::sample(rng); + let r_final2: ::Scalar = M::sample(rng); + + // Apply fold-scalars transform with blinding: + // E₁ = v₁ + (γ·s₁ + r_final1)·H₁ + let gamma_s1 = *gamma * self.s1[0] + r_final1; let e1 = self.v1[0] + self.setup.h1.scale(&gamma_s1); - // E₂ = v₂ + γ⁻¹·s₂·H₂ - let gamma_inv_s2 = gamma_inv * self.s2[0]; + // E₂ = v₂ + (γ⁻¹·s₂ + r_final2)·H₂ + let gamma_inv_s2 = gamma_inv * self.s2[0] + r_final2; let e2 = self.v2[0] + self.setup.h2.scale(&gamma_inv_s2); // Final blind accumulation: r_c ← r_c + γ·r_e2 + γ⁻¹·r_e1 diff --git a/tests/arkworks/commitment.rs b/tests/arkworks/commitment.rs index 382d4df..aef339b 100644 --- a/tests/arkworks/commitment.rs +++ b/tests/arkworks/commitment.rs @@ -2,89 +2,104 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::Transparent; #[test] fn test_commit_small_polynomial() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let poly = random_polynomial(16); let nu = 2; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup); + let result = poly.commit::(nu, sigma, &setup, &mut rng); assert!(result.is_ok()); - let (_commitment, row_commitments) = result.unwrap(); + let (_commitment, row_commitments, _) = result.unwrap(); assert_eq!(row_commitments.len(), 1 << nu); } #[test] fn test_commit_constant_polynomial() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let poly = constant_polynomial(42, 4); let nu = 2; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup); + let result = poly.commit::(nu, sigma, &setup, &mut rng); assert!(result.is_ok()); - let (_commitment, row_commitments) = result.unwrap(); + let (_commitment, row_commitments, _) = result.unwrap(); assert_eq!(row_commitments.len(), 1 << nu); } #[test] fn test_commit_different_sizes() { + let mut rng = rand::thread_rng(); let setup = test_setup(8); let poly_4 = random_polynomial(4); - let result = poly_4.commit::(1, 1, &setup); + let result = poly_4.commit::(1, 1, &setup, 
&mut rng); assert!(result.is_ok()); let poly_16 = random_polynomial(16); - let result = poly_16.commit::(2, 2, &setup); + let result = poly_16.commit::(2, 2, &setup, &mut rng); assert!(result.is_ok()); let poly_64 = random_polynomial(64); - let result = poly_64.commit::(3, 3, &setup); + let result = poly_64.commit::(3, 3, &setup, &mut rng); assert!(result.is_ok()); } #[test] fn test_commit_invalid_size() { + let mut rng = rand::thread_rng(); let setup = test_setup(4); let poly = random_polynomial(16); let nu = 3; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup); + let result = poly.commit::(nu, sigma, &setup, &mut rng); assert!(result.is_err()); } #[test] fn test_commit_deterministic() { + let mut rng = rand::thread_rng(); let setup = test_setup(6); let coefficients: Vec = (0..16).map(|i| ArkFr::from_u64(i as u64)).collect(); let poly1 = ArkworksPolynomial::new(coefficients.clone()); let poly2 = ArkworksPolynomial::new(coefficients); - let (comm1, _) = poly1.commit::(2, 2, &setup).unwrap(); - let (comm2, _) = poly2.commit::(2, 2, &setup).unwrap(); + let (comm1, _, _) = poly1 + .commit::(2, 2, &setup, &mut rng) + .unwrap(); + let (comm2, _, _) = poly2 + .commit::(2, 2, &setup, &mut rng) + .unwrap(); assert_eq!(comm1, comm2); } #[test] fn test_commit_different_polynomials() { + let mut rng = rand::thread_rng(); let setup = test_setup(6); let poly1 = random_polynomial(16); let poly2 = random_polynomial(16); - let (comm1, _) = poly1.commit::(2, 2, &setup).unwrap(); - let (comm2, _) = poly2.commit::(2, 2, &setup).unwrap(); + let (comm1, _, _) = poly1 + .commit::(2, 2, &setup, &mut rng) + .unwrap(); + let (comm2, _, _) = poly2 + .commit::(2, 2, &setup, &mut rng) + .unwrap(); assert_ne!(comm1, comm2); } diff --git a/tests/arkworks/evaluation.rs b/tests/arkworks/evaluation.rs index 7831982..85a996e 100644 --- a/tests/arkworks/evaluation.rs +++ b/tests/arkworks/evaluation.rs @@ -16,8 +16,8 @@ fn test_evaluation_proof_small() { let nu = 2; let sigma = 2; - let 
(tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -61,8 +61,8 @@ fn test_evaluation_proof_with_precomputed_commitment() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -109,8 +109,8 @@ fn test_evaluation_proof_constant_polynomial() { let expected_eval = poly.evaluate(&point); assert_eq!(expected_eval, ArkFr::from_u64(7)); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -154,8 +154,8 @@ fn test_evaluation_proof_wrong_evaluation_fails() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -197,7 +197,9 @@ fn test_evaluation_proof_different_sizes() { let poly = random_polynomial(4); let point = random_point(2); - let (tier_2, tier_1) = poly.commit::(1, 1, &setup).unwrap(); + let (tier_2, tier_1, _) = poly + .commit::(1, 1, &setup, &mut rng) + .unwrap(); let mut prover_transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( @@ -232,7 +234,9 @@ fn test_evaluation_proof_different_sizes() { let poly = random_polynomial(64); let point = random_point(6); - let (tier_2, tier_1) = poly.commit::(3, 3, &setup).unwrap(); + let (tier_2, tier_1, _) = poly + .commit::(3, 3, &setup, &mut rng) + .unwrap(); let mut prover_transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( @@ -271,8 +275,8 @@ fn 
test_multiple_evaluations_same_commitment() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &setup, &mut rng) .unwrap(); for _ in 0..3 { diff --git a/tests/arkworks/homomorphic.rs b/tests/arkworks/homomorphic.rs index 9f0712c..43ab7e2 100644 --- a/tests/arkworks/homomorphic.rs +++ b/tests/arkworks/homomorphic.rs @@ -22,7 +22,7 @@ fn test_homomorphic_combination_e2e() { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup) + poly.commit::(nu, sigma, &prover_setup, &mut rng) .unwrap() }) .collect(); @@ -130,7 +130,7 @@ fn test_homomorphic_combination_small() { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup) + poly.commit::(nu, sigma, &prover_setup, &mut rng) .unwrap() }) .collect(); diff --git a/tests/arkworks/integration.rs b/tests/arkworks/integration.rs index 8d4c198..66c9fd5 100644 --- a/tests/arkworks/integration.rs +++ b/tests/arkworks/integration.rs @@ -16,8 +16,8 @@ fn test_full_workflow() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(8); @@ -63,8 +63,8 @@ fn test_workflow_without_precommitment() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -103,8 +103,8 @@ fn test_batched_proofs() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); for i in 0..5 { @@ -160,8 +160,8 @@ fn test_linear_polynomial() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, 
&prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -205,8 +205,8 @@ fn test_zero_polynomial() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -250,12 +250,12 @@ fn test_soundness_wrong_commitment() { let nu = 4; let sigma = 4; - let (commitment1, _) = poly1 - .commit::(nu, sigma, &prover_setup) + let (commitment1, _, _) = poly1 + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); - let (_, tier_1_poly2) = poly2 - .commit::(nu, sigma, &prover_setup) + let (_, tier_1_poly2, _) = poly2 + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let mut prover_transcript = fresh_transcript(); diff --git a/tests/arkworks/non_square.rs b/tests/arkworks/non_square.rs index b06c0af..be6c333 100644 --- a/tests/arkworks/non_square.rs +++ b/tests/arkworks/non_square.rs @@ -18,8 +18,8 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); @@ -64,8 +64,8 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (_, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (_, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); @@ -100,8 +100,8 @@ fn test_non_square_matrix_small() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1) = poly - 
.commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); @@ -150,8 +150,8 @@ fn test_non_square_matrix_very_rectangular() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); diff --git a/tests/arkworks/serialization.rs b/tests/arkworks/serialization.rs index 08d331d..ddec013 100644 --- a/tests/arkworks/serialization.rs +++ b/tests/arkworks/serialization.rs @@ -16,7 +16,9 @@ fn make_transparent_proof() -> ( let poly = random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + let (tier_2, tier_1, _) = poly + .commit::(2, 2, &setup, &mut rng) + .unwrap(); let mut transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, @@ -82,7 +84,9 @@ fn test_transparent_proof_roundtrip_verifies() { let poly = random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + let (tier_2, tier_1, _) = poly + .commit::(2, 2, &setup, &mut rng) + .unwrap(); let mut transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( @@ -131,7 +135,9 @@ mod zk_roundtrip { let poly = random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + let (tier_2, tier_1, _) = poly + .commit::(2, 2, &setup, &mut rng) + .unwrap(); let mut transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( @@ -193,7 +199,9 @@ mod zk_roundtrip { let poly = 
random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1) = poly.commit::(2, 2, &setup).unwrap(); + let (tier_2, tier_1, _) = poly + .commit::(2, 2, &setup, &mut rng) + .unwrap(); let mut transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( diff --git a/tests/arkworks/soundness.rs b/tests/arkworks/soundness.rs index 9abe6b0..4f4e27d 100644 --- a/tests/arkworks/soundness.rs +++ b/tests/arkworks/soundness.rs @@ -27,11 +27,10 @@ fn create_valid_proof_components( let poly = random_polynomial(size); let point = random_point(nu + sigma); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) - .unwrap(); - let mut rng = rand::thread_rng(); + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) + .unwrap(); let mut prover_transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( &poly, diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs index 9804915..fd30f0b 100644 --- a/tests/arkworks/zk.rs +++ b/tests/arkworks/zk.rs @@ -2,7 +2,7 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{create_evaluation_proof, prove, setup, verify, ZK}; +use dory_pcs::{create_evaluation_proof, prove, setup, verify, Transparent, ZK}; #[test] fn test_zk_full_workflow() { @@ -15,8 +15,8 @@ fn test_zk_full_workflow() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(8); @@ -59,8 +59,8 @@ fn test_zk_small_polynomial() { let nu = 1; let sigma = 1; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(2); @@ -105,8 +105,8 @@ fn test_zk_larger_polynomial() { let nu = 5; let sigma = 5; - let (tier_2, 
tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(10); @@ -152,8 +152,8 @@ fn test_zk_non_square_matrix() { let nu = 3; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(7); // nu + sigma = 7 @@ -200,8 +200,8 @@ fn test_zk_hidden_evaluation() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(4); @@ -256,8 +256,8 @@ fn test_zk_tampered_e2_rejected() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(4); @@ -305,8 +305,8 @@ fn test_zk_hidden_evaluation_larger() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup, &mut rng) .unwrap(); let point = random_point(8); diff --git a/tests/arkworks/zk_statistical.rs b/tests/arkworks/zk_statistical.rs index eaf11d0..88282a9 100644 --- a/tests/arkworks/zk_statistical.rs +++ b/tests/arkworks/zk_statistical.rs @@ -7,7 +7,7 @@ use super::*; use ark_serialize::CanonicalSerialize; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{create_evaluation_proof, setup, verify, DoryProof, ZK}; +use dory_pcs::{create_evaluation_proof, setup, verify, DoryProof, Transparent, ZK}; use rand::rngs::StdRng; use rand::SeedableRng; use std::collections::HashMap; @@ -202,8 +202,13 @@ fn test_zk_statistical_indistinguishability() { let coeffs = vec![ArkFr::zero(); poly_size]; let poly = 
ArkworksPolynomial::new(coeffs); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::( + nu, + sigma, + &prover_setup, + &mut trial_rng, + ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -241,8 +246,13 @@ fn test_zk_statistical_indistinguishability() { let coeffs = vec![ArkFr::one(); poly_size]; let poly = ArkworksPolynomial::new(coeffs); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::( + nu, + sigma, + &prover_setup, + &mut trial_rng, + ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -278,8 +288,13 @@ fn test_zk_statistical_indistinguishability() { { let poly = random_polynomial(poly_size); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::( + nu, + sigma, + &prover_setup, + &mut trial_rng, + ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -387,8 +402,13 @@ fn test_zk_witness_independence() { coeffs[0] = ArkFr::from_u64(42); let poly = ArkworksPolynomial::new(coeffs); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::( + nu, + sigma, + &prover_setup, + &mut trial_rng, + ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -424,8 +444,13 @@ fn test_zk_witness_independence() { { let poly = random_polynomial(poly_size); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, _) = poly + .commit::( + nu, + sigma, + &prover_setup, + &mut trial_rng, + ) .unwrap(); let evaluation = poly.evaluate(&point); diff --git a/zk.md b/zk.md new file mode 100644 index 0000000..06d7c6a --- /dev/null +++ b/zk.md @@ -0,0 +1,449 @@ +# Zero-Knowledge Analysis of Dory PCS Implementation + +This document audits every value the prover sends to the verifier (via the +Fiat-Shamir transcript or proof struct) and determines whether it is blinded, +public, or leaks witness information. 
+ +**Security model**: Honest-Verifier Zero-Knowledge (HVZK) in the Random Oracle +Model (ROM), which is standard for Fiat-Shamir-transformed protocols. + +**Notation**: +- `Γ₁, Γ₂` — public setup generators in G₁, G₂ +- `H₁, H₂` — blinding generators; `HT = e(H₁, H₂)` +- `r_*` — random blinding scalars sampled via `Mode::sample` +- `f` — the committed polynomial (the witness) +- `z` — the evaluation point; `y = f(z)` — the evaluation + +--- + +## 0. Mode Dispatch (`mode.rs`) + +The `Mode` trait controls all blinding: + +| Mode | `sample()` | `mask(v, base, r)` | +|------|-----------|---------------------| +| `Transparent` | Returns `0` | Returns `v` (identity) | +| `ZK` | Returns `F::random(rng)` | Returns `v + base · r` | + +**Every `M::mask(...)` call below is a no-op in Transparent mode.** The rest of +this document focuses exclusively on ZK mode (`feature = "zk"`). + +--- + +## 1. Commitment Phase (`ark_poly.rs`) + +### 1a. `commit()` — Transparent commitment + +``` +row_commit[i] = MSM(Γ₁, coeffs_row_i) (no blinding) +tier2 = Σ e(row_commit[i], Γ₂[i]) +``` + +**Verdict**: NOT hiding. The tier-1 row commitments are deterministic functions +of the polynomial coefficients. The tier-2 commitment is a deterministic pairing +aggregate. + +### 1b. `commit_zk()` — ZK commitment + +``` +blind[i] ← random (fresh per row) +row_commit[i] = MSM(Γ₁, coeffs_row_i) + H₁ · blind[i] +tier2 = Σ e(row_commit[i], Γ₂[i]) +``` + +**Verdict**: HIDING (Pedersen). Each row commitment is a Pedersen commitment +with independent randomness. The tier-2 commitment inherits computational +hiding from the DL-hard blinding in each row. + +Returns `(tier2, row_commitments, blinds)` — the caller must keep `blinds` +secret and supply the blinded `row_commitments` to the prover. + +--- + +## 2. VMV Phase (`evaluation_proof.rs:132–177`) + +Four independent blinds are sampled: + +``` +r_c, r_d2, r_e1, r_e2 ← random +``` + +### 2a. 
`C` — Inner product value (GT) + +``` +C = e(MSM(row_comms, v_vec), Γ₂,fin) + r_c · HT +``` + +**Sent to transcript**: `vmv_c` +**Verdict**: BLINDED. Masked by `r_c · HT` with fresh randomness. Reveals +nothing about the polynomial's internal structure. + +### 2b. `D₂` — Generator inner product (GT) + +``` +D₂ = e(MSM(Γ₁[..2^σ], v_vec), Γ₂,fin) + r_d2 · HT +``` + +**Sent to transcript**: `vmv_d2` +**Verdict**: BLINDED. `v_vec = Lᵀ × M` encodes the polynomial, but `r_d2 · HT` +masks the value. Computationally hiding under DL in GT. + +### 2c. `E₁` — Row-scalar MSM (G₁) + +``` +E₁ = MSM(row_comms, left_vec) + r_e1 · H₁ +``` + +**Sent to transcript**: `vmv_e1` +**Verdict**: BLINDED. Masked by `r_e1 · H₁`. Even if `row_comms` are +themselves blinded (via `commit_zk`), the additional `r_e1` ensures +independence. + +### 2d. `E₂` — Evaluation commitment (G₂, ZK-only) + +``` +E₂ = Γ₂,fin · y + r_e2 · H₂ +``` + +**Sent to transcript**: `vmv_e2` +**Verdict**: BLINDED. Pedersen commitment to the evaluation `y` with fresh +randomness `r_e2`. The verifier learns nothing about `y` from `E₂` alone. + +In transparent mode, the verifier computes `e₂ = Γ₂,fin · y` directly from +the public evaluation — no commitment is needed. + +### 2e. `y_com` — Evaluation Pedersen commitment (G₁, ZK-only) + +``` +y_com = Γ₁,fin · y + H₁ · r_y (r_y ← random) +``` + +**Sent to transcript**: `vmv_y_com` +**Verdict**: BLINDED. Standard Pedersen commitment. Together with `E₂`, this +enables the Sigma1 proof (§3a) to attest that both commit to the same `y` +without revealing it. + +--- + +## 3. Sigma Proofs (ZK-only, `reduce_and_fold.rs:452–570`) + +### 3a. Sigma1 — Same-`y` proof + +**Relation proved**: `E₂` and `y_com` commit to the same scalar `y`. 
+ +``` +Prover: + k₁, k₂, k₃ ← random + a₁ = Γ₂,fin · k₁ + H₂ · k₂ + a₂ = Γ₁,fin · k₁ + H₁ · k₃ + (append a₁, a₂ → transcript) + c = H(transcript) + z₁ = k₁ + c·y, z₂ = k₂ + c·r_e2, z₃ = k₃ + c·r_y + +Verifier checks: + Γ₂,fin · z₁ + H₂ · z₂ == a₁ + c · E₂ + Γ₁,fin · z₁ + H₁ · z₃ == a₂ + c · y_com +``` + +**Sent**: `(a₁, a₂, z₁, z₂, z₃)` +**Verdict**: HVZK. Standard Sigma protocol with 3 random nonces. The +simulator picks `z₁, z₂, z₃` uniformly, then programs `a₁, a₂` to satisfy +the verification equations. Response distribution is identical to real proofs. + +### 3b. Sigma2 — VMV consistency proof + +**Relation proved**: `e(E₁, Γ₂,fin) − D₂` lies in the span of `H₁`, i.e., +the difference between `E₁`'s and `D₂`'s blinding is consistent: +`e(E₁, Γ₂,fin) − D₂ = e(H₁, r_e1 · Γ₂,fin + (−r_d2) · H₂)`. + +``` +Prover: + k₁, k₂ ← random + a = e(H₁, Γ₂,fin · k₁ + H₂ · k₂) + (append a → transcript) + c = H(transcript) + z₁ = k₁ + c · r_e1, z₂ = k₂ + c · (−r_d2) + +Verifier checks: + e(H₁, Γ₂,fin · z₁ + H₂ · z₂) == a + c · (e(E₁, Γ₂,fin) − D₂) +``` + +**Sent**: `(a, z₁, z₂)` +**Verdict**: HVZK. Standard 2-nonce Sigma protocol. Simulatable by the same +strategy as Sigma1. + +--- + +## 4. Reduce-and-Fold Rounds (`reduce_and_fold.rs`) + +Each round `t` (of `σ` total) produces two messages and consumes two +challenges `(βₜ, αₜ)`. + +### 4a. 
First Reduce Message (6 values per round) + +**Per-round blinds sampled**: +``` +round_d1[0], round_d1[1] ← random +round_d2[0], round_d2[1] ← random +``` + +| Value | Formula | Blinding | Verdict | +|-------|---------|----------|---------| +| `D₁L` | `e(v₁L, Γ₂') + round_d1[0] · HT` | `round_d1[0]` | **BLINDED** | +| `D₁R` | `e(v₁R, Γ₂') + round_d1[1] · HT` | `round_d1[1]` | **BLINDED** | +| `D₂L` | `e(Γ₁', v₂L) + round_d2[0] · HT` | `round_d2[0]` | **BLINDED** | +| `D₂R` | `e(Γ₁', v₂R) + round_d2[1] · HT` | `round_d2[1]` | **BLINDED** | +| `E₁β` | `MSM(Γ₁, s₂)` | **NONE** | **PUBLIC** (see below) | +| `E₂β` | `MSM(Γ₂, s₁)` | **NONE** | **PUBLIC** (see below) | + +**E₁β and E₂β are not masked**, but `s₁` and `s₂` are scalar vectors derived +solely from the evaluation point coordinates and the Fiat-Shamir challenges +(both public to the verifier). The generators `Γ₁, Γ₂` are public setup +parameters. Therefore: + +- `E₁β = MSM(Γ₁, s₂)` is a deterministic, publicly computable value. +- `E₂β = MSM(Γ₂, s₁)` is a deterministic, publicly computable value. +- **No polynomial information leaks through these values.** + +> **Note for committed-point applications**: If the evaluation point `z` is +> committed rather than public, the verifier could in principle reconstruct +> `E₁β` and `E₂β` only if they knew `z`. Since these values appear in the +> proof, a third party observing the proof (but not knowing `z`) could use +> `E₁β`, `E₂β` to recover information about the evaluation point. +> However, the standard Dory verification API (`verify()`) takes `point` as +> a **public input**. Hiding the point requires an additional protocol layer +> on top of Dory (e.g., proving in ZK that the committed point was used). + +### 4b. 
Second Reduce Message (6 values per round) + +**Per-round blinds sampled**: +``` +round_c[0], round_c[1] ← random +round_e1[0], round_e1[1] ← random +round_e2[0], round_e2[1] ← random +``` + +| Value | Formula | Blinding | Verdict | +|-------|---------|----------|---------| +| `C₊` | `e(v₁L, v₂R) + round_c[0] · HT` | `round_c[0]` | **BLINDED** | +| `C₋` | `e(v₁R, v₂L) + round_c[1] · HT` | `round_c[1]` | **BLINDED** | +| `E₁₊` | `MSM(v₁L, s₂R) + round_e1[0] · H₁` | `round_e1[0]` | **BLINDED** | +| `E₁₋` | `MSM(v₁R, s₂L) + round_e1[1] · H₁` | `round_e1[1]` | **BLINDED** | +| `E₂₊` | `MSM(v₂R, s₁L) + round_e2[0] · H₂` | `round_e2[0]` | **BLINDED** | +| `E₂₋` | `MSM(v₂L, s₁R) + round_e2[1] · H₂` | `round_e2[1]` | **BLINDED** | + +**Verdict**: All six values are independently blinded with fresh randomness. +The cross-products `e(v₁L, v₂R)` etc. encode polynomial information but are +fully hidden by the masks. + +### 4c. Blinding Accumulation + +After each challenge application, the prover accumulates blinds: + +``` +After β: r_c ← r_c + β · r_d2 + β⁻¹ · r_d1 + +After α: r_c ← r_c + α · round_c[0] + α⁻¹ · round_c[1] + r_d1 ← α · round_d1[0] + round_d1[1] + r_d2 ← α⁻¹ · round_d2[0] + round_d2[1] + r_e1 ← r_e1 + α · round_e1[0] + α⁻¹ · round_e1[1] + r_e2 ← r_e2 + α · round_e2[0] + α⁻¹ · round_e2[1] +``` + +This tracking is correct: the accumulated blinds mirror the verifier's +accumulation of the GT/G₁/G₂ elements, ensuring that the final scalar +product proof can account for all blinding contributions. + +--- + +## 5. 
Final Message (`reduce_and_fold.rs:381–420`) + +After `γ` is derived from the transcript: + +``` +r_final1, r_final2 ← random (zero in Transparent mode) + +E₁_final = v₁[0] + (γ · s₁[0] + r_final1) · H₁ +E₂_final = v₂[0] + (γ⁻¹ · s₂[0] + r_final2) · H₂ + +r_c ← r_c + γ · r_e2 + γ⁻¹ · r_e1 +``` + +**Sent to transcript**: `final_e1`, `final_e2` + +In ZK mode, `r_final1` and `r_final2` are fresh random scalars that mask the +deterministic offset `γ · s₁[0]` (resp. `γ⁻¹ · s₂[0]`), preventing recovery +of the folded vectors `v₁[0]` and `v₂[0]`. + +These blinds do **not** need to be tracked in the accumulated `r_c` because +`verify_final_zk` does not use the final message elements in the pairing check +— it uses the scalar product proof's blinded `e₁, e₂` instead. The final +message values serve only as entropy for the Fiat-Shamir `d` challenge. + +In Transparent mode, `r_final1 = r_final2 = 0` (via `Mode::sample`), so the +transparent verification path (`verify_final`) is unaffected. + +**Verdict**: **BLINDED** (ZK mode). The folded vectors `v₁[0]`, `v₂[0]` +cannot be recovered from the proof. + +--- + +## 6. Scalar Product Proof (ZK-only, `reduce_and_fold.rs:411–448`) + +Generated **before** `compute_final_message`, using the current `v₁[0], v₂[0]`: + +``` +sd₁, sd₂ ← random +d₁ = Γ₁,₀ · sd₁, d₂ = Γ₂,₀ · sd₂ + +rp₁, rp₂, rq, rr ← random +p₁ = e(d₁, Γ₂,₀) + rp₁ · HT +p₂ = e(Γ₁,₀, d₂) + rp₂ · HT +q = e(d₁, v₂) + e(v₁, d₂) + rq · HT +r = e(d₁, d₂) + rr · HT + +(append p₁, p₂, q, r → transcript) +c = H(transcript) ← Fiat-Shamir challenge + +Response: + e₁ = d₁ + v₁ · c (blinded by d₁) + e₂ = d₂ + v₂ · c (blinded by d₂) + r₁ = rp₁ + c · r_d1 (blinded by rp₁) + r₂ = rp₂ + c · r_d2 (blinded by rp₂) + r₃ = rr + c · rq + c² · r_c (blinded by rr, rq) +``` + +**Sent**: `(p₁, p₂, q, r, e₁, e₂, r₁, r₂, r₃)` + +**Verdict**: HVZK. 
This is a batched Sigma protocol for the relation: + +``` +e(v₁, v₂) + r_c · HT = C_accumulated +e(v₁, Γ₂,₀) + r_d1 · HT = D₁_accumulated +e(Γ₁,₀, v₂) + r_d2 · HT = D₂_accumulated +``` + +The 4 commitments `(p₁, p₂, q, r)` are independently blinded. The responses +are standard Sigma-protocol responses. A simulator can produce +indistinguishable transcripts by choosing responses first and computing +commitments. + +### Interaction with Final Message + +Since the final message is now independently blinded (§5), `v₁[0]` and `v₂[0]` +cannot be recovered. This means the scalar product proof's blinding commitments +`d₁, d₂` remain hidden, preserving the full HVZK property of the Sigma +protocol. + +--- + +## 7. Verification Paths + +### 7a. Transparent Final Check (`verify_final`) + +``` +LHS = e(E₁ + d·Γ₁,₀, E₂ + d⁻¹·Γ₂,₀) (4 pairings batched) +RHS = C + s₁·s₂·HT + χ₀ + d·D₂ + d⁻¹·D₁ + d²·D₂_init +``` + +All values on both sides are unblinded → no ZK. + +### 7b. ZK Final Check (`verify_final_zk`) + +``` +LHS = e(sp.e₁ + d·Γ₁,₀, sp.e₂ + d⁻¹·Γ₂,₀) +RHS = χ₀ + sp.r + c·sp.q + c²·C + + d·(sp.p₂ + c·D₂) + d⁻¹·(sp.p₁ + c·D₁) + − HT·(sp.r₃ + d·sp.r₂ + d⁻¹·sp.r₁) +``` + +Uses `sp.e₁/e₂` (randomly blinded) instead of `final_message.e₁/e₂`. +The random scalar `d` (derived after `final_e1, final_e2` are appended to the +transcript) batches 4 verification equations into one pairing. + +**Note**: `final_message.e1/e2` are appended to the transcript purely for +Fiat-Shamir entropy — the actual verification uses the scalar product proof's +blinded elements. + +--- + +## 8. Summary Table + +| Proof Element | Group | Blinded? | Blind Source | Leaks Polynomial Info? 
| +|---------------|-------|----------|-------------|----------------------| +| **VMV C** | GT | Yes | `r_c · HT` | No | +| **VMV D₂** | GT | Yes | `r_d2 · HT` | No | +| **VMV E₁** | G₁ | Yes | `r_e1 · H₁` | No | +| **VMV E₂** (ZK) | G₂ | Yes | `r_e2 · H₂` | No | +| **y_com** (ZK) | G₁ | Yes | `r_y · H₁` | No | +| **Sigma1** (a₁,a₂,z₁,z₂,z₃) | mixed | HVZK | `k₁,k₂,k₃` | No | +| **Sigma2** (a,z₁,z₂) | GT,F,F | HVZK | `k₁,k₂` | No | +| **D₁L, D₁R** | GT | Yes | `round_d1[·] · HT` | No | +| **D₂L, D₂R** | GT | Yes | `round_d2[·] · HT` | No | +| **E₁β** | G₁ | No | — | No (public: MSM(Γ₁, s₂)) | +| **E₂β** | G₂ | No | — | No (public: MSM(Γ₂, s₁)) | +| **C₊, C₋** | GT | Yes | `round_c[·] · HT` | No | +| **E₁₊, E₁₋** | G₁ | Yes | `round_e1[·] · H₁` | No | +| **E₂₊, E₂₋** | G₂ | Yes | `round_e2[·] · H₂` | No | +| **E₁_final** | G₁ | Yes | `r_final1 · H₁` | No | +| **E₂_final** | G₂ | Yes | `r_final2 · H₂` | No | +| **SP p₁,p₂,q,r** | GT | Yes | `rp₁,rp₂,rq,rr` | No | +| **SP e₁,e₂** | G₁,G₂ | Yes | `d₁ + v₁·c` | No | +| **SP r₁,r₂,r₃** | F | Yes | Sigma response | No | + +--- + +## 9. Blinding Budget + +Per proof (with `σ` rounds): + +| Phase | Blinds Sampled | Count | +|-------|---------------|-------| +| VMV | `r_c, r_d2, r_e1, r_e2` | 4 | +| VMV (ZK) | `r_y` | 1 | +| Sigma1 | `k₁, k₂, k₃` | 3 | +| Sigma2 | `k₁, k₂` | 2 | +| Per round (×σ) | `round_d1[2], round_d2[2], round_c[2], round_e1[2], round_e2[2]` | 10σ | +| Final message | `r_final1, r_final2` | 2 | +| Scalar product | `sd₁, sd₂, rp₁, rp₂, rq, rr` | 6 | + +**Total**: `18 + 10σ` random field elements per proof. + +--- + +## 10. Conclusions + +### What IS hidden (ZK mode) + +1. **All polynomial coefficients** — no intermediate message reveals any + individual coefficient or coefficient relation beyond the public evaluation. + +2. **The evaluation value `y`** — hidden behind Pedersen commitments `E₂` and + `y_com`, with consistency proven via Sigma1. + +3. 
**Internal protocol state** — all round messages involving `v₁, v₂` + cross-products and MSMs are independently blinded. + +4. **The accumulated blinding** `r_c` — correctly tracked through all challenge + transformations and consumed by the scalar product proof. + +### What is NOT hidden + +1. **The evaluation point `z`** — passed as a public input to `verify()`. + `E₁β` and `E₂β` are computable from `z` + setup, so they don't leak + anything the verifier doesn't already know. In a committed-point protocol, + these would need an additional hiding layer. + +2. **Matrix dimensions `ν, σ`** — stored in the proof struct as `nu, sigma`. + These reveal the matrix layout used for the polynomial but not the + polynomial itself. + +### Overall Verdict + +The implementation achieves **computational HVZK in the Random Oracle Model** +when the `zk` feature is enabled. Every protocol message that encodes +polynomial-dependent information is masked by independent randomness, with one +exception (`E₁β/E₂β` which are public, not polynomial-dependent). The three +Sigma sub-protocols (Sigma1, Sigma2, scalar product) are all standard and HVZK. +The final message elements are independently blinded, preventing recovery of +the folded vectors `v₁[0], v₂[0]`. 
From cdc32ae398f9a1c87874dddadd4c7e93a3cdd664 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 13:13:00 -0500 Subject: [PATCH 09/16] refactor: get rid of rng arg --- Cargo.toml | 2 +- README.md | 16 ++--- benches/arkworks_proof.rs | 60 +++++++------------ examples/basic_e2e.rs | 14 ++--- examples/homomorphic.rs | 16 ++--- examples/homomorphic_mixed_sizes.rs | 21 +++---- examples/non_square.rs | 14 ++--- examples/zk_e2e.rs | 14 ++--- src/backends/arkworks/ark_cache.rs | 2 +- src/backends/arkworks/ark_field.rs | 5 +- src/backends/arkworks/ark_group.rs | 13 ++-- src/backends/arkworks/ark_poly.rs | 6 +- src/backends/arkworks/ark_setup.rs | 10 ++-- src/evaluation_proof.rs | 29 ++++----- src/lib.rs | 33 ++++------- src/mode.rs | 8 +-- src/primitives/arithmetic.rs | 5 +- src/primitives/poly.rs | 7 +-- src/reduce_and_fold.rs | 51 ++++++---------- src/setup.rs | 12 ++-- tests/arkworks/cache.rs | 32 ++++------ tests/arkworks/commitment.rs | 26 ++++---- tests/arkworks/evaluation.rs | 41 +++++-------- tests/arkworks/homomorphic.rs | 20 +++---- tests/arkworks/integration.rs | 50 ++++++---------- tests/arkworks/mod.rs | 10 +--- tests/arkworks/non_square.rs | 32 ++++------ tests/arkworks/serialization.rs | 24 +++----- tests/arkworks/setup.rs | 17 ++---- tests/arkworks/soundness.rs | 6 +- tests/arkworks/zk.rs | 92 +++++++++++------------------ tests/arkworks/zk_statistical.rs | 69 +++++----------------- 32 files changed, 273 insertions(+), 484 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1940eaf..0d49143 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,7 +53,7 @@ disk-persistence = [] [dependencies] thiserror = "2.0" -rand_core = "0.6" +rand_core = { version = "0.6", features = ["getrandom"] } dory-derive = { version = "0.2.0", path = "derive" } tracing = "0.1" diff --git a/README.md b/README.md index 01db1b1..4d7f0d5 100644 --- a/README.md +++ b/README.md @@ -98,33 +98,33 @@ This property enables efficient proof aggregation and batch verification. 
See `e ```rust use dory_pcs::{setup, prove, verify, Transparent}; use dory_pcs::backends::arkworks::{ - BN254, G1Routines, G2Routines, ArkworksPolynomial, Blake2bTranscript + BN254, G1Routines, G2Routines, ArkworksPolynomial, ArkFr, Blake2bTranscript }; +use dory_pcs::primitives::arithmetic::Field; +use dory_pcs::primitives::poly::Polynomial; fn main() -> Result<(), Box> { - let mut rng = rand::thread_rng(); - // 1. Generate setup for polynomials up to 2^10 coefficients let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); // 2. Create a polynomial with 256 coefficients (nu=4, sigma=4) - let coefficients: Vec<_> = (0..256).map(|_| rand::random()).collect(); + let coefficients: Vec = (0..256).map(|_| ArkFr::random()).collect(); let polynomial = ArkworksPolynomial::new(coefficients); // 3. Define evaluation point (length = nu + sigma = 8) - let point: Vec<_> = (0..8).map(|_| rand::random()).collect(); + let point: Vec = (0..8).map(|_| ArkFr::random()).collect(); let nu = 4; // log₂(rows) = 4 → 16 rows let sigma = 4; // log₂(cols) = 4 → 16 columns // 4. Commit to polynomial to get tier-2 commitment and row commitments let (tier_2, row_commitments, _) = polynomial - .commit::(nu, sigma, &prover_setup, &mut rng)?; + .commit::(nu, sigma, &prover_setup)?; // 5. 
Create evaluation proof using row commitments let mut prover_transcript = Blake2bTranscript::new(b"dory-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &polynomial, &point, row_commitments, diff --git a/benches/arkworks_proof.rs b/benches/arkworks_proof.rs index 2c6bdd5..b3bbaa4 100644 --- a/benches/arkworks_proof.rs +++ b/benches/arkworks_proof.rs @@ -19,8 +19,6 @@ use dory_pcs::mode::Transparent; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify}; -use rand::rngs::ThreadRng; -use rand::thread_rng; #[cfg(feature = "cache")] use dory_pcs::backends::arkworks::init_cache; @@ -31,10 +29,9 @@ fn setup_benchmark_data() -> ( dory_pcs::setup::ProverSetup, dory_pcs::setup::VerifierSetup, ) { - let mut rng = thread_rng(); let max_log_n = 26; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); // Initialize cache with setup generators for optimized pairings #[cfg(feature = "cache")] @@ -47,10 +44,10 @@ fn setup_benchmark_data() -> ( // Create polynomial with 2^26 coefficients (nu=13, sigma=13) let poly_size = 1 << 26; // 67,108,864 coefficients let num_vars = 26; - let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); (poly, point, prover_setup, verifier_setup) } @@ -62,12 +59,10 @@ fn bench_commitment(c: &mut Criterion) { c.bench_function("commitment_2^26_coefficients", |b| { b.iter(|| { - let mut rng = thread_rng(); - poly.commit::( + poly.commit::( black_box(nu), black_box(sigma), black_box(&prover_setup), - black_box(&mut 
rng), ) .unwrap() }) @@ -78,17 +73,15 @@ fn bench_prove(c: &mut Criterion) { let (poly, point, prover_setup, _) = setup_benchmark_data(); let nu = 13; let sigma = 13; - let mut rng = thread_rng(); let (_, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); c.bench_function("prove_2^26_coefficients", |b| { b.iter(|| { let mut transcript = Blake2bTranscript::new(b"dory-bench"); - let mut rng = thread_rng(); - prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( + prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( black_box(&poly), black_box(&point), black_box(tier_1.clone()), @@ -96,7 +89,6 @@ fn bench_prove(c: &mut Criterion) { black_box(sigma), black_box(&prover_setup), black_box(&mut transcript), - black_box(&mut rng), ) .unwrap() }) @@ -107,15 +99,13 @@ fn bench_verify(c: &mut Criterion) { let (poly, point, prover_setup, verifier_setup) = setup_benchmark_data(); let nu = 13; let sigma = 13; - let mut rng = thread_rng(); let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); - let mut rng = thread_rng(); - let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -123,7 +113,6 @@ fn bench_verify(c: &mut Criterion) { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -146,9 +135,8 @@ fn bench_verify(c: &mut Criterion) { } fn bench_end_to_end(c: &mut Criterion) { - let mut rng = thread_rng(); let max_log_n = 26; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); // Initialize cache once #[cfg(feature = "cache")] @@ -160,40 +148,36 @@ fn bench_end_to_end(c: &mut Criterion) { 
c.bench_function("end_to_end_2^26_coefficients", |b| { b.iter(|| { - let mut rng = thread_rng(); let nu = 13; let sigma = 13; let poly_size = 1 << 26; // 67,108,864 coefficients let num_vars = 26; // Create polynomial - let coefficients: Vec = - (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); // Commit let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); // Evaluate - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); // Prove let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); - let (proof, _) = - prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, ThreadRng>( - &poly, - &point, - tier_1, - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( + &poly, + &point, + tier_1, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); // Verify let mut verifier_transcript = Blake2bTranscript::new(b"dory-bench"); diff --git a/examples/basic_e2e.rs b/examples/basic_e2e.rs index 9815a4c..a1b6d6f 100644 --- a/examples/basic_e2e.rs +++ b/examples/basic_e2e.rs @@ -14,22 +14,19 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use rand::thread_rng; use tracing::info; fn main() -> Result<(), Box> { info!("Dory PCS - Basic End-to-End Example"); info!("====================================\n"); - let mut rng = thread_rng(); - // Step 1: Setup let max_log_n = 10; info!( "1. 
Generating transparent setup (max_log_n = {})...", max_log_n ); - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); info!(" ✓ Setup complete\n"); // Step 2: Create polynomial @@ -44,14 +41,14 @@ fn main() -> Result<(), Box> { info!(" Total coefficients: {}", poly_size); info!(" Number of variables: {}", num_vars); - let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); info!(" ✓ Polynomial created\n"); // Step 3: Commit info!("3. Computing polynomial commitment..."); let (tier_2, tier_1, _) = - poly.commit::(nu, sigma, &prover_setup, &mut rng)?; + poly.commit::(nu, sigma, &prover_setup)?; info!( " ✓ Tier-1 commitment: {} row commitments (G1)", tier_1.len() @@ -59,7 +56,7 @@ fn main() -> Result<(), Box> { info!(" ✓ Tier-2 commitment: final commitment (GT)\n"); // Step 4: Evaluation - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); info!("4. Evaluating polynomial at random point..."); info!(" ✓ Evaluation result computed\n"); @@ -67,7 +64,7 @@ fn main() -> Result<(), Box> { // Step 5: Prove info!("5. 
Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-basic-example"); - let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -75,7 +72,6 @@ fn main() -> Result<(), Box> { sigma, &prover_setup, &mut prover_transcript, - &mut rng, )?; info!(" ✓ Proof generated (logarithmic size)\n"); diff --git a/examples/homomorphic.rs b/examples/homomorphic.rs index 3f5cbb0..e1f40ae 100644 --- a/examples/homomorphic.rs +++ b/examples/homomorphic.rs @@ -14,22 +14,19 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use rand::thread_rng; use tracing::info; fn main() -> Result<(), Box> { info!("Dory PCS - Homomorphic Combination Example"); info!("===========================================\n"); - let mut rng = thread_rng(); - // Step 1: Setup let max_log_n = 10; info!( "1. 
Generating transparent setup (max_log_n = {})...", max_log_n ); - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); info!(" ✓ Setup complete\n"); // Parameters @@ -43,7 +40,7 @@ fn main() -> Result<(), Box> { info!(" Each polynomial: {} coefficients", poly_size); let polys: Vec = (0..num_polys) .map(|_| { - let coeffs: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coeffs: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); ArkworksPolynomial::new(coeffs) }) .collect(); @@ -54,7 +51,7 @@ fn main() -> Result<(), Box> { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup, &mut rng) + poly.commit::(nu, sigma, &prover_setup) .unwrap() }) .collect(); @@ -62,7 +59,7 @@ fn main() -> Result<(), Box> { // Step 4: Generate random coefficients for linear combination info!("4. Generating random combination coefficients..."); - let coeffs: Vec = (0..num_polys).map(|_| ArkFr::random(&mut rng)).collect(); + let coeffs: Vec = (0..num_polys).map(|_| ArkFr::random()).collect(); info!(" ✓ Coefficients: r₁, r₂, ..., r{}\n", num_polys); // Step 5: Homomorphically combine commitments @@ -121,7 +118,7 @@ fn main() -> Result<(), Box> { // Step 7: Evaluate and verify consistency info!("7. Verifying homomorphic property..."); - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = combined_poly.evaluate(&point); // Check that combined polynomial evaluation matches linear combination @@ -139,7 +136,7 @@ fn main() -> Result<(), Box> { // Step 8: Generate proof info!("8. 
Generating evaluation proof for combined polynomial..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-example"); - let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, @@ -147,7 +144,6 @@ fn main() -> Result<(), Box> { sigma, &prover_setup, &mut prover_transcript, - &mut rng, )?; info!(" ✓ Proof generated\n"); diff --git a/examples/homomorphic_mixed_sizes.rs b/examples/homomorphic_mixed_sizes.rs index 708c85a..6e6bd27 100644 --- a/examples/homomorphic_mixed_sizes.rs +++ b/examples/homomorphic_mixed_sizes.rs @@ -10,23 +10,21 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use rand::thread_rng; use tracing::info; fn main() -> Result<(), Box> { info!("Dory PCS - Mixed-size Homomorphic Combination Example"); - let mut rng = thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 4); + let (prover_setup, verifier_setup) = setup::(4); info!("Creating two polynomials with logical sizes 16 and 4..."); let mut coeffs_poly1 = vec![ArkFr::zero(); 16]; let mut coeffs_poly2 = vec![ArkFr::zero(); 4]; for coeff in coeffs_poly1.iter_mut() { - *coeff = ArkFr::random(&mut rng); + *coeff = ArkFr::random(); } for coeff in coeffs_poly2.iter_mut() { - *coeff = ArkFr::random(&mut rng); + *coeff = ArkFr::random(); } let poly1 = ArkworksPolynomial::new(coeffs_poly1.clone()); let poly2 = ArkworksPolynomial::new(coeffs_poly2.clone()); @@ -35,15 +33,15 @@ fn main() -> Result<(), Box> { info!("Poly2: {:?}", poly2); let commitment1 = poly1 - .commit::(2, 2, &prover_setup, &mut rng) + .commit::(2, 2, &prover_setup) .unwrap(); let commitment2 = poly2 - .commit::(1, 1, &prover_setup, &mut rng) + .commit::(1, 1, &prover_setup) .unwrap(); info!("✓ Commitments ready\n"); 
info!("Sampling random combination scalars r1, r2..."); - let coeff_scalars = [ArkFr::random(&mut rng), ArkFr::random(&mut rng)]; + let coeff_scalars = [ArkFr::random(), ArkFr::random()]; info!("Combining tier-2 commitments (GT)..."); let combined_tier2 = coeff_scalars[0] * commitment1.0 + coeff_scalars[1] * commitment2.0; @@ -84,7 +82,7 @@ fn main() -> Result<(), Box> { let padded_poly2 = ArkworksPolynomial::new(padded_poly2_coefficients); info!("Evaluating combined polynomial at a random point..."); - let point: Vec = (0..4).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..4).map(|_| ArkFr::random()).collect(); let evaluation = combined_poly.evaluate(&point); info!("Checking that evaluation matches r1·P1(x) + r2·P2(x)..."); @@ -102,7 +100,7 @@ fn main() -> Result<(), Box> { info!("Generating evaluation proof with combined commitment..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-mixed"); - let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, @@ -110,7 +108,6 @@ fn main() -> Result<(), Box> { 2, &prover_setup, &mut prover_transcript, - &mut rng, )?; info!("✓ Proof generated\n"); @@ -128,7 +125,7 @@ fn main() -> Result<(), Box> { info!("==========================================="); let padded_poly_commitment = padded_poly2 - .commit::(2, 2, &prover_setup, &mut rng) + .commit::(2, 2, &prover_setup) .unwrap(); assert_eq!(padded_poly_commitment.0, commitment2.0); info!("✓ Padded poly commitment matches original poly2 commitment"); diff --git a/examples/non_square.rs b/examples/non_square.rs index c6c83b6..7ba5226 100644 --- a/examples/non_square.rs +++ b/examples/non_square.rs @@ -12,22 +12,19 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use 
rand::thread_rng; use tracing::info; fn main() -> Result<(), Box> { info!("Dory PCS - Non-Square Matrix Example"); info!("=====================================\n"); - let mut rng = thread_rng(); - // Step 1: Setup let max_log_n = 10; info!( "1. Generating transparent setup (max_log_n = {})...", max_log_n ); - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); info!(" ✓ Setup complete\n"); // Step 2: Create polynomial with non-square matrix layout @@ -43,14 +40,14 @@ fn main() -> Result<(), Box> { info!(" Number of variables: {}", num_vars); info!(" Constraint: nu ({}) ≤ sigma ({})", nu, sigma); - let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); info!(" ✓ Polynomial created\n"); // Step 3: Commit info!("3. Computing polynomial commitment..."); let (tier_2, tier_1, _) = - poly.commit::(nu, sigma, &prover_setup, &mut rng)?; + poly.commit::(nu, sigma, &prover_setup)?; info!( " ✓ Tier-1 commitment: {} row commitments (G1)", tier_1.len() @@ -58,7 +55,7 @@ fn main() -> Result<(), Box> { info!(" ✓ Tier-2 commitment: final commitment (GT)\n"); // Step 4: Evaluation - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); info!("4. Evaluating polynomial at random point..."); info!(" ✓ Evaluation result computed\n"); @@ -66,7 +63,7 @@ fn main() -> Result<(), Box> { // Step 5: Prove info!("5. 
Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-non-square-example"); - let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -74,7 +71,6 @@ fn main() -> Result<(), Box> { sigma, &prover_setup, &mut prover_transcript, - &mut rng, )?; info!(" ✓ Proof generated (logarithmic size)\n"); diff --git a/examples/zk_e2e.rs b/examples/zk_e2e.rs index ac24acc..8ddfb6d 100644 --- a/examples/zk_e2e.rs +++ b/examples/zk_e2e.rs @@ -17,19 +17,16 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent, ZK}; -use rand::thread_rng; use tracing::info; fn main() -> Result<(), Box> { info!("Dory PCS - Zero-Knowledge End-to-End Example"); info!("==============================================\n"); - let mut rng = thread_rng(); - // Step 1: Setup (identical to transparent mode) let max_log_n = 10; info!("1. Generating setup (max_log_n = {})...", max_log_n); - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); info!(" Setup complete\n"); // Step 2: Create polynomial @@ -42,19 +39,19 @@ fn main() -> Result<(), Box> { info!(" Matrix layout: {}x{} (square)", 1 << nu, 1 << sigma); info!(" Total coefficients: {}", poly_size); - let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); info!(" Polynomial created\n"); // Step 3: Commit (identical to transparent mode) info!("3. 
Computing polynomial commitment..."); let (tier_2, tier_1, _) = - poly.commit::(nu, sigma, &prover_setup, &mut rng)?; + poly.commit::(nu, sigma, &prover_setup)?; info!(" Tier-1: {} row commitments", tier_1.len()); info!(" Tier-2: final GT commitment\n"); // Step 4: Evaluate - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); info!("4. Evaluated polynomial at random point\n"); @@ -64,7 +61,7 @@ fn main() -> Result<(), Box> { // generates additional sigma1, sigma2, and scalar-product sub-proofs. info!("5. Generating ZK evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-zk-example"); - let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, ZK>( &poly, &point, tier_1, @@ -72,7 +69,6 @@ fn main() -> Result<(), Box> { sigma, &prover_setup, &mut prover_transcript, - &mut rng, )?; info!(" Proof generated"); info!( diff --git a/src/backends/arkworks/ark_cache.rs b/src/backends/arkworks/ark_cache.rs index 1cfd8ac..f7b2331 100644 --- a/src/backends/arkworks/ark_cache.rs +++ b/src/backends/arkworks/ark_cache.rs @@ -44,7 +44,7 @@ static CACHE: RwLock>> = RwLock::new(None); /// use dory_pcs::backends::arkworks::{init_cache, BN254}; /// use dory_pcs::setup::ProverSetup; /// -/// let setup = ProverSetup::::new(&mut rng, max_log_n); +/// let setup = ProverSetup::::new(max_log_n); /// init_cache(&setup.g1_vec, &setup.g2_vec); /// ``` pub fn init_cache(g1_vec: &[ArkG1], g2_vec: &[ArkG2]) { diff --git a/src/backends/arkworks/ark_field.rs b/src/backends/arkworks/ark_field.rs index 7fdd57a..a0859d7 100644 --- a/src/backends/arkworks/ark_field.rs +++ b/src/backends/arkworks/ark_field.rs @@ -7,7 +7,6 @@ use ark_bn254::Fr; use ark_ff::{Field as ArkField, UniformRand, Zero as ArkZero}; use ark_serialize::{CanonicalDeserialize, 
CanonicalSerialize}; use ark_std::ops::{Add, Mul, Neg, Sub}; -use rand_core::RngCore; #[derive(Clone, Copy, PartialEq, Eq, Debug, CanonicalSerialize, CanonicalDeserialize)] pub struct ArkFr(pub Fr); @@ -41,8 +40,8 @@ impl Field for ArkFr { ArkField::inverse(&self.0).map(ArkFr) } - fn random(rng: &mut R) -> Self { - ArkFr(Fr::rand(rng)) + fn random() -> Self { + ArkFr(Fr::rand(&mut rand_core::OsRng)) } fn from_u64(val: u64) -> Self { diff --git a/src/backends/arkworks/ark_group.rs b/src/backends/arkworks/ark_group.rs index 812738a..00658b6 100644 --- a/src/backends/arkworks/ark_group.rs +++ b/src/backends/arkworks/ark_group.rs @@ -11,7 +11,6 @@ use ark_ec::{CurveGroup, VariableBaseMSM}; use ark_ff::{Field as ArkField, One, PrimeField, UniformRand, Zero as ArkZero}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::ops::{Add, Mul, Neg, Sub}; -use rand_core::RngCore; #[derive(Default, Clone, Copy, PartialEq, Eq, Debug, CanonicalSerialize, CanonicalDeserialize)] pub struct ArkG1(pub G1Projective); @@ -41,8 +40,8 @@ impl Group for ArkG1 { ArkG1(self.0 * k.0) } - fn random(rng: &mut R) -> Self { - ArkG1(G1Projective::rand(rng)) + fn random() -> Self { + ArkG1(G1Projective::rand(&mut rand_core::OsRng)) } } @@ -114,8 +113,8 @@ impl Group for ArkG2 { ArkG2(self.0 * k.0) } - fn random(rng: &mut R) -> Self { - ArkG2(G2Projective::rand(rng)) + fn random() -> Self { + ArkG2(G2Projective::rand(&mut rand_core::OsRng)) } } @@ -187,8 +186,8 @@ impl Group for ArkGT { ArkGT(self.0.pow(k.0.into_bigint())) } - fn random(rng: &mut R) -> Self { - ArkGT(Fq12::rand(rng)) + fn random() -> Self { + ArkGT(Fq12::rand(&mut rand_core::OsRng)) } } diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs index 8702132..81d1cfd 100644 --- a/src/backends/arkworks/ark_poly.rs +++ b/src/backends/arkworks/ark_poly.rs @@ -55,19 +55,17 @@ impl Polynomial for ArkworksPolynomial { #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit", 
fields(nu, sigma, num_rows = 1 << nu, num_cols = 1 << sigma))] #[allow(clippy::type_complexity)] - fn commit( + fn commit( &self, nu: usize, sigma: usize, setup: &ProverSetup, - rng: &mut R, ) -> Result<(E::GT, Vec, Option>), DoryError> where E: PairingCurve, Mo: Mode, M1: DoryRoutines, E::G1: Group, - R: rand_core::RngCore, { let expected_len = 1 << (nu + sigma); if self.coefficients.len() != expected_len { @@ -81,7 +79,7 @@ impl Polynomial for ArkworksPolynomial { let num_cols = 1 << sigma; let g1 = &setup.g1_vec[..num_cols]; - let blinds: Vec = (0..num_rows).map(|_| Mo::sample(rng)).collect(); + let blinds: Vec = (0..num_rows).map(|_| Mo::sample()).collect(); let row_commitments: Vec = (0..num_rows) .map(|i| { diff --git a/src/backends/arkworks/ark_setup.rs b/src/backends/arkworks/ark_setup.rs index ece2669..77aebbe 100644 --- a/src/backends/arkworks/ark_setup.rs +++ b/src/backends/arkworks/ark_setup.rs @@ -5,7 +5,6 @@ //! are in the `ark_serde` module. use crate::setup::{ProverSetup, VerifierSetup}; -use rand_core::RngCore; use std::ops::{Deref, DerefMut}; use super::BN254; @@ -33,16 +32,15 @@ impl ArkworksProverSetup { /// supporting polynomials up to 2^max_log_n coefficients arranged as n×n matrices. 
/// /// # Parameters - /// - `rng`: Random number generator /// - `max_log_n`: Maximum log₂ of polynomial size (for n×n matrix with n² = 2^max_log_n) - pub fn new(rng: &mut R, max_log_n: usize) -> Self { - Self(ProverSetup::new(rng, max_log_n)) + pub fn new(max_log_n: usize) -> Self { + Self(ProverSetup::new(max_log_n)) } /// Load prover setup from disk cache, or generate and cache if not available #[cfg(all(feature = "disk-persistence", not(target_arch = "wasm32")))] - pub fn new_from_urs(rng: &mut R, max_log_n: usize) -> Self { - let (prover_setup, _) = crate::setup::(rng, max_log_n); + pub fn new_from_urs(max_log_n: usize) -> Self { + let (prover_setup, _) = crate::setup::(max_log_n); Self(prover_setup) } diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index c8f4dd0..9fc5d5d 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -73,7 +73,7 @@ use crate::setup::{ProverSetup, VerifierSetup}; #[allow(clippy::type_complexity)] #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "create_evaluation_proof")] -pub fn create_evaluation_proof( +pub fn create_evaluation_proof( polynomial: &P, point: &[F], row_commitments: Option>, @@ -81,7 +81,6 @@ pub fn create_evaluation_proof( sigma: usize, setup: &ProverSetup, transcript: &mut T, - rng: &mut R, ) -> Result<(DoryProof, Option), DoryError> where F: Field, @@ -94,7 +93,6 @@ where T: Transcript, P: MultilinearLagrange, Mo: Mode, - R: rand_core::RngCore, { if point.len() != nu + sigma { return Err(DoryError::InvalidPointDimension { @@ -114,8 +112,7 @@ where let row_commitments = if let Some(rc) = row_commitments { rc } else { - let (_commitment, rc, _blinds) = - polynomial.commit::(nu, sigma, setup, rng)?; + let (_commitment, rc, _blinds) = polynomial.commit::(nu, sigma, setup)?; rc }; @@ -128,12 +125,8 @@ where } // Sample VMV blinds (zero in Transparent, random in ZK) - let (r_c, r_d2, r_e1, r_e2): (F, F, F, F) = ( - Mo::sample(rng), - Mo::sample(rng), - 
Mo::sample(rng), - Mo::sample(rng), - ); + let (r_c, r_d2, r_e1, r_e2): (F, F, F, F) = + (Mo::sample(), Mo::sample(), Mo::sample(), Mo::sample()); let g2_fin = &setup.g2_vec[0]; @@ -161,13 +154,13 @@ where let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2, zk_r_y) = if Mo::BLINDING { use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; let y = polynomial.evaluate(point); - let r_y: F = Mo::sample(rng); + let r_y: F = Mo::sample(); let e2 = Mo::mask(g2_fin.scale(&y), &setup.h2, &r_e2); let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); transcript.append_serde(b"vmv_e2", &e2); transcript.append_serde(b"vmv_y_com", &y_com); - let s1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript, rng); - let s2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript, rng); + let s1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript); + let s2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript); (Some(e2), Some(y_com), Some(s1), Some(s2), Some(r_y)) } else { (None, None, None, None, None) @@ -198,7 +191,7 @@ where let mut second_messages = Vec::with_capacity(num_rounds); for _round in 0..num_rounds { - let first_msg = prover_state.compute_first_message::(rng); + let first_msg = prover_state.compute_first_message::(); transcript.append_serde(b"d1_left", &first_msg.d1_left); transcript.append_serde(b"d1_right", &first_msg.d1_right); @@ -211,7 +204,7 @@ where prover_state.apply_first_challenge::(&beta); first_messages.push(first_msg); - let second_msg = prover_state.compute_second_message::(rng); + let second_msg = prover_state.compute_second_message::(); transcript.append_serde(b"c_plus", &second_msg.c_plus); transcript.append_serde(b"c_minus", &second_msg.c_minus); @@ -229,12 +222,12 @@ where #[cfg(feature = "zk")] let scalar_product_proof = if Mo::BLINDING { - Some(prover_state.scalar_product_proof(transcript, rng)) + Some(prover_state.scalar_product_proof(transcript)) } else { None }; - let final_message = 
prover_state.compute_final_message::(&gamma, rng); + let final_message = prover_state.compute_final_message::(&gamma); transcript.append_serde(b"final_e1", &final_message.e1); transcript.append_serde(b"final_e2", &final_message.e2); diff --git a/src/lib.rs b/src/lib.rs index 688f07d..bcd2ac6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -43,15 +43,15 @@ //! use dory_pcs::backends::arkworks::{BN254, G1Routines, G2Routines, Blake2bTranscript}; //! //! // 1. Generate setup (automatically loads from/saves to disk) -//! let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); +//! let (prover_setup, verifier_setup) = setup::(max_log_n); //! //! // 2. Commit to polynomial //! let (tier_2_commitment, tier_1_commitments, _blinds) = polynomial -//! .commit::(nu, sigma, &prover_setup, &mut rng)?; +//! .commit::(nu, sigma, &prover_setup)?; //! //! // 3. Generate evaluation proof //! let mut prover_transcript = Blake2bTranscript::new(b"domain-separation"); -//! let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( +//! let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( //! &polynomial, &point, tier_1_commitments, nu, sigma, //! &prover_setup, &mut prover_transcript //! )?; @@ -70,7 +70,7 @@ //! ```ignore //! use dory_pcs::backends::arkworks::init_cache; //! -//! let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); +//! let (prover_setup, verifier_setup) = setup::(max_log_n); //! init_cache(&prover_setup.g1_vec, &prover_setup.g2_vec); //! // Subsequent operations will automatically use cached prepared points //! 
``` @@ -132,7 +132,6 @@ pub use setup::{ProverSetup, VerifierSetup}; /// - Windows: `{FOLDERID_LocalAppData}\dory\dory_{max_log_n}.urs` /// /// # Parameters -/// - `rng`: Random number generator for setup generation (used only if not found on disk) /// - `max_log_n`: Maximum log₂ of polynomial size /// /// # Returns @@ -144,10 +143,7 @@ pub use setup::{ProverSetup, VerifierSetup}; /// /// # Panics /// Panics if the setup file exists on disk but is corrupted or cannot be deserialized. -pub fn setup( - rng: &mut R, - max_log_n: usize, -) -> (ProverSetup, VerifierSetup) +pub fn setup(max_log_n: usize) -> (ProverSetup, VerifierSetup) where ProverSetup: DorySerialize + DoryDeserialize, VerifierSetup: DorySerialize + DoryDeserialize, @@ -172,7 +168,7 @@ where "Setup not found on disk, generating new setup for max_log_n={}", max_log_n ); - let prover_setup = ProverSetup::new(rng, max_log_n); + let prover_setup = ProverSetup::new(max_log_n); let verifier_setup = prover_setup.to_verifier_setup(); // Save to disk @@ -185,7 +181,7 @@ where { tracing::info!("Generating new setup for max_log_n={}", max_log_n); - let prover_setup = ProverSetup::new(rng, max_log_n); + let prover_setup = ProverSetup::new(max_log_n); let verifier_setup = prover_setup.to_verifier_setup(); (prover_setup, verifier_setup) @@ -202,7 +198,6 @@ where /// or when you suspect the saved setup file is corrupted). /// /// # Parameters -/// - `rng`: Random number generator for setup generation /// - `max_log_n`: Maximum log₂ of polynomial size /// /// # Returns @@ -211,17 +206,14 @@ where /// # Availability /// This function is only available when the `disk-persistence` feature is enabled. 
#[cfg(all(feature = "disk-persistence", not(target_arch = "wasm32")))] -pub fn generate_urs( - rng: &mut R, - max_log_n: usize, -) -> (ProverSetup, VerifierSetup) +pub fn generate_urs(max_log_n: usize) -> (ProverSetup, VerifierSetup) where ProverSetup: DorySerialize + DoryDeserialize, VerifierSetup: DorySerialize + DoryDeserialize, { tracing::info!("Force-generating new setup for max_log_n={}", max_log_n); - let prover_setup = ProverSetup::new(rng, max_log_n); + let prover_setup = ProverSetup::new(max_log_n); let verifier_setup = prover_setup.to_verifier_setup(); // Overwrites existing @@ -270,7 +262,7 @@ where #[allow(clippy::type_complexity)] #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "prove")] -pub fn prove( +pub fn prove( polynomial: &P, point: &[F], row_commitments: Vec, @@ -278,7 +270,6 @@ pub fn prove( sigma: usize, setup: &ProverSetup, transcript: &mut T, - rng: &mut R, ) -> Result<(DoryProof, Option), DoryError> where F: Field, @@ -291,9 +282,8 @@ where P: MultilinearLagrange, T: primitives::transcript::Transcript, Mo: Mode, - R: rand_core::RngCore, { - evaluation_proof::create_evaluation_proof::( + evaluation_proof::create_evaluation_proof::( polynomial, point, Some(row_commitments), @@ -301,7 +291,6 @@ where sigma, setup, transcript, - rng, ) } diff --git a/src/mode.rs b/src/mode.rs index ece2678..9ebe95a 100644 --- a/src/mode.rs +++ b/src/mode.rs @@ -6,7 +6,7 @@ pub trait Mode: 'static { /// Whether this mode produces blinding values that callers must retain. const BLINDING: bool; /// Sample a blinding scalar: zero in Transparent mode, random in ZK mode. - fn sample(rng: &mut R) -> F; + fn sample() -> F; /// Mask a group element: identity in Transparent mode, `value + base * blind` in ZK mode. 
fn mask(value: G, base: &G, blind: &G::Scalar) -> G; } @@ -15,7 +15,7 @@ pub trait Mode: 'static { pub struct Transparent; impl Mode for Transparent { const BLINDING: bool = false; - fn sample(_rng: &mut R) -> F { + fn sample() -> F { F::zero() } fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { @@ -29,8 +29,8 @@ pub struct ZK; #[cfg(feature = "zk")] impl Mode for ZK { const BLINDING: bool = true; - fn sample(rng: &mut R) -> F { - F::random(rng) + fn sample() -> F { + F::random() } fn mask(value: G, base: &G, blind: &G::Scalar) -> G { value + base.scale(blind) diff --git a/src/primitives/arithmetic.rs b/src/primitives/arithmetic.rs index 94dc54c..494682f 100644 --- a/src/primitives/arithmetic.rs +++ b/src/primitives/arithmetic.rs @@ -1,7 +1,6 @@ #![allow(missing_docs)] use super::{DoryDeserialize, DorySerialize}; -use rand_core::RngCore; pub trait Field: Sized @@ -30,7 +29,7 @@ pub trait Field: fn inv(self) -> Option; - fn random(rng: &mut R) -> Self; + fn random() -> Self; fn from_u64(val: u64) -> Self; fn from_i64(val: i64) -> Self; @@ -60,7 +59,7 @@ pub trait Group: fn neg(&self) -> Self; fn scale(&self, k: &Self::Scalar) -> Self; - fn random(rng: &mut R) -> Self; + fn random() -> Self; } pub trait PairingCurve: Clone { diff --git a/src/primitives/poly.rs b/src/primitives/poly.rs index b178748..1f1b951 100644 --- a/src/primitives/poly.rs +++ b/src/primitives/poly.rs @@ -74,7 +74,6 @@ pub trait Polynomial { /// - `nu`: Log₂ of number of rows /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup containing generators - /// - `rng`: Random number generator (unused in Transparent mode) /// /// # Returns /// `(commitment, row_commitments, blinds)` where: @@ -85,19 +84,17 @@ pub trait Polynomial { /// # Errors /// Returns error if coefficient length doesn't match 2^(nu + sigma) or if setup is insufficient. 
#[allow(clippy::type_complexity)] - fn commit( + fn commit( &self, nu: usize, sigma: usize, setup: &ProverSetup, - rng: &mut R, ) -> Result<(E::GT, Vec, Option>), DoryError> where E: PairingCurve, Mo: Mode, M1: DoryRoutines, - E::G1: Group, - R: rand_core::RngCore; + E::G1: Group; } /// Compute multilinear Lagrange basis evaluations at a point diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index 3f62a7b..a53eecf 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -183,14 +183,10 @@ where /// /// Computes D1L, D1R, D2L, D2R, E1β, E2β based on current state. #[tracing::instrument(skip_all, name = "DoryProverState::compute_first_message")] - pub fn compute_first_message( - &mut self, - rng: &mut R, - ) -> FirstReduceMessage + pub fn compute_first_message(&mut self) -> FirstReduceMessage where M1: DoryRoutines, M2: DoryRoutines, - R: rand_core::RngCore, { assert!( self.num_rounds > 0, @@ -208,8 +204,8 @@ where let g2_prime = &self.setup.g2_vec[..n2]; // Sample round blinds (zero in Transparent mode) - self.round_d1 = [M::sample(rng), M::sample(rng)]; - self.round_d2 = [M::sample(rng), M::sample(rng)]; + self.round_d1 = [M::sample(), M::sample()]; + self.round_d2 = [M::sample(), M::sample()]; // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ let ht = &self.setup.ht; @@ -288,14 +284,10 @@ where /// /// Computes C+, C-, E1+, E1-, E2+, E2- based on current state. 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_second_message")] - pub fn compute_second_message( - &mut self, - rng: &mut R, - ) -> SecondReduceMessage + pub fn compute_second_message(&mut self) -> SecondReduceMessage where M1: DoryRoutines, M2: DoryRoutines, - R: rand_core::RngCore, { let n2 = 1 << (self.num_rounds - 1); // n/2 @@ -306,9 +298,9 @@ where let (s2_l, s2_r) = self.s2.split_at(n2); // Sample round blinds (zero in Transparent mode) - self.round_c = [M::sample(rng), M::sample(rng)]; - self.round_e1 = [M::sample(rng), M::sample(rng)]; - self.round_e2 = [M::sample(rng), M::sample(rng)]; + self.round_c = [M::sample(), M::sample()]; + self.round_e1 = [M::sample(), M::sample()]; + self.round_e2 = [M::sample(), M::sample()]; // C₊ = ⟨v₁L, v₂R⟩, C₋ = ⟨v₁R, v₂L⟩ let ht = &self.setup.ht; @@ -384,15 +376,13 @@ where /// accumulated `r_c` — they only add entropy to the Fiat-Shamir transcript /// from which the `d` challenge is derived. #[tracing::instrument(skip_all, name = "DoryProverState::compute_final_message")] - pub fn compute_final_message( + pub fn compute_final_message( &mut self, gamma: &::Scalar, - rng: &mut R, ) -> ScalarProductMessage where M1: DoryRoutines, M2: DoryRoutines, - R: rand_core::RngCore, { debug_assert_eq!(self.num_rounds, 0, "num_rounds must be 0 for final message"); debug_assert_eq!(self.v1.len(), 1, "v1 must have length 1"); @@ -401,8 +391,8 @@ where let gamma_inv = (*gamma).inv().expect("gamma must be invertible"); // Sample independent blinds for the final message (zero in Transparent mode). - let r_final1: ::Scalar = M::sample(rng); - let r_final2: ::Scalar = M::sample(rng); + let r_final1: ::Scalar = M::sample(); + let r_final2: ::Scalar = M::sample(); // Apply fold-scalars transform with blinding: // E₁ = v₁ + (γ·s₁ + r_final1)·H₁ @@ -421,15 +411,14 @@ where /// Generate ZK scalar product proof. Must be called BEFORE `compute_final_message`. 
#[cfg(feature = "zk")] - pub fn scalar_product_proof, R: rand_core::RngCore>( + pub fn scalar_product_proof>( &self, transcript: &mut T, - rng: &mut R, ) -> ScalarProductProof, E::GT> { let (v1, v2) = (self.v1[0], self.v2[0]); let (g1, g2) = (self.setup.g1_vec[0], self.setup.g2_vec[0]); let ht = &self.setup.ht; - let mut r = || -> Scalar { Field::random(rng) }; + let r = || -> Scalar { Field::random() }; let (sd1, sd2) = (r(), r()); let (d1, d2) = (g1.scale(&sd1), g2.scale(&sd2)); let (rp1, rp2, rq, rr) = (r(), r(), r(), r()); @@ -462,26 +451,24 @@ where /// Generate Sigma1 proof: proves knowledge of (y, rE2, ry). #[cfg(feature = "zk")] -pub fn generate_sigma1_proof( +pub fn generate_sigma1_proof( y: &Scalar, r_e2: &Scalar, r_y: &Scalar, setup: &ProverSetup, transcript: &mut T, - rng: &mut R, ) -> Sigma1Proof> where E: PairingCurve, T: Transcript, - R: rand_core::RngCore, Scalar: Field, E::G2: Group>, { let (g2_fin, g1_fin) = (&setup.g2_vec[0], &setup.g1_vec[0]); let (k1, k2, k3) = ( - Scalar::::random(rng), - Scalar::::random(rng), - Scalar::::random(rng), + Scalar::::random(), + Scalar::::random(), + Scalar::::random(), ); let a1 = g2_fin.scale(&k1) + setup.h2.scale(&k2); let a2 = g1_fin.scale(&k1) + setup.h1.scale(&k3); @@ -524,22 +511,20 @@ where /// Generate Sigma2 proof: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2). 
#[cfg(feature = "zk")] -pub fn generate_sigma2_proof( +pub fn generate_sigma2_proof( t1: &Scalar, t2: &Scalar, setup: &ProverSetup, transcript: &mut T, - rng: &mut R, ) -> Sigma2Proof, E::GT> where E: PairingCurve, T: Transcript, - R: rand_core::RngCore, Scalar: Field, E::G2: Group>, E::GT: Group>, { - let (k1, k2) = (Scalar::::random(rng), Scalar::::random(rng)); + let (k1, k2) = (Scalar::::random(), Scalar::::random()); let a = E::pair( &setup.h1, &(setup.g2_vec[0].scale(&k1) + setup.h2.scale(&k2)), diff --git a/src/setup.rs b/src/setup.rs index 6883f61..0012c79 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -6,7 +6,6 @@ use crate::primitives::arithmetic::{Group, PairingCurve}; use crate::primitives::serialization::{DoryDeserialize, DorySerialize}; -use rand_core::RngCore; #[cfg(all(feature = "disk-persistence", not(target_arch = "wasm32")))] use std::fs::{self, File}; @@ -90,22 +89,21 @@ impl ProverSetup { /// supporting polynomials up to 2^max_log_n coefficients arranged as n×n matrices. 
/// /// # Parameters - /// - `rng`: Random number generator /// - `max_log_n`: Maximum log₂ of polynomial size (for n×n matrix with n² = 2^max_log_n) /// /// # Returns /// A new `ProverSetup` with randomly generated parameters - pub fn new(rng: &mut R, max_log_n: usize) -> Self { + pub fn new(max_log_n: usize) -> Self { // For square matrices: n = 2^((max_log_n+1)/2) let n = 1 << max_log_n.div_ceil(2); // Generate n random G1 generators (Γ₁) - let g1_vec: Vec = (0..n).map(|_| E::G1::random(rng)).collect(); + let g1_vec: Vec = (0..n).map(|_| E::G1::random()).collect(); // Generate n random G2 generators (Γ₂) - let g2_vec: Vec = (0..n).map(|_| E::G2::random(rng)).collect(); + let g2_vec: Vec = (0..n).map(|_| E::G2::random()).collect(); // Generate blinding generators - let h1 = E::G1::random(rng); - let h2 = E::G2::random(rng); + let h1 = E::G1::random(); + let h2 = E::G2::random(); // Precompute e(h₁, h₂) let ht = E::pair(&h1, &h2); diff --git a/tests/arkworks/cache.rs b/tests/arkworks/cache.rs index a3bbdb6..e66f866 100644 --- a/tests/arkworks/cache.rs +++ b/tests/arkworks/cache.rs @@ -1,17 +1,15 @@ use dory_pcs::backends::arkworks::{ArkG1, ArkG2, ArkGT, BN254}; use dory_pcs::primitives::arithmetic::{Group, PairingCurve}; -use rand::thread_rng; #[cfg(feature = "cache")] use dory_pcs::backends::arkworks::ark_cache; #[test] fn multi_pair_correctness() { - let mut rng = thread_rng(); let n = 10; - let ps: Vec = (0..n).map(|_| ArkG1::random(&mut rng)).collect(); - let qs: Vec = (0..n).map(|_| ArkG2::random(&mut rng)).collect(); + let ps: Vec = (0..n).map(|_| ArkG1::random()).collect(); + let qs: Vec = (0..n).map(|_| ArkG2::random()).collect(); let result = BN254::multi_pair(&ps, &qs); @@ -35,10 +33,8 @@ fn multi_pair_empty() { #[test] #[should_panic(expected = "multi_pair requires equal length vectors")] fn multi_pair_length_mismatch() { - let mut rng = thread_rng(); - - let ps: Vec = (0..5).map(|_| ArkG1::random(&mut rng)).collect(); - let qs: Vec = (0..3).map(|_| 
ArkG2::random(&mut rng)).collect(); + let ps: Vec = (0..5).map(|_| ArkG1::random()).collect(); + let qs: Vec = (0..3).map(|_| ArkG2::random()).collect(); BN254::multi_pair(&ps, &qs); } @@ -46,9 +42,8 @@ fn multi_pair_length_mismatch() { #[cfg(feature = "cache")] #[test] fn cache_initialization() { - let mut rng = thread_rng(); - let g1_vec: Vec = (0..10).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_vec: Vec = (0..10).map(|_| ArkG2::random(&mut rng)).collect(); + let g1_vec: Vec = (0..10).map(|_| ArkG1::random()).collect(); + let g2_vec: Vec = (0..10).map(|_| ArkG2::random()).collect(); ark_cache::init_cache(&g1_vec, &g2_vec); @@ -61,11 +56,9 @@ fn cache_initialization() { #[cfg(feature = "cache")] #[test] fn cache_smart_reinit() { - let mut rng = thread_rng(); - // Initialize with small size - let g1_small: Vec = (0..5).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_small: Vec = (0..5).map(|_| ArkG2::random(&mut rng)).collect(); + let g1_small: Vec = (0..5).map(|_| ArkG1::random()).collect(); + let g2_small: Vec = (0..5).map(|_| ArkG2::random()).collect(); ark_cache::init_cache(&g1_small, &g2_small); let cache = ark_cache::get_prepared_cache().unwrap(); @@ -77,8 +70,8 @@ fn cache_smart_reinit() { assert_eq!(cache.g1_prepared.len(), small_len); // Re-init with larger size — should replace cache - let g1_large: Vec = (0..20).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_large: Vec = (0..20).map(|_| ArkG2::random(&mut rng)).collect(); + let g1_large: Vec = (0..20).map(|_| ArkG1::random()).collect(); + let g2_large: Vec = (0..20).map(|_| ArkG2::random()).collect(); ark_cache::init_cache(&g1_large, &g2_large); let cache = ark_cache::get_prepared_cache().unwrap(); @@ -89,11 +82,10 @@ fn cache_smart_reinit() { #[cfg(feature = "cache")] #[test] fn multi_pair_with_cache_optimization() { - let mut rng = thread_rng(); let n = 20; - let g1_vec: Vec = (0..n).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_vec: Vec = (0..n).map(|_| ArkG2::random(&mut 
rng)).collect(); + let g1_vec: Vec = (0..n).map(|_| ArkG1::random()).collect(); + let g2_vec: Vec = (0..n).map(|_| ArkG2::random()).collect(); if !ark_cache::is_cached() { ark_cache::init_cache(&g1_vec, &g2_vec); diff --git a/tests/arkworks/commitment.rs b/tests/arkworks/commitment.rs index aef339b..b7aa500 100644 --- a/tests/arkworks/commitment.rs +++ b/tests/arkworks/commitment.rs @@ -6,14 +6,13 @@ use dory_pcs::Transparent; #[test] fn test_commit_small_polynomial() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let poly = random_polynomial(16); let nu = 2; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup, &mut rng); + let result = poly.commit::(nu, sigma, &setup); assert!(result.is_ok()); let (_commitment, row_commitments, _) = result.unwrap(); @@ -22,14 +21,13 @@ fn test_commit_small_polynomial() { #[test] fn test_commit_constant_polynomial() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let poly = constant_polynomial(42, 4); let nu = 2; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup, &mut rng); + let result = poly.commit::(nu, sigma, &setup); assert!(result.is_ok()); let (_commitment, row_commitments, _) = result.unwrap(); @@ -38,38 +36,35 @@ fn test_commit_constant_polynomial() { #[test] fn test_commit_different_sizes() { - let mut rng = rand::thread_rng(); let setup = test_setup(8); let poly_4 = random_polynomial(4); - let result = poly_4.commit::(1, 1, &setup, &mut rng); + let result = poly_4.commit::(1, 1, &setup); assert!(result.is_ok()); let poly_16 = random_polynomial(16); - let result = poly_16.commit::(2, 2, &setup, &mut rng); + let result = poly_16.commit::(2, 2, &setup); assert!(result.is_ok()); let poly_64 = random_polynomial(64); - let result = poly_64.commit::(3, 3, &setup, &mut rng); + let result = poly_64.commit::(3, 3, &setup); assert!(result.is_ok()); } #[test] fn test_commit_invalid_size() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let poly = 
random_polynomial(16); let nu = 3; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup, &mut rng); + let result = poly.commit::(nu, sigma, &setup); assert!(result.is_err()); } #[test] fn test_commit_deterministic() { - let mut rng = rand::thread_rng(); let setup = test_setup(6); let coefficients: Vec = (0..16).map(|i| ArkFr::from_u64(i as u64)).collect(); @@ -77,10 +72,10 @@ fn test_commit_deterministic() { let poly2 = ArkworksPolynomial::new(coefficients); let (comm1, _, _) = poly1 - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); let (comm2, _, _) = poly2 - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); assert_eq!(comm1, comm2); @@ -88,17 +83,16 @@ fn test_commit_deterministic() { #[test] fn test_commit_different_polynomials() { - let mut rng = rand::thread_rng(); let setup = test_setup(6); let poly1 = random_polynomial(16); let poly2 = random_polynomial(16); let (comm1, _, _) = poly1 - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); let (comm2, _, _) = poly2 - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); assert_ne!(comm1, comm2); diff --git a/tests/arkworks/evaluation.rs b/tests/arkworks/evaluation.rs index 85a996e..839c016 100644 --- a/tests/arkworks/evaluation.rs +++ b/tests/arkworks/evaluation.rs @@ -6,7 +6,6 @@ use dory_pcs::{prove, verify, Transparent}; #[test] fn test_evaluation_proof_small() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -17,11 +16,11 @@ fn test_evaluation_proof_small() { let sigma = 2; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &setup, &mut rng) + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -29,7 +28,6 @@ fn 
test_evaluation_proof_small() { sigma, &setup, &mut prover_transcript, - &mut rng, ); assert!(result.is_ok()); @@ -51,7 +49,6 @@ fn test_evaluation_proof_small() { #[test] fn test_evaluation_proof_with_precomputed_commitment() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -62,11 +59,11 @@ fn test_evaluation_proof_with_precomputed_commitment() { let sigma = 2; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &setup, &mut rng) + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -74,7 +71,6 @@ fn test_evaluation_proof_with_precomputed_commitment() { sigma, &setup, &mut prover_transcript, - &mut rng, ); assert!(result.is_ok()); @@ -96,7 +92,6 @@ fn test_evaluation_proof_with_precomputed_commitment() { #[test] fn test_evaluation_proof_constant_polynomial() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -110,11 +105,11 @@ fn test_evaluation_proof_constant_polynomial() { assert_eq!(expected_eval, ArkFr::from_u64(7)); let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &setup, &mut rng) + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -122,7 +117,6 @@ fn test_evaluation_proof_constant_polynomial() { sigma, &setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -144,7 +138,6 @@ fn test_evaluation_proof_constant_polynomial() { #[test] fn test_evaluation_proof_wrong_evaluation_fails() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let 
verifier_setup = setup.to_verifier_setup(); @@ -155,11 +148,11 @@ fn test_evaluation_proof_wrong_evaluation_fails() { let sigma = 2; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &setup, &mut rng) + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -167,7 +160,6 @@ fn test_evaluation_proof_wrong_evaluation_fails() { sigma, &setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -189,7 +181,6 @@ fn test_evaluation_proof_wrong_evaluation_fails() { #[test] fn test_evaluation_proof_different_sizes() { - let mut rng = rand::thread_rng(); { let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -198,11 +189,11 @@ fn test_evaluation_proof_different_sizes() { let point = random_point(2); let (tier_2, tier_1, _) = poly - .commit::(1, 1, &setup, &mut rng) + .commit::(1, 1, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -210,7 +201,6 @@ fn test_evaluation_proof_different_sizes() { 1, &setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -235,11 +225,11 @@ fn test_evaluation_proof_different_sizes() { let point = random_point(6); let (tier_2, tier_1, _) = poly - .commit::(3, 3, &setup, &mut rng) + .commit::(3, 3, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -247,7 +237,6 @@ fn 
test_evaluation_proof_different_sizes() { 3, &setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -267,7 +256,6 @@ fn test_evaluation_proof_different_sizes() { #[test] fn test_multiple_evaluations_same_commitment() { - let mut rng = rand::thread_rng(); let setup = test_setup(4); let verifier_setup = setup.to_verifier_setup(); @@ -276,14 +264,14 @@ fn test_multiple_evaluations_same_commitment() { let sigma = 2; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &setup, &mut rng) + .commit::(nu, sigma, &setup) .unwrap(); for _ in 0..3 { let point = random_point(4); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1.clone(), @@ -291,7 +279,6 @@ fn test_multiple_evaluations_same_commitment() { sigma, &setup, &mut prover_transcript, - &mut rng, ) .unwrap(); diff --git a/tests/arkworks/homomorphic.rs b/tests/arkworks/homomorphic.rs index 43ab7e2..454c405 100644 --- a/tests/arkworks/homomorphic.rs +++ b/tests/arkworks/homomorphic.rs @@ -8,9 +8,8 @@ use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_homomorphic_combination_e2e() { - let mut rng = rand::thread_rng(); let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); let nu = 4; let sigma = 4; @@ -22,12 +21,12 @@ fn test_homomorphic_combination_e2e() { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup, &mut rng) + poly.commit::(nu, sigma, &prover_setup) .unwrap() }) .collect(); - let coeffs: Vec = (0..5).map(|_| ArkFr::random(&mut rng)).collect(); + let coeffs: Vec = (0..5).map(|_| ArkFr::random()).collect(); // Homomorphically combine commitments #[allow(clippy::op_ref)] @@ -87,7 +86,7 @@ fn 
test_homomorphic_combination_e2e() { // Create evaluation proof using combined commitment let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, @@ -95,7 +94,6 @@ fn test_homomorphic_combination_e2e() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -117,8 +115,7 @@ fn test_homomorphic_combination_e2e() { #[test] fn test_homomorphic_combination_small() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + let (prover_setup, verifier_setup) = setup::(6); let nu = 2; let sigma = 2; @@ -130,12 +127,12 @@ fn test_homomorphic_combination_small() { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup, &mut rng) + poly.commit::(nu, sigma, &prover_setup) .unwrap() }) .collect(); - let coeffs: Vec = (0..5).map(|_| ArkFr::random(&mut rng)).collect(); + let coeffs: Vec = (0..5).map(|_| ArkFr::random()).collect(); #[allow(clippy::op_ref)] let mut combined_tier2 = coeffs[0] * &commitments[0].0; for i in 1..5 { @@ -178,7 +175,7 @@ fn test_homomorphic_combination_small() { let evaluation = combined_poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, @@ -186,7 +183,6 @@ fn test_homomorphic_combination_small() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); diff --git a/tests/arkworks/integration.rs b/tests/arkworks/integration.rs index 66c9fd5..5a76f68 100644 --- a/tests/arkworks/integration.rs +++ b/tests/arkworks/integration.rs @@ -7,24 +7,23 @@ use dory_pcs::{prove, setup, verify, Transparent}; 
#[test] fn test_full_workflow() { - let mut rng = rand::thread_rng(); let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); let poly = random_polynomial(256); let nu = 4; let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(8); let expected_evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -32,7 +31,6 @@ fn test_full_workflow() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -53,10 +51,9 @@ fn test_full_workflow() { #[test] fn test_workflow_without_precommitment() { - let mut rng = rand::thread_rng(); let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); let poly = random_polynomial(256); let point = random_point(8); @@ -64,11 +61,11 @@ fn test_workflow_without_precommitment() { let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -76,7 +73,6 @@ fn test_workflow_without_precommitment() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -96,22 +92,21 @@ fn test_workflow_without_precommitment() { #[test] fn test_batched_proofs() { - let mut rng = 
rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let poly = random_polynomial(256); let nu = 4; let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); for i in 0..5 { let point = random_point(8); let mut prover_transcript = Blake2bTranscript::new(format!("test-{i}").as_bytes()); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1.clone(), @@ -119,7 +114,6 @@ fn test_batched_proofs() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -140,8 +134,7 @@ fn test_batched_proofs() { #[test] fn test_linear_polynomial() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let coefficients: Vec = (0..256).map(|i| ArkFr::from_u64(i as u64)).collect(); let poly = ArkworksPolynomial::new(coefficients); @@ -161,11 +154,11 @@ fn test_linear_polynomial() { let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -173,7 +166,6 @@ fn test_linear_polynomial() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -196,8 +188,7 @@ fn test_linear_polynomial() { #[test] fn test_zero_polynomial() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let poly 
= constant_polynomial(0, 8); let point = random_point(8); @@ -206,11 +197,11 @@ fn test_zero_polynomial() { let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -218,7 +209,6 @@ fn test_zero_polynomial() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -240,8 +230,7 @@ fn test_zero_polynomial() { #[test] fn test_soundness_wrong_commitment() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let poly1 = random_polynomial(256); let poly2 = random_polynomial(256); @@ -251,15 +240,15 @@ fn test_soundness_wrong_commitment() { let sigma = 4; let (commitment1, _, _) = poly1 - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let (_, tier_1_poly2, _) = poly2 - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly2, &point, tier_1_poly2, @@ -267,7 +256,6 @@ fn test_soundness_wrong_commitment() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly2.evaluate(&point); diff --git a/tests/arkworks/mod.rs b/tests/arkworks/mod.rs index ad585fd..cf480e8 100644 --- a/tests/arkworks/mod.rs +++ b/tests/arkworks/mod.rs @@ -8,7 +8,6 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::Field; use dory_pcs::proof::DoryProof; use 
dory_pcs::setup::{ProverSetup, VerifierSetup}; -use rand::thread_rng; pub mod cache; pub mod commitment; @@ -25,8 +24,7 @@ pub mod zk; pub mod zk_statistical; pub fn random_polynomial(size: usize) -> ArkworksPolynomial { - let mut rng = thread_rng(); - let coefficients: Vec = (0..size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..size).map(|_| ArkFr::random()).collect(); ArkworksPolynomial::new(coefficients) } @@ -38,13 +36,11 @@ pub fn constant_polynomial(value: u64, num_vars: usize) -> ArkworksPolynomial { } pub fn random_point(num_vars: usize) -> Vec { - let mut rng = thread_rng(); - (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect() + (0..num_vars).map(|_| ArkFr::random()).collect() } pub fn test_setup(max_log_n: usize) -> ProverSetup { - let mut rng = thread_rng(); - ProverSetup::new(&mut rng, max_log_n) + ProverSetup::new(max_log_n) } pub fn test_setup_pair(max_log_n: usize) -> (ProverSetup, VerifierSetup) { diff --git a/tests/arkworks/non_square.rs b/tests/arkworks/non_square.rs index be6c333..0b8f6a9 100644 --- a/tests/arkworks/non_square.rs +++ b/tests/arkworks/non_square.rs @@ -6,8 +6,7 @@ use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_non_square_matrix_nu_eq_sigma_minus_1() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); // nu = 3, sigma = 4 => 2^3 x 2^4 = 8 rows x 16 columns = 128 coefficients let nu = 3; @@ -19,11 +18,11 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { let point = random_point(num_vars); let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, 
&point, tier_1, @@ -31,7 +30,6 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .expect("Proof generation should succeed"); @@ -52,8 +50,7 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { #[test] fn test_non_square_matrix_nu_greater_than_sigma_rejected() { - let mut rng = rand::thread_rng(); - let (prover_setup, _verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, _verifier_setup) = setup::(10); // nu = 4, sigma = 3 => This should be rejected let nu = 4; @@ -65,11 +62,11 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { let point = random_point(num_vars); let (_, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof_result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let proof_result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -77,7 +74,6 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ); assert!( @@ -88,8 +84,7 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { #[test] fn test_non_square_matrix_small() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + let (prover_setup, verifier_setup) = setup::(6); // nu = 2, sigma = 3 => 2^2 x 2^3 = 4 rows x 8 columns = 32 coefficients let nu = 2; @@ -101,11 +96,11 @@ fn test_non_square_matrix_small() { let point = random_point(num_vars); let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, 
TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -113,7 +108,6 @@ fn test_non_square_matrix_small() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .expect("Proof generation should succeed"); @@ -137,8 +131,7 @@ fn test_non_square_matrix_small() { #[test] fn test_non_square_matrix_very_rectangular() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); // nu = 2, sigma = 5 => 2^2 x 2^5 = 4 rows x 32 columns = 128 coefficients // This is much "less square" than nu = sigma - 1 @@ -151,11 +144,11 @@ fn test_non_square_matrix_very_rectangular() { let point = random_point(num_vars); let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -163,7 +156,6 @@ fn test_non_square_matrix_very_rectangular() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .expect("Proof generation should succeed"); diff --git a/tests/arkworks/serialization.rs b/tests/arkworks/serialization.rs index ddec013..e3b191e 100644 --- a/tests/arkworks/serialization.rs +++ b/tests/arkworks/serialization.rs @@ -11,16 +11,15 @@ fn make_transparent_proof() -> ( dory_pcs::backends::arkworks::ArkGT, Vec, ) { - let mut rng = rand::thread_rng(); let (setup, verifier_setup) = test_setup_pair(4); let poly = random_polynomial(16); let point = random_point(4); let (tier_2, tier_1, _) = poly - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); let mut transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = 
prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -28,7 +27,6 @@ fn make_transparent_proof() -> ( 2, &setup, &mut transcript, - &mut rng, ) .unwrap(); @@ -79,17 +77,16 @@ fn test_transparent_proof_roundtrip_uncompressed() { #[test] fn test_transparent_proof_roundtrip_verifies() { - let mut rng = rand::thread_rng(); let (setup, verifier_setup) = test_setup_pair(4); let poly = random_polynomial(16); let point = random_point(4); let (tier_2, tier_1, _) = poly - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); let mut transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -97,7 +94,6 @@ fn test_transparent_proof_roundtrip_verifies() { 2, &setup, &mut transcript, - &mut rng, ) .unwrap(); @@ -130,17 +126,16 @@ mod zk_roundtrip { dory_pcs::backends::arkworks::ArkGT, Vec, ) { - let mut rng = rand::thread_rng(); let (setup, verifier_setup) = test_setup_pair(4); let poly = random_polynomial(16); let point = random_point(4); let (tier_2, tier_1, _) = poly - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); let mut transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, tier_1, @@ -148,7 +143,6 @@ mod zk_roundtrip { 2, &setup, &mut transcript, - &mut rng, ) .unwrap(); @@ -194,17 +188,16 @@ mod zk_roundtrip { #[test] fn test_zk_proof_roundtrip_verifies() { - let mut rng = rand::thread_rng(); let (setup, verifier_setup) = test_setup_pair(4); let poly = random_polynomial(16); let point = random_point(4); let (tier_2, tier_1, _) = poly - .commit::(2, 2, &setup, &mut rng) + .commit::(2, 2, &setup) .unwrap(); let mut transcript = fresh_transcript(); - let 
(proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, tier_1, @@ -212,7 +205,6 @@ mod zk_roundtrip { 2, &setup, &mut transcript, - &mut rng, ) .unwrap(); diff --git a/tests/arkworks/setup.rs b/tests/arkworks/setup.rs index e2fe8d3..4bcbcfd 100644 --- a/tests/arkworks/setup.rs +++ b/tests/arkworks/setup.rs @@ -93,15 +93,12 @@ fn test_setup_disk_persistence() { fn test_setup_function_uses_disk() { use dory_pcs::backends::arkworks::BN254; use dory_pcs::{generate_urs, setup}; - use rand::thread_rng; - - let mut rng = thread_rng(); let max_log_n = 11; - let (prover1, verifier1) = generate_urs::(&mut rng, max_log_n); + let (prover1, verifier1) = generate_urs::(max_log_n); - let (prover2, verifier2) = setup::(&mut rng, max_log_n); + let (prover2, verifier2) = setup::(max_log_n); assert_eq!(prover1.g1_vec[0], prover2.g1_vec[0]); assert_eq!(prover1.g2_vec[0], prover2.g2_vec[0]); @@ -112,12 +109,10 @@ fn test_setup_function_uses_disk() { fn test_arkworks_setup_canonical_serialization() { use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use dory_pcs::backends::arkworks::{ArkworksProverSetup, ArkworksVerifierSetup}; - use rand::thread_rng; - let mut rng = thread_rng(); let max_log_n = 6; - let prover = ArkworksProverSetup::new(&mut rng, max_log_n); + let prover = ArkworksProverSetup::new(max_log_n); let verifier = prover.to_verifier_setup(); let mut prover_bytes = Vec::new(); @@ -152,9 +147,7 @@ fn test_arkworks_setup_canonical_serialization() { fn test_arkworks_setup_new_from_urs() { use dory_pcs::backends::arkworks::ArkworksProverSetup; use dory_pcs::{backends::arkworks::BN254, generate_urs}; - use rand::thread_rng; - let mut rng = thread_rng(); let max_log_n = 14; // Clean up any existing cache file first @@ -196,9 +189,9 @@ fn test_arkworks_setup_new_from_urs() { let _ = std::fs::remove_file(&cache_file); } - let (prover1, _) = 
generate_urs::(&mut rng, max_log_n); + let (prover1, _) = generate_urs::(max_log_n); - let prover2 = ArkworksProverSetup::new_from_urs(&mut rng, max_log_n); + let prover2 = ArkworksProverSetup::new_from_urs(max_log_n); // Verify they match (proving it loaded from disk) assert_eq!( diff --git a/tests/arkworks/soundness.rs b/tests/arkworks/soundness.rs index 4f4e27d..fd28da1 100644 --- a/tests/arkworks/soundness.rs +++ b/tests/arkworks/soundness.rs @@ -27,12 +27,11 @@ fn create_valid_proof_components( let poly = random_polynomial(size); let point = random_point(nu + sigma); - let mut rng = rand::thread_rng(); let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, @@ -40,7 +39,6 @@ fn create_valid_proof_components( sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs index fd30f0b..2d161e3 100644 --- a/tests/arkworks/zk.rs +++ b/tests/arkworks/zk.rs @@ -6,24 +6,23 @@ use dory_pcs::{create_evaluation_proof, prove, setup, verify, Transparent, ZK}; #[test] fn test_zk_full_workflow() { - let mut rng = rand::thread_rng(); let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); let poly = random_polynomial(256); let nu = 4; let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(8); let expected_evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, 
TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, tier_1, @@ -31,7 +30,6 @@ fn test_zk_full_workflow() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); let evaluation = poly.evaluate(&point); @@ -52,7 +50,6 @@ fn test_zk_full_workflow() { #[test] fn test_zk_small_polynomial() { - let mut rng = rand::thread_rng(); let (prover_setup, verifier_setup) = test_setup_pair(4); let poly = random_polynomial(4); @@ -60,14 +57,14 @@ fn test_zk_small_polynomial() { let sigma = 1; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(2); let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, tier_1, @@ -75,7 +72,6 @@ fn test_zk_small_polynomial() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -98,22 +94,21 @@ fn test_zk_small_polynomial() { #[test] fn test_zk_larger_polynomial() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 12); + let (prover_setup, verifier_setup) = setup::(12); let poly = random_polynomial(1024); let nu = 5; let sigma = 5; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(10); let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, tier_1, @@ -121,7 +116,6 @@ fn test_zk_larger_polynomial() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -144,8 
+138,7 @@ fn test_zk_larger_polynomial() { #[test] fn test_zk_non_square_matrix() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); // Non-square: nu=3, sigma=4 (8 rows, 16 columns = 128 coefficients) let poly = random_polynomial(128); @@ -153,14 +146,14 @@ fn test_zk_non_square_matrix() { let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(7); // nu + sigma = 7 let evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, tier_1, @@ -168,7 +161,6 @@ fn test_zk_non_square_matrix() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -189,11 +181,8 @@ fn test_zk_non_square_matrix() { ); } -/// Test the full ZK API where y is hidden from the verifier -/// With unified API, verifier extracts y_com from proof.y_com #[test] fn test_zk_hidden_evaluation() { - let mut rng = rand::thread_rng(); let (prover_setup, verifier_setup) = test_setup_pair(6); let poly = random_polynomial(16); @@ -201,7 +190,7 @@ fn test_zk_hidden_evaluation() { let sigma = 2; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(4); @@ -209,24 +198,20 @@ fn test_zk_hidden_evaluation() { // Create ZK proof using unified API with ZK mode let mut prover_transcript = fresh_transcript(); - let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let (proof, _) = create_evaluation_proof::<_, BN254, 
TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); - // Verify y_com is present in proof assert!(proof.y_com.is_some(), "ZK proof should contain y_com"); assert!(proof.e2.is_some(), "ZK proof should contain e2"); - // Verify ZK proof - for ZK proofs, evaluation is ignored (e2 from proof is used) let mut verifier_transcript = fresh_transcript(); let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( tier_2, @@ -249,7 +234,6 @@ fn test_zk_hidden_evaluation() { fn test_zk_tampered_e2_rejected() { use dory_pcs::primitives::arithmetic::Group; - let mut rng = rand::thread_rng(); let (prover_setup, verifier_setup) = test_setup_pair(6); let poly = random_polynomial(16); @@ -257,7 +241,7 @@ fn test_zk_tampered_e2_rejected() { let sigma = 2; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(4); @@ -265,7 +249,7 @@ fn test_zk_tampered_e2_rejected() { let mut prover_transcript = fresh_transcript(); let (mut proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, Some(tier_1), @@ -273,7 +257,6 @@ fn test_zk_tampered_e2_rejected() { sigma, &prover_setup, &mut prover_transcript, - &mut rng, ) .unwrap(); @@ -298,33 +281,30 @@ fn test_zk_tampered_e2_rejected() { /// Test full ZK with larger polynomial #[test] fn test_zk_hidden_evaluation_larger() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let poly = random_polynomial(256); let nu = 4; let sigma = 4; let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup, &mut rng) + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(8); let evaluation = 
poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut prover_transcript, - &mut rng, - ) - .unwrap(); + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + Some(tier_1), + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); let mut verifier_transcript = fresh_transcript(); let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( diff --git a/tests/arkworks/zk_statistical.rs b/tests/arkworks/zk_statistical.rs index 88282a9..b920586 100644 --- a/tests/arkworks/zk_statistical.rs +++ b/tests/arkworks/zk_statistical.rs @@ -8,13 +8,11 @@ use ark_serialize::CanonicalSerialize; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{create_evaluation_proof, setup, verify, DoryProof, Transparent, ZK}; -use rand::rngs::StdRng; -use rand::SeedableRng; use std::collections::HashMap; const NUM_BUCKETS: usize = 16; -/// Bucket distribution tracker for statistical analysis +/// Distribution tracker for statistical analysis struct BucketTracker { buckets: HashMap>, } @@ -180,8 +178,7 @@ fn collect_full_zk_proof_stats(proof: &ArkDoryProof, tracker: &mut BucketTracker fn test_zk_statistical_indistinguishability() { const NUM_TRIALS: usize = 100; - let mut rng = StdRng::seed_from_u64(0xDEADBEEF); - let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + let (prover_setup, verifier_setup) = setup::(6); let nu = 2; let sigma = 2; @@ -193,28 +190,20 @@ fn test_zk_statistical_indistinguishability() { let mut tracker_ones = BucketTracker::new(); let mut tracker_random = BucketTracker::new(); - for trial in 0..NUM_TRIALS { - // Reseed RNG for reproducibility within each trial type - let mut trial_rng = StdRng::seed_from_u64(0xCAFEBABE + trial as u64); 
- + for _trial in 0..NUM_TRIALS { // Distribution A: All-zeros polynomial (y=0 for all points) { let coeffs = vec![ArkFr::zero(); poly_size]; let poly = ArkworksPolynomial::new(coeffs); let (tier_2, tier_1, _) = poly - .commit::( - nu, - sigma, - &prover_setup, - &mut trial_rng, - ) + .commit::(nu, sigma, &prover_setup) .unwrap(); let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, Some(tier_1), @@ -222,7 +211,6 @@ fn test_zk_statistical_indistinguishability() { sigma, &prover_setup, &mut transcript, - &mut trial_rng, ) .unwrap(); @@ -247,18 +235,13 @@ fn test_zk_statistical_indistinguishability() { let poly = ArkworksPolynomial::new(coeffs); let (tier_2, tier_1, _) = poly - .commit::( - nu, - sigma, - &prover_setup, - &mut trial_rng, - ) + .commit::(nu, sigma, &prover_setup) .unwrap(); let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, Some(tier_1), @@ -266,7 +249,6 @@ fn test_zk_statistical_indistinguishability() { sigma, &prover_setup, &mut transcript, - &mut trial_rng, ) .unwrap(); @@ -289,18 +271,13 @@ fn test_zk_statistical_indistinguishability() { let poly = random_polynomial(poly_size); let (tier_2, tier_1, _) = poly - .commit::( - nu, - sigma, - &prover_setup, - &mut trial_rng, - ) + .commit::(nu, sigma, &prover_setup) .unwrap(); let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, 
Some(tier_1), @@ -308,7 +285,6 @@ fn test_zk_statistical_indistinguishability() { sigma, &prover_setup, &mut transcript, - &mut trial_rng, ) .unwrap(); @@ -382,8 +358,7 @@ fn test_zk_statistical_indistinguishability() { fn test_zk_witness_independence() { const NUM_TRIALS: usize = 80; - let mut rng = StdRng::seed_from_u64(0xFEEDFACE); - let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + let (prover_setup, verifier_setup) = setup::(6); let nu = 2; let sigma = 2; @@ -393,9 +368,7 @@ fn test_zk_witness_independence() { let mut tracker_skewed = BucketTracker::new(); let mut tracker_uniform = BucketTracker::new(); - for trial in 0..NUM_TRIALS { - let mut trial_rng = StdRng::seed_from_u64(0xABCDEF00 + trial as u64); - + for _trial in 0..NUM_TRIALS { // Skewed: Single non-zero coefficient at position 0 (y will be small/predictable) { let mut coeffs = vec![ArkFr::zero(); poly_size]; @@ -403,18 +376,13 @@ fn test_zk_witness_independence() { let poly = ArkworksPolynomial::new(coeffs); let (tier_2, tier_1, _) = poly - .commit::( - nu, - sigma, - &prover_setup, - &mut trial_rng, - ) + .commit::(nu, sigma, &prover_setup) .unwrap(); let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, Some(tier_1), @@ -422,7 +390,6 @@ fn test_zk_witness_independence() { sigma, &prover_setup, &mut transcript, - &mut trial_rng, ) .unwrap(); @@ -445,18 +412,13 @@ fn test_zk_witness_independence() { let poly = random_polynomial(poly_size); let (tier_2, tier_1, _) = poly - .commit::( - nu, - sigma, - &prover_setup, - &mut trial_rng, - ) + .commit::(nu, sigma, &prover_setup) .unwrap(); let evaluation = poly.evaluate(&point); let mut transcript = fresh_transcript(); let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK, _>( + 
create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, Some(tier_1), @@ -464,7 +426,6 @@ fn test_zk_witness_independence() { sigma, &prover_setup, &mut transcript, - &mut trial_rng, ) .unwrap(); From 0e4d40df27c68e04c583b3ab0f12d725b3fb989b Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 14:20:21 -0500 Subject: [PATCH 10/16] tests: more tampering --- src/evaluation_proof.rs | 35 +++-- tests/arkworks/zk.rs | 281 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 303 insertions(+), 13 deletions(-) diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 9fc5d5d..75e5851 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -324,19 +324,28 @@ where transcript.append_serde(b"vmv_e1", &vmv_message.e1); #[cfg(feature = "zk")] - let (e2, is_zk) = if let (Some(pe2), Some(yc)) = (&proof.e2, &proof.y_com) { - use crate::reduce_and_fold::{verify_sigma1_proof, verify_sigma2_proof}; - transcript.append_serde(b"vmv_e2", pe2); - transcript.append_serde(b"vmv_y_com", yc); - if let Some(ref s) = proof.sigma1_proof { - verify_sigma1_proof::(pe2, yc, s, &setup, transcript)?; - } - if let Some(ref s) = proof.sigma2_proof { - verify_sigma2_proof::(&vmv_message.e1, &vmv_message.d2, s, &setup, transcript)?; + let (e2, is_zk) = match (&proof.e2, &proof.y_com) { + (Some(pe2), Some(yc)) => { + use crate::reduce_and_fold::{verify_sigma1_proof, verify_sigma2_proof}; + transcript.append_serde(b"vmv_e2", pe2); + transcript.append_serde(b"vmv_y_com", yc); + match (&proof.sigma1_proof, &proof.sigma2_proof) { + (Some(s1), Some(s2)) => { + verify_sigma1_proof::(pe2, yc, s1, &setup, transcript)?; + verify_sigma2_proof::( + &vmv_message.e1, + &vmv_message.d2, + s2, + &setup, + transcript, + )?; + } + _ => return Err(DoryError::InvalidProof), + } + (*pe2, true) } - (*pe2, true) - } else { - (setup.g2_0.scale(&evaluation), false) + (None, None) => (setup.g2_0.scale(&evaluation), false), + _ => return 
Err(DoryError::InvalidProof), }; #[cfg(not(feature = "zk"))] let (e2, _is_zk) = (setup.g2_0.scale(&evaluation), false); @@ -409,6 +418,8 @@ where transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); return verifier_state.verify_final_zk(sp, &c, &transcript.challenge_scalar(b"d")); + } else { + return Err(DoryError::InvalidProof); } } transcript.append_serde(b"final_e1", &proof.final_message.e1); diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs index 2d161e3..351e217 100644 --- a/tests/arkworks/zk.rs +++ b/tests/arkworks/zk.rs @@ -1,6 +1,9 @@ //! Zero-knowledge mode tests for Dory PCS use super::*; +use ark_bn254::{Fq12, Fr, G1Projective, G2Projective}; +use ark_ff::UniformRand; +use dory_pcs::backends::arkworks::{ArkFr, ArkG1, ArkG2, ArkGT}; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{create_evaluation_proof, prove, setup, verify, Transparent, ZK}; @@ -260,7 +263,6 @@ fn test_zk_tampered_e2_rejected() { ) .unwrap(); - // Tamper with e2 in the proof if let Some(ref mut e2) = proof.e2 { *e2 = *e2 + prover_setup.h2.scale(&ArkFr::from_u64(42)); } @@ -322,3 +324,280 @@ fn test_zk_hidden_evaluation_larger() { result ); } + +// --------------------------------------------------------------------------- +// ZK Soundness Tests +// --------------------------------------------------------------------------- + +#[allow(clippy::type_complexity)] +fn create_valid_zk_proof_components( + size: usize, + nu: usize, + sigma: usize, +) -> ( + VerifierSetup, + Vec, + ArkGT, + ArkFr, + DoryProof, +) { + let (prover_setup, verifier_setup) = test_setup_pair(nu + sigma + 2); + + let poly = random_polynomial(size); + let point = random_point(nu + sigma); + + let (tier_2, tier_1, _) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + let mut prover_transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + 
nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + let evaluation = poly.evaluate(&point); + + (verifier_setup, point, tier_2, evaluation, proof) +} + +fn verify_tampered_zk_proof( + commitment: ArkGT, + evaluation: ArkFr, + point: &[ArkFr], + proof: &DoryProof, + verifier_setup: VerifierSetup, +) -> Result<(), dory_pcs::DoryError> { + let mut verifier_transcript = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + commitment, + evaluation, + point, + proof, + verifier_setup, + &mut verifier_transcript, + ) +} + +#[test] +fn test_zk_soundness_missing_sigma1_proof() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.sigma1_proof = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with missing sigma1_proof"); +} + +#[test] +fn test_zk_soundness_missing_sigma2_proof() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.sigma2_proof = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with missing sigma2_proof"); +} + +#[test] +fn test_zk_soundness_missing_scalar_product_proof() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.scalar_product_proof = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with missing scalar_product_proof" + ); +} + +#[test] +fn test_zk_soundness_partial_zk_e2_only() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.y_com = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, 
verifier_setup); + assert!( + result.is_err(), + "Should fail with partial ZK fields (e2 only)" + ); +} + +#[test] +fn test_zk_soundness_partial_zk_ycom_only() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.e2 = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with partial ZK fields (y_com only)" + ); +} + +#[test] +fn test_zk_soundness_tampered_sigma1_z1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut s) = proof.sigma1_proof { + s.z1 = ArkFr(Fr::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma1 z1"); +} + +#[test] +fn test_zk_soundness_tampered_sigma1_a1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut s) = proof.sigma1_proof { + s.a1 = ArkG2(G2Projective::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma1 a1"); +} + +#[test] +fn test_zk_soundness_tampered_sigma2_z1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut s) = proof.sigma2_proof { + s.z1 = ArkFr(Fr::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma2 z1"); +} + +#[test] +fn test_zk_soundness_tampered_sigma2_a() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + 
+ if let Some(ref mut s) = proof.sigma2_proof { + s.a = ArkGT(Fq12::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma2 a"); +} + +#[test] +fn test_zk_soundness_tampered_sp_e1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut sp) = proof.scalar_product_proof { + sp.e1 = ArkG1(G1Projective::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with tampered scalar product e1" + ); +} + +#[test] +fn test_zk_soundness_tampered_sp_p1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut sp) = proof.scalar_product_proof { + sp.p1 = ArkGT(Fq12::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with tampered scalar product p1" + ); +} + +#[test] +fn test_zk_soundness_tampered_sp_r3() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut sp) = proof.scalar_product_proof { + sp.r3 = ArkFr(Fr::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with tampered scalar product r3" + ); +} + +#[test] +fn test_zk_soundness_tampered_vmv_c() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.vmv_message.c = ArkGT(Fq12::rand(&mut rand::thread_rng())); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, 
verifier_setup); + assert!(result.is_err(), "Should fail with tampered VMV c in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_vmv_d2() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.vmv_message.d2 = ArkGT(Fq12::rand(&mut rand::thread_rng())); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered VMV d2 in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_vmv_e1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.vmv_message.e1 = ArkG1(G1Projective::rand(&mut rand::thread_rng())); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered VMV e1 in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_e2() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.e2 = Some(ArkG2(G2Projective::rand(&mut rand::thread_rng()))); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered e2 in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_y_com() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.y_com = Some(ArkG1(G1Projective::rand(&mut rand::thread_rng()))); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered y_com in ZK"); +} From 4ad40e36e1bc6c31386cea20b0ff76bb1b16fb3a Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 16:17:31 -0500 Subject: [PATCH 11/16] style: cosmetics --- README.md | 26 +- src/backends/arkworks/blake2b_transcript.rs | 7 - src/evaluation_proof.rs | 52 
++-
 src/lib.rs | 6 +-
 src/messages.rs | 1 -
 src/reduce_and_fold.rs | 457 +++++++++++---------
 6 files changed, 303 insertions(+), 246 deletions(-)

diff --git a/README.md b/README.md
index 4d7f0d5..301e351 100644
--- a/README.md
+++ b/README.md
@@ -12,11 +12,12 @@ Dory is a transparent polynomial commitment scheme with excellent asymptotic per
 **Key Features:**
 - **Transparent setup**: No trusted setup ceremony required with optional disk persistence
 - **Logarithmic proof size**: O(log n) group elements
-- **Logarithmic verification**: O(log n) GT exps and 5 pairings
+- **Logarithmic verification**: O(log n) GT exps and 1 multi-pairing
 - **Modular design**: Pluggable backends for curves and cryptographic primitives
 - **Performance-optimized**: Vectorized operations, optional prepared point caching, and parallelization with Rayon
 - **Flexible matrix layouts**: Supports both square and non-square matrices (nu ≤ sigma)
 - **Homomorphic properties**: Commitment linearity enables proof aggregation
+- **Zero-knowledge mode**: Toggleable hiding proofs

 ## Installation

@@ -153,7 +154,7 @@ fn main() -> Result<(), Box> {
 ## Examples

-The repository includes four comprehensive examples demonstrating different aspects of Dory:
+The repository includes five comprehensive examples demonstrating different aspects of Dory:

 1. **`basic_e2e`** - Standard end-to-end workflow with square matrix (nu=4, sigma=4)
    ```bash
@@ -175,6 +176,11 @@ The repository includes four comprehensive examples demonstrating different aspe
    cargo run --example homomorphic_mixed_sizes --features backends
    ```

+5. **`zk_e2e`** - Zero-knowledge end-to-end workflow with hiding proofs
+   ```bash
+   cargo run --example zk_e2e --features backends,zk
+   ```
+
 ## Development Setup

 After cloning the repository, install Git hooks to ensure code quality:
@@ -242,6 +248,7 @@ cargo bench --features backends,cache,parallel
 - `backends` - Enable concrete backends. Currently supports Arkworks BN254.
 
- `cache` - Enable prepared point caching for ~20-30% pairing speedup. Requires `arkworks` and `parallel`. - `parallel` - Enable parallelization using Rayon for MSMs and pairings. Works with both `arkworks` backend and enables parallel features in `ark-ec` and `ark-ff`. +- `zk` - Enable zero-knowledge mode. Adds the `ZK` mode type for generating hiding proofs with blinded protocol messages, sigma proofs, and scalar-product sub-proofs. - `disk-persistence` - Enable automatic setup caching to disk. When enabled, `setup()` will load from OS-specific cache directories if available, avoiding regeneration. ## Project Structure @@ -260,9 +267,14 @@ src/ │ ├── mod.rs # Module exports │ ├── ark_field.rs # Field wrapper (ArkFr) │ ├── ark_group.rs # Group wrappers (ArkG1, ArkG2, ArkGT) +│ ├── ark_pairing.rs # Pairing curve (BN254) │ ├── ark_poly.rs # Polynomial implementation +│ ├── ark_proof.rs # Proof type alias and serialization +│ ├── ark_cache.rs # Prepared point caching +│ ├── ark_setup.rs # Setup wrapper with disk persistence │ ├── ark_serde.rs # Serialization bridge │ └── blake2b_transcript.rs # Blake2b transcript +├── mode.rs # Transparent and ZK mode types ├── setup.rs # Transparent setup generation ├── evaluation_proof.rs # Proof creation and verification ├── reduce_and_fold.rs # Inner product protocol @@ -276,7 +288,13 @@ tests/arkworks/ ├── commitment.rs # Commitment tests ├── evaluation.rs # Evaluation tests ├── integration.rs # End-to-end tests -└── soundness.rs # Soundness tests +├── homomorphic.rs # Homomorphic combination tests +├── non_square.rs # Non-square matrix tests +├── serialization.rs # Proof serialization round-trip tests +├── cache.rs # Prepared point caching tests +├── soundness.rs # Soundness tests +├── zk.rs # Zero-knowledge mode and ZK soundness tests +└── zk_statistical.rs # ZK statistical indistinguishability tests ``` ## Test Coverage @@ -288,6 +306,8 @@ The implementation includes comprehensive tests covering: - End-to-end workflows - 
Homomorphic combination - Non-square matrix support (nu < sigma, nu = sigma - 1, and very rectangular cases) +- Zero-knowledge mode (hidden evaluations, sigma proofs, scalar-product proofs, soundness) +- Statistical indistinguishability of ZK proofs (witness independence) - Soundness (tampering resistance for all proof components across 20+ attack vectors) - Prepared point caching correctness diff --git a/src/backends/arkworks/blake2b_transcript.rs b/src/backends/arkworks/blake2b_transcript.rs index 1b1f497..63cd149 100644 --- a/src/backends/arkworks/blake2b_transcript.rs +++ b/src/backends/arkworks/blake2b_transcript.rs @@ -47,13 +47,6 @@ impl Blake2bTranscript { self.append_bytes_impl(label, &bytes); } - pub fn append_serde_impl(&mut self, label: &[u8], s: &S) { - match bincode::serialize(s) { - Ok(bytes) => self.append_bytes_impl(label, &bytes), - Err(_) => panic!("Bincode serialization failed"), - } - } - pub fn challenge_scalar_impl(&mut self, label: &[u8]) -> F { self.hasher.update(label); diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 75e5851..7ea9889 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -109,11 +109,9 @@ where }); } - let row_commitments = if let Some(rc) = row_commitments { - rc - } else { - let (_commitment, rc, _blinds) = polynomial.commit::(nu, sigma, setup)?; - rc + let row_commitments = match row_commitments { + Some(rc) => rc, + None => polynomial.commit::(nu, sigma, setup)?.1, }; let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); @@ -353,18 +351,26 @@ where // Folded-scalar accumulation with per-round coordinates. // num_rounds = sigma (we fold column dimensions). let num_rounds = sigma; + + // Bounds check: reject proofs with mismatched message counts or that exceed setup capacity. 
+ let max_rounds = setup.max_log_n / 2; + if num_rounds > max_rounds + || proof.first_messages.len() != num_rounds + || proof.second_messages.len() != num_rounds + { + return Err(DoryError::InvalidProof); + } + // s1 (right/prover): the σ column coordinates in natural order (LSB→MSB). // No padding here: the verifier folds across the σ column dimensions. // With MSB-first folding, these coordinates are only consumed after the first σ−ν rounds, // which correspond to the padded MSB dimensions on the left tensor, matching the prover. - let col_coords = &point[..sigma]; - let s1_coords: Vec = col_coords.to_vec(); + let s1_coords: Vec = point[..sigma].to_vec(); // s2 (left/prover): the ν row coordinates in natural order, followed by zeros for the extra // MSB dimensions. Conceptually this is s ⊗ [1,0]^(σ−ν): under MSB-first folds, the first // σ−ν rounds multiply s2 by α⁻¹ while contributing no right halves (since those entries are 0). let mut s2_coords: Vec = vec![F::zero(); sigma]; - let row_coords = &point[sigma..sigma + nu]; - s2_coords[..nu].copy_from_slice(&row_coords[..nu]); + s2_coords[..nu].copy_from_slice(&point[sigma..sigma + nu]); let mut verifier_state = DoryVerifierState::new( vmv_message.c, // c from VMV message @@ -398,13 +404,14 @@ where transcript.append_serde(b"e2_minus", &second_msg.e2_minus); let alpha = transcript.challenge_scalar(b"alpha"); - verifier_state.process_round(first_msg, second_msg, &alpha, &beta); + verifier_state.process_round(first_msg, second_msg, &alpha, &beta)?; } let gamma = transcript.challenge_scalar(b"gamma"); + // In ZK mode: absorb scalar product proof into transcript before deriving d. 
#[cfg(feature = "zk")] - if is_zk { + let zk_data = if is_zk { if let Some(ref sp) = proof.scalar_product_proof { for (l, v) in [ (b"sigma_p1" as &[u8], &sp.p1), @@ -415,18 +422,23 @@ where transcript.append_serde(l, v); } let c = transcript.challenge_scalar(b"sigma_c"); - transcript.append_serde(b"final_e1", &proof.final_message.e1); - transcript.append_serde(b"final_e2", &proof.final_message.e2); - return verifier_state.verify_final_zk(sp, &c, &transcript.challenge_scalar(b"d")); + Some((sp, c)) } else { return Err(DoryError::InvalidProof); } - } + } else { + None + }; + + // Shared: absorb final message and derive d. transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); - verifier_state.verify_final( - &proof.final_message, - &gamma, - &transcript.challenge_scalar(b"d"), - ) + let d = transcript.challenge_scalar(b"d"); + + #[cfg(feature = "zk")] + let zk = zk_data.as_ref().map(|(sp, c)| (*sp, c)); + #[cfg(not(feature = "zk"))] + let zk: Option<(&crate::messages::ScalarProductProof<_, _, _, _>, _)> = None; + + verifier_state.verify_final(&proof.final_message, &gamma, &d, zk) } diff --git a/src/lib.rs b/src/lib.rs index bcd2ac6..aef27f4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -105,9 +105,11 @@ pub mod backends; pub use error::DoryError; pub use evaluation_proof::create_evaluation_proof; -pub use messages::{FirstReduceMessage, ScalarProductMessage, SecondReduceMessage, VMVMessage}; +pub use messages::{ + FirstReduceMessage, ScalarProductMessage, ScalarProductProof, SecondReduceMessage, VMVMessage, +}; #[cfg(feature = "zk")] -pub use messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof}; +pub use messages::{Sigma1Proof, Sigma2Proof}; #[cfg(feature = "zk")] pub use mode::ZK; pub use mode::{Mode, Transparent}; diff --git a/src/messages.rs b/src/messages.rs index 1e1d74d..49ae1ea 100644 --- a/src/messages.rs +++ b/src/messages.rs @@ -88,7 +88,6 @@ pub struct Sigma2Proof { } /// ZK scalar 
product proof: proves (C, D1, D2) are consistent with blinded v1, v2. -#[cfg(feature = "zk")] #[derive(Clone, Debug)] #[allow(missing_docs)] pub struct ScalarProductProof { diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index a53eecf..c59ef92 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -32,13 +32,13 @@ pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { v2: Vec, /// For first round only: scalars used to construct v2 from fixed base h2 - v2_scalars: Option::Scalar>>, + v2_scalars: Option>>, /// Current s1 vector (scalars) - s1: Vec<::Scalar>, + s1: Vec>, /// Current s2 vector (scalars) - s2: Vec<::Scalar>, + s2: Vec>, /// Number of rounds remaining (log₂ of vector length) num_rounds: usize, @@ -83,24 +83,24 @@ pub struct DoryVerifierState { e2: E::G2, /// Initial e1 from VMV message - /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, H₂) + /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, Γ₂₀) e1_init: E::G1, /// Initial d2 from VMV message - /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, H₂) + /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, Γ₂₀) d2_init: E::GT, /// Accumulated scalar for s1 after folding across rounds - s1_acc: ::Scalar, + s1_acc: Scalar, /// Accumulated scalar for s2 after folding across rounds - s2_acc: ::Scalar, + s2_acc: Scalar, /// Per-round coordinates for s1 (length = num_rounds). Order matches folding order. - s1_coords: Vec<::Scalar>, + s1_coords: Vec>, /// Per-round coordinates for s2 (length = num_rounds). Order matches folding order. 
- s2_coords: Vec<::Scalar>, + s2_coords: Vec>, /// Number of rounds remaining for indexing setup arrays num_rounds: usize, @@ -127,9 +127,9 @@ where pub fn new( v1: Vec, v2: Vec, - v2_scalars: Option::Scalar>>, - s1: Vec<::Scalar>, - s2: Vec<::Scalar>, + v2_scalars: Option>>, + s1: Vec>, + s2: Vec>, setup: &'a ProverSetup, ) -> Self { debug_assert_eq!(v1.len(), v2.len(), "v1 and v2 must have equal length"); @@ -258,26 +258,20 @@ where /// /// Updates the state by combining with generators scaled by beta. #[tracing::instrument(skip_all, name = "DoryProverState::apply_first_challenge")] - pub fn apply_first_challenge(&mut self, beta: &::Scalar) + pub fn apply_first_challenge(&mut self, beta: &Scalar) where M1: DoryRoutines, M2: DoryRoutines, { - let beta_inv = (*beta).inv().expect("beta must be invertible"); - + let beta_inv = beta.inv().expect("beta must be invertible"); let n = 1 << self.num_rounds; - // Combine: v₁ ← v₁ + β·Γ₁ + // v₁ ← v₁ + β·Γ₁, v₂ ← v₂ + β⁻¹·Γ₂ M1::fixed_scalar_mul_bases_then_add(&self.setup.g1_vec[..n], &mut self.v1, beta); - - // Combine: v₂ ← v₂ + β⁻¹·Γ₂ M2::fixed_scalar_mul_bases_then_add(&self.setup.g2_vec[..n], &mut self.v2, &beta_inv); - - // After first combine, the `v2_scalars` optimization does not apply. 
self.v2_scalars = None; - // Accumulate blinds: rC ← rC + β·rD2 + β⁻¹·rD1 - self.r_c = self.r_c + self.r_d2 * *beta + self.r_d1 * beta_inv; + self.r_c = self.r_c + self.r_d2 * beta + self.r_d1 * beta_inv; } /// Compute second reduce message for current round @@ -297,7 +291,6 @@ where let (s1_l, s1_r) = self.s1.split_at(n2); let (s2_l, s2_r) = self.s2.split_at(n2); - // Sample round blinds (zero in Transparent mode) self.round_c = [M::sample(), M::sample()]; self.round_e1 = [M::sample(), M::sample()]; self.round_e2 = [M::sample(), M::sample()]; @@ -329,9 +322,9 @@ where #[tracing::instrument(skip_all, name = "DoryProverState::apply_second_challenge")] pub fn apply_second_challenge, M2: DoryRoutines>( &mut self, - alpha: &::Scalar, + alpha: &Scalar, ) { - let alpha_inv = (*alpha).inv().expect("alpha must be invertible"); + let alpha_inv = alpha.inv().expect("alpha must be invertible"); let n2 = 1 << (self.num_rounds - 1); // n/2 // Fold v₁: v₁ ← α·v₁L + v₁R @@ -354,14 +347,12 @@ where M1::fold_field_vectors(s2_l, s2_r, &alpha_inv); self.s2.truncate(n2); - // Update accumulated blinds from stored round blinds - self.r_c = self.r_c + self.round_c[0] * *alpha + self.round_c[1] * alpha_inv; - self.r_d1 = self.round_d1[0] * *alpha + self.round_d1[1]; + self.r_c = self.r_c + self.round_c[0] * alpha + self.round_c[1] * alpha_inv; + self.r_d1 = self.round_d1[0] * alpha + self.round_d1[1]; self.r_d2 = self.round_d2[0] * alpha_inv + self.round_d2[1]; - self.r_e1 = self.r_e1 + self.round_e1[0] * *alpha + self.round_e1[1] * alpha_inv; - self.r_e2 = self.r_e2 + self.round_e2[0] * *alpha + self.round_e2[1] * alpha_inv; + self.r_e1 = self.r_e1 + self.round_e1[0] * alpha + self.round_e1[1] * alpha_inv; + self.r_e2 = self.r_e2 + self.round_e2[0] * alpha + self.round_e2[1] * alpha_inv; - // Decrement round counter self.num_rounds -= 1; } @@ -378,7 +369,7 @@ where #[tracing::instrument(skip_all, name = "DoryProverState::compute_final_message")] pub fn compute_final_message( &mut self, 
- gamma: &::Scalar, + gamma: &Scalar, ) -> ScalarProductMessage where M1: DoryRoutines, @@ -388,23 +379,20 @@ where debug_assert_eq!(self.v1.len(), 1, "v1 must have length 1"); debug_assert_eq!(self.v2.len(), 1, "v2 must have length 1"); - let gamma_inv = (*gamma).inv().expect("gamma must be invertible"); + let gamma_inv = gamma.inv().expect("gamma must be invertible"); - // Sample independent blinds for the final message (zero in Transparent mode). - let r_final1: ::Scalar = M::sample(); - let r_final2: ::Scalar = M::sample(); + let r_final1: Scalar = M::sample(); + let r_final2: Scalar = M::sample(); - // Apply fold-scalars transform with blinding: // E₁ = v₁ + (γ·s₁ + r_final1)·H₁ let gamma_s1 = *gamma * self.s1[0] + r_final1; - let e1 = self.v1[0] + self.setup.h1.scale(&gamma_s1); + let e1 = self.v1[0] + gamma_s1 * self.setup.h1; // E₂ = v₂ + (γ⁻¹·s₂ + r_final2)·H₂ let gamma_inv_s2 = gamma_inv * self.s2[0] + r_final2; let e2 = self.v2[0] + self.setup.h2.scale(&gamma_inv_s2); - // Final blind accumulation: r_c ← r_c + γ·r_e2 + γ⁻¹·r_e1 - self.r_c = self.r_c + self.r_e2 * *gamma + self.r_e1 * gamma_inv; + self.r_c = self.r_c + self.r_e2 * gamma + self.r_e1 * gamma_inv; ScalarProductMessage { e1, e2 } } @@ -418,9 +406,9 @@ where let (v1, v2) = (self.v1[0], self.v2[0]); let (g1, g2) = (self.setup.g1_vec[0], self.setup.g2_vec[0]); let ht = &self.setup.ht; - let r = || -> Scalar { Field::random() }; + let r = || Scalar::::random(); let (sd1, sd2) = (r(), r()); - let (d1, d2) = (g1.scale(&sd1), g2.scale(&sd2)); + let (d1, d2) = (sd1 * g1, g2.scale(&sd2)); let (rp1, rp2, rq, rr) = (r(), r(), r(), r()); let p1 = E::pair(&d1, &g2) + ht.scale(&rp1); let p2 = E::pair(&g1, &d2) + ht.scale(&rp2); @@ -440,7 +428,7 @@ where p2, q, r: rr_val, - e1: d1 + v1.scale(&c), + e1: d1 + c * v1, e2: d2 + v2.scale(&c), r1: rp1 + c * self.r_d1, r2: rp2 + c * self.r_d2, @@ -471,16 +459,16 @@ where Scalar::::random(), ); let a1 = g2_fin.scale(&k1) + setup.h2.scale(&k2); - let a2 = 
g1_fin.scale(&k1) + setup.h1.scale(&k3); + let a2 = k1 * g1_fin + k3 * setup.h1; transcript.append_serde(b"sigma1_a1", &a1); transcript.append_serde(b"sigma1_a2", &a2); let c = transcript.challenge_scalar(b"sigma1_c"); Sigma1Proof { a1, a2, - z1: k1 + c * *y, - z2: k2 + c * *r_e2, - z3: k3 + c * *r_y, + z1: k1 + c * y, + z2: k2 + c * r_e2, + z3: k3 + c * r_y, } } @@ -503,7 +491,7 @@ where if setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2) != proof.a1 + e2.scale(&c) { return Err(DoryError::InvalidProof); } - if setup.g1_0.scale(&proof.z1) + setup.h1.scale(&proof.z3) != proof.a2 + y_commit.scale(&c) { + if proof.z1 * setup.g1_0 + proof.z3 * setup.h1 != proof.a2 + c * y_commit { return Err(DoryError::InvalidProof); } Ok(()) @@ -533,8 +521,8 @@ where let c = transcript.challenge_scalar(b"sigma2_c"); Sigma2Proof { a, - z1: k1 + c * *t1, - z2: k2 + c * *t2, + z1: k1 + c * t1, + z2: k2 + c * t2, } } @@ -567,26 +555,11 @@ where } impl DoryVerifierState { - /// Create new verifier state - /// - /// # Parameters - /// - `c`: Initial inner product value - /// - `d1`: Initial d1 value (from VMV) - /// - `d2`: Initial d2 value (from VMV) - /// - `e1`: Initial e1 value - /// - `e2`: Initial e2 value - /// - /// Construct verifier state for O(1) accumulation - /// - /// - `s1_coords`: Per-round coordinates for s1 (right_vec in prover) - /// - `s2_coords`: Per-round coordinates for s2 (left_vec in prover) - /// - `num_rounds`: Number of rounds - /// - `setup`: Verifier setup parameters + /// Create new verifier state for O(1) accumulation. /// - /// Note: `e1` and `d2` are stored both as initial values (for batched VMV check) - /// and as accumulators (updated during reduce rounds) - /// this is because the VMV check happens before the folding rounds, so we need to save - /// the value for the final batched pairing check. 
+ /// `e1` and `d2` are stored both as initial values (for batched VMV check) + /// and as accumulators (updated during reduce rounds), since the VMV check + /// is deferred to the final batched pairing. #[allow(clippy::too_many_arguments)] pub fn new( c: E::GT, @@ -594,8 +567,8 @@ impl DoryVerifierState { d2: E::GT, e1: E::G1, e2: E::G2, - s1_coords: Vec<::Scalar>, - s2_coords: Vec<::Scalar>, + s1_coords: Vec>, + s2_coords: Vec>, num_rounds: usize, setup: VerifierSetup, ) -> Self { @@ -610,8 +583,8 @@ impl DoryVerifierState { e2, e1_init: e1, d2_init: d2, - s1_acc: ::Scalar::one(), - s2_acc: ::Scalar::one(), + s1_acc: Scalar::::one(), + s2_acc: Scalar::::one(), s1_coords, s2_coords, num_rounds, @@ -628,164 +601,222 @@ impl DoryVerifierState { &mut self, first_msg: &FirstReduceMessage, second_msg: &SecondReduceMessage, - alpha: &::Scalar, - beta: &::Scalar, - ) where - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, - ::Scalar: Field, + alpha: &Scalar, + beta: &Scalar, + ) -> Result<(), DoryError> + where + E::G2: Group>, + E::GT: Group>, + Scalar: Field, { - assert!(self.num_rounds > 0, "No rounds remaining"); - - let alpha_inv = (*alpha).inv().expect("alpha must be invertible"); - let beta_inv = (*beta).inv().expect("beta must be invertible"); - - // Update C: C' ← C + χᵢ + β·D₂ + β⁻¹·D₁ + α·C₊ + α⁻¹·C₋ - let chi = &self.setup.chi[self.num_rounds]; - self.c = self.c + chi; - self.c = self.c + self.d2.scale(beta); - self.c = self.c + self.d1.scale(&beta_inv); - self.c = self.c + second_msg.c_plus.scale(alpha); - self.c = self.c + second_msg.c_minus.scale(&alpha_inv); - - // Update D₁: D₁' ← α·D₁L + D₁R + α·β·Δ₁L + β·Δ₁R - let delta_1l = &self.setup.delta_1l[self.num_rounds]; - let delta_1r = &self.setup.delta_1r[self.num_rounds]; - let alpha_beta = *alpha * *beta; - self.d1 = first_msg.d1_left.scale(alpha); - self.d1 = self.d1 + first_msg.d1_right; - self.d1 = self.d1 + delta_1l.scale(&alpha_beta); - self.d1 = self.d1 + delta_1r.scale(beta); - - // Update D₂: D₂' 
← α⁻¹·D₂L + D₂R + α⁻¹·β⁻¹·Δ₂L + β⁻¹·Δ₂R - let delta_2l = &self.setup.delta_2l[self.num_rounds]; - let delta_2r = &self.setup.delta_2r[self.num_rounds]; + if self.num_rounds == 0 { + return Err(DoryError::InvalidProof); + } + + let alpha_inv = alpha.inv().ok_or(DoryError::InvalidProof)?; + let beta_inv = beta.inv().ok_or(DoryError::InvalidProof)?; + + // C' ← C + χᵢ + β·D₂ + β⁻¹·D₁ + α·C₊ + α⁻¹·C₋ + self.c = self.c + + self.setup.chi[self.num_rounds] + + self.d2.scale(beta) + + self.d1.scale(&beta_inv) + + second_msg.c_plus.scale(alpha) + + second_msg.c_minus.scale(&alpha_inv); + + // D₁' ← α·D₁L + D₁R + αβ·Δ₁L + β·Δ₁R + let alpha_beta = *alpha * beta; + self.d1 = first_msg.d1_left.scale(alpha) + + first_msg.d1_right + + self.setup.delta_1l[self.num_rounds].scale(&alpha_beta) + + self.setup.delta_1r[self.num_rounds].scale(beta); + + // D₂' ← α⁻¹·D₂L + D₂R + α⁻¹β⁻¹·Δ₂L + β⁻¹·Δ₂R let alpha_inv_beta_inv = alpha_inv * beta_inv; - self.d2 = first_msg.d2_left.scale(&alpha_inv); - self.d2 = self.d2 + first_msg.d2_right; - self.d2 = self.d2 + delta_2l.scale(&alpha_inv_beta_inv); - self.d2 = self.d2 + delta_2r.scale(&beta_inv); - - // Update E₁: E₁' ← E₁ + β·E₁β + α·E₁₊ + α⁻¹·E₁₋ - self.e1 = self.e1 + first_msg.e1_beta.scale(beta); - self.e1 = self.e1 + second_msg.e1_plus.scale(alpha); - self.e1 = self.e1 + second_msg.e1_minus.scale(&alpha_inv); - - // Update E₂: E₂' ← E₂ + β⁻¹·E₂β + α·E₂₊ + α⁻¹·E₂₋ - self.e2 = self.e2 + first_msg.e2_beta.scale(&beta_inv); - self.e2 = self.e2 + second_msg.e2_plus.scale(alpha); - self.e2 = self.e2 + second_msg.e2_minus.scale(&alpha_inv); - - // Update folded scalars in O(1): s1_acc *= (α·(1−y_t) + y_t), s2_acc *= (α⁻¹·(1−x_t) + x_t) - // Endianness note: s*_coords are stored in increasing dimension index (little-endian by dimension). - // Folding processes the most significant dimension first (MSB-first), so we index from the end: idx = num_rounds - 1. 
+ self.d2 = first_msg.d2_left.scale(&alpha_inv) + + first_msg.d2_right + + self.setup.delta_2l[self.num_rounds].scale(&alpha_inv_beta_inv) + + self.setup.delta_2r[self.num_rounds].scale(&beta_inv); + + // E₁' ← E₁ + β·E₁β + α·E₁₊ + α⁻¹·E₁₋ + self.e1 = self.e1 + + *beta * first_msg.e1_beta + + *alpha * second_msg.e1_plus + + alpha_inv * second_msg.e1_minus; + + // E₂' ← E₂ + β⁻¹·E₂β + α·E₂₊ + α⁻¹·E₂₋ + self.e2 = self.e2 + + first_msg.e2_beta.scale(&beta_inv) + + second_msg.e2_plus.scale(alpha) + + second_msg.e2_minus.scale(&alpha_inv); + + // Folded scalars: s_acc *= (α·(1−coord) + coord) indexed MSB-first let idx = self.num_rounds - 1; - let y_t = self.s1_coords[idx]; - let x_t = self.s2_coords[idx]; - let one = ::Scalar::one(); - let s1_term = (*alpha) * (one - y_t) + y_t; - let s2_term = alpha_inv * (one - x_t) + x_t; - self.s1_acc = self.s1_acc * s1_term; - self.s2_acc = self.s2_acc * s2_term; - - // Decrement round counter + let (y_t, x_t) = (self.s1_coords[idx], self.s2_coords[idx]); + let one = Scalar::::one(); + self.s1_acc = self.s1_acc * (*alpha * (one - y_t) + y_t); + self.s2_acc = self.s2_acc * (alpha_inv * (one - x_t) + x_t); + self.num_rounds -= 1; + Ok(()) } - /// Verify final scalar product message (4 pairings batched with VMV check). + /// Verify the final scalar product equation. + /// + /// Must be called when `num_rounds == 0` after all reduce rounds are complete. + /// + /// When `zk_data` is `None`, performs the transparent 4-pairing check. + /// When `zk_data` is `Some((sp, sigma_c))`, performs the ZK 1-pairing check. + /// + /// # Non-optimized Protocol Equations + /// + /// ## VMV Check (batched together with the final pairing check) + /// + /// The VMV protocol requires: `D₂_init = e(E₁_init, Γ₂₀)` + /// (proven by the Sigma₂ proof in ZK mode, deferred here for batching in transparent mode). 
+ /// + /// ## Fold-Scalars Updates + /// + /// ```text + /// C' ← C + (s₁·s₂)·HT + γ·e(H₁, E₂) + γ⁻¹·e(E₁, H₂) + /// D₁' ← D₁ + e(H₁, (s₁·γ)·Γ₂₀) + /// D₂' ← D₂ + e((s₂·γ⁻¹)·Γ₁₀, H₂) + /// ``` + /// + /// ## Final Verification + /// + /// ```text + /// e(E₁ + d·Γ₁₀, E₂ + d⁻¹·Γ₂₀) = C' + χ₀ + d·D₂' + d⁻¹·D₁' + /// ``` + /// + /// # Transparent Mode — Multi-Pairing Check (4 ML + 1 FE) + /// + /// ## Batching the VMV Check + /// + /// We use random linear combination with challenge `d²` to defer the VMV check. + /// We use `d²` (not `d`) to ensure sufficient independence from the existing `d·D₂` term. + /// + /// Soundness: `d` is derived from the transcript AFTER `D₂_init` and `E₁_init` are + /// committed, so if `D₂_init ≠ e(E₁_init, Γ₂₀)`, then with overwhelming probability + /// `T + d²·D₂_init ≠ multi_pair([...]) + d²·e(E₁_init, Γ₂₀)`. + /// + /// ## Final Combined Check + /// + /// The final check verifies both: + /// - (a) The fold-scalars/reduce protocol equation + /// - (b) The VMV constraint `D₂_init = e(E₁_init, Γ₂₀)` + /// + /// Combined via: `(a) + d²·(b)` where `d` is the final challenge. + /// + /// ```text + /// e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) [Pair 1: scalar product] + /// · e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) [Pair 2: E₂ accumulator] + /// · e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) [Pair 3: E₁ accumulator] + /// · e(d²·E₁_init, Γ₂₀) [Pair 4: deferred VMV] + /// = C + (s₁·s₂)·HT + χ₀ + d·D₂ + d⁻¹·D₁ + d²·D₂_init + /// ``` + /// + /// Note: Pairs 3 and 4 cannot be combined into 3 ML because they use different + /// G2 elements (H₂ vs Γ₂₀). This differs from the original Dory construction + /// where `D₂ = e(Γ₁·v, H₂)` allowed H₂-sharing. + /// + /// # ZK Mode (1 ML + 1 FE) + /// + /// In ZK mode, the scalar product proof replaces the transparent check with a + /// Sigma-protocol equation proving knowledge of (v₁, v₂) opening (C, D₁, D₂). 
+ /// E-accumulator and VMV binding are handled separately by Sigma₁/Sigma₂ proofs + /// verified earlier in the protocol. + /// + /// ```text + /// e(sp.e₁ + d·Γ₁₀, sp.e₂ + d⁻¹·Γ₂₀) + /// = χ₀ + sp.r + c·sp.q + c²·C + /// + d·(sp.p₂ + c·D₂) + d⁻¹·(sp.p₁ + c·D₁) + /// − (sp.r₃ + d·sp.r₂ + d⁻¹·sp.r₁)·HT + /// ``` + #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final")] pub fn verify_final( - &mut self, + &self, msg: &ScalarProductMessage, - gamma: &::Scalar, - d: &::Scalar, - ) -> Result<(), DoryError> - where - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, - ::Scalar: Field, - { - debug_assert_eq!( - self.num_rounds, 0, - "num_rounds must be 0 for final verification" - ); - - let gamma_inv = (*gamma).inv().expect("gamma must be invertible"); - let d_inv = (*d).inv().expect("d must be invertible"); - let d_sq = *d * *d; - let neg_gamma = -*gamma; - let neg_gamma_inv = -gamma_inv; - - // Compute RHS (non-pairing GT terms): - // T = C + (s₁·s₂)·HT + χ₀ + d·D₂ + d⁻¹·D₁ + d²·D₂_init - // The d²·D₂_init term is the deferred VMV check contribution. - // We use d² instead of d to ensure independence from the d·D₂ term. 
- let s_product = self.s1_acc * self.s2_acc; - let mut rhs = self.c + self.setup.ht.scale(&s_product); - rhs = rhs + self.setup.chi[0]; - rhs = rhs + self.d2.scale(d); - rhs = rhs + self.d1.scale(&d_inv); - rhs = rhs + self.d2_init.scale(&d_sq); - - // Pair 1: (E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) - let p1_g1 = msg.e1 + self.setup.g1_0.scale(d); - let p1_g2 = msg.e2 + self.setup.g2_0.scale(&d_inv); - - // Pair 2: (H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) - let d_inv_s1 = d_inv * self.s1_acc; - let g2_term = self.e2 + self.setup.g2_0.scale(&d_inv_s1); - let p2_g1 = self.setup.h1; - let p2_g2 = g2_term.scale(&neg_gamma); - - // Pair 3: ((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) - let d_s2 = *d * self.s2_acc; - let g1_term = self.e1 + self.setup.g1_0.scale(&d_s2); - let p3_g1 = g1_term.scale(&neg_gamma_inv); - let p3_g2 = self.setup.h2; - - // Pair 4: (d²·E₁_init, Γ2,fin) - deferred VMV check - let p4_g1 = self.e1_init.scale(&d_sq); - let p4_g2 = self.setup.g2_0; - - let lhs = E::multi_pair(&[p1_g1, p2_g1, p3_g1, p4_g1], &[p1_g2, p2_g2, p3_g2, p4_g2]); - - if lhs == rhs { - Ok(()) - } else { - Err(DoryError::InvalidProof) - } - } - - #[cfg(feature = "zk")] - pub fn verify_final_zk( - &mut self, - proof: &ScalarProductProof, E::GT>, - c: &Scalar, + gamma: &Scalar, d: &Scalar, + zk_data: Option<( + &ScalarProductProof, E::GT>, + &Scalar, + )>, ) -> Result<(), DoryError> where E::G2: Group>, E::GT: Group>, Scalar: Field, { - let d_inv = (*d).inv().expect("d must be invertible"); - let c_sq = *c * *c; - let lhs = E::pair( - &(proof.e1 + self.setup.g1_0.scale(d)), - &(proof.e2 + self.setup.g2_0.scale(&d_inv)), + debug_assert_eq!( + self.num_rounds, 0, + "num_rounds must be 0 for final verification" ); - let mut rhs = self.setup.chi[0] + proof.r + proof.q.scale(c) + self.c.scale(&c_sq); - rhs = rhs + proof.p2.scale(d) + self.d2.scale(&(*d * *c)); - rhs = rhs + proof.p1.scale(&d_inv) + self.d1.scale(&(d_inv * *c)); - rhs = rhs - - self - .setup - .ht - .scale(&(proof.r3 + *d * proof.r2 + 
d_inv * proof.r1)); - if lhs == rhs { - Ok(()) + + let d_inv = d.inv().ok_or(DoryError::InvalidProof)?; + + if let Some((sp, sigma_c)) = zk_data { + // ZK mode: 1 ML + 1 FE + let c = *sigma_c; + let c_sq = c * c; + + let lhs = E::pair( + &(sp.e1 + self.setup.g1_0.scale(d)), + &(sp.e2 + self.setup.g2_0.scale(&d_inv)), + ); + + let ht_scalar = sp.r3 + *d * sp.r2 + d_inv * sp.r1; + let mut rhs = self.setup.chi[0] + sp.r + sp.q.scale(&c) + self.c.scale(&c_sq); + rhs = rhs + sp.p2.scale(d) + self.d2.scale(&(*d * c)); + rhs = rhs + sp.p1.scale(&d_inv) + self.d1.scale(&(d_inv * c)); + rhs = rhs - self.setup.ht.scale(&ht_scalar); + + if lhs == rhs { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } } else { - Err(DoryError::InvalidProof) + // Transparent mode: 4 ML + 1 FE + let gamma_inv = gamma.inv().ok_or(DoryError::InvalidProof)?; + let d_sq = *d * *d; + let neg_gamma = -*gamma; + let neg_gamma_inv = -gamma_inv; + + let s_product = self.s1_acc * self.s2_acc; + let rhs = self.c + + self.setup.ht.scale(&s_product) + + self.setup.chi[0] + + self.d2.scale(d) + + self.d1.scale(&d_inv) + + self.d2_init.scale(&d_sq); + + // Pair 1: e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) + let p1_g1 = msg.e1 + self.setup.g1_0.scale(d); + let p1_g2 = msg.e2 + self.setup.g2_0.scale(&d_inv); + + // Pair 2: e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) + let p2_g1 = self.setup.h1; + let p2_g2 = (self.e2 + self.setup.g2_0.scale(&(d_inv * self.s1_acc))).scale(&neg_gamma); + + // Pair 3: e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) + let p3_g1 = + (self.e1 + self.setup.g1_0.scale(&(*d * self.s2_acc))).scale(&neg_gamma_inv); + let p3_g2 = self.setup.h2; + + // Pair 4: e(d²·E₁_init, Γ₂₀) — deferred VMV check + let p4_g1 = self.e1_init.scale(&d_sq); + let p4_g2 = self.setup.g2_0; + + let lhs = E::multi_pair(&[p1_g1, p2_g1, p3_g1, p4_g1], &[p1_g2, p2_g2, p3_g2, p4_g2]); + + if lhs == rhs { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } } } } From 9db019e19dc9e412f2316d57fd6141b663e19d48 Mon Sep 17 00:00:00 
2001 From: markosg04 Date: Fri, 27 Feb 2026 16:18:56 -0500 Subject: [PATCH 12/16] chore: remove md --- zk.md | 449 ---------------------------------------------------------- 1 file changed, 449 deletions(-) delete mode 100644 zk.md diff --git a/zk.md b/zk.md deleted file mode 100644 index 06d7c6a..0000000 --- a/zk.md +++ /dev/null @@ -1,449 +0,0 @@ -# Zero-Knowledge Analysis of Dory PCS Implementation - -This document audits every value the prover sends to the verifier (via the -Fiat-Shamir transcript or proof struct) and determines whether it is blinded, -public, or leaks witness information. - -**Security model**: Honest-Verifier Zero-Knowledge (HVZK) in the Random Oracle -Model (ROM), which is standard for Fiat-Shamir-transformed protocols. - -**Notation**: -- `Γ₁, Γ₂` — public setup generators in G₁, G₂ -- `H₁, H₂` — blinding generators; `HT = e(H₁, H₂)` -- `r_*` — random blinding scalars sampled via `Mode::sample` -- `f` — the committed polynomial (the witness) -- `z` — the evaluation point; `y = f(z)` — the evaluation - ---- - -## 0. Mode Dispatch (`mode.rs`) - -The `Mode` trait controls all blinding: - -| Mode | `sample()` | `mask(v, base, r)` | -|------|-----------|---------------------| -| `Transparent` | Returns `0` | Returns `v` (identity) | -| `ZK` | Returns `F::random(rng)` | Returns `v + base · r` | - -**Every `M::mask(...)` call below is a no-op in Transparent mode.** The rest of -this document focuses exclusively on ZK mode (`feature = "zk"`). - ---- - -## 1. Commitment Phase (`ark_poly.rs`) - -### 1a. `commit()` — Transparent commitment - -``` -row_commit[i] = MSM(Γ₁, coeffs_row_i) (no blinding) -tier2 = Σ e(row_commit[i], Γ₂[i]) -``` - -**Verdict**: NOT hiding. The tier-1 row commitments are deterministic functions -of the polynomial coefficients. The tier-2 commitment is a deterministic pairing -aggregate. - -### 1b. 
`commit_zk()` — ZK commitment - -``` -blind[i] ← random (fresh per row) -row_commit[i] = MSM(Γ₁, coeffs_row_i) + H₁ · blind[i] -tier2 = Σ e(row_commit[i], Γ₂[i]) -``` - -**Verdict**: HIDING (Pedersen). Each row commitment is a Pedersen commitment -with independent randomness. The tier-2 commitment inherits computational -hiding from the DL-hard blinding in each row. - -Returns `(tier2, row_commitments, blinds)` — the caller must keep `blinds` -secret and supply the blinded `row_commitments` to the prover. - ---- - -## 2. VMV Phase (`evaluation_proof.rs:132–177`) - -Four independent blinds are sampled: - -``` -r_c, r_d2, r_e1, r_e2 ← random -``` - -### 2a. `C` — Inner product value (GT) - -``` -C = e(MSM(row_comms, v_vec), Γ₂,fin) + r_c · HT -``` - -**Sent to transcript**: `vmv_c` -**Verdict**: BLINDED. Masked by `r_c · HT` with fresh randomness. Reveals -nothing about the polynomial's internal structure. - -### 2b. `D₂` — Generator inner product (GT) - -``` -D₂ = e(MSM(Γ₁[..2^σ], v_vec), Γ₂,fin) + r_d2 · HT -``` - -**Sent to transcript**: `vmv_d2` -**Verdict**: BLINDED. `v_vec = Lᵀ × M` encodes the polynomial, but `r_d2 · HT` -masks the value. Computationally hiding under DL in GT. - -### 2c. `E₁` — Row-scalar MSM (G₁) - -``` -E₁ = MSM(row_comms, left_vec) + r_e1 · H₁ -``` - -**Sent to transcript**: `vmv_e1` -**Verdict**: BLINDED. Masked by `r_e1 · H₁`. Even if `row_comms` are -themselves blinded (via `commit_zk`), the additional `r_e1` ensures -independence. - -### 2d. `E₂` — Evaluation commitment (G₂, ZK-only) - -``` -E₂ = Γ₂,fin · y + r_e2 · H₂ -``` - -**Sent to transcript**: `vmv_e2` -**Verdict**: BLINDED. Pedersen commitment to the evaluation `y` with fresh -randomness `r_e2`. The verifier learns nothing about `y` from `E₂` alone. - -In transparent mode, the verifier computes `e₂ = Γ₂,fin · y` directly from -the public evaluation — no commitment is needed. - -### 2e. 
`y_com` — Evaluation Pedersen commitment (G₁, ZK-only) - -``` -y_com = Γ₁,fin · y + H₁ · r_y (r_y ← random) -``` - -**Sent to transcript**: `vmv_y_com` -**Verdict**: BLINDED. Standard Pedersen commitment. Together with `E₂`, this -enables the Sigma1 proof (§4a) to attest that both commit to the same `y` -without revealing it. - ---- - -## 3. Sigma Proofs (ZK-only, `reduce_and_fold.rs:452–570`) - -### 3a. Sigma1 — Same-`y` proof - -**Relation proved**: `E₂` and `y_com` commit to the same scalar `y`. - -``` -Prover: - k₁, k₂, k₃ ← random - a₁ = Γ₂,fin · k₁ + H₂ · k₂ - a₂ = Γ₁,fin · k₁ + H₁ · k₃ - (append a₁, a₂ → transcript) - c = H(transcript) - z₁ = k₁ + c·y, z₂ = k₂ + c·r_e2, z₃ = k₃ + c·r_y - -Verifier checks: - Γ₂,fin · z₁ + H₂ · z₂ == a₁ + c · E₂ - Γ₁,fin · z₁ + H₁ · z₃ == a₂ + c · y_com -``` - -**Sent**: `(a₁, a₂, z₁, z₂, z₃)` -**Verdict**: HVZK. Standard Sigma protocol with 3 random nonces. The -simulator picks `z₁, z₂, z₃` uniformly, then programs `a₁, a₂` to satisfy -the verification equations. Response distribution is identical to real proofs. - -### 3b. Sigma2 — VMV consistency proof - -**Relation proved**: `e(E₁, Γ₂,fin) − D₂` lies in the span of `H₁`, i.e., -the difference between `E₁`'s and `D₂`'s blinding is consistent: -`e(E₁, Γ₂,fin) − D₂ = e(H₁, r_e1 · Γ₂,fin + (−r_d2) · H₂)`. - -``` -Prover: - k₁, k₂ ← random - a = e(H₁, Γ₂,fin · k₁ + H₂ · k₂) - (append a → transcript) - c = H(transcript) - z₁ = k₁ + c · r_e1, z₂ = k₂ + c · (−r_d2) - -Verifier checks: - e(H₁, Γ₂,fin · z₁ + H₂ · z₂) == a + c · (e(E₁, Γ₂,fin) − D₂) -``` - -**Sent**: `(a, z₁, z₂)` -**Verdict**: HVZK. Standard 2-nonce Sigma protocol. Simulatable by the same -strategy as Sigma1. - ---- - -## 4. Reduce-and-Fold Rounds (`reduce_and_fold.rs`) - -Each round `t` (of `σ` total) produces two messages and consumes two -challenges `(βₜ, αₜ)`. - -### 4a. 
First Reduce Message (6 values per round) - -**Per-round blinds sampled**: -``` -round_d1[0], round_d1[1] ← random -round_d2[0], round_d2[1] ← random -``` - -| Value | Formula | Blinding | Verdict | -|-------|---------|----------|---------| -| `D₁L` | `e(v₁L, Γ₂') + round_d1[0] · HT` | `round_d1[0]` | **BLINDED** | -| `D₁R` | `e(v₁R, Γ₂') + round_d1[1] · HT` | `round_d1[1]` | **BLINDED** | -| `D₂L` | `e(Γ₁', v₂L) + round_d2[0] · HT` | `round_d2[0]` | **BLINDED** | -| `D₂R` | `e(Γ₁', v₂R) + round_d2[1] · HT` | `round_d2[1]` | **BLINDED** | -| `E₁β` | `MSM(Γ₁, s₂)` | **NONE** | **PUBLIC** (see below) | -| `E₂β` | `MSM(Γ₂, s₁)` | **NONE** | **PUBLIC** (see below) | - -**E₁β and E₂β are not masked**, but `s₁` and `s₂` are scalar vectors derived -solely from the evaluation point coordinates and the Fiat-Shamir challenges -(both public to the verifier). The generators `Γ₁, Γ₂` are public setup -parameters. Therefore: - -- `E₁β = MSM(Γ₁, s₂)` is a deterministic, publicly computable value. -- `E₂β = MSM(Γ₂, s₁)` is a deterministic, publicly computable value. -- **No polynomial information leaks through these values.** - -> **Note for committed-point applications**: If the evaluation point `z` is -> committed rather than public, the verifier could in principle reconstruct -> `E₁β` and `E₂β` only if they knew `z`. Since these values appear in the -> proof, a third party observing the proof (but not knowing `z`) could use -> `E₁β`, `E₂β` to recover information about the evaluation point. -> However, the standard Dory verification API (`verify()`) takes `point` as -> a **public input**. Hiding the point requires an additional protocol layer -> on top of Dory (e.g., proving in ZK that the committed point was used). - -### 4b. 
Second Reduce Message (6 values per round) - -**Per-round blinds sampled**: -``` -round_c[0], round_c[1] ← random -round_e1[0], round_e1[1] ← random -round_e2[0], round_e2[1] ← random -``` - -| Value | Formula | Blinding | Verdict | -|-------|---------|----------|---------| -| `C₊` | `e(v₁L, v₂R) + round_c[0] · HT` | `round_c[0]` | **BLINDED** | -| `C₋` | `e(v₁R, v₂L) + round_c[1] · HT` | `round_c[1]` | **BLINDED** | -| `E₁₊` | `MSM(v₁L, s₂R) + round_e1[0] · H₁` | `round_e1[0]` | **BLINDED** | -| `E₁₋` | `MSM(v₁R, s₂L) + round_e1[1] · H₁` | `round_e1[1]` | **BLINDED** | -| `E₂₊` | `MSM(v₂R, s₁L) + round_e2[0] · H₂` | `round_e2[0]` | **BLINDED** | -| `E₂₋` | `MSM(v₂L, s₁R) + round_e2[1] · H₂` | `round_e2[1]` | **BLINDED** | - -**Verdict**: All six values are independently blinded with fresh randomness. -The cross-products `e(v₁L, v₂R)` etc. encode polynomial information but are -fully hidden by the masks. - -### 4c. Blinding Accumulation - -After each challenge application, the prover accumulates blinds: - -``` -After β: r_c ← r_c + β · r_d2 + β⁻¹ · r_d1 - -After α: r_c ← r_c + α · round_c[0] + α⁻¹ · round_c[1] - r_d1 ← α · round_d1[0] + round_d1[1] - r_d2 ← α⁻¹ · round_d2[0] + round_d2[1] - r_e1 ← r_e1 + α · round_e1[0] + α⁻¹ · round_e1[1] - r_e2 ← r_e2 + α · round_e2[0] + α⁻¹ · round_e2[1] -``` - -This tracking is correct: the accumulated blinds mirror the verifier's -accumulation of the GT/G₁/G₂ elements, ensuring that the final scalar -product proof can account for all blinding contributions. - ---- - -## 5. 
Final Message (`reduce_and_fold.rs:381–420`) - -After `γ` is derived from the transcript: - -``` -r_final1, r_final2 ← random (zero in Transparent mode) - -E₁_final = v₁[0] + (γ · s₁[0] + r_final1) · H₁ -E₂_final = v₂[0] + (γ⁻¹ · s₂[0] + r_final2) · H₂ - -r_c ← r_c + γ · r_e2 + γ⁻¹ · r_e1 -``` - -**Sent to transcript**: `final_e1`, `final_e2` - -In ZK mode, `r_final1` and `r_final2` are fresh random scalars that mask the -deterministic offset `γ · s₁[0]` (resp. `γ⁻¹ · s₂[0]`), preventing recovery -of the folded vectors `v₁[0]` and `v₂[0]`. - -These blinds do **not** need to be tracked in the accumulated `r_c` because -`verify_final_zk` does not use the final message elements in the pairing check -— it uses the scalar product proof's blinded `e₁, e₂` instead. The final -message values serve only as entropy for the Fiat-Shamir `d` challenge. - -In Transparent mode, `r_final1 = r_final2 = 0` (via `Mode::sample`), so the -transparent verification path (`verify_final`) is unaffected. - -**Verdict**: **BLINDED** (ZK mode). The folded vectors `v₁[0]`, `v₂[0]` -cannot be recovered from the proof. - ---- - -## 6. Scalar Product Proof (ZK-only, `reduce_and_fold.rs:411–448`) - -Generated **before** `compute_final_message`, using the current `v₁[0], v₂[0]`: - -``` -sd₁, sd₂ ← random -d₁ = Γ₁,₀ · sd₁, d₂ = Γ₂,₀ · sd₂ - -rp₁, rp₂, rq, rr ← random -p₁ = e(d₁, Γ₂,₀) + rp₁ · HT -p₂ = e(Γ₁,₀, d₂) + rp₂ · HT -q = e(d₁, v₂) + e(v₁, d₂) + rq · HT -r = e(d₁, d₂) + rr · HT - -(append p₁, p₂, q, r → transcript) -c = H(transcript) ← Fiat-Shamir challenge - -Response: - e₁ = d₁ + v₁ · c (blinded by d₁) - e₂ = d₂ + v₂ · c (blinded by d₂) - r₁ = rp₁ + c · r_d1 (blinded by rp₁) - r₂ = rp₂ + c · r_d2 (blinded by rp₂) - r₃ = rr + c · rq + c² · r_c (blinded by rr, rq) -``` - -**Sent**: `(p₁, p₂, q, r, e₁, e₂, r₁, r₂, r₃)` - -**Verdict**: HVZK. 
This is a batched Sigma protocol for the relation: - -``` -e(v₁, v₂) + r_c · HT = C_accumulated -e(v₁, Γ₂,₀) + r_d1 · HT = D₁_accumulated -e(Γ₁,₀, v₂) + r_d2 · HT = D₂_accumulated -``` - -The 4 commitments `(p₁, p₂, q, r)` are independently blinded. The responses -are standard Sigma-protocol responses. A simulator can produce -indistinguishable transcripts by choosing responses first and computing -commitments. - -### Interaction with Final Message - -Since the final message is now independently blinded (§5), `v₁[0]` and `v₂[0]` -cannot be recovered. This means the scalar product proof's blinding commitments -`d₁, d₂` remain hidden, preserving the full HVZK property of the Sigma -protocol. - ---- - -## 7. Verification Paths - -### 7a. Transparent Final Check (`verify_final`) - -``` -LHS = e(E₁ + d·Γ₁,₀, E₂ + d⁻¹·Γ₂,₀) (4 pairings batched) -RHS = C + s₁·s₂·HT + χ₀ + d·D₂ + d⁻¹·D₁ + d²·D₂_init -``` - -All values on both sides are unblinded → no ZK. - -### 7b. ZK Final Check (`verify_final_zk`) - -``` -LHS = e(sp.e₁ + d·Γ₁,₀, sp.e₂ + d⁻¹·Γ₂,₀) -RHS = χ₀ + sp.r + c·sp.q + c²·C - + d·(sp.p₂ + c·D₂) + d⁻¹·(sp.p₁ + c·D₁) - − HT·(sp.r₃ + d·sp.r₂ + d⁻¹·sp.r₁) -``` - -Uses `sp.e₁/e₂` (randomly blinded) instead of `final_message.e₁/e₂`. -The random scalar `d` (derived after `final_e1, final_e2` are appended to the -transcript) batches 4 verification equations into one pairing. - -**Note**: `final_message.e1/e2` are appended to the transcript purely for -Fiat-Shamir entropy — the actual verification uses the scalar product proof's -blinded elements. - ---- - -## 8. Summary Table - -| Proof Element | Group | Blinded? | Blind Source | Leaks Polynomial Info? 
| -|---------------|-------|----------|-------------|----------------------| -| **VMV C** | GT | Yes | `r_c · HT` | No | -| **VMV D₂** | GT | Yes | `r_d2 · HT` | No | -| **VMV E₁** | G₁ | Yes | `r_e1 · H₁` | No | -| **VMV E₂** (ZK) | G₂ | Yes | `r_e2 · H₂` | No | -| **y_com** (ZK) | G₁ | Yes | `r_y · H₁` | No | -| **Sigma1** (a₁,a₂,z₁,z₂,z₃) | mixed | HVZK | `k₁,k₂,k₃` | No | -| **Sigma2** (a,z₁,z₂) | GT,F,F | HVZK | `k₁,k₂` | No | -| **D₁L, D₁R** | GT | Yes | `round_d1[·] · HT` | No | -| **D₂L, D₂R** | GT | Yes | `round_d2[·] · HT` | No | -| **E₁β** | G₁ | No | — | No (public: MSM(Γ₁, s₂)) | -| **E₂β** | G₂ | No | — | No (public: MSM(Γ₂, s₁)) | -| **C₊, C₋** | GT | Yes | `round_c[·] · HT` | No | -| **E₁₊, E₁₋** | G₁ | Yes | `round_e1[·] · H₁` | No | -| **E₂₊, E₂₋** | G₂ | Yes | `round_e2[·] · H₂` | No | -| **E₁_final** | G₁ | Yes | `r_final1 · H₁` | No | -| **E₂_final** | G₂ | Yes | `r_final2 · H₂` | No | -| **SP p₁,p₂,q,r** | GT | Yes | `rp₁,rp₂,rq,rr` | No | -| **SP e₁,e₂** | G₁,G₂ | Yes | `d₁ + v₁·c` | No | -| **SP r₁,r₂,r₃** | F | Yes | Sigma response | No | - ---- - -## 9. Blinding Budget - -Per proof (with `σ` rounds): - -| Phase | Blinds Sampled | Count | -|-------|---------------|-------| -| VMV | `r_c, r_d2, r_e1, r_e2` | 4 | -| VMV (ZK) | `r_y` | 1 | -| Sigma1 | `k₁, k₂, k₃` | 3 | -| Sigma2 | `k₁, k₂` | 2 | -| Per round (×σ) | `round_d1[2], round_d2[2], round_c[2], round_e1[2], round_e2[2]` | 10σ | -| Final message | `r_final1, r_final2` | 2 | -| Scalar product | `sd₁, sd₂, rp₁, rp₂, rq, rr` | 6 | - -**Total**: `18 + 10σ` random field elements per proof. - ---- - -## 10. Conclusions - -### What IS hidden (ZK mode) - -1. **All polynomial coefficients** — no intermediate message reveals any - individual coefficient or coefficient relation beyond the public evaluation. - -2. **The evaluation value `y`** — hidden behind Pedersen commitments `E₂` and - `y_com`, with consistency proven via Sigma1. - -3. 
**Internal protocol state** — all round messages involving `v₁, v₂` - cross-products and MSMs are independently blinded. - -4. **The accumulated blinding** `r_c` — correctly tracked through all challenge - transformations and consumed by the scalar product proof. - -### What is NOT hidden - -1. **The evaluation point `z`** — passed as a public input to `verify()`. - `E₁β` and `E₂β` are computable from `z` + setup, so they don't leak - anything the verifier doesn't already know. In a committed-point protocol, - these would need an additional hiding layer. - -2. **Matrix dimensions `ν, σ`** — stored in the proof struct as `nu, sigma`. - These reveal the matrix layout used for the polynomial but not the - polynomial itself. - -### Overall Verdict - -The implementation achieves **computational HVZK in the Random Oracle Model** -when the `zk` feature is enabled. Every protocol message that encodes -polynomial-dependent information is masked by independent randomness, with one -exception (`E₁β/E₂β` which are public, not polynomial-dependent). The three -Sigma sub-protocols (Sigma1, Sigma2, scalar product) are all standard and HVZK. -The final message elements are independently blinded, preventing recovery of -the folded vectors `v₁[0], v₂[0]`. 
From b3d96e5043017d5c902fb053725d2e3c24c2cc4b Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 17:35:38 -0500 Subject: [PATCH 13/16] refactor: commit at GT level instead row commits --- Cargo.lock | 82 ++++- Cargo.toml | 5 + benches/arkworks_proof.rs | 9 +- examples/basic_e2e.rs | 56 +-- examples/homomorphic.rs | 73 +--- examples/homomorphic_mixed_sizes.rs | 32 +- examples/non_square.rs | 62 +--- examples/zk_e2e.rs | 71 +--- examples/zk_statistical.rs | 513 ++++++++++++++++++++++++++++ src/backends/arkworks/ark_poly.rs | 16 +- src/evaluation_proof.rs | 14 +- src/lib.rs | 11 +- src/primitives/poly.rs | 19 +- src/reduce_and_fold.rs | 9 +- src/setup.rs | 14 +- tests/arkworks/evaluation.rs | 21 +- tests/arkworks/homomorphic.rs | 2 + tests/arkworks/integration.rs | 20 +- tests/arkworks/mod.rs | 2 - tests/arkworks/non_square.rs | 12 +- tests/arkworks/serialization.rs | 12 +- tests/arkworks/soundness.rs | 3 +- tests/arkworks/zk.rs | 42 ++- tests/arkworks/zk_statistical.rs | 502 --------------------------- 24 files changed, 754 insertions(+), 848 deletions(-) create mode 100644 examples/zk_statistical.rs delete mode 100644 tests/arkworks/zk_statistical.rs diff --git a/Cargo.lock b/Cargo.lock index f03781f..1f80369 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -395,6 +395,7 @@ dependencies = [ "serde", "thiserror", "tracing", + "tracing-subscriber", ] [[package]] @@ -533,18 +534,39 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + [[package]] name = "libc" version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + [[package]] name = "memchr" version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys", +] + [[package]] name = "num-bigint" version = "0.4.6" @@ -795,6 +817,21 @@ dependencies = [ "serde_core", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + [[package]] name = "subtle" version = "2.6.1" @@ -832,6 +869,15 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -866,11 +912,37 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = 
"ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ + "log", "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "nu-ansi-term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", ] [[package]] @@ -885,6 +957,12 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + [[package]] name = "version_check" version = "0.9.5" diff --git a/Cargo.toml b/Cargo.toml index 0d49143..e330c1a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,6 +72,7 @@ rayon = { version = "1.10", optional = true } [dev-dependencies] rand = "0.8" criterion = { version = "0.5", features = ["html_reports"] } +tracing-subscriber = { version = "0.3", features = ["fmt"] } [[example]] name = "basic_e2e" @@ -93,6 +94,10 @@ required-features = ["backends"] name = "zk_e2e" required-features = ["backends", "zk"] +[[example]] +name = "zk_statistical" +required-features = ["backends", "zk"] + [[bench]] name = "arkworks_proof" harness = false diff --git a/benches/arkworks_proof.rs b/benches/arkworks_proof.rs index b3bbaa4..c5dc4fd 100644 --- a/benches/arkworks_proof.rs +++ b/benches/arkworks_proof.rs @@ -74,7 +74,7 @@ fn bench_prove(c: &mut Criterion) { let nu = 13; let sigma = 13; - let (_, tier_1, _) = poly + let (_, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -85,6 +85,7 @@ fn bench_prove(c: &mut Criterion) { black_box(&poly), black_box(&point), black_box(tier_1.clone()), + black_box(commit_blind), 
black_box(nu), black_box(sigma), black_box(&prover_setup), @@ -100,7 +101,7 @@ fn bench_verify(c: &mut Criterion) { let nu = 13; let sigma = 13; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -109,6 +110,7 @@ fn bench_verify(c: &mut Criterion) { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -158,7 +160,7 @@ fn bench_end_to_end(c: &mut Criterion) { let poly = ArkworksPolynomial::new(coefficients); // Commit - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -172,6 +174,7 @@ fn bench_end_to_end(c: &mut Criterion) { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, diff --git a/examples/basic_e2e.rs b/examples/basic_e2e.rs index a1b6d6f..6c021d9 100644 --- a/examples/basic_e2e.rs +++ b/examples/basic_e2e.rs @@ -1,12 +1,9 @@ //! Basic end-to-end example of Dory polynomial commitment scheme //! -//! This example demonstrates the standard workflow with a square matrix layout: -//! - Setup generation -//! - Polynomial commitment -//! - Evaluation proof generation -//! - Verification +//! Demonstrates the standard workflow with a square matrix layout: +//! setup, commit, evaluate, prove, verify. //! -//! Matrix dimensions: 16×16 (nu=4, sigma=4, total 256 coefficients) +//! 
Matrix dimensions: 16x16 (nu=4, sigma=4, total 256 coefficients) use dory_pcs::backends::arkworks::{ ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, @@ -14,69 +11,36 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use tracing::info; fn main() -> Result<(), Box> { - info!("Dory PCS - Basic End-to-End Example"); - info!("====================================\n"); + let (prover_setup, verifier_setup) = setup::(10); - // Step 1: Setup - let max_log_n = 10; - info!( - "1. Generating transparent setup (max_log_n = {})...", - max_log_n - ); - let (prover_setup, verifier_setup) = setup::(max_log_n); - info!(" ✓ Setup complete\n"); - - // Step 2: Create polynomial - // Square matrix: nu = sigma = 4 → 16 rows × 16 columns = 256 coefficients let nu = 4; let sigma = 4; - let poly_size = 1 << (nu + sigma); // 2^8 = 256 - let num_vars = nu + sigma; // 8 - - info!("2. Creating random polynomial..."); - info!(" Matrix layout: {}×{} (square)", 1 << nu, 1 << sigma); - info!(" Total coefficients: {}", poly_size); - info!(" Number of variables: {}", num_vars); + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); - info!(" ✓ Polynomial created\n"); - // Step 3: Commit - info!("3. Computing polynomial commitment..."); - let (tier_2, tier_1, _) = + let (tier_2, tier_1, commit_blind) = poly.commit::(nu, sigma, &prover_setup)?; - info!( - " ✓ Tier-1 commitment: {} row commitments (G1)", - tier_1.len() - ); - info!(" ✓ Tier-2 commitment: final commitment (GT)\n"); - // Step 4: Evaluation let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); - info!("4. 
Evaluating polynomial at random point..."); - info!(" ✓ Evaluation result computed\n"); - // Step 5: Prove - info!("5. Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-basic-example"); let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, &mut prover_transcript, )?; - info!(" ✓ Proof generated (logarithmic size)\n"); - // Step 6: Verify - info!("6. Verifying proof..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-basic-example"); verify::<_, BN254, G1Routines, G2Routines, _>( tier_2, @@ -86,10 +50,6 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!(" ✓ Proof verified successfully!\n"); - - info!("===================================="); - info!("Example completed successfully!"); Ok(()) } diff --git a/examples/homomorphic.rs b/examples/homomorphic.rs index e1f40ae..1cab76e 100644 --- a/examples/homomorphic.rs +++ b/examples/homomorphic.rs @@ -1,12 +1,6 @@ //! Homomorphic combination example for Dory commitments //! -//! This example demonstrates the homomorphic properties of Dory commitments: -//! - Commit to multiple polynomials independently -//! - Combine commitments -//! - Prove evaluation on the combined polynomial using the combined commitment -//! - Verify that the homomorphic combination works correctly -//! -//! Homomorphic property: Com(r₁·P₁ + r₂·P₂ + ... + rₙ·Pₙ) = r₁·Com(P₁) + r₂·Com(P₂) + ... + rₙ·Com(Pₙ) +//! Demonstrates: Com(r1*P1 + r2*P2 + ... + rn*Pn) = r1*Com(P1) + r2*Com(P2) + ... 
+ rn*Com(Pn) use dory_pcs::backends::arkworks::{ ArkFr, ArkG1, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, @@ -14,40 +8,23 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use tracing::info; fn main() -> Result<(), Box> { - info!("Dory PCS - Homomorphic Combination Example"); - info!("===========================================\n"); - - // Step 1: Setup - let max_log_n = 10; - info!( - "1. Generating transparent setup (max_log_n = {})...", - max_log_n - ); - let (prover_setup, verifier_setup) = setup::(max_log_n); - info!(" ✓ Setup complete\n"); + let (prover_setup, verifier_setup) = setup::(10); - // Parameters let nu = 4; let sigma = 4; - let poly_size = 1 << (nu + sigma); // 256 - let num_vars = nu + sigma; // 8 + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; let num_polys = 5; - info!("2. Creating {} random polynomials...", num_polys); - info!(" Each polynomial: {} coefficients", poly_size); let polys: Vec = (0..num_polys) .map(|_| { let coeffs: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); ArkworksPolynomial::new(coeffs) }) .collect(); - info!(" ✓ Polynomials created\n"); - // Step 3: Commit to each polynomial - info!("3. Computing individual commitments..."); let commitments: Vec<_> = polys .iter() .map(|poly| { @@ -55,17 +32,9 @@ fn main() -> Result<(), Box> { .unwrap() }) .collect(); - info!(" ✓ {} commitments computed\n", num_polys); - // Step 4: Generate random coefficients for linear combination - info!("4. Generating random combination coefficients..."); let coeffs: Vec = (0..num_polys).map(|_| ArkFr::random()).collect(); - info!(" ✓ Coefficients: r₁, r₂, ..., r{}\n", num_polys); - // Step 5: Homomorphically combine commitments - info!("5. Combining commitments homomorphically..."); - - // Tier-2 (GT group): combined_tier2 = r₁·C₁ + r₂·C₂ + ... 
+ r₅·C₅ #[allow(clippy::op_ref)] let mut combined_tier2 = coeffs[0] * &commitments[0].0; for i in 1..num_polys { @@ -74,7 +43,6 @@ fn main() -> Result<(), Box> { combined_tier2 = combined_tier2 + scaled; } - // Tier-1 (G1 group): For each row, combine the row commitments let num_rows = 1 << nu; let mut combined_tier1 = vec![ArkG1::identity(); num_rows]; for i in 0..num_polys { @@ -83,20 +51,11 @@ fn main() -> Result<(), Box> { combined_tier1[row_idx] = combined_tier1[row_idx] + scaled; } } - info!(" ✓ Tier-2 combined (GT)"); - info!(" ✓ Tier-1 combined ({} rows in G1)\n", num_rows); - // Step 6: Compute combined polynomial - info!( - "6. Computing combined polynomial: P = r₁·P₁ + r₂·P₂ + ... + r{}·P{}...", - num_polys, num_polys - ); let mut combined_coeffs = vec![ArkFr::zero(); poly_size]; for poly_idx in 0..num_polys { - // Access coefficients through evaluation at hypercube vertices #[allow(clippy::needless_range_loop)] for coeff_idx in 0..poly_size { - // Create point that selects the coeff_idx-th vertex of the hypercube let point: Vec = (0..num_vars) .map(|bit_idx| { if (coeff_idx >> bit_idx) & 1 == 1 { @@ -114,41 +73,29 @@ fn main() -> Result<(), Box> { } } let combined_poly = ArkworksPolynomial::new(combined_coeffs); - info!(" ✓ Combined polynomial computed\n"); - // Step 7: Evaluate and verify consistency - info!("7. Verifying homomorphic property..."); let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = combined_poly.evaluate(&point); - // Check that combined polynomial evaluation matches linear combination let mut expected_eval = ArkFr::zero(); for i in 0..num_polys { let poly_eval = polys[i].evaluate(&point); expected_eval = expected_eval + coeffs[i].mul(&poly_eval); } - assert_eq!( - evaluation, expected_eval, - "Combined polynomial evaluation must match linear combination" - ); - info!(" ✓ Evaluation matches: P(x) = Σ rᵢ·Pᵢ(x)\n"); + assert_eq!(evaluation, expected_eval); - // Step 8: Generate proof - info!("8. 
Generating evaluation proof for combined polynomial..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-example"); let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, + ArkFr::zero(), nu, sigma, &prover_setup, &mut prover_transcript, )?; - info!(" ✓ Proof generated\n"); - // Step 9: Verify - info!("9. Verifying proof with combined commitment..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-homomorphic-example"); verify::<_, BN254, G1Routines, G2Routines, _>( combined_tier2, @@ -158,14 +105,6 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!(" ✓ Proof verified successfully!\n"); - - info!("==========================================="); - info!("Homomorphic combination verified!"); - info!( - "Combined {} polynomials using random coefficients", - num_polys - ); Ok(()) } diff --git a/examples/homomorphic_mixed_sizes.rs b/examples/homomorphic_mixed_sizes.rs index 6e6bd27..97157a8 100644 --- a/examples/homomorphic_mixed_sizes.rs +++ b/examples/homomorphic_mixed_sizes.rs @@ -1,8 +1,7 @@ -//! Mixed-size homomorphic combination example for Dory commitments. +//! Mixed-size homomorphic combination example for Dory commitments //! -//! Demonstrates how to homomorphically combine two polynomials that only use a -//! subset of the coefficient domain (sizes 5 and 20, padded to 32) and then -//! produce and verify an evaluation proof for the combined commitment. +//! Demonstrates homomorphic combination of polynomials with different matrix +//! dimensions (sizes 16 and 4, combined in a 4x4 layout). 
use dory_pcs::backends::arkworks::{ ArkFr, ArkG1, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, @@ -10,14 +9,10 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use tracing::info; fn main() -> Result<(), Box> { - info!("Dory PCS - Mixed-size Homomorphic Combination Example"); - let (prover_setup, verifier_setup) = setup::(4); - info!("Creating two polynomials with logical sizes 16 and 4..."); let mut coeffs_poly1 = vec![ArkFr::zero(); 16]; let mut coeffs_poly2 = vec![ArkFr::zero(); 4]; for coeff in coeffs_poly1.iter_mut() { @@ -29,25 +24,17 @@ fn main() -> Result<(), Box> { let poly1 = ArkworksPolynomial::new(coeffs_poly1.clone()); let poly2 = ArkworksPolynomial::new(coeffs_poly2.clone()); - info!("Poly1: {:?}", poly1); - info!("Poly2: {:?}", poly2); - let commitment1 = poly1 .commit::(2, 2, &prover_setup) .unwrap(); let commitment2 = poly2 .commit::(1, 1, &prover_setup) .unwrap(); - info!("✓ Commitments ready\n"); - info!("Sampling random combination scalars r1, r2..."); let coeff_scalars = [ArkFr::random(), ArkFr::random()]; - info!("Combining tier-2 commitments (GT)..."); let combined_tier2 = coeff_scalars[0] * commitment1.0 + coeff_scalars[1] * commitment2.0; - info!("Combining tier-1 commitments (G1 rows)..."); - let mut combined_tier1 = vec![ArkG1::identity(); 4]; for (row_idx, row_commit) in commitment1.1.iter().enumerate() { combined_tier1[row_idx] = combined_tier1[row_idx] + (coeff_scalars[0] * row_commit); @@ -56,7 +43,6 @@ fn main() -> Result<(), Box> { combined_tier1[row_idx] = combined_tier1[row_idx] + (coeff_scalars[1] * row_commit); } - info!("Building combined polynomial coefficients..."); let mut combined_coeffs = vec![ArkFr::zero(); 16]; for idx in 0..16 { let term1 = coeff_scalars[0].mul(&coeffs_poly1[idx]); @@ -72,8 +58,6 @@ fn main() -> Result<(), Box> { } let combined_poly = 
ArkworksPolynomial::new(combined_coeffs); - info!("Combined polynomial: {:?}", combined_poly); - let mut padded_poly2_coefficients = vec![ArkFr::zero(); 16]; padded_poly2_coefficients[0] = coeffs_poly2[0]; padded_poly2_coefficients[1] = coeffs_poly2[1]; @@ -81,11 +65,9 @@ fn main() -> Result<(), Box> { padded_poly2_coefficients[5] = coeffs_poly2[3]; let padded_poly2 = ArkworksPolynomial::new(padded_poly2_coefficients); - info!("Evaluating combined polynomial at a random point..."); let point: Vec = (0..4).map(|_| ArkFr::random()).collect(); let evaluation = combined_poly.evaluate(&point); - info!("Checking that evaluation matches r1·P1(x) + r2·P2(x)..."); let eval1 = poly1.evaluate(&point); let eval2 = padded_poly2.evaluate(&point); let eval3 = poly2.evaluate(&[point[0], point[2]]) @@ -96,22 +78,19 @@ fn main() -> Result<(), Box> { expected = expected + coeff_scalars[0].mul(&eval1); expected = expected + coeff_scalars[1].mul(&eval2); assert_eq!(evaluation, expected); - info!("✓ Evaluation matches linear combination\n"); - info!("Generating evaluation proof with combined commitment..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-mixed"); let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, + ArkFr::zero(), 2, 2, &prover_setup, &mut prover_transcript, )?; - info!("✓ Proof generated\n"); - info!("Verifying proof against combined tier-2 commitment..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-homomorphic-mixed"); verify::<_, BN254, G1Routines, G2Routines, _>( combined_tier2, @@ -121,14 +100,11 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!("✓ Proof verified!"); - info!("==========================================="); let padded_poly_commitment = padded_poly2 .commit::(2, 2, &prover_setup) .unwrap(); assert_eq!(padded_poly_commitment.0, commitment2.0); - info!("✓ Padded poly commitment matches original poly2 
commitment"); Ok(()) } diff --git a/examples/non_square.rs b/examples/non_square.rs index 7ba5226..9694c22 100644 --- a/examples/non_square.rs +++ b/examples/non_square.rs @@ -1,10 +1,8 @@ //! Non-square matrix example for Dory commitments //! -//! This example demonstrates that Dory supports non-square matrix layouts -//! where the number of rows differs from the number of columns. +//! Demonstrates that Dory supports non-square matrix layouts where nu < sigma. //! -//! Constraint: nu ≤ sigma (rows ≤ columns) -//! Matrix dimensions: 8×16 (nu=3, sigma=4, total 128 coefficients) +//! Matrix dimensions: 8x16 (nu=3, sigma=4, total 128 coefficients) use dory_pcs::backends::arkworks::{ ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, @@ -12,70 +10,36 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; -use tracing::info; fn main() -> Result<(), Box> { - info!("Dory PCS - Non-Square Matrix Example"); - info!("=====================================\n"); + let (prover_setup, verifier_setup) = setup::(10); - // Step 1: Setup - let max_log_n = 10; - info!( - "1. Generating transparent setup (max_log_n = {})...", - max_log_n - ); - let (prover_setup, verifier_setup) = setup::(max_log_n); - info!(" ✓ Setup complete\n"); - - // Step 2: Create polynomial with non-square matrix layout - // Non-square: nu = 3, sigma = 4 → 8 rows × 16 columns = 128 coefficients let nu = 3; let sigma = 4; - let poly_size = 1 << (nu + sigma); // 2^7 = 128 - let num_vars = nu + sigma; // 7 - - info!("2. 
Creating random polynomial..."); - info!(" Matrix layout: {}×{} (NON-SQUARE)", 1 << nu, 1 << sigma); - info!(" Total coefficients: {}", poly_size); - info!(" Number of variables: {}", num_vars); - info!(" Constraint: nu ({}) ≤ sigma ({})", nu, sigma); + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); - info!(" ✓ Polynomial created\n"); - // Step 3: Commit - info!("3. Computing polynomial commitment..."); - let (tier_2, tier_1, _) = + let (tier_2, tier_1, commit_blind) = poly.commit::(nu, sigma, &prover_setup)?; - info!( - " ✓ Tier-1 commitment: {} row commitments (G1)", - tier_1.len() - ); - info!(" ✓ Tier-2 commitment: final commitment (GT)\n"); - // Step 4: Evaluation let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); - info!("4. Evaluating polynomial at random point..."); - info!(" ✓ Evaluation result computed\n"); - // Step 5: Prove - info!("5. Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-non-square-example"); let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, &mut prover_transcript, )?; - info!(" ✓ Proof generated (logarithmic size)\n"); - // Step 6: Verify - info!("6. 
Verifying proof..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-non-square-example"); verify::<_, BN254, G1Routines, G2Routines, _>( tier_2, @@ -85,18 +49,6 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!(" ✓ Proof verified successfully!\n"); - - info!("====================================="); - info!("Non-square matrix example completed!"); - info!( - "Matrix: {}×{} ({}×{} = {} coefficients)", - 1 << nu, - 1 << sigma, - 1 << nu, - 1 << sigma, - poly_size - ); Ok(()) } diff --git a/examples/zk_e2e.rs b/examples/zk_e2e.rs index 8ddfb6d..0bf4162 100644 --- a/examples/zk_e2e.rs +++ b/examples/zk_e2e.rs @@ -1,92 +1,47 @@ //! Zero-knowledge end-to-end example of Dory polynomial commitment scheme //! -//! This example demonstrates the ZK workflow where the prover generates a -//! hiding proof. The only API difference from transparent mode is switching -//! the mode type parameter from `Transparent` to `ZK`. +//! Demonstrates the full ZK workflow where both the commitment and the proof +//! are hiding. The `ZK` mode type parameter is used for both `commit()` and +//! `prove()`. //! -//! In ZK mode: -//! - Protocol messages are blinded with random scalars -//! - The proof contains additional sigma and scalar-product sub-proofs -//! - The evaluation commitment (`y_com`) hides the actual evaluation value -//! -//! Matrix dimensions: 16×16 (nu=4, sigma=4, total 256 coefficients) +//! 
Matrix dimensions: 16x16 (nu=4, sigma=4, total 256 coefficients) use dory_pcs::backends::arkworks::{ ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, }; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify, Transparent, ZK}; -use tracing::info; +use dory_pcs::{prove, setup, verify, ZK}; fn main() -> Result<(), Box> { - info!("Dory PCS - Zero-Knowledge End-to-End Example"); - info!("==============================================\n"); - - // Step 1: Setup (identical to transparent mode) - let max_log_n = 10; - info!("1. Generating setup (max_log_n = {})...", max_log_n); - let (prover_setup, verifier_setup) = setup::(max_log_n); - info!(" Setup complete\n"); + let (prover_setup, verifier_setup) = setup::(10); - // Step 2: Create polynomial let nu = 4; let sigma = 4; - let poly_size = 1 << (nu + sigma); // 256 - let num_vars = nu + sigma; // 8 - - info!("2. Creating random polynomial..."); - info!(" Matrix layout: {}x{} (square)", 1 << nu, 1 << sigma); - info!(" Total coefficients: {}", poly_size); + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); - info!(" Polynomial created\n"); - // Step 3: Commit (identical to transparent mode) - info!("3. Computing polynomial commitment..."); - let (tier_2, tier_1, _) = - poly.commit::(nu, sigma, &prover_setup)?; - info!(" Tier-1: {} row commitments", tier_1.len()); - info!(" Tier-2: final GT commitment\n"); + let (tier_2, tier_1, commit_blind) = + poly.commit::(nu, sigma, &prover_setup)?; - // Step 4: Evaluate let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); - info!("4. Evaluated polynomial at random point\n"); - // Step 5: Prove in ZK mode - // The only API difference: `ZK` replaces `Transparent` as the mode parameter. 
- // This causes all protocol messages to be blinded with random scalars and - // generates additional sigma1, sigma2, and scalar-product sub-proofs. - info!("5. Generating ZK evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-zk-example"); let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, ZK>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, &mut prover_transcript, )?; - info!(" Proof generated"); - info!( - " ZK sub-proofs present: e2={}, y_com={}, sigma1={}, sigma2={}, scalar_product={}", - proof.e2.is_some(), - proof.y_com.is_some(), - proof.sigma1_proof.is_some(), - proof.sigma2_proof.is_some(), - proof.scalar_product_proof.is_some(), - ); - info!( - " {} reduce rounds (logarithmic)\n", - proof.first_messages.len() - ); - // Step 6: Verify (identical call signature to transparent mode) - // The verifier detects ZK mode from the proof's e2/y_com fields. - info!("6. Verifying ZK proof..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-zk-example"); verify::<_, BN254, G1Routines, G2Routines, _>( tier_2, @@ -96,10 +51,6 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!(" Proof verified successfully!\n"); - - info!("=============================================="); - info!("ZK example completed successfully!"); Ok(()) } diff --git a/examples/zk_statistical.rs b/examples/zk_statistical.rs new file mode 100644 index 0000000..ba876f7 --- /dev/null +++ b/examples/zk_statistical.rs @@ -0,0 +1,513 @@ +//! Statistical tests for zero-knowledge property of Dory PCS +//! +//! Verifies that proof elements are statistically indistinguishable from uniform +//! random regardless of the witness (polynomial) distribution. +//! +//! ```sh +//! cargo run --release --features "backends zk" --example zk_statistical +//! 
``` + +use ark_serialize::CanonicalSerialize; +use dory_pcs::backends::arkworks::{ + ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, +}; +use dory_pcs::primitives::arithmetic::Field; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{create_evaluation_proof, setup, verify, DoryProof, ZK}; +use std::collections::HashMap; +use tracing::info; + +const NUM_BUCKETS: usize = 16; +const NUM_TRIALS: usize = 1000; +/// chi-squared critical value: df=15, alpha=0.001 (Bonferroni-safe for ~120 elements) +const CHI2_CRITICAL: f64 = 37.70; + +fn random_polynomial(size: usize) -> ArkworksPolynomial { + let coefficients: Vec = (0..size).map(|_| ArkFr::random()).collect(); + ArkworksPolynomial::new(coefficients) +} + +fn random_point(num_vars: usize) -> Vec { + (0..num_vars).map(|_| ArkFr::random()).collect() +} + +fn fresh_transcript() -> Blake2bTranscript { + Blake2bTranscript::new(b"dory-test") +} + +struct BucketTracker { + buckets: HashMap>, +} + +impl BucketTracker { + fn new() -> Self { + Self { + buckets: HashMap::new(), + } + } + + fn record(&mut self, name: &str, bucket: usize) { + self.buckets + .entry(name.to_string()) + .or_insert_with(|| vec![0; NUM_BUCKETS])[bucket] += 1; + } + + fn chi_squared(&self, name: &str, expected: f64) -> Option { + self.buckets.get(name).map(|buckets| { + buckets + .iter() + .map(|&observed| { + let diff = observed as f64 - expected; + diff * diff / expected + }) + .sum() + }) + } + + fn all_names(&self) -> Vec { + let mut names: Vec<_> = self.buckets.keys().cloned().collect(); + names.sort(); + names + } +} + +fn bucket_from_serializable(elem: &T) -> usize { + let mut bytes = Vec::new(); + elem.serialize_compressed(&mut bytes).unwrap(); + (bytes[0] as usize) % NUM_BUCKETS +} + +type ArkDoryProof = DoryProof< + dory_pcs::backends::arkworks::ArkG1, + dory_pcs::backends::arkworks::ArkG2, + dory_pcs::backends::arkworks::ArkGT, +>; + +fn collect_full_zk_proof_stats(proof: &ArkDoryProof, tracker: &mut 
BucketTracker) { + tracker.record("zk_vmv_c", bucket_from_serializable(&proof.vmv_message.c)); + tracker.record("zk_vmv_d2", bucket_from_serializable(&proof.vmv_message.d2)); + tracker.record("zk_vmv_e1", bucket_from_serializable(&proof.vmv_message.e1)); + + if let Some(ref e2) = proof.e2 { + tracker.record("zk_vmv_e2", bucket_from_serializable(e2)); + } + if let Some(ref y_com) = proof.y_com { + tracker.record("zk_vmv_y_com", bucket_from_serializable(y_com)); + } + + for (i, msg) in proof.first_messages.iter().enumerate() { + let prefix = format!("zk_first_{i}"); + tracker.record( + &format!("{prefix}_d1_left"), + bucket_from_serializable(&msg.d1_left), + ); + tracker.record( + &format!("{prefix}_d1_right"), + bucket_from_serializable(&msg.d1_right), + ); + tracker.record( + &format!("{prefix}_d2_left"), + bucket_from_serializable(&msg.d2_left), + ); + tracker.record( + &format!("{prefix}_d2_right"), + bucket_from_serializable(&msg.d2_right), + ); + } + + for (i, msg) in proof.second_messages.iter().enumerate() { + let prefix = format!("zk_second_{i}"); + tracker.record( + &format!("{prefix}_c_plus"), + bucket_from_serializable(&msg.c_plus), + ); + tracker.record( + &format!("{prefix}_c_minus"), + bucket_from_serializable(&msg.c_minus), + ); + tracker.record( + &format!("{prefix}_e1_plus"), + bucket_from_serializable(&msg.e1_plus), + ); + tracker.record( + &format!("{prefix}_e1_minus"), + bucket_from_serializable(&msg.e1_minus), + ); + tracker.record( + &format!("{prefix}_e2_plus"), + bucket_from_serializable(&msg.e2_plus), + ); + tracker.record( + &format!("{prefix}_e2_minus"), + bucket_from_serializable(&msg.e2_minus), + ); + } + + tracker.record( + "zk_final_e1", + bucket_from_serializable(&proof.final_message.e1), + ); + tracker.record( + "zk_final_e2", + bucket_from_serializable(&proof.final_message.e2), + ); + + if let Some(ref sigma1) = proof.sigma1_proof { + tracker.record("sigma1_a1", bucket_from_serializable(&sigma1.a1)); + tracker.record("sigma1_a2", 
bucket_from_serializable(&sigma1.a2)); + tracker.record("sigma1_z1", bucket_from_serializable(&sigma1.z1)); + tracker.record("sigma1_z2", bucket_from_serializable(&sigma1.z2)); + tracker.record("sigma1_z3", bucket_from_serializable(&sigma1.z3)); + } + + if let Some(ref sigma2) = proof.sigma2_proof { + tracker.record("sigma2_a", bucket_from_serializable(&sigma2.a)); + tracker.record("sigma2_z1", bucket_from_serializable(&sigma2.z1)); + tracker.record("sigma2_z2", bucket_from_serializable(&sigma2.z2)); + } + + if let Some(ref sp) = proof.scalar_product_proof { + tracker.record("zk_sp_p1", bucket_from_serializable(&sp.p1)); + tracker.record("zk_sp_p2", bucket_from_serializable(&sp.p2)); + tracker.record("zk_sp_q", bucket_from_serializable(&sp.q)); + tracker.record("zk_sp_r", bucket_from_serializable(&sp.r)); + tracker.record("zk_sp_e1", bucket_from_serializable(&sp.e1)); + tracker.record("zk_sp_e2", bucket_from_serializable(&sp.e2)); + tracker.record("zk_sp_r1", bucket_from_serializable(&sp.r1)); + tracker.record("zk_sp_r2", bucket_from_serializable(&sp.r2)); + tracker.record("zk_sp_r3", bucket_from_serializable(&sp.r3)); + } +} + +fn prove_verify_collect( + poly: &ArkworksPolynomial, + point: &[ArkFr], + nu: usize, + sigma: usize, + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, + tracker: &mut BucketTracker, +) { + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, prover_setup) + .unwrap(); + + let evaluation = poly.evaluate(point); + let mut transcript = fresh_transcript(); + let (proof, _) = create_evaluation_proof::<_, BN254, G1Routines, G2Routines, _, _, ZK>( + poly, + point, + Some(tier_1), + commit_blind, + nu, + sigma, + prover_setup, + &mut transcript, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + verify::<_, BN254, G1Routines, G2Routines, _>( + tier_2, + evaluation, + point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .expect("proof verification failed"); 
+ + collect_full_zk_proof_stats(&proof, tracker); +} + +fn two_sample_chi_squared(a: &[usize], b: &[usize]) -> f64 { + let n_a: f64 = a.iter().sum::() as f64; + let n_b: f64 = b.iter().sum::() as f64; + let n_total = n_a + n_b; + + a.iter() + .zip(b.iter()) + .map(|(&obs_a, &obs_b)| { + let pooled = obs_a as f64 + obs_b as f64; + if pooled < 1.0 { + return 0.0; + } + let expected_a = pooled * n_a / n_total; + let expected_b = pooled * n_b / n_total; + let term_a = if expected_a > 0.0 { + (obs_a as f64 - expected_a).powi(2) / expected_a + } else { + 0.0 + }; + let term_b = if expected_b > 0.0 { + (obs_b as f64 - expected_b).powi(2) / expected_b + } else { + 0.0 + }; + term_a + term_b + }) + .sum() +} + +fn assert_uniformity(trackers: &[(&str, &BucketTracker)], expected: f64) { + let mut failures = Vec::new(); + + for &(label, tracker) in trackers { + for name in tracker.all_names() { + if let Some(chi2) = tracker.chi_squared(&name, expected) { + if chi2 >= CHI2_CRITICAL { + failures.push(format!( + "{label}/{name}: chi2={chi2:.2} >= {CHI2_CRITICAL:.2}" + )); + } + } + } + } + + assert!( + failures.is_empty(), + "ZK statistical test failed - {} elements showed non-uniform distribution:\n{}", + failures.len(), + failures.join("\n") + ); +} + +fn assert_witness_independence(trackers: &[(&str, &BucketTracker)]) { + let mut failures = Vec::new(); + + for i in 0..trackers.len() { + for j in (i + 1)..trackers.len() { + let (label_a, tracker_a) = trackers[i]; + let (label_b, tracker_b) = trackers[j]; + + for name in tracker_a.all_names() { + let Some(buckets_a) = tracker_a.buckets.get(&name) else { + continue; + }; + let Some(buckets_b) = tracker_b.buckets.get(&name) else { + continue; + }; + + let chi2 = two_sample_chi_squared(buckets_a, buckets_b); + if chi2 >= CHI2_CRITICAL { + failures.push(format!( + "{label_a} vs {label_b}/{name}: chi2={chi2:.2} >= {CHI2_CRITICAL:.2}" + )); + } + } + } + } + + assert!( + failures.is_empty(), + "ZK witness independence test failed - {} 
elements showed witness-dependent distribution:\n{}", + failures.len(), + failures.join("\n") + ); +} + +fn test_statistical_indistinguishability( + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, +) { + let nu = 2; + let sigma = 2; + let poly_size = 1 << (nu + sigma); + let point = random_point(nu + sigma); + + let mut tracker_zeros = BucketTracker::new(); + let mut tracker_ones = BucketTracker::new(); + let mut tracker_random = BucketTracker::new(); + + for _ in 0..NUM_TRIALS { + let zeros = ArkworksPolynomial::new(vec![ArkFr::zero(); poly_size]); + prove_verify_collect( + &zeros, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_zeros, + ); + + let ones = ArkworksPolynomial::new(vec![ArkFr::one(); poly_size]); + prove_verify_collect( + &ones, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_ones, + ); + + let random = random_polynomial(poly_size); + prove_verify_collect( + &random, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_random, + ); + } + + let expected = NUM_TRIALS as f64 / NUM_BUCKETS as f64; + assert_uniformity( + &[ + ("zeros", &tracker_zeros), + ("ones", &tracker_ones), + ("random", &tracker_random), + ], + expected, + ); +} + +fn test_statistical_indistinguishability_non_square( + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, +) { + let nu = 1; + let sigma = 3; + let poly_size = 1 << (nu + sigma); + let point = random_point(nu + sigma); + + let mut tracker_zeros = BucketTracker::new(); + let mut tracker_ones = BucketTracker::new(); + let mut tracker_random = BucketTracker::new(); + + for _ in 0..NUM_TRIALS { + let zeros = ArkworksPolynomial::new(vec![ArkFr::zero(); poly_size]); + prove_verify_collect( + &zeros, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_zeros, + ); + + let ones = ArkworksPolynomial::new(vec![ArkFr::one(); poly_size]); + prove_verify_collect( + &ones, + 
&point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_ones, + ); + + let random = random_polynomial(poly_size); + prove_verify_collect( + &random, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_random, + ); + } + + let expected = NUM_TRIALS as f64 / NUM_BUCKETS as f64; + assert_uniformity( + &[ + ("zeros", &tracker_zeros), + ("ones", &tracker_ones), + ("random", &tracker_random), + ], + expected, + ); +} + +fn test_witness_independence( + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, +) { + let nu = 2; + let sigma = 2; + let poly_size = 1 << (nu + sigma); + let point = random_point(nu + sigma); + + let mut tracker_zeros = BucketTracker::new(); + let mut tracker_ones = BucketTracker::new(); + let mut tracker_skewed = BucketTracker::new(); + let mut tracker_uniform = BucketTracker::new(); + + for _ in 0..NUM_TRIALS { + let zeros = ArkworksPolynomial::new(vec![ArkFr::zero(); poly_size]); + prove_verify_collect( + &zeros, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_zeros, + ); + + let ones = ArkworksPolynomial::new(vec![ArkFr::one(); poly_size]); + prove_verify_collect( + &ones, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_ones, + ); + + let mut skewed_coeffs = vec![ArkFr::zero(); poly_size]; + skewed_coeffs[0] = ArkFr::from_u64(42); + let skewed = ArkworksPolynomial::new(skewed_coeffs); + prove_verify_collect( + &skewed, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_skewed, + ); + + let uniform = random_polynomial(poly_size); + prove_verify_collect( + &uniform, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_uniform, + ); + } + + assert_witness_independence(&[ + ("zeros", &tracker_zeros), + ("ones", &tracker_ones), + ("skewed", &tracker_skewed), + ("uniform", &tracker_uniform), + ]); +} + +fn main() { + tracing_subscriber::fmt::init(); + + let (prover_setup, verifier_setup) = 
setup::(6); + + info!("[1/3] statistical indistinguishability (square, nu=2 sigma=2)..."); + test_statistical_indistinguishability(&prover_setup, &verifier_setup); + info!(" PASS"); + + info!("[2/3] statistical indistinguishability (non-square, nu=1 sigma=3)..."); + test_statistical_indistinguishability_non_square(&prover_setup, &verifier_setup); + info!(" PASS"); + + info!("[3/3] witness independence (4 distributions, pairwise chi-squared)..."); + test_witness_independence(&prover_setup, &verifier_setup); + info!(" PASS"); +} diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs index 81d1cfd..875de23 100644 --- a/src/backends/arkworks/ark_poly.rs +++ b/src/backends/arkworks/ark_poly.rs @@ -60,12 +60,13 @@ impl Polynomial for ArkworksPolynomial { nu: usize, sigma: usize, setup: &ProverSetup, - ) -> Result<(E::GT, Vec, Option>), DoryError> + ) -> Result<(E::GT, Vec, ArkFr), DoryError> where E: PairingCurve, Mo: Mode, M1: DoryRoutines, E::G1: Group, + E::GT: Group, { let expected_len = 1 << (nu + sigma); if self.coefficients.len() != expected_len { @@ -79,20 +80,21 @@ impl Polynomial for ArkworksPolynomial { let num_cols = 1 << sigma; let g1 = &setup.g1_vec[..num_cols]; - let blinds: Vec = (0..num_rows).map(|_| Mo::sample()).collect(); - + // Row commitments are always unblinded (internal to prover) let row_commitments: Vec = (0..num_rows) .map(|i| { let row = &self.coefficients[i * num_cols..(i + 1) * num_cols]; - Mo::mask(M1::msm(g1, row), &setup.h1, &blinds[i]) + M1::msm(g1, row) }) .collect(); - let commitment = E::multi_pair_g2_setup(&row_commitments, &setup.g2_vec[..num_rows]); + let tier_2 = E::multi_pair_g2_setup(&row_commitments, &setup.g2_vec[..num_rows]); - let opt_blinds = if Mo::BLINDING { Some(blinds) } else { None }; + // Single GT-level blind: commitment += r_d1 * HT + let r_d1: ArkFr = Mo::sample(); + let commitment = Mo::mask(tier_2, &setup.ht, &r_d1); - Ok((commitment, row_commitments, opt_blinds)) + Ok((commitment, 
row_commitments, r_d1)) } } diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 7ea9889..f68366a 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -56,6 +56,8 @@ use crate::setup::{ProverSetup, VerifierSetup}; /// - `polynomial`: Polynomial to prove evaluation for /// - `point`: Evaluation point (length nu + sigma) /// - `row_commitments`: Optional precomputed row commitments from polynomial.commit() +/// - `commit_blind`: GT-level blinding scalar from `commit()`. Ignored when +/// `row_commitments` is `None` (the blind is computed internally in that case). /// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma) /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup @@ -77,6 +79,7 @@ pub fn create_evaluation_proof( polynomial: &P, point: &[F], row_commitments: Option>, + commit_blind: F, nu: usize, sigma: usize, setup: &ProverSetup, @@ -109,9 +112,12 @@ where }); } - let row_commitments = match row_commitments { - Some(rc) => rc, - None => polynomial.commit::(nu, sigma, setup)?.1, + let (row_commitments, commit_blind) = match row_commitments { + Some(rc) => (rc, commit_blind), + None => { + let (_, rc, blind) = polynomial.commit::(nu, sigma, setup)?; + (rc, blind) + } }; let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); @@ -182,7 +188,7 @@ where padded_left_vec, // s2 = left_vec (padded) setup, ); - prover_state.set_initial_blinds(r_c, r_d2, r_e1, r_e2); + prover_state.set_initial_blinds(commit_blind, r_c, r_d2, r_e1, r_e2); let num_rounds = nu.max(sigma); let mut first_messages = Vec::with_capacity(num_rounds); diff --git a/src/lib.rs b/src/lib.rs index aef27f4..2769838 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,13 +46,13 @@ //! let (prover_setup, verifier_setup) = setup::(max_log_n); //! //! // 2. Commit to polynomial -//! let (tier_2_commitment, tier_1_commitments, _blinds) = polynomial +//! let (tier_2_commitment, tier_1_commitments, commit_blind) = polynomial //! 
.commit::(nu, sigma, &prover_setup)?; //! //! // 3. Generate evaluation proof //! let mut prover_transcript = Blake2bTranscript::new(b"domain-separation"); //! let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( -//! &polynomial, &point, tier_1_commitments, nu, sigma, +//! &polynomial, &point, tier_1_commitments, commit_blind, nu, sigma, //! &prover_setup, &mut prover_transcript //! )?; //! @@ -230,14 +230,15 @@ where /// tier-1 commitments (row commitments). /// /// # Workflow -/// 1. Call `polynomial.commit(nu, sigma, setup)` to get `(tier_2, row_commitments)` -/// 2. Call this function with the `row_commitments` to create the proof +/// 1. Call `polynomial.commit(nu, sigma, setup)` to get `(tier_2, row_commitments, commit_blind)` +/// 2. Call this function with the `row_commitments` and `commit_blind` to create the proof /// 3. Use `tier_2` for verification via the `verify()` function /// /// # Parameters /// - `polynomial`: Polynomial implementing MultilinearLagrange trait /// - `point`: Evaluation point (length must equal nu + sigma) /// - `row_commitments`: Tier-1 commitments (row commitments in G1) from `polynomial.commit()` +/// - `commit_blind`: GT-level blinding scalar from `polynomial.commit()` (zero in Transparent mode) /// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma for non-square matrices) /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup @@ -268,6 +269,7 @@ pub fn prove( polynomial: &P, point: &[F], row_commitments: Vec, + commit_blind: F, nu: usize, sigma: usize, setup: &ProverSetup, @@ -289,6 +291,7 @@ where polynomial, point, Some(row_commitments), + commit_blind, nu, sigma, setup, diff --git a/src/primitives/poly.rs b/src/primitives/poly.rs index 1f1b951..959025a 100644 --- a/src/primitives/poly.rs +++ b/src/primitives/poly.rs @@ -64,22 +64,24 @@ pub trait Polynomial { /// # Tier 1 (Row Commitments) /// For each row i: `row_commit[i] = MSM(g1_generators[0..2^sigma], row_coefficients[i])` /// - 
/// In ZK mode (`Mo = ZK`), each row commitment is additionally blinded: - /// `row_commit[i] += H₁ · blind[i]` where `blind[i]` is a fresh random scalar. + /// Row commitments are always unblinded (internal to the prover, never exposed). /// /// # Tier 2 (Final Commitment) /// `commitment = Σ e(row_commit[i], g2_generators[i])` for i in 0..2^nu /// + /// In ZK mode (`Mo = ZK`), the tier-2 commitment is blinded with a single GT-level blind: + /// `commitment += r_d1 * HT` where `r_d1` is a fresh random scalar and `HT = e(H₁, H₂)`. + /// /// # Parameters /// - `nu`: Log₂ of number of rows /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup containing generators /// /// # Returns - /// `(commitment, row_commitments, blinds)` where: - /// - `commitment`: Final commitment in GT - /// - `row_commitments`: Intermediate row commitments in G1 (used in opening proof) - /// - `blinds`: Per-row blinding scalars in ZK mode (`Some`), or `None` in Transparent mode + /// `(commitment, row_commitments, commit_blind)` where: + /// - `commitment`: Final commitment in GT (blinded in ZK mode) + /// - `row_commitments`: Intermediate unblinded row commitments in G1 (used in opening proof) + /// - `commit_blind`: GT-level blinding scalar (`r_d1`); zero in Transparent mode /// /// # Errors /// Returns error if coefficient length doesn't match 2^(nu + sigma) or if setup is insufficient. @@ -89,12 +91,13 @@ pub trait Polynomial { nu: usize, sigma: usize, setup: &ProverSetup, - ) -> Result<(E::GT, Vec, Option>), DoryError> + ) -> Result<(E::GT, Vec, F), DoryError> where E: PairingCurve, Mo: Mode, M1: DoryRoutines, - E::G1: Group; + E::G1: Group, + E::GT: Group; } /// Compute multilinear Lagrange basis evaluations at a point diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index c59ef92..e9b0fcf 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -168,15 +168,16 @@ where } } - /// Set initial VMV blinds (r_c, r_d2, r_e1, r_e2). 
+ /// Set initial VMV blinds (r_d1, r_c, r_d2, r_e1, r_e2). pub fn set_initial_blinds( &mut self, + r_d1: Scalar, r_c: Scalar, r_d2: Scalar, r_e1: Scalar, r_e2: Scalar, ) { - (self.r_c, self.r_d2, self.r_e1, self.r_e2) = (r_c, r_d2, r_e1, r_e2); + (self.r_d1, self.r_c, self.r_d2, self.r_e1, self.r_e2) = (r_d1, r_c, r_d2, r_e1, r_e2); } /// Compute first reduce message for current round @@ -363,9 +364,7 @@ where /// /// In ZK mode, E₁ and E₂ are additionally blinded with fresh randomness so /// that the folded vectors `v₁[0]`, `v₂[0]` cannot be recovered from the - /// proof. These blinds do not affect the scalar product proof or the - /// accumulated `r_c` — they only add entropy to the Fiat-Shamir transcript - /// from which the `d` challenge is derived. + /// proof. #[tracing::instrument(skip_all, name = "DoryProverState::compute_final_message")] pub fn compute_final_message( &mut self, diff --git a/src/setup.rs b/src/setup.rs index 0012c79..602261c 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -75,9 +75,6 @@ pub struct VerifierSetup { /// h_t = e(h₁, h₂) pub ht: E::GT, - /// e(H1, Γ2,fin) - precomputed for ZK verification - #[cfg(feature = "zk")] - pub h1_g2_fin: E::GT, /// Maximum log₂ of polynomial size supported pub max_log_n: usize, } @@ -170,8 +167,6 @@ impl ProverSetup { h1: self.h1, h2: self.h2, ht: self.ht, - #[cfg(feature = "zk")] - h1_g2_fin: E::pair(&self.h1, &self.g2_vec[0]), max_log_n: max_num_rounds * 2, // Since square matrices: max_log_n = 2 * max_nu } } @@ -239,14 +234,7 @@ fn get_storage_path(max_log_n: usize) -> Option { cache_directory.map(|mut path| { path.push("dory"); - #[cfg(feature = "zk")] - { - path.push(format!("dory_{max_log_n}_zk.urs")); - } - #[cfg(not(feature = "zk"))] - { - path.push(format!("dory_{max_log_n}.urs")); - } + path.push(format!("dory_{max_log_n}.urs")); path }) } diff --git a/tests/arkworks/evaluation.rs b/tests/arkworks/evaluation.rs index 839c016..0235541 100644 --- a/tests/arkworks/evaluation.rs +++ 
b/tests/arkworks/evaluation.rs @@ -15,7 +15,7 @@ fn test_evaluation_proof_small() { let nu = 2; let sigma = 2; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &setup) .unwrap(); @@ -24,6 +24,7 @@ fn test_evaluation_proof_small() { &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -58,7 +59,7 @@ fn test_evaluation_proof_with_precomputed_commitment() { let nu = 2; let sigma = 2; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &setup) .unwrap(); @@ -67,6 +68,7 @@ fn test_evaluation_proof_with_precomputed_commitment() { &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -104,7 +106,7 @@ fn test_evaluation_proof_constant_polynomial() { let expected_eval = poly.evaluate(&point); assert_eq!(expected_eval, ArkFr::from_u64(7)); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &setup) .unwrap(); @@ -113,6 +115,7 @@ fn test_evaluation_proof_constant_polynomial() { &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -147,7 +150,7 @@ fn test_evaluation_proof_wrong_evaluation_fails() { let nu = 2; let sigma = 2; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &setup) .unwrap(); @@ -156,6 +159,7 @@ fn test_evaluation_proof_wrong_evaluation_fails() { &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -188,7 +192,7 @@ fn test_evaluation_proof_different_sizes() { let poly = random_polynomial(4); let point = random_point(2); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(1, 1, &setup) .unwrap(); @@ -197,6 +201,7 @@ fn test_evaluation_proof_different_sizes() { &poly, &point, tier_1, + commit_blind, 1, 1, &setup, @@ -224,7 +229,7 @@ fn test_evaluation_proof_different_sizes() { let poly = random_polynomial(64); let point = random_point(6); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = 
poly .commit::(3, 3, &setup) .unwrap(); @@ -233,6 +238,7 @@ fn test_evaluation_proof_different_sizes() { &poly, &point, tier_1, + commit_blind, 3, 3, &setup, @@ -263,7 +269,7 @@ fn test_multiple_evaluations_same_commitment() { let nu = 2; let sigma = 2; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &setup) .unwrap(); @@ -275,6 +281,7 @@ fn test_multiple_evaluations_same_commitment() { &poly, &point, tier_1.clone(), + commit_blind, nu, sigma, &setup, diff --git a/tests/arkworks/homomorphic.rs b/tests/arkworks/homomorphic.rs index 454c405..6a4fb2d 100644 --- a/tests/arkworks/homomorphic.rs +++ b/tests/arkworks/homomorphic.rs @@ -90,6 +90,7 @@ fn test_homomorphic_combination_e2e() { &combined_poly, &point, combined_tier1, + ArkFr::zero(), nu, sigma, &prover_setup, @@ -179,6 +180,7 @@ fn test_homomorphic_combination_small() { &combined_poly, &point, combined_tier1, + ArkFr::zero(), nu, sigma, &prover_setup, diff --git a/tests/arkworks/integration.rs b/tests/arkworks/integration.rs index 5a76f68..a8e61ba 100644 --- a/tests/arkworks/integration.rs +++ b/tests/arkworks/integration.rs @@ -15,7 +15,7 @@ fn test_full_workflow() { let nu = 4; let sigma = 4; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -27,6 +27,7 @@ fn test_full_workflow() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -60,7 +61,7 @@ fn test_workflow_without_precommitment() { let nu = 4; let sigma = 4; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -69,6 +70,7 @@ fn test_workflow_without_precommitment() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -98,7 +100,7 @@ fn test_batched_proofs() { let nu = 4; let sigma = 4; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -110,6 
+112,7 @@ fn test_batched_proofs() { &poly, &point, tier_1.clone(), + commit_blind, nu, sigma, &prover_setup, @@ -153,7 +156,7 @@ fn test_linear_polynomial() { let nu = 4; let sigma = 4; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -162,6 +165,7 @@ fn test_linear_polynomial() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -196,7 +200,7 @@ fn test_zero_polynomial() { let nu = 4; let sigma = 4; - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -205,6 +209,7 @@ fn test_zero_polynomial() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -239,11 +244,11 @@ fn test_soundness_wrong_commitment() { let nu = 4; let sigma = 4; - let (commitment1, _, _) = poly1 + let (commitment1, _, _commit_blind) = poly1 .commit::(nu, sigma, &prover_setup) .unwrap(); - let (_, tier_1_poly2, _) = poly2 + let (_, tier_1_poly2, commit_blind) = poly2 .commit::(nu, sigma, &prover_setup) .unwrap(); @@ -252,6 +257,7 @@ fn test_soundness_wrong_commitment() { &poly2, &point, tier_1_poly2, + commit_blind, nu, sigma, &prover_setup, diff --git a/tests/arkworks/mod.rs b/tests/arkworks/mod.rs index cf480e8..bb7042c 100644 --- a/tests/arkworks/mod.rs +++ b/tests/arkworks/mod.rs @@ -20,8 +20,6 @@ pub mod setup; pub mod soundness; #[cfg(feature = "zk")] pub mod zk; -#[cfg(feature = "zk")] -pub mod zk_statistical; pub fn random_polynomial(size: usize) -> ArkworksPolynomial { let coefficients: Vec = (0..size).map(|_| ArkFr::random()).collect(); diff --git a/tests/arkworks/non_square.rs b/tests/arkworks/non_square.rs index 0b8f6a9..2254f6c 100644 --- a/tests/arkworks/non_square.rs +++ b/tests/arkworks/non_square.rs @@ -17,7 +17,7 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, 
commit_blind) = poly .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); @@ -26,6 +26,7 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -61,7 +62,7 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (_, tier_1, _) = poly + let (_, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); @@ -70,6 +71,7 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -95,7 +97,7 @@ fn test_non_square_matrix_small() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); @@ -104,6 +106,7 @@ fn test_non_square_matrix_small() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -143,7 +146,7 @@ fn test_non_square_matrix_very_rectangular() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); @@ -152,6 +155,7 @@ fn test_non_square_matrix_very_rectangular() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, diff --git a/tests/arkworks/serialization.rs b/tests/arkworks/serialization.rs index e3b191e..4e454a3 100644 --- a/tests/arkworks/serialization.rs +++ b/tests/arkworks/serialization.rs @@ -15,7 +15,7 @@ fn make_transparent_proof() -> ( let poly = random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(2, 2, &setup) .unwrap(); let mut transcript = fresh_transcript(); @@ -23,6 +23,7 @@ fn make_transparent_proof() -> 
( &poly, &point, tier_1, + commit_blind, 2, 2, &setup, @@ -81,7 +82,7 @@ fn test_transparent_proof_roundtrip_verifies() { let poly = random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(2, 2, &setup) .unwrap(); @@ -90,6 +91,7 @@ fn test_transparent_proof_roundtrip_verifies() { &poly, &point, tier_1, + commit_blind, 2, 2, &setup, @@ -130,7 +132,7 @@ mod zk_roundtrip { let poly = random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(2, 2, &setup) .unwrap(); @@ -139,6 +141,7 @@ mod zk_roundtrip { &poly, &point, tier_1, + commit_blind, 2, 2, &setup, @@ -192,7 +195,7 @@ mod zk_roundtrip { let poly = random_polynomial(16); let point = random_point(4); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(2, 2, &setup) .unwrap(); @@ -201,6 +204,7 @@ mod zk_roundtrip { &poly, &point, tier_1, + commit_blind, 2, 2, &setup, diff --git a/tests/arkworks/soundness.rs b/tests/arkworks/soundness.rs index fd28da1..27a1097 100644 --- a/tests/arkworks/soundness.rs +++ b/tests/arkworks/soundness.rs @@ -27,7 +27,7 @@ fn create_valid_proof_components( let poly = random_polynomial(size); let point = random_point(nu + sigma); - let (tier_2, tier_1, _) = poly + let (tier_2, tier_1, commit_blind) = poly .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); @@ -35,6 +35,7 @@ fn create_valid_proof_components( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs index 351e217..9915a6d 100644 --- a/tests/arkworks/zk.rs +++ b/tests/arkworks/zk.rs @@ -5,7 +5,7 @@ use ark_bn254::{Fq12, Fr, G1Projective, G2Projective}; use ark_ff::UniformRand; use dory_pcs::backends::arkworks::{ArkFr, ArkG1, ArkG2, ArkGT}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{create_evaluation_proof, 
prove, setup, verify, Transparent, ZK}; +use dory_pcs::{create_evaluation_proof, prove, setup, verify, ZK}; #[test] fn test_zk_full_workflow() { @@ -17,8 +17,8 @@ fn test_zk_full_workflow() { let nu = 4; let sigma = 4; - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(8); @@ -29,6 +29,7 @@ fn test_zk_full_workflow() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -59,8 +60,8 @@ fn test_zk_small_polynomial() { let nu = 1; let sigma = 1; - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(2); @@ -71,6 +72,7 @@ fn test_zk_small_polynomial() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -103,8 +105,8 @@ fn test_zk_larger_polynomial() { let nu = 5; let sigma = 5; - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(10); @@ -115,6 +117,7 @@ fn test_zk_larger_polynomial() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -148,8 +151,8 @@ fn test_zk_non_square_matrix() { let nu = 3; let sigma = 4; - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(7); // nu + sigma = 7 @@ -160,6 +163,7 @@ fn test_zk_non_square_matrix() { &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -192,8 +196,8 @@ fn test_zk_hidden_evaluation() { let nu = 2; let sigma = 2; - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(4); @@ -205,6 +209,7 
@@ fn test_zk_hidden_evaluation() { &poly, &point, Some(tier_1), + commit_blind, nu, sigma, &prover_setup, @@ -243,8 +248,8 @@ fn test_zk_tampered_e2_rejected() { let nu = 2; let sigma = 2; - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(4); @@ -256,6 +261,7 @@ fn test_zk_tampered_e2_rejected() { &poly, &point, Some(tier_1), + commit_blind, nu, sigma, &prover_setup, @@ -289,8 +295,8 @@ fn test_zk_hidden_evaluation_larger() { let nu = 4; let sigma = 4; - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(8); @@ -301,6 +307,7 @@ fn test_zk_hidden_evaluation_larger() { &poly, &point, Some(tier_1), + commit_blind, nu, sigma, &prover_setup, @@ -346,14 +353,15 @@ fn create_valid_zk_proof_components( let poly = random_polynomial(size); let point = random_point(nu + sigma); - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, diff --git a/tests/arkworks/zk_statistical.rs b/tests/arkworks/zk_statistical.rs deleted file mode 100644 index b920586..0000000 --- a/tests/arkworks/zk_statistical.rs +++ /dev/null @@ -1,502 +0,0 @@ -//! Statistical tests for zero-knowledge property of Dory PCS -//! -//! Verifies that proof elements are statistically indistinguishable from uniform -//! random regardless of the witness (polynomial) distribution. 
- -use super::*; -use ark_serialize::CanonicalSerialize; -use dory_pcs::primitives::arithmetic::Field; -use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{create_evaluation_proof, setup, verify, DoryProof, Transparent, ZK}; -use std::collections::HashMap; - -const NUM_BUCKETS: usize = 16; - -/// Distribution tracker for statistical analysis -struct BucketTracker { - buckets: HashMap>, -} - -impl BucketTracker { - fn new() -> Self { - Self { - buckets: HashMap::new(), - } - } - - fn record(&mut self, name: &str, bucket: usize) { - self.buckets - .entry(name.to_string()) - .or_insert_with(|| vec![0; NUM_BUCKETS])[bucket] += 1; - } - - fn chi_squared(&self, name: &str, expected: f64) -> Option { - self.buckets.get(name).map(|buckets| { - buckets - .iter() - .map(|&observed| { - let diff = observed as f64 - expected; - diff * diff / expected - }) - .sum() - }) - } - - fn all_names(&self) -> Vec { - let mut names: Vec<_> = self.buckets.keys().cloned().collect(); - names.sort(); - names - } -} - -/// Extract low bytes from serializable element for bucketing -fn bucket_from_serializable(elem: &T) -> usize { - let mut bytes = Vec::new(); - elem.serialize_compressed(&mut bytes).unwrap(); - // Use first byte for primary bucket - (bytes[0] as usize) % NUM_BUCKETS -} - -type ArkDoryProof = DoryProof< - dory_pcs::backends::arkworks::ArkG1, - dory_pcs::backends::arkworks::ArkG2, - dory_pcs::backends::arkworks::ArkGT, ->; - -/// Collect bucket statistics from a full ZK proof (with hidden y) -fn collect_full_zk_proof_stats(proof: &ArkDoryProof, tracker: &mut BucketTracker) { - // VMV message elements - tracker.record("zk_vmv_c", bucket_from_serializable(&proof.vmv_message.c)); - tracker.record("zk_vmv_d2", bucket_from_serializable(&proof.vmv_message.d2)); - tracker.record("zk_vmv_e1", bucket_from_serializable(&proof.vmv_message.e1)); - - // ZK-specific fields from proof - if let Some(ref e2) = proof.e2 { - tracker.record("zk_vmv_e2", bucket_from_serializable(e2)); - } - 
if let Some(ref y_com) = proof.y_com { - tracker.record("zk_vmv_y_com", bucket_from_serializable(y_com)); - } - - // First reduce messages (D values only - e1_beta/e2_beta are public) - for (i, msg) in proof.first_messages.iter().enumerate() { - let prefix = format!("zk_first_{}", i); - tracker.record( - &format!("{}_d1_left", prefix), - bucket_from_serializable(&msg.d1_left), - ); - tracker.record( - &format!("{}_d1_right", prefix), - bucket_from_serializable(&msg.d1_right), - ); - tracker.record( - &format!("{}_d2_left", prefix), - bucket_from_serializable(&msg.d2_left), - ); - tracker.record( - &format!("{}_d2_right", prefix), - bucket_from_serializable(&msg.d2_right), - ); - } - - // Second reduce messages - for (i, msg) in proof.second_messages.iter().enumerate() { - let prefix = format!("zk_second_{}", i); - tracker.record( - &format!("{}_c_plus", prefix), - bucket_from_serializable(&msg.c_plus), - ); - tracker.record( - &format!("{}_c_minus", prefix), - bucket_from_serializable(&msg.c_minus), - ); - tracker.record( - &format!("{}_e1_plus", prefix), - bucket_from_serializable(&msg.e1_plus), - ); - tracker.record( - &format!("{}_e1_minus", prefix), - bucket_from_serializable(&msg.e1_minus), - ); - tracker.record( - &format!("{}_e2_plus", prefix), - bucket_from_serializable(&msg.e2_plus), - ); - tracker.record( - &format!("{}_e2_minus", prefix), - bucket_from_serializable(&msg.e2_minus), - ); - } - - // Final message - tracker.record( - "zk_final_e1", - bucket_from_serializable(&proof.final_message.e1), - ); - tracker.record( - "zk_final_e2", - bucket_from_serializable(&proof.final_message.e2), - ); - - // Sigma1 proof (proves y_com and E2 commit to same y) - if let Some(ref sigma1) = proof.sigma1_proof { - tracker.record("sigma1_a1", bucket_from_serializable(&sigma1.a1)); - tracker.record("sigma1_a2", bucket_from_serializable(&sigma1.a2)); - tracker.record("sigma1_z1", bucket_from_serializable(&sigma1.z1)); - tracker.record("sigma1_z2", 
bucket_from_serializable(&sigma1.z2)); - tracker.record("sigma1_z3", bucket_from_serializable(&sigma1.z3)); - } - - // Sigma2 proof (proves VMV relation) - if let Some(ref sigma2) = proof.sigma2_proof { - tracker.record("sigma2_a", bucket_from_serializable(&sigma2.a)); - tracker.record("sigma2_z1", bucket_from_serializable(&sigma2.z1)); - tracker.record("sigma2_z2", bucket_from_serializable(&sigma2.z2)); - } - - // Scalar product proof - if let Some(ref sp) = proof.scalar_product_proof { - tracker.record("zk_sp_p1", bucket_from_serializable(&sp.p1)); - tracker.record("zk_sp_p2", bucket_from_serializable(&sp.p2)); - tracker.record("zk_sp_q", bucket_from_serializable(&sp.q)); - tracker.record("zk_sp_r", bucket_from_serializable(&sp.r)); - tracker.record("zk_sp_e1", bucket_from_serializable(&sp.e1)); - tracker.record("zk_sp_e2", bucket_from_serializable(&sp.e2)); - tracker.record("zk_sp_r1", bucket_from_serializable(&sp.r1)); - tracker.record("zk_sp_r2", bucket_from_serializable(&sp.r2)); - tracker.record("zk_sp_r3", bucket_from_serializable(&sp.r3)); - } -} - -/// Statistical test for zero-knowledge property (full ZK with hidden y). -/// -/// Creates polynomials with different coefficient distributions and verifies -/// that all resulting proof elements (including y_com, Sigma1, Sigma2) are -/// statistically indistinguishable from uniform random. 
-#[test] -#[ignore] // Long-running statistical test -fn test_zk_statistical_indistinguishability() { - const NUM_TRIALS: usize = 100; - - let (prover_setup, verifier_setup) = setup::(6); - - let nu = 2; - let sigma = 2; - let poly_size = 16; - let point = random_point(nu + sigma); - - // Track distributions for three witness types - let mut tracker_zeros = BucketTracker::new(); - let mut tracker_ones = BucketTracker::new(); - let mut tracker_random = BucketTracker::new(); - - for _trial in 0..NUM_TRIALS { - // Distribution A: All-zeros polynomial (y=0 for all points) - { - let coeffs = vec![ArkFr::zero(); poly_size]; - let poly = ArkworksPolynomial::new(coeffs); - - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) - .unwrap(); - - let evaluation = poly.evaluate(&point); - let mut transcript = fresh_transcript(); - let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut transcript, - ) - .unwrap(); - - // Verify proof is valid - let mut verifier_transcript = fresh_transcript(); - assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - evaluation, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok()); - - collect_full_zk_proof_stats(&proof, &mut tracker_zeros); - } - - // Distribution B: All-ones polynomial (y=2^n for point=(0,0,...)) - { - let coeffs = vec![ArkFr::one(); poly_size]; - let poly = ArkworksPolynomial::new(coeffs); - - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) - .unwrap(); - - let evaluation = poly.evaluate(&point); - let mut transcript = fresh_transcript(); - let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut transcript, - ) - .unwrap(); - - let mut verifier_transcript = fresh_transcript(); - assert!(verify::<_, BN254, 
TestG1Routines, TestG2Routines, _>( - tier_2, - evaluation, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok()); - - collect_full_zk_proof_stats(&proof, &mut tracker_ones); - } - - // Distribution C: Random polynomial - { - let poly = random_polynomial(poly_size); - - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) - .unwrap(); - - let evaluation = poly.evaluate(&point); - let mut transcript = fresh_transcript(); - let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut transcript, - ) - .unwrap(); - - let mut verifier_transcript = fresh_transcript(); - assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - evaluation, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok()); - - collect_full_zk_proof_stats(&proof, &mut tracker_random); - } - } - - // Statistical analysis - let expected = NUM_TRIALS as f64 / NUM_BUCKETS as f64; - - // Critical value for χ² with df=15 at α=0.01 is ~30.58 - // Use lenient threshold for randomness testing - let critical_value = 35.0; - - let mut failures = Vec::new(); - - for name in tracker_zeros.all_names() { - // Test uniformity for each witness type - if let Some(chi2) = tracker_zeros.chi_squared(&name, expected) { - if chi2 >= critical_value { - failures.push(format!( - "zeros/{}: χ²={:.2} >= {:.2}", - name, chi2, critical_value - )); - } - } - - if let Some(chi2) = tracker_ones.chi_squared(&name, expected) { - if chi2 >= critical_value { - failures.push(format!( - "ones/{}: χ²={:.2} >= {:.2}", - name, chi2, critical_value - )); - } - } - - if let Some(chi2) = tracker_random.chi_squared(&name, expected) { - if chi2 >= critical_value { - failures.push(format!( - "random/{}: χ²={:.2} >= {:.2}", - name, chi2, critical_value - )); - } - } - } - - if !failures.is_empty() { - panic!( - "ZK statistical test 
failed - {} elements showed non-uniform distribution:\n{}", - failures.len(), - failures.join("\n") - ); - } -} - -/// Test that proof distributions from different witnesses are similar (two-sample test) -/// Uses full ZK API with hidden y to test all proof elements including y_com. -#[test] -#[ignore] // Long-running statistical test -fn test_zk_witness_independence() { - const NUM_TRIALS: usize = 80; - - let (prover_setup, verifier_setup) = setup::(6); - - let nu = 2; - let sigma = 2; - let poly_size = 16; - let point = random_point(nu + sigma); - - let mut tracker_skewed = BucketTracker::new(); - let mut tracker_uniform = BucketTracker::new(); - - for _trial in 0..NUM_TRIALS { - // Skewed: Single non-zero coefficient at position 0 (y will be small/predictable) - { - let mut coeffs = vec![ArkFr::zero(); poly_size]; - coeffs[0] = ArkFr::from_u64(42); - let poly = ArkworksPolynomial::new(coeffs); - - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) - .unwrap(); - - let evaluation = poly.evaluate(&point); - let mut transcript = fresh_transcript(); - let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( - &poly, - &point, - Some(tier_1), - nu, - sigma, - &prover_setup, - &mut transcript, - ) - .unwrap(); - - let mut verifier_transcript = fresh_transcript(); - assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - evaluation, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok()); - - collect_full_zk_proof_stats(&proof, &mut tracker_skewed); - } - - // Uniform: Random polynomial (y will be random) - { - let poly = random_polynomial(poly_size); - - let (tier_2, tier_1, _) = poly - .commit::(nu, sigma, &prover_setup) - .unwrap(); - - let evaluation = poly.evaluate(&point); - let mut transcript = fresh_transcript(); - let (proof, _) = - create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( - &poly, - &point, - Some(tier_1), - nu, 
- sigma, - &prover_setup, - &mut transcript, - ) - .unwrap(); - - let mut verifier_transcript = fresh_transcript(); - assert!(verify::<_, BN254, TestG1Routines, TestG2Routines, _>( - tier_2, - evaluation, - &point, - &proof, - verifier_setup.clone(), - &mut verifier_transcript, - ) - .is_ok()); - - collect_full_zk_proof_stats(&proof, &mut tracker_uniform); - } - } - - // Two-sample χ² test between skewed and uniform witness distributions - fn two_sample_chi_squared(a: &[usize], b: &[usize]) -> f64 { - let n_a: f64 = a.iter().sum::() as f64; - let n_b: f64 = b.iter().sum::() as f64; - let n_total = n_a + n_b; - - a.iter() - .zip(b.iter()) - .map(|(&obs_a, &obs_b)| { - let pooled = obs_a as f64 + obs_b as f64; - if pooled < 1.0 { - return 0.0; - } - let expected_a = pooled * n_a / n_total; - let expected_b = pooled * n_b / n_total; - let term_a = if expected_a > 0.0 { - (obs_a as f64 - expected_a).powi(2) / expected_a - } else { - 0.0 - }; - let term_b = if expected_b > 0.0 { - (obs_b as f64 - expected_b).powi(2) / expected_b - } else { - 0.0 - }; - term_a + term_b - }) - .sum() - } - - // Critical value for two-sample χ² with df=15 at α=0.005 - // Using slightly higher threshold to reduce false positives from random variation - let critical_value = 40.0; - let mut failures = Vec::new(); - - for name in tracker_skewed.all_names() { - let buckets_skewed = tracker_skewed.buckets.get(&name).unwrap(); - let buckets_uniform = tracker_uniform.buckets.get(&name).unwrap(); - - let chi2 = two_sample_chi_squared(buckets_skewed, buckets_uniform); - if chi2 >= critical_value { - failures.push(format!( - "{}: skewed vs uniform χ²={:.2} >= {:.2}", - name, chi2, critical_value - )); - } - } - - if !failures.is_empty() { - panic!( - "ZK witness independence test failed - {} elements showed witness-dependent distribution:\n{}", - failures.len(), - failures.join("\n") - ); - } -} From fea6ed0e031d5de0108c1345460ba23e328f6851 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 
2026 17:42:48 -0500 Subject: [PATCH 14/16] fix: tracing and add statistical to CI --- .github/workflows/ci.yml | 2 ++ examples/basic_e2e.rs | 2 ++ examples/homomorphic.rs | 2 ++ examples/homomorphic_mixed_sizes.rs | 2 ++ examples/non_square.rs | 2 ++ examples/zk_e2e.rs | 2 ++ examples/zk_statistical.rs | 4 ++-- 7 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5d41e04..2185dcc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -90,3 +90,5 @@ jobs: run: cargo run --example homomorphic_mixed_sizes --features backends - name: Run zk_e2e example run: cargo run --example zk_e2e --features backends,zk + - name: Run zk_statistical example + run: cargo run --release --example zk_statistical --features backends,zk diff --git a/examples/basic_e2e.rs b/examples/basic_e2e.rs index 6c021d9..146756c 100644 --- a/examples/basic_e2e.rs +++ b/examples/basic_e2e.rs @@ -13,6 +13,8 @@ use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + let (prover_setup, verifier_setup) = setup::(10); let nu = 4; diff --git a/examples/homomorphic.rs b/examples/homomorphic.rs index 1cab76e..6232686 100644 --- a/examples/homomorphic.rs +++ b/examples/homomorphic.rs @@ -10,6 +10,8 @@ use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + let (prover_setup, verifier_setup) = setup::(10); let nu = 4; diff --git a/examples/homomorphic_mixed_sizes.rs b/examples/homomorphic_mixed_sizes.rs index 97157a8..65412b9 100644 --- a/examples/homomorphic_mixed_sizes.rs +++ b/examples/homomorphic_mixed_sizes.rs @@ -11,6 +11,8 @@ use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + let (prover_setup, 
verifier_setup) = setup::(4); let mut coeffs_poly1 = vec![ArkFr::zero(); 16]; diff --git a/examples/non_square.rs b/examples/non_square.rs index 9694c22..8ae8e45 100644 --- a/examples/non_square.rs +++ b/examples/non_square.rs @@ -12,6 +12,8 @@ use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + let (prover_setup, verifier_setup) = setup::(10); let nu = 3; diff --git a/examples/zk_e2e.rs b/examples/zk_e2e.rs index 0bf4162..8e40759 100644 --- a/examples/zk_e2e.rs +++ b/examples/zk_e2e.rs @@ -14,6 +14,8 @@ use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify, ZK}; fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + let (prover_setup, verifier_setup) = setup::(10); let nu = 4; diff --git a/examples/zk_statistical.rs b/examples/zk_statistical.rs index ba876f7..dfe4859 100644 --- a/examples/zk_statistical.rs +++ b/examples/zk_statistical.rs @@ -19,8 +19,8 @@ use tracing::info; const NUM_BUCKETS: usize = 16; const NUM_TRIALS: usize = 1000; -/// chi-squared critical value: df=15, alpha=0.001 (Bonferroni-safe for ~120 elements) -const CHI2_CRITICAL: f64 = 37.70; +/// chi-squared critical value: df=15, alpha=0.0001 (Bonferroni-safe for ~360 tests) +const CHI2_CRITICAL: f64 = 43.84; fn random_polynomial(size: usize) -> ArkworksPolynomial { let coefficients: Vec = (0..size).map(|_| ArkFr::random()).collect(); From b8581e439051519afb0bff1c23e49aa66f292d9e Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 17:54:29 -0500 Subject: [PATCH 15/16] chore: prepare release --- .github/workflows/ci.yml | 2 +- CHANGELOG.md | 24 +++++++ Cargo.lock | 148 +++++++++++++++++++-------------------- Cargo.toml | 4 +- README.md | 19 +++-- derive/Cargo.toml | 2 +- src/lib.rs | 5 +- 7 files changed, 118 insertions(+), 86 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2185dcc..0513cd1 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -91,4 +91,4 @@ jobs: - name: Run zk_e2e example run: cargo run --example zk_e2e --features backends,zk - name: Run zk_statistical example - run: cargo run --release --example zk_statistical --features backends,zk + run: cargo run --release --example zk_statistical --features backends,zk,parallel diff --git a/CHANGELOG.md b/CHANGELOG.md index 3042748..93209cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,29 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.3.0] - 2026-02-27 + +### Added + +- **Zero-knowledge mode** (`zk` feature): optional hiding proofs where both commitment and proof are blinded + - Single GT-level commitment blind (`r_d1 * HT`) + - VMV messages (C, D2, E1, E2, y_com) blinded with OS randomness + - Reduce-and-fold messages blinded with OS per-round randomness + - Final message (E1, E2) blinded to hide folded witness vectors + - Sigma1 proof: proves E2 and y_com commit to the same evaluation + - Sigma2 proof: proves consistency of E1 and D2 blinds + - Scalar product proof: proves (C, D1, D2) are consistent with blinded v1, v2 + - 1 ML + 1 FE verification in ZK mode (vs 4 ML + 1 FE in transparent mode) +- New `zk_e2e` example demonstrating the full ZK workflow +- New `zk_statistical` example with chi-squared uniformity and witness-independence tests (1000 trials) +- ZK test suite: end-to-end proofs, tampering resistance, sigma proof verification, soundness + +### Changed + +- `Polynomial::commit()` return type changed from `(GT, Vec, Option>)` to `(GT, Vec, F)` — the third element is now a single GT-level blind scalar (zero in Transparent mode) +- `prove()` and `create_evaluation_proof()` now take a `commit_blind: F` parameter +- `DoryProverState::set_initial_blinds()` now takes `r_d1` 
as its first parameter + ## [0.2.0] - 2026-01-29 ### Changed @@ -33,5 +56,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Homomorphic commitment properties - Comprehensive test suite including soundness tests +[0.3.0]: https://github.com/a16z/dory/compare/v0.2.0...v0.3.0 [0.2.0]: https://github.com/a16z/dory/compare/v0.1.0...v0.2.0 [0.1.0]: https://github.com/a16z/dory/releases/tag/v0.1.0 diff --git a/Cargo.lock b/Cargo.lock index 1f80369..1ad9d5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 4 +version = 3 [[package]] name = "ahash" @@ -210,9 +210,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "cast" @@ -255,18 +255,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.51" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.51" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstyle", "clap_lex", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" 
+checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "criterion" @@ -347,9 +347,9 @@ checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "typenum", @@ -368,7 +368,7 @@ dependencies = [ [[package]] name = "dory-derive" -version = "0.2.0" +version = "0.3.0" dependencies = [ "proc-macro2", "quote", @@ -377,7 +377,7 @@ dependencies = [ [[package]] name = "dory-pcs" -version = "0.2.0" +version = "0.3.0" dependencies = [ "ark-bn254", "ark-ec", @@ -444,9 +444,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "generic-array" -version = "0.14.9" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -454,9 +454,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "libc", @@ -520,15 +520,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = 
"js-sys" -version = "0.3.82" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" dependencies = [ "once_cell", "wasm-bindgen", @@ -542,9 +542,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "log" @@ -554,9 +554,9 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "nu-ansi-term" @@ -615,9 +615,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" [[package]] name = "plotters" @@ -658,18 +658,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = 
"1.0.41" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] @@ -726,9 +726,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -738,9 +738,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -749,9 +749,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "rustversion" @@ -759,12 +759,6 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - [[package]] name = "same-file" version = "1.0.6" @@ -806,15 +800,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.149" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] [[package]] @@ -840,9 +834,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.108" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -851,18 +845,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", @@ -890,9 +884,9 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "pin-project-lite", "tracing-attributes", @@ -901,9 +895,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", @@ -953,9 +947,9 @@ checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "unicode-ident" -version = "1.0.20" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "valuable" @@ -987,9 +981,9 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" dependencies = [ "cfg-if", "once_cell", @@ -1000,9 +994,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1010,9 +1004,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" dependencies = [ "bumpalo", "proc-macro2", @@ -1023,18 +1017,18 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" 
-version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" -version = "0.3.82" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" dependencies = [ "js-sys", "wasm-bindgen", @@ -1066,18 +1060,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953" dependencies = [ "proc-macro2", "quote", @@ -1095,11 +1089,17 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", "syn", ] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/Cargo.toml b/Cargo.toml index e330c1a..9edbf11 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ resolver = "2" [package] name = "dory-pcs" -version = "0.2.0" +version = "0.3.0" edition = "2021" rust-version = "1.75" authors = [ @@ -54,7 +54,7 @@ disk-persistence = [] [dependencies] thiserror = "2.0" rand_core = { version = "0.6", features = ["getrandom"] } -dory-derive = { version = "0.2.0", path = "derive" } +dory-derive = { version = "0.3.0", path = "derive" } tracing = "0.1" # Arkworks backend diff --git a/README.md b/README.md index 301e351..79c093d 100644 --- a/README.md +++ b/README.md @@ -25,21 +25,21 @@ Add `dory-pcs` to your `Cargo.toml`: ```toml [dependencies] -dory-pcs = "0.1" +dory-pcs = "0.3" ``` Or with specific features: ```toml [dependencies] -dory-pcs = { version = "0.1", features = ["backends", "disk-persistence"] } +dory-pcs = { version = "0.3", features = ["backends", "disk-persistence"] } ``` For maximum performance with all optimizations: ```toml [dependencies] -dory-pcs = { version = "0.1", features = ["backends", "cache", "parallel", "disk-persistence"] } +dory-pcs = { version = "0.3", features = ["backends", "cache", "parallel", "disk-persistence"] } ``` ## Architecture @@ -120,7 +120,7 @@ fn main() -> Result<(), Box> { let sigma = 4; // log₂(cols) = 4 → 16 columns // 4. Commit to polynomial to get tier-2 commitment and row commitments - let (tier_2, row_commitments, _) = polynomial + let (tier_2, row_commitments, commit_blind) = polynomial .commit::(nu, sigma, &prover_setup)?; // 5. Create evaluation proof using row commitments @@ -129,6 +129,7 @@ fn main() -> Result<(), Box> { &polynomial, &point, row_commitments, + commit_blind, nu, sigma, &prover_setup, @@ -154,7 +155,7 @@ fn main() -> Result<(), Box> { ## Examples -The repository includes five comprehensive examples demonstrating different aspects of Dory: +The repository includes six comprehensive examples demonstrating different aspects of Dory: 1. 
**`basic_e2e`** - Standard end-to-end workflow with square matrix (nu=4, sigma=4) ```bash @@ -181,6 +182,11 @@ The repository includes five comprehensive examples demonstrating different aspe cargo run --example zk_e2e --features backends,zk ``` +6. **`zk_statistical`** - Chi-squared uniformity and witness-independence tests for ZK proofs + ```bash + cargo run --release --example zk_statistical --features backends,zk,parallel + ``` + ## Development Setup After cloning the repository, install Git hooks to ensure code quality: @@ -293,8 +299,7 @@ tests/arkworks/ ├── serialization.rs # Proof serialization round-trip tests ├── cache.rs # Prepared point caching tests ├── soundness.rs # Soundness tests -├── zk.rs # Zero-knowledge mode and ZK soundness tests -└── zk_statistical.rs # ZK statistical indistinguishability tests +└── zk.rs # Zero-knowledge mode and ZK soundness tests ``` ## Test Coverage diff --git a/derive/Cargo.toml b/derive/Cargo.toml index 2e59962..cdaa6f2 100644 --- a/derive/Cargo.toml +++ b/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dory-derive" -version = "0.2.0" +version = "0.3.0" edition = "2021" rust-version = "1.75" authors = ["Markos Georghiades "] diff --git a/src/lib.rs b/src/lib.rs index 2769838..7f2e8f5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,10 +10,11 @@ //! //! - **Transparent setup** with automatic disk persistence //! - **Logarithmic proof size**: O(log n) group elements -//! - **Logarithmic verification**: O(log n) GT exps and 5 pairings +//! - **Logarithmic verification**: O(log n) GT exps and 1 multi-pairing //! - **Performance optimizations**: Optional prepared point caching (~20-30% speedup) and parallelization //! - **Flexible matrix layouts**: Supports both square and non-square matrices (nu ≤ sigma) //! - **Homomorphic properties**: Com(r₁·P₁ + r₂·P₂ + ... + rₙ·Pₙ) = r₁·Com(P₁) + r₂·Com(P₂) + ... + rₙ·Com(Pₙ) +//! - **Zero-knowledge mode**: Optional hiding proofs via the `zk` feature flag //! //! ## Structure //! 
@@ -82,11 +83,13 @@ //! - `homomorphic.rs` - Homomorphic combination of multiple polynomials //! - `non_square.rs` - Non-square matrix layout (nu < sigma) //! - `zk_e2e.rs` - Zero-knowledge proof workflow (requires `zk` feature) +//! - `zk_statistical.rs` - Chi-squared uniformity and witness-independence tests (requires `zk` feature) //! //! ## Feature Flags //! //! - `backends` - Enable concrete backends (currently Arkworks BN254, includes `disk-persistence`) //! - `cache` - Enable prepared point caching (~20-30% speedup, requires `parallel`) +//! - `zk` - Enable zero-knowledge mode with hiding commitments and proofs //! - `parallel` - Enable Rayon parallelization for MSMs and pairings //! - `disk-persistence` - Enable automatic setup caching to disk From 1927a0a3190e461a0e5c883756bef8a723e5a062 Mon Sep 17 00:00:00 2001 From: markosg04 Date: Fri, 27 Feb 2026 18:01:24 -0500 Subject: [PATCH 16/16] fix: parallel and cache flags conflict --- src/backends/arkworks/ark_pairing.rs | 32 +++++++++++++++++++++------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/src/backends/arkworks/ark_pairing.rs b/src/backends/arkworks/ark_pairing.rs index 64b1762..c8278e5 100644 --- a/src/backends/arkworks/ark_pairing.rs +++ b/src/backends/arkworks/ark_pairing.rs @@ -190,10 +190,6 @@ mod pairing_helpers { #[cfg(feature = "cache")] let cache = crate::backends::arkworks::ark_cache::get_prepared_cache(); - #[cfg(not(feature = "cache"))] - let cache: Option< - std::sync::Arc, - > = None; let combined = ps .par_chunks(chunk_size) @@ -210,6 +206,7 @@ mod pairing_helpers { }) .collect(); + #[cfg(feature = "cache")] let qs_prep: Vec<::G2Prepared> = if let Some(ref c) = cache { c.g2_prepared[start_idx..end_idx].to_vec() } else { @@ -222,6 +219,17 @@ mod pairing_helpers { }) .collect() }; + #[cfg(not(feature = "cache"))] + let qs_prep: Vec<::G2Prepared> = { + use ark_bn254::G2Affine; + qs[start_idx..end_idx] + .iter() + .map(|q| { + let affine: G2Affine = q.0.into(); + 
affine.into() + }) + .collect() + }; Bn254::multi_miller_loop(ps_prep, qs_prep) }) @@ -246,10 +254,6 @@ mod pairing_helpers { #[cfg(feature = "cache")] let cache = crate::backends::arkworks::ark_cache::get_prepared_cache(); - #[cfg(not(feature = "cache"))] - let cache: Option< - std::sync::Arc, - > = None; let combined = qs .par_chunks(chunk_size) @@ -266,6 +270,7 @@ mod pairing_helpers { }) .collect(); + #[cfg(feature = "cache")] let ps_prep: Vec<::G1Prepared> = if let Some(ref c) = cache { c.g1_prepared[start_idx..end_idx].to_vec() } else { @@ -278,6 +283,17 @@ mod pairing_helpers { }) .collect() }; + #[cfg(not(feature = "cache"))] + let ps_prep: Vec<::G1Prepared> = { + use ark_bn254::G1Affine; + ps[start_idx..end_idx] + .iter() + .map(|p| { + let affine: G1Affine = p.0.into(); + affine.into() + }) + .collect() + }; Bn254::multi_miller_loop(ps_prep, qs_prep) })