diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1b83b4c..0513cd1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,13 +27,21 @@ jobs: clippy: runs-on: ubuntu-latest + strategy: + matrix: + include: + - name: transparent + features: backends,parallel,cache,disk-persistence + - name: zk + features: backends,parallel,cache,disk-persistence,zk + name: Clippy (${{ matrix.name }}) steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: components: clippy - - name: cargo clippy --all-features - run: cargo clippy --all --all-targets --all-features + - name: cargo clippy --features ${{ matrix.features }} + run: cargo clippy --all --all-targets --features ${{ matrix.features }} - name: cargo clippy --no-default-features run: cargo clippy --all --all-targets --no-default-features @@ -50,14 +58,21 @@ jobs: test: runs-on: ubuntu-latest - name: Test + strategy: + matrix: + include: + - name: transparent + features: backends,parallel,cache,disk-persistence + - name: zk + features: backends,parallel,cache,disk-persistence,zk + name: Test (${{ matrix.name }}) steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 - name: Install nextest uses: taiki-e/install-action@nextest - name: Run tests - run: cargo nextest run --all-features + run: cargo nextest run --features ${{ matrix.features }} examples: runs-on: ubuntu-latest @@ -73,3 +88,7 @@ jobs: run: cargo run --example non_square --features backends - name: Run homomorphic_mixed_sizes example run: cargo run --example homomorphic_mixed_sizes --features backends + - name: Run zk_e2e example + run: cargo run --example zk_e2e --features backends,zk + - name: Run zk_statistical example + run: cargo run --release --example zk_statistical --features backends,zk,parallel diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000..e69de29 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3042748..93209cf 100644 
--- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,29 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.3.0] - 2026-02-27 + +### Added + +- **Zero-knowledge mode** (`zk` feature): optional hiding proofs where both commitment and proof are blinded + - Single GT-level commitment blind (`r_d1 * HT`) + - VMV messages (C, D2, E1, E2, y_com) blinded with OS randomness + - Reduce-and-fold messages blinded with OS per-round randomness + - Final message (E1, E2) blinded to hide folded witness vectors + - Sigma1 proof: proves E2 and y_com commit to the same evaluation + - Sigma2 proof: proves consistency of E1 and D2 blinds + - Scalar product proof: proves (C, D1, D2) are consistent with blinded v1, v2 + - 1 ML + 1 FE verification in ZK mode (vs 4 ML + 1 FE in transparent mode) +- New `zk_e2e` example demonstrating the full ZK workflow +- New `zk_statistical` example with chi-squared uniformity and witness-independence tests (1000 trials) +- ZK test suite: end-to-end proofs, tampering resistance, sigma proof verification, soundness + +### Changed + +- `Polynomial::commit()` return type changed from `(GT, Vec, Option>)` to `(GT, Vec, F)` — the third element is now a single GT-level blind scalar (zero in Transparent mode) +- `prove()` and `create_evaluation_proof()` now take a `commit_blind: F` parameter +- `DoryProverState::set_initial_blinds()` now takes `r_d1` as its first parameter + ## [0.2.0] - 2026-01-29 ### Changed @@ -33,5 +56,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Homomorphic commitment properties - Comprehensive test suite including soundness tests +[0.3.0]: https://github.com/a16z/dory/compare/v0.2.0...v0.3.0 [0.2.0]: https://github.com/a16z/dory/compare/v0.1.0...v0.2.0 [0.1.0]: 
https://github.com/a16z/dory/releases/tag/v0.1.0 diff --git a/Cargo.lock b/Cargo.lock index f03781f..1ad9d5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 4 +version = 3 [[package]] name = "ahash" @@ -210,9 +210,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "cast" @@ -255,18 +255,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.51" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.51" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstyle", "clap_lex", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "criterion" @@ -347,9 +347,9 @@ checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "typenum", @@ -368,7 +368,7 @@ dependencies = [ [[package]] name = "dory-derive" -version = "0.2.0" +version = "0.3.0" dependencies = [ "proc-macro2", "quote", @@ -377,7 +377,7 @@ dependencies = [ [[package]] name = "dory-pcs" -version = "0.2.0" +version = "0.3.0" dependencies = [ "ark-bn254", "ark-ec", @@ -395,6 +395,7 @@ dependencies = [ "serde", "thiserror", "tracing", + "tracing-subscriber", ] [[package]] @@ -443,9 +444,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "generic-array" -version = "0.14.9" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -453,9 +454,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "libc", @@ -519,31 +520,52 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "js-sys" -version = "0.3.82" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = 
"b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" dependencies = [ "once_cell", "wasm-bindgen", ] +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + [[package]] name = "libc" -version = "0.2.177" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys", +] [[package]] name = "num-bigint" @@ -593,9 +615,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" [[package]] name = "plotters" @@ -636,18 +658,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.41" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] @@ -704,9 +726,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -716,9 +738,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -727,9 +749,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "rustversion" @@ -737,12 +759,6 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - 
[[package]] name = "same-file" version = "1.0.6" @@ -784,17 +800,32 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + [[package]] name = "subtle" version = "2.6.1" @@ -803,9 +834,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.108" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -814,24 +845,33 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = 
"ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -844,9 +884,9 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "pin-project-lite", "tracing-attributes", @@ -855,9 +895,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", @@ -866,11 +906,37 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ + "log", "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "nu-ansi-term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", ] [[package]] @@ -881,9 +947,15 @@ checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "unicode-ident" -version = "1.0.20" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "valuable" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "version_check" @@ -909,9 +981,9 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" dependencies = [ "cfg-if", "once_cell", @@ -922,9 +994,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -932,9 +1004,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = 
"03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" dependencies = [ "bumpalo", "proc-macro2", @@ -945,18 +1017,18 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.105" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" -version = "0.3.82" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" dependencies = [ "js-sys", "wasm-bindgen", @@ -988,18 +1060,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953" dependencies = [ "proc-macro2", "quote", @@ -1017,11 +1089,17 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", "syn", ] + +[[package]] +name = "zmij" +version = "1.0.21" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/Cargo.toml b/Cargo.toml index 2997987..9edbf11 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ resolver = "2" [package] name = "dory-pcs" -version = "0.2.0" +version = "0.3.0" edition = "2021" rust-version = "1.75" authors = [ @@ -34,6 +34,7 @@ all-features = true [features] default = [] +zk = [] backends = ["arkworks", "disk-persistence"] arkworks = [ "dep:ark-bn254", @@ -52,8 +53,8 @@ disk-persistence = [] [dependencies] thiserror = "2.0" -rand_core = "0.6" -dory-derive = { version = "0.2.0", path = "derive" } +rand_core = { version = "0.6", features = ["getrandom"] } +dory-derive = { version = "0.3.0", path = "derive" } tracing = "0.1" # Arkworks backend @@ -71,6 +72,7 @@ rayon = { version = "1.10", optional = true } [dev-dependencies] rand = "0.8" criterion = { version = "0.5", features = ["html_reports"] } +tracing-subscriber = { version = "0.3", features = ["fmt"] } [[example]] name = "basic_e2e" @@ -88,6 +90,14 @@ required-features = ["backends"] name = "homomorphic_mixed_sizes" required-features = ["backends"] +[[example]] +name = "zk_e2e" +required-features = ["backends", "zk"] + +[[example]] +name = "zk_statistical" +required-features = ["backends", "zk"] + [[bench]] name = "arkworks_proof" harness = false diff --git a/README.md b/README.md index 8d9cce0..79c093d 100644 --- a/README.md +++ b/README.md @@ -12,11 +12,12 @@ Dory is a transparent polynomial commitment scheme with excellent asymptotic per **Key Features:** - **Transparent setup**: No trusted setup ceremony required with optional disk persistence - **Logarithmic proof size**: O(log n) group elements -- **Logarithmic verification**: O(log n) GT exps and 5 pairings +- **Logarithmic verification**: O(log n) GT exps and 1 multi-pairing - **Modular design**: Pluggable backends for curves and cryptographic primitives - **Performance-optimized**: 
Vectorized operations, optional prepared point caching, and parallelization with Rayon - **Flexible matrix layouts**: Supports both square and non-square matrices (nu ≤ sigma) - **Homomorphic properties**: Commitment linearity enables proof aggregation +- **Zero-knowledge mode**: Toggable hiding proofs ## Installation @@ -24,21 +25,21 @@ Add `dory-pcs` to your `Cargo.toml`: ```toml [dependencies] -dory-pcs = "0.1" +dory-pcs = "0.3" ``` Or with specific features: ```toml [dependencies] -dory-pcs = { version = "0.1", features = ["backends", "disk-persistence"] } +dory-pcs = { version = "0.3", features = ["backends", "disk-persistence"] } ``` For maximum performance with all optimizations: ```toml [dependencies] -dory-pcs = { version = "0.1", features = ["backends", "cache", "parallel", "disk-persistence"] } +dory-pcs = { version = "0.3", features = ["backends", "cache", "parallel", "disk-persistence"] } ``` ## Architecture @@ -96,38 +97,39 @@ This property enables efficient proof aggregation and batch verification. See `e ## Usage ```rust -use dory_pcs::{setup, prove, verify}; +use dory_pcs::{setup, prove, verify, Transparent}; use dory_pcs::backends::arkworks::{ - BN254, G1Routines, G2Routines, ArkworksPolynomial, Blake2bTranscript + BN254, G1Routines, G2Routines, ArkworksPolynomial, ArkFr, Blake2bTranscript }; +use dory_pcs::primitives::arithmetic::Field; +use dory_pcs::primitives::poly::Polynomial; fn main() -> Result<(), Box> { - let mut rng = rand::thread_rng(); - // 1. Generate setup for polynomials up to 2^10 coefficients let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); // 2. Create a polynomial with 256 coefficients (nu=4, sigma=4) - let coefficients: Vec<_> = (0..256).map(|_| rand::random()).collect(); + let coefficients: Vec = (0..256).map(|_| ArkFr::random()).collect(); let polynomial = ArkworksPolynomial::new(coefficients); // 3. 
Define evaluation point (length = nu + sigma = 8) - let point: Vec<_> = (0..8).map(|_| rand::random()).collect(); + let point: Vec = (0..8).map(|_| ArkFr::random()).collect(); let nu = 4; // log₂(rows) = 4 → 16 rows let sigma = 4; // log₂(cols) = 4 → 16 columns // 4. Commit to polynomial to get tier-2 commitment and row commitments - let (tier_2, row_commitments) = polynomial - .commit::(nu, sigma, &prover_setup)?; + let (tier_2, row_commitments, commit_blind) = polynomial + .commit::(nu, sigma, &prover_setup)?; // 5. Create evaluation proof using row commitments let mut prover_transcript = Blake2bTranscript::new(b"dory-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &polynomial, &point, row_commitments, + commit_blind, nu, sigma, &prover_setup, @@ -153,7 +155,7 @@ fn main() -> Result<(), Box> { ## Examples -The repository includes four comprehensive examples demonstrating different aspects of Dory: +The repository includes six comprehensive examples demonstrating different aspects of Dory: 1. **`basic_e2e`** - Standard end-to-end workflow with square matrix (nu=4, sigma=4) ```bash @@ -175,6 +177,16 @@ The repository includes four comprehensive examples demonstrating different aspe cargo run --example homomorphic_mixed_sizes --features backends ``` +5. **`zk_e2e`** - Zero-knowledge end-to-end workflow with hiding proofs + ```bash + cargo run --example zk_e2e --features backends,zk + ``` + +6. **`zk_statistical`** - Chi-squared uniformity and witness-independence tests for ZK proofs + ```bash + cargo run --release --example zk_statistical --features backends,zk,parallel + ``` + ## Development Setup After cloning the repository, install Git hooks to ensure code quality: @@ -242,6 +254,7 @@ cargo bench --features backends,cache,parallel - `backends` - Enable concrete backends. Currently supports Arkworks BN254. 
- `cache` - Enable prepared point caching for ~20-30% pairing speedup. Requires `arkworks` and `parallel`. - `parallel` - Enable parallelization using Rayon for MSMs and pairings. Works with both `arkworks` backend and enables parallel features in `ark-ec` and `ark-ff`. +- `zk` - Enable zero-knowledge mode. Adds the `ZK` mode type for generating hiding proofs with blinded protocol messages, sigma proofs, and scalar-product sub-proofs. - `disk-persistence` - Enable automatic setup caching to disk. When enabled, `setup()` will load from OS-specific cache directories if available, avoiding regeneration. ## Project Structure @@ -260,9 +273,14 @@ src/ │ ├── mod.rs # Module exports │ ├── ark_field.rs # Field wrapper (ArkFr) │ ├── ark_group.rs # Group wrappers (ArkG1, ArkG2, ArkGT) +│ ├── ark_pairing.rs # Pairing curve (BN254) │ ├── ark_poly.rs # Polynomial implementation +│ ├── ark_proof.rs # Proof type alias and serialization +│ ├── ark_cache.rs # Prepared point caching +│ ├── ark_setup.rs # Setup wrapper with disk persistence │ ├── ark_serde.rs # Serialization bridge │ └── blake2b_transcript.rs # Blake2b transcript +├── mode.rs # Transparent and ZK mode types ├── setup.rs # Transparent setup generation ├── evaluation_proof.rs # Proof creation and verification ├── reduce_and_fold.rs # Inner product protocol @@ -276,7 +294,12 @@ tests/arkworks/ ├── commitment.rs # Commitment tests ├── evaluation.rs # Evaluation tests ├── integration.rs # End-to-end tests -└── soundness.rs # Soundness tests +├── homomorphic.rs # Homomorphic combination tests +├── non_square.rs # Non-square matrix tests +├── serialization.rs # Proof serialization round-trip tests +├── cache.rs # Prepared point caching tests +├── soundness.rs # Soundness tests +└── zk.rs # Zero-knowledge mode and ZK soundness tests ``` ## Test Coverage @@ -288,6 +311,8 @@ The implementation includes comprehensive tests covering: - End-to-end workflows - Homomorphic combination - Non-square matrix support (nu < sigma, nu = 
sigma - 1, and very rectangular cases) +- Zero-knowledge mode (hidden evaluations, sigma proofs, scalar-product proofs, soundness) +- Statistical indistinguishability of ZK proofs (witness independence) - Soundness (tampering resistance for all proof components across 20+ attack vectors) - Prepared point caching correctness diff --git a/benches/arkworks_proof.rs b/benches/arkworks_proof.rs index 527b297..c5dc4fd 100644 --- a/benches/arkworks_proof.rs +++ b/benches/arkworks_proof.rs @@ -15,10 +15,10 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use dory_pcs::backends::arkworks::{ ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, }; +use dory_pcs::mode::Transparent; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; use dory_pcs::{prove, setup, verify}; -use rand::thread_rng; #[cfg(feature = "cache")] use dory_pcs::backends::arkworks::init_cache; @@ -29,10 +29,9 @@ fn setup_benchmark_data() -> ( dory_pcs::setup::ProverSetup, dory_pcs::setup::VerifierSetup, ) { - let mut rng = thread_rng(); let max_log_n = 26; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); // Initialize cache with setup generators for optimized pairings #[cfg(feature = "cache")] @@ -45,10 +44,10 @@ fn setup_benchmark_data() -> ( // Create polynomial with 2^26 coefficients (nu=13, sigma=13) let poly_size = 1 << 26; // 67,108,864 coefficients let num_vars = 26; - let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); (poly, point, prover_setup, verifier_setup) } @@ -60,7 +59,7 @@ fn bench_commitment(c: &mut Criterion) { 
c.bench_function("commitment_2^26_coefficients", |b| { b.iter(|| { - poly.commit::( + poly.commit::( black_box(nu), black_box(sigma), black_box(&prover_setup), @@ -75,17 +74,18 @@ fn bench_prove(c: &mut Criterion) { let nu = 13; let sigma = 13; - let (_, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (_, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); c.bench_function("prove_2^26_coefficients", |b| { b.iter(|| { let mut transcript = Blake2bTranscript::new(b"dory-bench"); - prove::<_, BN254, G1Routines, G2Routines, _, _>( + prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( black_box(&poly), black_box(&point), black_box(tier_1.clone()), + black_box(commit_blind), black_box(nu), black_box(sigma), black_box(&prover_setup), @@ -101,15 +101,16 @@ fn bench_verify(c: &mut Criterion) { let nu = 13; let sigma = 13; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -136,9 +137,8 @@ fn bench_verify(c: &mut Criterion) { } fn bench_end_to_end(c: &mut Criterion) { - let mut rng = thread_rng(); let max_log_n = 26; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); // Initialize cache once #[cfg(feature = "cache")] @@ -150,32 +150,31 @@ fn bench_end_to_end(c: &mut Criterion) { c.bench_function("end_to_end_2^26_coefficients", |b| { b.iter(|| { - let mut rng = thread_rng(); let nu = 13; let sigma = 13; let poly_size = 1 << 26; // 67,108,864 coefficients let num_vars = 26; // Create polynomial - let coefficients: Vec = - (0..poly_size).map(|_| ArkFr::random(&mut 
rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); // Commit - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); // Evaluate - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); // Prove let mut prover_transcript = Blake2bTranscript::new(b"dory-bench"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, diff --git a/derive/Cargo.toml b/derive/Cargo.toml index 2e59962..cdaa6f2 100644 --- a/derive/Cargo.toml +++ b/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dory-derive" -version = "0.2.0" +version = "0.3.0" edition = "2021" rust-version = "1.75" authors = ["Markos Georghiades "] diff --git a/examples/basic_e2e.rs b/examples/basic_e2e.rs index 8de8fc3..146756c 100644 --- a/examples/basic_e2e.rs +++ b/examples/basic_e2e.rs @@ -1,84 +1,48 @@ //! Basic end-to-end example of Dory polynomial commitment scheme //! -//! This example demonstrates the standard workflow with a square matrix layout: -//! - Setup generation -//! - Polynomial commitment -//! - Evaluation proof generation -//! - Verification +//! Demonstrates the standard workflow with a square matrix layout: +//! setup, commit, evaluate, prove, verify. //! -//! Matrix dimensions: 16×16 (nu=4, sigma=4, total 256 coefficients) +//! 
Matrix dimensions: 16x16 (nu=4, sigma=4, total 256 coefficients) use dory_pcs::backends::arkworks::{ ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, }; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; -use rand::thread_rng; -use tracing::info; +use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { - info!("Dory PCS - Basic End-to-End Example"); - info!("====================================\n"); + tracing_subscriber::fmt::init(); - let mut rng = thread_rng(); + let (prover_setup, verifier_setup) = setup::(10); - // Step 1: Setup - let max_log_n = 10; - info!( - "1. Generating transparent setup (max_log_n = {})...", - max_log_n - ); - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); - info!(" ✓ Setup complete\n"); - - // Step 2: Create polynomial - // Square matrix: nu = sigma = 4 → 16 rows × 16 columns = 256 coefficients let nu = 4; let sigma = 4; - let poly_size = 1 << (nu + sigma); // 2^8 = 256 - let num_vars = nu + sigma; // 8 - - info!("2. Creating random polynomial..."); - info!(" Matrix layout: {}×{} (square)", 1 << nu, 1 << sigma); - info!(" Total coefficients: {}", poly_size); - info!(" Number of variables: {}", num_vars); + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; - let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); - info!(" ✓ Polynomial created\n"); - // Step 3: Commit - info!("3. 
Computing polynomial commitment..."); - let (tier_2, tier_1) = poly.commit::(nu, sigma, &prover_setup)?; - info!( - " ✓ Tier-1 commitment: {} row commitments (G1)", - tier_1.len() - ); - info!(" ✓ Tier-2 commitment: final commitment (GT)\n"); + let (tier_2, tier_1, commit_blind) = + poly.commit::(nu, sigma, &prover_setup)?; - // Step 4: Evaluation - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); - info!("4. Evaluating polynomial at random point..."); - info!(" ✓ Evaluation result computed\n"); - // Step 5: Prove - info!("5. Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-basic-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, &mut prover_transcript, )?; - info!(" ✓ Proof generated (logarithmic size)\n"); - // Step 6: Verify - info!("6. Verifying proof..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-basic-example"); verify::<_, BN254, G1Routines, G2Routines, _>( tier_2, @@ -88,10 +52,6 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!(" ✓ Proof verified successfully!\n"); - - info!("===================================="); - info!("Example completed successfully!"); Ok(()) } diff --git a/examples/homomorphic.rs b/examples/homomorphic.rs index 83e4428..6232686 100644 --- a/examples/homomorphic.rs +++ b/examples/homomorphic.rs @@ -1,74 +1,42 @@ //! Homomorphic combination example for Dory commitments //! -//! This example demonstrates the homomorphic properties of Dory commitments: -//! - Commit to multiple polynomials independently -//! - Combine commitments -//! - Prove evaluation on the combined polynomial using the combined commitment -//! 
- Verify that the homomorphic combination works correctly -//! -//! Homomorphic property: Com(r₁·P₁ + r₂·P₂ + ... + rₙ·Pₙ) = r₁·Com(P₁) + r₂·Com(P₂) + ... + rₙ·Com(Pₙ) +//! Demonstrates: Com(r1*P1 + r2*P2 + ... + rn*Pn) = r1*Com(P1) + r2*Com(P2) + ... + rn*Com(Pn) use dory_pcs::backends::arkworks::{ ArkFr, ArkG1, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, }; use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; -use rand::thread_rng; -use tracing::info; +use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { - info!("Dory PCS - Homomorphic Combination Example"); - info!("===========================================\n"); - - let mut rng = thread_rng(); + tracing_subscriber::fmt::init(); - // Step 1: Setup - let max_log_n = 10; - info!( - "1. Generating transparent setup (max_log_n = {})...", - max_log_n - ); - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); - info!(" ✓ Setup complete\n"); + let (prover_setup, verifier_setup) = setup::(10); - // Parameters let nu = 4; let sigma = 4; - let poly_size = 1 << (nu + sigma); // 256 - let num_vars = nu + sigma; // 8 + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; let num_polys = 5; - info!("2. Creating {} random polynomials...", num_polys); - info!(" Each polynomial: {} coefficients", poly_size); let polys: Vec = (0..num_polys) .map(|_| { - let coeffs: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coeffs: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); ArkworksPolynomial::new(coeffs) }) .collect(); - info!(" ✓ Polynomials created\n"); - // Step 3: Commit to each polynomial - info!("3. 
Computing individual commitments..."); let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup) + poly.commit::(nu, sigma, &prover_setup) .unwrap() }) .collect(); - info!(" ✓ {} commitments computed\n", num_polys); - - // Step 4: Generate random coefficients for linear combination - info!("4. Generating random combination coefficients..."); - let coeffs: Vec = (0..num_polys).map(|_| ArkFr::random(&mut rng)).collect(); - info!(" ✓ Coefficients: r₁, r₂, ..., r{}\n", num_polys); - // Step 5: Homomorphically combine commitments - info!("5. Combining commitments homomorphically..."); + let coeffs: Vec = (0..num_polys).map(|_| ArkFr::random()).collect(); - // Tier-2 (GT group): combined_tier2 = r₁·C₁ + r₂·C₂ + ... + r₅·C₅ #[allow(clippy::op_ref)] let mut combined_tier2 = coeffs[0] * &commitments[0].0; for i in 1..num_polys { @@ -77,7 +45,6 @@ fn main() -> Result<(), Box> { combined_tier2 = combined_tier2 + scaled; } - // Tier-1 (G1 group): For each row, combine the row commitments let num_rows = 1 << nu; let mut combined_tier1 = vec![ArkG1::identity(); num_rows]; for i in 0..num_polys { @@ -86,20 +53,11 @@ fn main() -> Result<(), Box> { combined_tier1[row_idx] = combined_tier1[row_idx] + scaled; } } - info!(" ✓ Tier-2 combined (GT)"); - info!(" ✓ Tier-1 combined ({} rows in G1)\n", num_rows); - - // Step 6: Compute combined polynomial - info!( - "6. Computing combined polynomial: P = r₁·P₁ + r₂·P₂ + ... 
+ r{}·P{}...", - num_polys, num_polys - ); + let mut combined_coeffs = vec![ArkFr::zero(); poly_size]; for poly_idx in 0..num_polys { - // Access coefficients through evaluation at hypercube vertices #[allow(clippy::needless_range_loop)] for coeff_idx in 0..poly_size { - // Create point that selects the coeff_idx-th vertex of the hypercube let point: Vec = (0..num_vars) .map(|bit_idx| { if (coeff_idx >> bit_idx) & 1 == 1 { @@ -117,41 +75,29 @@ fn main() -> Result<(), Box> { } } let combined_poly = ArkworksPolynomial::new(combined_coeffs); - info!(" ✓ Combined polynomial computed\n"); - // Step 7: Evaluate and verify consistency - info!("7. Verifying homomorphic property..."); - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = combined_poly.evaluate(&point); - // Check that combined polynomial evaluation matches linear combination let mut expected_eval = ArkFr::zero(); for i in 0..num_polys { let poly_eval = polys[i].evaluate(&point); expected_eval = expected_eval + coeffs[i].mul(&poly_eval); } - assert_eq!( - evaluation, expected_eval, - "Combined polynomial evaluation must match linear combination" - ); - info!(" ✓ Evaluation matches: P(x) = Σ rᵢ·Pᵢ(x)\n"); - - // Step 8: Generate proof - info!("8. Generating evaluation proof for combined polynomial..."); + assert_eq!(evaluation, expected_eval); + let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, + ArkFr::zero(), nu, sigma, &prover_setup, &mut prover_transcript, )?; - info!(" ✓ Proof generated\n"); - // Step 9: Verify - info!("9. 
Verifying proof with combined commitment..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-homomorphic-example"); verify::<_, BN254, G1Routines, G2Routines, _>( combined_tier2, @@ -161,14 +107,6 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!(" ✓ Proof verified successfully!\n"); - - info!("==========================================="); - info!("Homomorphic combination verified!"); - info!( - "Combined {} polynomials using random coefficients", - num_polys - ); Ok(()) } diff --git a/examples/homomorphic_mixed_sizes.rs b/examples/homomorphic_mixed_sizes.rs index 90fb474..65412b9 100644 --- a/examples/homomorphic_mixed_sizes.rs +++ b/examples/homomorphic_mixed_sizes.rs @@ -1,55 +1,42 @@ -//! Mixed-size homomorphic combination example for Dory commitments. +//! Mixed-size homomorphic combination example for Dory commitments //! -//! Demonstrates how to homomorphically combine two polynomials that only use a -//! subset of the coefficient domain (sizes 5 and 20, padded to 32) and then -//! produce and verify an evaluation proof for the combined commitment. +//! Demonstrates homomorphic combination of polynomials with different matrix +//! dimensions (sizes 16 and 4, combined in a 4x4 layout). 
use dory_pcs::backends::arkworks::{ ArkFr, ArkG1, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, }; use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; -use rand::thread_rng; -use tracing::info; +use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { - info!("Dory PCS - Mixed-size Homomorphic Combination Example"); - let mut rng = thread_rng(); + tracing_subscriber::fmt::init(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 4); + let (prover_setup, verifier_setup) = setup::(4); - info!("Creating two polynomials with logical sizes 16 and 4..."); let mut coeffs_poly1 = vec![ArkFr::zero(); 16]; let mut coeffs_poly2 = vec![ArkFr::zero(); 4]; for coeff in coeffs_poly1.iter_mut() { - *coeff = ArkFr::random(&mut rng); + *coeff = ArkFr::random(); } for coeff in coeffs_poly2.iter_mut() { - *coeff = ArkFr::random(&mut rng); + *coeff = ArkFr::random(); } let poly1 = ArkworksPolynomial::new(coeffs_poly1.clone()); let poly2 = ArkworksPolynomial::new(coeffs_poly2.clone()); - info!("Poly1: {:?}", poly1); - info!("Poly2: {:?}", poly2); - let commitment1 = poly1 - .commit::(2, 2, &prover_setup) + .commit::(2, 2, &prover_setup) .unwrap(); let commitment2 = poly2 - .commit::(1, 1, &prover_setup) + .commit::(1, 1, &prover_setup) .unwrap(); - info!("✓ Commitments ready\n"); - info!("Sampling random combination scalars r1, r2..."); - let coeff_scalars = [ArkFr::random(&mut rng), ArkFr::random(&mut rng)]; + let coeff_scalars = [ArkFr::random(), ArkFr::random()]; - info!("Combining tier-2 commitments (GT)..."); let combined_tier2 = coeff_scalars[0] * commitment1.0 + coeff_scalars[1] * commitment2.0; - info!("Combining tier-1 commitments (G1 rows)..."); - let mut combined_tier1 = vec![ArkG1::identity(); 4]; for (row_idx, row_commit) in commitment1.1.iter().enumerate() { combined_tier1[row_idx] = combined_tier1[row_idx] + (coeff_scalars[0] * 
row_commit); @@ -58,7 +45,6 @@ fn main() -> Result<(), Box> { combined_tier1[row_idx] = combined_tier1[row_idx] + (coeff_scalars[1] * row_commit); } - info!("Building combined polynomial coefficients..."); let mut combined_coeffs = vec![ArkFr::zero(); 16]; for idx in 0..16 { let term1 = coeff_scalars[0].mul(&coeffs_poly1[idx]); @@ -74,8 +60,6 @@ fn main() -> Result<(), Box> { } let combined_poly = ArkworksPolynomial::new(combined_coeffs); - info!("Combined polynomial: {:?}", combined_poly); - let mut padded_poly2_coefficients = vec![ArkFr::zero(); 16]; padded_poly2_coefficients[0] = coeffs_poly2[0]; padded_poly2_coefficients[1] = coeffs_poly2[1]; @@ -83,11 +67,9 @@ fn main() -> Result<(), Box> { padded_poly2_coefficients[5] = coeffs_poly2[3]; let padded_poly2 = ArkworksPolynomial::new(padded_poly2_coefficients); - info!("Evaluating combined polynomial at a random point..."); - let point: Vec = (0..4).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..4).map(|_| ArkFr::random()).collect(); let evaluation = combined_poly.evaluate(&point); - info!("Checking that evaluation matches r1·P1(x) + r2·P2(x)..."); let eval1 = poly1.evaluate(&point); let eval2 = padded_poly2.evaluate(&point); let eval3 = poly2.evaluate(&[point[0], point[2]]) @@ -98,22 +80,19 @@ fn main() -> Result<(), Box> { expected = expected + coeff_scalars[0].mul(&eval1); expected = expected + coeff_scalars[1].mul(&eval2); assert_eq!(evaluation, expected); - info!("✓ Evaluation matches linear combination\n"); - info!("Generating evaluation proof with combined commitment..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-homomorphic-mixed"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, + ArkFr::zero(), 2, 2, &prover_setup, &mut prover_transcript, )?; - info!("✓ Proof generated\n"); - info!("Verifying proof against combined tier-2 
commitment..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-homomorphic-mixed"); verify::<_, BN254, G1Routines, G2Routines, _>( combined_tier2, @@ -123,14 +102,11 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!("✓ Proof verified!"); - info!("==========================================="); let padded_poly_commitment = padded_poly2 - .commit::(2, 2, &prover_setup) + .commit::(2, 2, &prover_setup) .unwrap(); assert_eq!(padded_poly_commitment.0, commitment2.0); - info!("✓ Padded poly commitment matches original poly2 commitment"); Ok(()) } diff --git a/examples/non_square.rs b/examples/non_square.rs index eece5fa..8ae8e45 100644 --- a/examples/non_square.rs +++ b/examples/non_square.rs @@ -1,83 +1,47 @@ //! Non-square matrix example for Dory commitments //! -//! This example demonstrates that Dory supports non-square matrix layouts -//! where the number of rows differs from the number of columns. +//! Demonstrates that Dory supports non-square matrix layouts where nu < sigma. //! -//! Constraint: nu ≤ sigma (rows ≤ columns) -//! Matrix dimensions: 8×16 (nu=3, sigma=4, total 128 coefficients) +//! Matrix dimensions: 8x16 (nu=3, sigma=4, total 128 coefficients) use dory_pcs::backends::arkworks::{ ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, }; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; -use rand::thread_rng; -use tracing::info; +use dory_pcs::{prove, setup, verify, Transparent}; fn main() -> Result<(), Box> { - info!("Dory PCS - Non-Square Matrix Example"); - info!("=====================================\n"); + tracing_subscriber::fmt::init(); - let mut rng = thread_rng(); + let (prover_setup, verifier_setup) = setup::(10); - // Step 1: Setup - let max_log_n = 10; - info!( - "1. 
Generating transparent setup (max_log_n = {})...", - max_log_n - ); - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); - info!(" ✓ Setup complete\n"); - - // Step 2: Create polynomial with non-square matrix layout - // Non-square: nu = 3, sigma = 4 → 8 rows × 16 columns = 128 coefficients let nu = 3; let sigma = 4; - let poly_size = 1 << (nu + sigma); // 2^7 = 128 - let num_vars = nu + sigma; // 7 - - info!("2. Creating random polynomial..."); - info!(" Matrix layout: {}×{} (NON-SQUARE)", 1 << nu, 1 << sigma); - info!(" Total coefficients: {}", poly_size); - info!(" Number of variables: {}", num_vars); - info!(" Constraint: nu ({}) ≤ sigma ({})", nu, sigma); + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; - let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); let poly = ArkworksPolynomial::new(coefficients); - info!(" ✓ Polynomial created\n"); - // Step 3: Commit - info!("3. Computing polynomial commitment..."); - let (tier_2, tier_1) = poly.commit::(nu, sigma, &prover_setup)?; - info!( - " ✓ Tier-1 commitment: {} row commitments (G1)", - tier_1.len() - ); - info!(" ✓ Tier-2 commitment: final commitment (GT)\n"); + let (tier_2, tier_1, commit_blind) = + poly.commit::(nu, sigma, &prover_setup)?; - // Step 4: Evaluation - let point: Vec = (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect(); + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); let evaluation = poly.evaluate(&point); - info!("4. Evaluating polynomial at random point..."); - info!(" ✓ Evaluation result computed\n"); - // Step 5: Prove - info!("5. 
Generating evaluation proof..."); let mut prover_transcript = Blake2bTranscript::new(b"dory-non-square-example"); - let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, &mut prover_transcript, )?; - info!(" ✓ Proof generated (logarithmic size)\n"); - // Step 6: Verify - info!("6. Verifying proof..."); let mut verifier_transcript = Blake2bTranscript::new(b"dory-non-square-example"); verify::<_, BN254, G1Routines, G2Routines, _>( tier_2, @@ -87,18 +51,6 @@ fn main() -> Result<(), Box> { verifier_setup, &mut verifier_transcript, )?; - info!(" ✓ Proof verified successfully!\n"); - - info!("====================================="); - info!("Non-square matrix example completed!"); - info!( - "Matrix: {}×{} ({}×{} = {} coefficients)", - 1 << nu, - 1 << sigma, - 1 << nu, - 1 << sigma, - poly_size - ); Ok(()) } diff --git a/examples/zk_e2e.rs b/examples/zk_e2e.rs new file mode 100644 index 0000000..8e40759 --- /dev/null +++ b/examples/zk_e2e.rs @@ -0,0 +1,58 @@ +//! Zero-knowledge end-to-end example of Dory polynomial commitment scheme +//! +//! Demonstrates the full ZK workflow where both the commitment and the proof +//! are hiding. The `ZK` mode type parameter is used for both `commit()` and +//! `prove()`. +//! +//! 
Matrix dimensions: 16x16 (nu=4, sigma=4, total 256 coefficients) + +use dory_pcs::backends::arkworks::{ + ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, +}; +use dory_pcs::primitives::arithmetic::Field; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{prove, setup, verify, ZK}; + +fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + + let (prover_setup, verifier_setup) = setup::(10); + + let nu = 4; + let sigma = 4; + let poly_size = 1 << (nu + sigma); + let num_vars = nu + sigma; + + let coefficients: Vec = (0..poly_size).map(|_| ArkFr::random()).collect(); + let poly = ArkworksPolynomial::new(coefficients); + + let (tier_2, tier_1, commit_blind) = + poly.commit::(nu, sigma, &prover_setup)?; + + let point: Vec = (0..num_vars).map(|_| ArkFr::random()).collect(); + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = Blake2bTranscript::new(b"dory-zk-example"); + let (proof, _) = prove::<_, BN254, G1Routines, G2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + )?; + + let mut verifier_transcript = Blake2bTranscript::new(b"dory-zk-example"); + verify::<_, BN254, G1Routines, G2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + )?; + + Ok(()) +} diff --git a/examples/zk_statistical.rs b/examples/zk_statistical.rs new file mode 100644 index 0000000..dfe4859 --- /dev/null +++ b/examples/zk_statistical.rs @@ -0,0 +1,513 @@ +//! Statistical tests for zero-knowledge property of Dory PCS +//! +//! Verifies that proof elements are statistically indistinguishable from uniform +//! random regardless of the witness (polynomial) distribution. +//! +//! ```sh +//! cargo run --release --features "backends zk" --example zk_statistical +//! 
``` + +use ark_serialize::CanonicalSerialize; +use dory_pcs::backends::arkworks::{ + ArkFr, ArkworksPolynomial, Blake2bTranscript, G1Routines, G2Routines, BN254, +}; +use dory_pcs::primitives::arithmetic::Field; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{create_evaluation_proof, setup, verify, DoryProof, ZK}; +use std::collections::HashMap; +use tracing::info; + +const NUM_BUCKETS: usize = 16; +const NUM_TRIALS: usize = 1000; +/// chi-squared critical value: df=15, alpha=0.0001 (Bonferroni-safe for ~360 tests) +const CHI2_CRITICAL: f64 = 43.84; + +fn random_polynomial(size: usize) -> ArkworksPolynomial { + let coefficients: Vec = (0..size).map(|_| ArkFr::random()).collect(); + ArkworksPolynomial::new(coefficients) +} + +fn random_point(num_vars: usize) -> Vec { + (0..num_vars).map(|_| ArkFr::random()).collect() +} + +fn fresh_transcript() -> Blake2bTranscript { + Blake2bTranscript::new(b"dory-test") +} + +struct BucketTracker { + buckets: HashMap>, +} + +impl BucketTracker { + fn new() -> Self { + Self { + buckets: HashMap::new(), + } + } + + fn record(&mut self, name: &str, bucket: usize) { + self.buckets + .entry(name.to_string()) + .or_insert_with(|| vec![0; NUM_BUCKETS])[bucket] += 1; + } + + fn chi_squared(&self, name: &str, expected: f64) -> Option { + self.buckets.get(name).map(|buckets| { + buckets + .iter() + .map(|&observed| { + let diff = observed as f64 - expected; + diff * diff / expected + }) + .sum() + }) + } + + fn all_names(&self) -> Vec { + let mut names: Vec<_> = self.buckets.keys().cloned().collect(); + names.sort(); + names + } +} + +fn bucket_from_serializable(elem: &T) -> usize { + let mut bytes = Vec::new(); + elem.serialize_compressed(&mut bytes).unwrap(); + (bytes[0] as usize) % NUM_BUCKETS +} + +type ArkDoryProof = DoryProof< + dory_pcs::backends::arkworks::ArkG1, + dory_pcs::backends::arkworks::ArkG2, + dory_pcs::backends::arkworks::ArkGT, +>; + +fn collect_full_zk_proof_stats(proof: &ArkDoryProof, tracker: &mut 
BucketTracker) { + tracker.record("zk_vmv_c", bucket_from_serializable(&proof.vmv_message.c)); + tracker.record("zk_vmv_d2", bucket_from_serializable(&proof.vmv_message.d2)); + tracker.record("zk_vmv_e1", bucket_from_serializable(&proof.vmv_message.e1)); + + if let Some(ref e2) = proof.e2 { + tracker.record("zk_vmv_e2", bucket_from_serializable(e2)); + } + if let Some(ref y_com) = proof.y_com { + tracker.record("zk_vmv_y_com", bucket_from_serializable(y_com)); + } + + for (i, msg) in proof.first_messages.iter().enumerate() { + let prefix = format!("zk_first_{i}"); + tracker.record( + &format!("{prefix}_d1_left"), + bucket_from_serializable(&msg.d1_left), + ); + tracker.record( + &format!("{prefix}_d1_right"), + bucket_from_serializable(&msg.d1_right), + ); + tracker.record( + &format!("{prefix}_d2_left"), + bucket_from_serializable(&msg.d2_left), + ); + tracker.record( + &format!("{prefix}_d2_right"), + bucket_from_serializable(&msg.d2_right), + ); + } + + for (i, msg) in proof.second_messages.iter().enumerate() { + let prefix = format!("zk_second_{i}"); + tracker.record( + &format!("{prefix}_c_plus"), + bucket_from_serializable(&msg.c_plus), + ); + tracker.record( + &format!("{prefix}_c_minus"), + bucket_from_serializable(&msg.c_minus), + ); + tracker.record( + &format!("{prefix}_e1_plus"), + bucket_from_serializable(&msg.e1_plus), + ); + tracker.record( + &format!("{prefix}_e1_minus"), + bucket_from_serializable(&msg.e1_minus), + ); + tracker.record( + &format!("{prefix}_e2_plus"), + bucket_from_serializable(&msg.e2_plus), + ); + tracker.record( + &format!("{prefix}_e2_minus"), + bucket_from_serializable(&msg.e2_minus), + ); + } + + tracker.record( + "zk_final_e1", + bucket_from_serializable(&proof.final_message.e1), + ); + tracker.record( + "zk_final_e2", + bucket_from_serializable(&proof.final_message.e2), + ); + + if let Some(ref sigma1) = proof.sigma1_proof { + tracker.record("sigma1_a1", bucket_from_serializable(&sigma1.a1)); + tracker.record("sigma1_a2", 
bucket_from_serializable(&sigma1.a2)); + tracker.record("sigma1_z1", bucket_from_serializable(&sigma1.z1)); + tracker.record("sigma1_z2", bucket_from_serializable(&sigma1.z2)); + tracker.record("sigma1_z3", bucket_from_serializable(&sigma1.z3)); + } + + if let Some(ref sigma2) = proof.sigma2_proof { + tracker.record("sigma2_a", bucket_from_serializable(&sigma2.a)); + tracker.record("sigma2_z1", bucket_from_serializable(&sigma2.z1)); + tracker.record("sigma2_z2", bucket_from_serializable(&sigma2.z2)); + } + + if let Some(ref sp) = proof.scalar_product_proof { + tracker.record("zk_sp_p1", bucket_from_serializable(&sp.p1)); + tracker.record("zk_sp_p2", bucket_from_serializable(&sp.p2)); + tracker.record("zk_sp_q", bucket_from_serializable(&sp.q)); + tracker.record("zk_sp_r", bucket_from_serializable(&sp.r)); + tracker.record("zk_sp_e1", bucket_from_serializable(&sp.e1)); + tracker.record("zk_sp_e2", bucket_from_serializable(&sp.e2)); + tracker.record("zk_sp_r1", bucket_from_serializable(&sp.r1)); + tracker.record("zk_sp_r2", bucket_from_serializable(&sp.r2)); + tracker.record("zk_sp_r3", bucket_from_serializable(&sp.r3)); + } +} + +fn prove_verify_collect( + poly: &ArkworksPolynomial, + point: &[ArkFr], + nu: usize, + sigma: usize, + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, + tracker: &mut BucketTracker, +) { + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, prover_setup) + .unwrap(); + + let evaluation = poly.evaluate(point); + let mut transcript = fresh_transcript(); + let (proof, _) = create_evaluation_proof::<_, BN254, G1Routines, G2Routines, _, _, ZK>( + poly, + point, + Some(tier_1), + commit_blind, + nu, + sigma, + prover_setup, + &mut transcript, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + verify::<_, BN254, G1Routines, G2Routines, _>( + tier_2, + evaluation, + point, + &proof, + verifier_setup.clone(), + &mut verifier_transcript, + ) + .expect("proof verification failed"); 
+ + collect_full_zk_proof_stats(&proof, tracker); +} + +fn two_sample_chi_squared(a: &[usize], b: &[usize]) -> f64 { + let n_a: f64 = a.iter().sum::() as f64; + let n_b: f64 = b.iter().sum::() as f64; + let n_total = n_a + n_b; + + a.iter() + .zip(b.iter()) + .map(|(&obs_a, &obs_b)| { + let pooled = obs_a as f64 + obs_b as f64; + if pooled < 1.0 { + return 0.0; + } + let expected_a = pooled * n_a / n_total; + let expected_b = pooled * n_b / n_total; + let term_a = if expected_a > 0.0 { + (obs_a as f64 - expected_a).powi(2) / expected_a + } else { + 0.0 + }; + let term_b = if expected_b > 0.0 { + (obs_b as f64 - expected_b).powi(2) / expected_b + } else { + 0.0 + }; + term_a + term_b + }) + .sum() +} + +fn assert_uniformity(trackers: &[(&str, &BucketTracker)], expected: f64) { + let mut failures = Vec::new(); + + for &(label, tracker) in trackers { + for name in tracker.all_names() { + if let Some(chi2) = tracker.chi_squared(&name, expected) { + if chi2 >= CHI2_CRITICAL { + failures.push(format!( + "{label}/{name}: chi2={chi2:.2} >= {CHI2_CRITICAL:.2}" + )); + } + } + } + } + + assert!( + failures.is_empty(), + "ZK statistical test failed - {} elements showed non-uniform distribution:\n{}", + failures.len(), + failures.join("\n") + ); +} + +fn assert_witness_independence(trackers: &[(&str, &BucketTracker)]) { + let mut failures = Vec::new(); + + for i in 0..trackers.len() { + for j in (i + 1)..trackers.len() { + let (label_a, tracker_a) = trackers[i]; + let (label_b, tracker_b) = trackers[j]; + + for name in tracker_a.all_names() { + let Some(buckets_a) = tracker_a.buckets.get(&name) else { + continue; + }; + let Some(buckets_b) = tracker_b.buckets.get(&name) else { + continue; + }; + + let chi2 = two_sample_chi_squared(buckets_a, buckets_b); + if chi2 >= CHI2_CRITICAL { + failures.push(format!( + "{label_a} vs {label_b}/{name}: chi2={chi2:.2} >= {CHI2_CRITICAL:.2}" + )); + } + } + } + } + + assert!( + failures.is_empty(), + "ZK witness independence test failed - {} 
elements showed witness-dependent distribution:\n{}", + failures.len(), + failures.join("\n") + ); +} + +fn test_statistical_indistinguishability( + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, +) { + let nu = 2; + let sigma = 2; + let poly_size = 1 << (nu + sigma); + let point = random_point(nu + sigma); + + let mut tracker_zeros = BucketTracker::new(); + let mut tracker_ones = BucketTracker::new(); + let mut tracker_random = BucketTracker::new(); + + for _ in 0..NUM_TRIALS { + let zeros = ArkworksPolynomial::new(vec![ArkFr::zero(); poly_size]); + prove_verify_collect( + &zeros, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_zeros, + ); + + let ones = ArkworksPolynomial::new(vec![ArkFr::one(); poly_size]); + prove_verify_collect( + &ones, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_ones, + ); + + let random = random_polynomial(poly_size); + prove_verify_collect( + &random, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_random, + ); + } + + let expected = NUM_TRIALS as f64 / NUM_BUCKETS as f64; + assert_uniformity( + &[ + ("zeros", &tracker_zeros), + ("ones", &tracker_ones), + ("random", &tracker_random), + ], + expected, + ); +} + +fn test_statistical_indistinguishability_non_square( + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, +) { + let nu = 1; + let sigma = 3; + let poly_size = 1 << (nu + sigma); + let point = random_point(nu + sigma); + + let mut tracker_zeros = BucketTracker::new(); + let mut tracker_ones = BucketTracker::new(); + let mut tracker_random = BucketTracker::new(); + + for _ in 0..NUM_TRIALS { + let zeros = ArkworksPolynomial::new(vec![ArkFr::zero(); poly_size]); + prove_verify_collect( + &zeros, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_zeros, + ); + + let ones = ArkworksPolynomial::new(vec![ArkFr::one(); poly_size]); + prove_verify_collect( + &ones, + 
&point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_ones, + ); + + let random = random_polynomial(poly_size); + prove_verify_collect( + &random, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_random, + ); + } + + let expected = NUM_TRIALS as f64 / NUM_BUCKETS as f64; + assert_uniformity( + &[ + ("zeros", &tracker_zeros), + ("ones", &tracker_ones), + ("random", &tracker_random), + ], + expected, + ); +} + +fn test_witness_independence( + prover_setup: &dory_pcs::ProverSetup, + verifier_setup: &dory_pcs::VerifierSetup, +) { + let nu = 2; + let sigma = 2; + let poly_size = 1 << (nu + sigma); + let point = random_point(nu + sigma); + + let mut tracker_zeros = BucketTracker::new(); + let mut tracker_ones = BucketTracker::new(); + let mut tracker_skewed = BucketTracker::new(); + let mut tracker_uniform = BucketTracker::new(); + + for _ in 0..NUM_TRIALS { + let zeros = ArkworksPolynomial::new(vec![ArkFr::zero(); poly_size]); + prove_verify_collect( + &zeros, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_zeros, + ); + + let ones = ArkworksPolynomial::new(vec![ArkFr::one(); poly_size]); + prove_verify_collect( + &ones, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_ones, + ); + + let mut skewed_coeffs = vec![ArkFr::zero(); poly_size]; + skewed_coeffs[0] = ArkFr::from_u64(42); + let skewed = ArkworksPolynomial::new(skewed_coeffs); + prove_verify_collect( + &skewed, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_skewed, + ); + + let uniform = random_polynomial(poly_size); + prove_verify_collect( + &uniform, + &point, + nu, + sigma, + prover_setup, + verifier_setup, + &mut tracker_uniform, + ); + } + + assert_witness_independence(&[ + ("zeros", &tracker_zeros), + ("ones", &tracker_ones), + ("skewed", &tracker_skewed), + ("uniform", &tracker_uniform), + ]); +} + +fn main() { + tracing_subscriber::fmt::init(); + + let (prover_setup, verifier_setup) = 
setup::(6); + + info!("[1/3] statistical indistinguishability (square, nu=2 sigma=2)..."); + test_statistical_indistinguishability(&prover_setup, &verifier_setup); + info!(" PASS"); + + info!("[2/3] statistical indistinguishability (non-square, nu=1 sigma=3)..."); + test_statistical_indistinguishability_non_square(&prover_setup, &verifier_setup); + info!(" PASS"); + + info!("[3/3] witness independence (4 distributions, pairwise chi-squared)..."); + test_witness_independence(&prover_setup, &verifier_setup); + info!(" PASS"); +} diff --git a/src/backends/arkworks/ark_cache.rs b/src/backends/arkworks/ark_cache.rs index 1cfd8ac..f7b2331 100644 --- a/src/backends/arkworks/ark_cache.rs +++ b/src/backends/arkworks/ark_cache.rs @@ -44,7 +44,7 @@ static CACHE: RwLock>> = RwLock::new(None); /// use dory_pcs::backends::arkworks::{init_cache, BN254}; /// use dory_pcs::setup::ProverSetup; /// -/// let setup = ProverSetup::<BN254>::new(&mut rng, max_log_n); +/// let setup = ProverSetup::<BN254>::new(max_log_n); /// init_cache(&setup.g1_vec, &setup.g2_vec); /// ``` pub fn init_cache(g1_vec: &[ArkG1], g2_vec: &[ArkG2]) { diff --git a/src/backends/arkworks/ark_field.rs b/src/backends/arkworks/ark_field.rs index 7fdd57a..a0859d7 100644 --- a/src/backends/arkworks/ark_field.rs +++ b/src/backends/arkworks/ark_field.rs @@ -7,7 +7,6 @@ use ark_bn254::Fr; use ark_ff::{Field as ArkField, UniformRand, Zero as ArkZero}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::ops::{Add, Mul, Neg, Sub}; -use rand_core::RngCore; #[derive(Clone, Copy, PartialEq, Eq, Debug, CanonicalSerialize, CanonicalDeserialize)] pub struct ArkFr(pub Fr); @@ -41,8 +40,8 @@ impl Field for ArkFr { ArkField::inverse(&self.0).map(ArkFr) } - fn random<R: RngCore>(rng: &mut R) -> Self { - ArkFr(Fr::rand(rng)) + fn random() -> Self { + ArkFr(Fr::rand(&mut rand_core::OsRng)) } fn from_u64(val: u64) -> Self { diff --git a/src/backends/arkworks/ark_group.rs b/src/backends/arkworks/ark_group.rs index 812738a..00658b6 100644
--- a/src/backends/arkworks/ark_group.rs +++ b/src/backends/arkworks/ark_group.rs @@ -11,7 +11,6 @@ use ark_ec::{CurveGroup, VariableBaseMSM}; use ark_ff::{Field as ArkField, One, PrimeField, UniformRand, Zero as ArkZero}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::ops::{Add, Mul, Neg, Sub}; -use rand_core::RngCore; #[derive(Default, Clone, Copy, PartialEq, Eq, Debug, CanonicalSerialize, CanonicalDeserialize)] pub struct ArkG1(pub G1Projective); @@ -41,8 +40,8 @@ impl Group for ArkG1 { ArkG1(self.0 * k.0) } - fn random(rng: &mut R) -> Self { - ArkG1(G1Projective::rand(rng)) + fn random() -> Self { + ArkG1(G1Projective::rand(&mut rand_core::OsRng)) } } @@ -114,8 +113,8 @@ impl Group for ArkG2 { ArkG2(self.0 * k.0) } - fn random(rng: &mut R) -> Self { - ArkG2(G2Projective::rand(rng)) + fn random() -> Self { + ArkG2(G2Projective::rand(&mut rand_core::OsRng)) } } @@ -187,8 +186,8 @@ impl Group for ArkGT { ArkGT(self.0.pow(k.0.into_bigint())) } - fn random(rng: &mut R) -> Self { - ArkGT(Fq12::rand(rng)) + fn random() -> Self { + ArkGT(Fq12::rand(&mut rand_core::OsRng)) } } diff --git a/src/backends/arkworks/ark_pairing.rs b/src/backends/arkworks/ark_pairing.rs index 64b1762..c8278e5 100644 --- a/src/backends/arkworks/ark_pairing.rs +++ b/src/backends/arkworks/ark_pairing.rs @@ -190,10 +190,6 @@ mod pairing_helpers { #[cfg(feature = "cache")] let cache = crate::backends::arkworks::ark_cache::get_prepared_cache(); - #[cfg(not(feature = "cache"))] - let cache: Option< - std::sync::Arc, - > = None; let combined = ps .par_chunks(chunk_size) @@ -210,6 +206,7 @@ mod pairing_helpers { }) .collect(); + #[cfg(feature = "cache")] let qs_prep: Vec<::G2Prepared> = if let Some(ref c) = cache { c.g2_prepared[start_idx..end_idx].to_vec() } else { @@ -222,6 +219,17 @@ mod pairing_helpers { }) .collect() }; + #[cfg(not(feature = "cache"))] + let qs_prep: Vec<::G2Prepared> = { + use ark_bn254::G2Affine; + qs[start_idx..end_idx] + .iter() + .map(|q| { + let 
affine: G2Affine = q.0.into(); + affine.into() + }) + .collect() + }; Bn254::multi_miller_loop(ps_prep, qs_prep) }) @@ -246,10 +254,6 @@ mod pairing_helpers { #[cfg(feature = "cache")] let cache = crate::backends::arkworks::ark_cache::get_prepared_cache(); - #[cfg(not(feature = "cache"))] - let cache: Option< - std::sync::Arc, - > = None; let combined = qs .par_chunks(chunk_size) @@ -266,6 +270,7 @@ mod pairing_helpers { }) .collect(); + #[cfg(feature = "cache")] let ps_prep: Vec<::G1Prepared> = if let Some(ref c) = cache { c.g1_prepared[start_idx..end_idx].to_vec() } else { @@ -278,6 +283,17 @@ mod pairing_helpers { }) .collect() }; + #[cfg(not(feature = "cache"))] + let ps_prep: Vec<::G1Prepared> = { + use ark_bn254::G1Affine; + ps[start_idx..end_idx] + .iter() + .map(|p| { + let affine: G1Affine = p.0.into(); + affine.into() + }) + .collect() + }; Bn254::multi_miller_loop(ps_prep, qs_prep) }) diff --git a/src/backends/arkworks/ark_poly.rs b/src/backends/arkworks/ark_poly.rs index f8f9a8b..875de23 100644 --- a/src/backends/arkworks/ark_poly.rs +++ b/src/backends/arkworks/ark_poly.rs @@ -4,6 +4,7 @@ use super::ark_field::ArkFr; use crate::error::DoryError; +use crate::mode::Mode; use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; use crate::primitives::poly::{MultilinearLagrange, Polynomial}; use crate::setup::ProverSetup; @@ -53,16 +54,19 @@ impl Polynomial for ArkworksPolynomial { } #[tracing::instrument(skip_all, name = "ArkworksPolynomial::commit", fields(nu, sigma, num_rows = 1 << nu, num_cols = 1 << sigma))] - fn commit( + #[allow(clippy::type_complexity)] + fn commit( &self, nu: usize, sigma: usize, setup: &ProverSetup, - ) -> Result<(E::GT, Vec), DoryError> + ) -> Result<(E::GT, Vec, ArkFr), DoryError> where E: PairingCurve, + Mo: Mode, M1: DoryRoutines, E::G1: Group, + E::GT: Group, { let expected_len = 1 << (nu + sigma); if self.coefficients.len() != expected_len { @@ -74,24 +78,23 @@ impl Polynomial for ArkworksPolynomial { 
let num_rows = 1 << nu; let num_cols = 1 << sigma; + let g1 = &setup.g1_vec[..num_cols]; - // Tier 1: Compute row commitments - let mut row_commitments = Vec::with_capacity(num_rows); - for i in 0..num_rows { - let row_start = i * num_cols; - let row_end = row_start + num_cols; - let row = &self.coefficients[row_start..row_end]; + // Row commitments are always unblinded (internal to prover) + let row_commitments: Vec = (0..num_rows) + .map(|i| { + let row = &self.coefficients[i * num_cols..(i + 1) * num_cols]; + M1::msm(g1, row) + }) + .collect(); - let g1_bases = &setup.g1_vec[..num_cols]; - let row_commit = M1::msm(g1_bases, row); - row_commitments.push(row_commit); - } + let tier_2 = E::multi_pair_g2_setup(&row_commitments, &setup.g2_vec[..num_rows]); - // Tier 2: Compute final commitment via multi-pairing (g2_bases from setup) - let g2_bases = &setup.g2_vec[..num_rows]; - let commitment = E::multi_pair_g2_setup(&row_commitments, g2_bases); + // Single GT-level blind: commitment += r_d1 * HT + let r_d1: ArkFr = Mo::sample(); + let commitment = Mo::mask(tier_2, &setup.ht, &r_d1); - Ok((commitment, row_commitments)) + Ok((commitment, row_commitments, r_d1)) } } diff --git a/src/backends/arkworks/ark_serde.rs b/src/backends/arkworks/ark_serde.rs index 8bfd364..6d43998 100644 --- a/src/backends/arkworks/ark_serde.rs +++ b/src/backends/arkworks/ark_serde.rs @@ -243,6 +243,72 @@ impl DoryDeserialize for ArkGT { // Arkworks-specific Dory proof type use super::ArkDoryProof; +#[cfg(feature = "zk")] +mod zk_serde { + use ark_serialize::{ + CanonicalDeserialize as De, CanonicalSerialize as Ser, Compress, SerializationError, Valid, + Validate, + }; + use std::io::{Read, Write}; + + pub(super) fn ser_opt( + v: &Option, + w: &mut W, + c: Compress, + ) -> Result<(), SerializationError> { + match v { + Some(val) => { + Ser::serialize_with_mode(&1u8, &mut *w, c)?; + Ser::serialize_with_mode(val, w, c) + } + None => Ser::serialize_with_mode(&0u8, w, c), + } + } + + pub(super) fn 
de_opt( + r: &mut R, + c: Compress, + v: Validate, + ) -> Result, SerializationError> { + match ::deserialize_with_mode(&mut *r, c, v)? { + 0 => Ok(None), + 1 => Ok(Some(T::deserialize_with_mode(r, c, v)?)), + _ => Err(SerializationError::InvalidData), + } + } + + pub(super) fn size_opt(v: &Option, c: Compress) -> usize { + 1 + v.as_ref().map_or(0, |val| Ser::serialized_size(val, c)) + } + + macro_rules! impl_serde { + ($ty:ty, [$($field:ident),+]) => { + impl Valid for $ty { fn check(&self) -> Result<(), SerializationError> { Ok(()) } } + impl Ser for $ty { + fn serialize_with_mode(&self, mut w: W, c: Compress) -> Result<(), SerializationError> { + $(Ser::serialize_with_mode(&self.$field, &mut w, c)?;)+ + Ok(()) + } + fn serialized_size(&self, c: Compress) -> usize { + 0 $(+ Ser::serialized_size(&self.$field, c))+ + } + } + impl De for $ty { + fn deserialize_with_mode(mut r: R, c: Compress, v: Validate) -> Result { + Ok(Self { $($field: De::deserialize_with_mode(&mut r, c, v)?),+ }) + } + } + }; + } + + use super::{ArkFr, ArkG1, ArkG2, ArkGT}; + use crate::messages::{ScalarProductProof, Sigma1Proof, Sigma2Proof}; + + impl_serde!(Sigma1Proof, [a1, a2, z1, z2, z3]); + impl_serde!(Sigma2Proof, [a, z1, z2]); + impl_serde!(ScalarProductProof, [p1, p2, q, r, e1, e2, r1, r2, r3]); +} + impl ArkValid for ArkDoryProof { fn check(&self) -> Result<(), ArkSerializationError> { Ok(()) @@ -292,6 +358,15 @@ impl CanonicalSerialize for ArkDoryProof { CanonicalSerialize::serialize_with_mode(&(self.nu as u32), &mut writer, compress)?; CanonicalSerialize::serialize_with_mode(&(self.sigma as u32), &mut writer, compress)?; + #[cfg(feature = "zk")] + { + zk_serde::ser_opt(&self.e2, &mut writer, compress)?; + zk_serde::ser_opt(&self.y_com, &mut writer, compress)?; + zk_serde::ser_opt(&self.sigma1_proof, &mut writer, compress)?; + zk_serde::ser_opt(&self.sigma2_proof, &mut writer, compress)?; + zk_serde::ser_opt(&self.scalar_product_proof, &mut writer, compress)?; + } + Ok(()) } @@ 
-333,6 +408,15 @@ impl CanonicalSerialize for ArkDoryProof { // nu and sigma size += 8; // 2 * u32 + #[cfg(feature = "zk")] + { + size += zk_serde::size_opt(&self.e2, compress); + size += zk_serde::size_opt(&self.y_com, compress); + size += zk_serde::size_opt(&self.sigma1_proof, compress); + size += zk_serde::size_opt(&self.sigma2_proof, compress); + size += zk_serde::size_opt(&self.scalar_product_proof, compress); + } + size } } @@ -424,6 +508,16 @@ impl CanonicalDeserialize for ArkDoryProof { final_message, nu, sigma, + #[cfg(feature = "zk")] + e2: zk_serde::de_opt(&mut reader, compress, validate)?, + #[cfg(feature = "zk")] + y_com: zk_serde::de_opt(&mut reader, compress, validate)?, + #[cfg(feature = "zk")] + sigma1_proof: zk_serde::de_opt(&mut reader, compress, validate)?, + #[cfg(feature = "zk")] + sigma2_proof: zk_serde::de_opt(&mut reader, compress, validate)?, + #[cfg(feature = "zk")] + scalar_product_proof: zk_serde::de_opt(&mut reader, compress, validate)?, }) } } diff --git a/src/backends/arkworks/ark_setup.rs b/src/backends/arkworks/ark_setup.rs index ece2669..77aebbe 100644 --- a/src/backends/arkworks/ark_setup.rs +++ b/src/backends/arkworks/ark_setup.rs @@ -5,7 +5,6 @@ //! are in the `ark_serde` module. use crate::setup::{ProverSetup, VerifierSetup}; -use rand_core::RngCore; use std::ops::{Deref, DerefMut}; use super::BN254; @@ -33,16 +32,15 @@ impl ArkworksProverSetup { /// supporting polynomials up to 2^max_log_n coefficients arranged as n×n matrices. 
/// /// # Parameters - /// - `rng`: Random number generator /// - `max_log_n`: Maximum log₂ of polynomial size (for n×n matrix with n² = 2^max_log_n) - pub fn new(rng: &mut R, max_log_n: usize) -> Self { - Self(ProverSetup::new(rng, max_log_n)) + pub fn new(max_log_n: usize) -> Self { + Self(ProverSetup::new(max_log_n)) } /// Load prover setup from disk cache, or generate and cache if not available #[cfg(all(feature = "disk-persistence", not(target_arch = "wasm32")))] - pub fn new_from_urs(rng: &mut R, max_log_n: usize) -> Self { - let (prover_setup, _) = crate::setup::(rng, max_log_n); + pub fn new_from_urs(max_log_n: usize) -> Self { + let (prover_setup, _) = crate::setup::(max_log_n); Self(prover_setup) } diff --git a/src/backends/arkworks/blake2b_transcript.rs b/src/backends/arkworks/blake2b_transcript.rs index 1b1f497..63cd149 100644 --- a/src/backends/arkworks/blake2b_transcript.rs +++ b/src/backends/arkworks/blake2b_transcript.rs @@ -47,13 +47,6 @@ impl Blake2bTranscript { self.append_bytes_impl(label, &bytes); } - pub fn append_serde_impl(&mut self, label: &[u8], s: &S) { - match bincode::serialize(s) { - Ok(bytes) => self.append_bytes_impl(label, &bytes), - Err(_) => panic!("Bincode serialization failed"), - } - } - pub fn challenge_scalar_impl(&mut self, label: &[u8]) -> F { self.hasher.update(label); diff --git a/src/evaluation_proof.rs b/src/evaluation_proof.rs index 3fe1332..f68366a 100644 --- a/src/evaluation_proof.rs +++ b/src/evaluation_proof.rs @@ -27,6 +27,7 @@ use crate::error::DoryError; use crate::messages::VMVMessage; +use crate::mode::Mode; use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; use crate::primitives::poly::MultilinearLagrange; use crate::primitives::transcript::Transcript; @@ -55,6 +56,8 @@ use crate::setup::{ProverSetup, VerifierSetup}; /// - `polynomial`: Polynomial to prove evaluation for /// - `point`: Evaluation point (length nu + sigma) /// - `row_commitments`: Optional precomputed row 
commitments from polynomial.commit() +/// - `commit_blind`: GT-level blinding scalar from `commit()`. Ignored when +/// `row_commitments` is `None` (the blind is computed internally in that case). /// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma) /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup @@ -70,16 +73,18 @@ use crate::setup::{ProverSetup, VerifierSetup}; /// Supports both square (nu = sigma) and non-square (nu < sigma) matrices. /// For non-square matrices, vectors are automatically padded to length 2^sigma. #[allow(clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "create_evaluation_proof")] -pub fn create_evaluation_proof( +pub fn create_evaluation_proof( polynomial: &P, point: &[F], row_commitments: Option>, + commit_blind: F, nu: usize, sigma: usize, setup: &ProverSetup, transcript: &mut T, -) -> Result, DoryError> +) -> Result<(DoryProof, Option), DoryError> where F: Field, E: PairingCurve, @@ -90,6 +95,7 @@ where M2: DoryRoutines, T: Transcript, P: MultilinearLagrange, + Mo: Mode, { if point.len() != nu + sigma { return Err(DoryError::InvalidPointDimension { @@ -106,23 +112,15 @@ where }); } - let row_commitments = if let Some(rc) = row_commitments { - rc - } else { - let (_commitment, rc) = polynomial.commit::(nu, sigma, setup)?; - rc + let (row_commitments, commit_blind) = match row_commitments { + Some(rc) => (rc, commit_blind), + None => { + let (_, rc, blind) = polynomial.commit::(nu, sigma, setup)?; + (rc, blind) + } }; - let _span_eval_vecs = tracing::span!( - tracing::Level::DEBUG, - "compute_evaluation_vectors", - nu, - sigma - ) - .entered(); let (left_vec, right_vec) = polynomial.compute_evaluation_vectors(point, nu, sigma); - drop(_span_eval_vecs); - let v_vec = polynomial.vector_matrix_product(&left_vec, nu, sigma); let mut padded_row_commitments = row_commitments.clone(); @@ -130,44 +128,50 @@ where padded_row_commitments.resize(1 << sigma, 
E::G1::identity()); } - let _span_vmv = - tracing::span!(tracing::Level::DEBUG, "compute_vmv_message", nu, sigma).entered(); + // Sample VMV blinds (zero in Transparent, random in ZK) + let (r_c, r_d2, r_e1, r_e2): (F, F, F, F) = + (Mo::sample(), Mo::sample(), Mo::sample(), Mo::sample()); + + let g2_fin = &setup.g2_vec[0]; - // C = e(⟨row_commitments, v_vec⟩, h₂) + // C = e(⟨row_commitments, v_vec⟩, Γ2,fin) + r_c·HT let t_vec_v = M1::msm(&padded_row_commitments, &v_vec); - let c = E::pair(&t_vec_v, &setup.h2); + let c = Mo::mask(E::pair(&t_vec_v, g2_fin), &setup.ht, &r_c); - // D₂ = e(⟨Γ₁[sigma], v_vec⟩, h₂) - let g1_bases_at_sigma = &setup.g1_vec[..1 << sigma]; - let gamma1_v = M1::msm(g1_bases_at_sigma, &v_vec); - let d2 = E::pair(&gamma1_v, &setup.h2); + // D₂ = e(⟨Γ₁[sigma], v_vec⟩, Γ2,fin) + r_d2·HT + let d2 = Mo::mask( + E::pair(&M1::msm(&setup.g1_vec[..1 << sigma], &v_vec), g2_fin), + &setup.ht, + &r_d2, + ); - // E₁ = ⟨row_commitments, left_vec⟩ - let e1 = M1::msm(&row_commitments, &left_vec); + // E₁ = ⟨row_commitments, left_vec⟩ + r_e1·H₁ + let e1 = Mo::mask(M1::msm(&row_commitments, &left_vec), &setup.h1, &r_e1); let vmv_message = VMVMessage { c, d2, e1 }; - drop(_span_vmv); - let _span_transcript = tracing::span!(tracing::Level::DEBUG, "vmv_transcript").entered(); transcript.append_serde(b"vmv_c", &vmv_message.c); transcript.append_serde(b"vmv_d2", &vmv_message.d2); transcript.append_serde(b"vmv_e1", &vmv_message.e1); - drop(_span_transcript); - let _span_init = tracing::span!( - tracing::Level::DEBUG, - "fixed_base_vector_scalar_mul_h2", - nu, - sigma - ) - .entered(); + #[cfg(feature = "zk")] + let (zk_e2, zk_y_com, zk_sigma1, zk_sigma2, zk_r_y) = if Mo::BLINDING { + use crate::reduce_and_fold::{generate_sigma1_proof, generate_sigma2_proof}; + let y = polynomial.evaluate(point); + let r_y: F = Mo::sample(); + let e2 = Mo::mask(g2_fin.scale(&y), &setup.h2, &r_e2); + let y_com = setup.g1_vec[0].scale(&y) + setup.h1.scale(&r_y); + 
transcript.append_serde(b"vmv_e2", &e2); + transcript.append_serde(b"vmv_y_com", &y_com); + let s1 = generate_sigma1_proof::(&y, &r_e2, &r_y, setup, transcript); + let s2 = generate_sigma2_proof::(&r_e1, &-r_d2, setup, transcript); + (Some(e2), Some(y_com), Some(s1), Some(s2), Some(r_y)) + } else { + (None, None, None, None, None) + }; // v₂ = v_vec · Γ₂,fin (each scalar scales g_fin) - let v2 = { - let _span = - tracing::span!(tracing::Level::DEBUG, "fixed_base_vector_scalar_mul_h2").entered(); - M2::fixed_base_vector_scalar_mul(&setup.h2, &v_vec) - }; + let v2 = M2::fixed_base_vector_scalar_mul(g2_fin, &v_vec); let mut padded_right_vec = right_vec.clone(); let mut padded_left_vec = left_vec.clone(); @@ -176,7 +180,7 @@ where padded_left_vec.resize(1 << sigma, F::zero()); } - let mut prover_state = DoryProverState::new( + let mut prover_state: DoryProverState<'_, E, Mo> = DoryProverState::new( padded_row_commitments, // v1 = T_vec_prime (row commitments, padded) v2, // v2 = v_vec · g_fin Some(v_vec), // v2_scalars for first-round MSM+pair optimization @@ -184,7 +188,7 @@ where padded_left_vec, // s2 = left_vec (padded) setup, ); - drop(_span_init); + prover_state.set_initial_blinds(commit_blind, r_c, r_d2, r_e1, r_e2); let num_rounds = nu.max(sigma); let mut first_messages = Vec::with_capacity(num_rounds); @@ -202,7 +206,6 @@ where let beta = transcript.challenge_scalar(b"beta"); prover_state.apply_first_challenge::(&beta); - first_messages.push(first_msg); let second_msg = prover_state.compute_second_message::(); @@ -216,26 +219,46 @@ where let alpha = transcript.challenge_scalar(b"alpha"); prover_state.apply_second_challenge::(&alpha); - second_messages.push(second_msg); } let gamma = transcript.challenge_scalar(b"gamma"); + + #[cfg(feature = "zk")] + let scalar_product_proof = if Mo::BLINDING { + Some(prover_state.scalar_product_proof(transcript)) + } else { + None + }; + let final_message = prover_state.compute_final_message::(&gamma); 
transcript.append_serde(b"final_e1", &final_message.e1); transcript.append_serde(b"final_e2", &final_message.e2); - let _d = transcript.challenge_scalar(b"d"); - Ok(DoryProof { + let proof = DoryProof { vmv_message, first_messages, second_messages, final_message, nu, sigma, - }) + #[cfg(feature = "zk")] + e2: zk_e2, + #[cfg(feature = "zk")] + y_com: zk_y_com, + #[cfg(feature = "zk")] + sigma1_proof: zk_sigma1, + #[cfg(feature = "zk")] + sigma2_proof: zk_sigma2, + #[cfg(feature = "zk")] + scalar_product_proof, + }; + #[cfg(feature = "zk")] + return Ok((proof, zk_r_y)); + #[cfg(not(feature = "zk"))] + Ok((proof, None)) } /// Verify an evaluation proof @@ -245,12 +268,11 @@ where /// /// # Algorithm /// 1. Extract VMV message from proof -/// 2. Check sigma protocol 2: d2 = e(e1, h2) -/// 3. Compute e2 = h2 * evaluation -/// 4. Initialize verifier state with commitment and VMV message -/// 5. Run max(nu, sigma) rounds of reduce-and-fold verification (with automatic padding) -/// 6. Derive gamma and d challenges -/// 7. Verify final scalar product message +/// 2. Compute e2 = Γ2,fin * evaluation (or use proof.e2 in ZK mode) +/// 3. Initialize verifier state with commitment and VMV message +/// 4. Run max(nu, sigma) rounds of reduce-and-fold verification (with automatic padding) +/// 5. Derive gamma and d challenges +/// 6. Verify final scalar product message /// /// # Parameters /// - `commitment`: Polynomial commitment (in GT) - can be a homomorphically combined commitment @@ -305,27 +327,56 @@ where transcript.append_serde(b"vmv_d2", &vmv_message.d2); transcript.append_serde(b"vmv_e1", &vmv_message.e1); - // # NOTE: The VMV check `vmv_message.d2 == e(vmv_message.e1, setup.h2)` is deferred - // to verify_final where it's batched with other pairings using random linear - // combination with challenge `d`. See verify_final documentation for details. 
- - let e2 = setup.h2.scale(&evaluation); + #[cfg(feature = "zk")] + let (e2, is_zk) = match (&proof.e2, &proof.y_com) { + (Some(pe2), Some(yc)) => { + use crate::reduce_and_fold::{verify_sigma1_proof, verify_sigma2_proof}; + transcript.append_serde(b"vmv_e2", pe2); + transcript.append_serde(b"vmv_y_com", yc); + match (&proof.sigma1_proof, &proof.sigma2_proof) { + (Some(s1), Some(s2)) => { + verify_sigma1_proof::(pe2, yc, s1, &setup, transcript)?; + verify_sigma2_proof::( + &vmv_message.e1, + &vmv_message.d2, + s2, + &setup, + transcript, + )?; + } + _ => return Err(DoryError::InvalidProof), + } + (*pe2, true) + } + (None, None) => (setup.g2_0.scale(&evaluation), false), + _ => return Err(DoryError::InvalidProof), + }; + #[cfg(not(feature = "zk"))] + let (e2, _is_zk) = (setup.g2_0.scale(&evaluation), false); // Folded-scalar accumulation with per-round coordinates. // num_rounds = sigma (we fold column dimensions). let num_rounds = sigma; + + // Bounds check: reject proofs with mismatched message counts or that exceed setup capacity. + let max_rounds = setup.max_log_n / 2; + if num_rounds > max_rounds + || proof.first_messages.len() != num_rounds + || proof.second_messages.len() != num_rounds + { + return Err(DoryError::InvalidProof); + } + // s1 (right/prover): the σ column coordinates in natural order (LSB→MSB). // No padding here: the verifier folds across the σ column dimensions. // With MSB-first folding, these coordinates are only consumed after the first σ−ν rounds, // which correspond to the padded MSB dimensions on the left tensor, matching the prover. - let col_coords = &point[..sigma]; - let s1_coords: Vec = col_coords.to_vec(); + let s1_coords: Vec = point[..sigma].to_vec(); // s2 (left/prover): the ν row coordinates in natural order, followed by zeros for the extra // MSB dimensions. Conceptually this is s ⊗ [1,0]^(σ−ν): under MSB-first folds, the first // σ−ν rounds multiply s2 by α⁻¹ while contributing no right halves (since those entries are 0). 
let mut s2_coords: Vec = vec![F::zero(); sigma]; - let row_coords = &point[sigma..sigma + nu]; - s2_coords[..nu].copy_from_slice(&row_coords[..nu]); + s2_coords[..nu].copy_from_slice(&point[sigma..sigma + nu]); let mut verifier_state = DoryVerifierState::new( vmv_message.c, // c from VMV message @@ -359,15 +410,41 @@ where transcript.append_serde(b"e2_minus", &second_msg.e2_minus); let alpha = transcript.challenge_scalar(b"alpha"); - verifier_state.process_round(first_msg, second_msg, &alpha, &beta); + verifier_state.process_round(first_msg, second_msg, &alpha, &beta)?; } let gamma = transcript.challenge_scalar(b"gamma"); + // In ZK mode: absorb scalar product proof into transcript before deriving d. + #[cfg(feature = "zk")] + let zk_data = if is_zk { + if let Some(ref sp) = proof.scalar_product_proof { + for (l, v) in [ + (b"sigma_p1" as &[u8], &sp.p1), + (b"sigma_p2", &sp.p2), + (b"sigma_q", &sp.q), + (b"sigma_r", &sp.r), + ] { + transcript.append_serde(l, v); + } + let c = transcript.challenge_scalar(b"sigma_c"); + Some((sp, c)) + } else { + return Err(DoryError::InvalidProof); + } + } else { + None + }; + + // Shared: absorb final message and derive d. transcript.append_serde(b"final_e1", &proof.final_message.e1); transcript.append_serde(b"final_e2", &proof.final_message.e2); - let d = transcript.challenge_scalar(b"d"); - verifier_state.verify_final(&proof.final_message, &gamma, &d) + #[cfg(feature = "zk")] + let zk = zk_data.as_ref().map(|(sp, c)| (*sp, c)); + #[cfg(not(feature = "zk"))] + let zk: Option<(&crate::messages::ScalarProductProof<_, _, _, _>, _)> = None; + + verifier_state.verify_final(&proof.final_message, &gamma, &d, zk) } diff --git a/src/lib.rs b/src/lib.rs index ae90490..7f2e8f5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,10 +10,11 @@ //! //! - **Transparent setup** with automatic disk persistence //! - **Logarithmic proof size**: O(log n) group elements -//! - **Logarithmic verification**: O(log n) GT exps and 5 pairings +//! 
- **Logarithmic verification**: O(log n) GT exps and 1 multi-pairing //! - **Performance optimizations**: Optional prepared point caching (~20-30% speedup) and parallelization //! - **Flexible matrix layouts**: Supports both square and non-square matrices (nu ≤ sigma) //! - **Homomorphic properties**: Com(r₁·P₁ + r₂·P₂ + ... + rₙ·Pₙ) = r₁·Com(P₁) + r₂·Com(P₂) + ... + rₙ·Com(Pₙ) +//! - **Zero-knowledge mode**: Optional hiding proofs via the `zk` feature flag //! //! ## Structure //! @@ -39,20 +40,20 @@ //! ### Basic Example //! //! ```ignore -//! use dory_pcs::{setup, prove, verify}; +//! use dory_pcs::{setup, prove, verify, Transparent}; //! use dory_pcs::backends::arkworks::{BN254, G1Routines, G2Routines, Blake2bTranscript}; //! //! // 1. Generate setup (automatically loads from/saves to disk) -//! let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); +//! let (prover_setup, verifier_setup) = setup::(max_log_n); //! //! // 2. Commit to polynomial -//! let (tier_2_commitment, tier_1_commitments) = polynomial -//! .commit::(nu, sigma, &prover_setup)?; +//! let (tier_2_commitment, tier_1_commitments, commit_blind) = polynomial +//! .commit::(nu, sigma, &prover_setup)?; //! //! // 3. Generate evaluation proof //! let mut prover_transcript = Blake2bTranscript::new(b"domain-separation"); -//! let proof = prove::<_, BN254, G1Routines, G2Routines, _, _>( -//! &polynomial, &point, tier_1_commitments, nu, sigma, +//! let proof = prove::<_, BN254, G1Routines, G2Routines, _, _, Transparent>( +//! &polynomial, &point, tier_1_commitments, commit_blind, nu, sigma, //! &prover_setup, &mut prover_transcript //! )?; //! @@ -70,7 +71,7 @@ //! ```ignore //! use dory_pcs::backends::arkworks::init_cache; //! -//! let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); +//! let (prover_setup, verifier_setup) = setup::(max_log_n); //! init_cache(&prover_setup.g1_vec, &prover_setup.g2_vec); //! 
// Subsequent operations will automatically use cached prepared points //! ``` @@ -81,11 +82,14 @@ //! - `basic_e2e.rs` - Standard square matrix workflow //! - `homomorphic.rs` - Homomorphic combination of multiple polynomials //! - `non_square.rs` - Non-square matrix layout (nu < sigma) +//! - `zk_e2e.rs` - Zero-knowledge proof workflow (requires `zk` feature) +//! - `zk_statistical.rs` - Chi-squared uniformity and witness-independence tests (requires `zk` feature) //! //! ## Feature Flags //! //! - `backends` - Enable concrete backends (currently Arkworks BN254, includes `disk-persistence`) //! - `cache` - Enable prepared point caching (~20-30% speedup, requires `parallel`) +//! - `zk` - Enable zero-knowledge mode with hiding commitments and proofs //! - `parallel` - Enable Rayon parallelization for MSMs and pairings //! - `disk-persistence` - Enable automatic setup caching to disk @@ -93,6 +97,7 @@ pub mod error; pub mod evaluation_proof; pub mod messages; +pub mod mode; pub mod primitives; pub mod proof; pub mod reduce_and_fold; @@ -103,7 +108,14 @@ pub mod backends; pub use error::DoryError; pub use evaluation_proof::create_evaluation_proof; -pub use messages::{FirstReduceMessage, ScalarProductMessage, SecondReduceMessage, VMVMessage}; +pub use messages::{ + FirstReduceMessage, ScalarProductMessage, ScalarProductProof, SecondReduceMessage, VMVMessage, +}; +#[cfg(feature = "zk")] +pub use messages::{Sigma1Proof, Sigma2Proof}; +#[cfg(feature = "zk")] +pub use mode::ZK; +pub use mode::{Mode, Transparent}; use primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; pub use primitives::poly::{MultilinearLagrange, Polynomial}; use primitives::serialization::{DoryDeserialize, DorySerialize}; @@ -125,7 +137,6 @@ pub use setup::{ProverSetup, VerifierSetup}; /// - Windows: `{FOLDERID_LocalAppData}\dory\dory_{max_log_n}.urs` /// /// # Parameters -/// - `rng`: Random number generator for setup generation (used only if not found on disk) /// - `max_log_n`: 
Maximum log₂ of polynomial size /// /// # Returns @@ -137,10 +148,7 @@ pub use setup::{ProverSetup, VerifierSetup}; /// /// # Panics /// Panics if the setup file exists on disk but is corrupted or cannot be deserialized. -pub fn setup( - rng: &mut R, - max_log_n: usize, -) -> (ProverSetup, VerifierSetup) +pub fn setup(max_log_n: usize) -> (ProverSetup, VerifierSetup) where ProverSetup: DorySerialize + DoryDeserialize, VerifierSetup: DorySerialize + DoryDeserialize, @@ -165,7 +173,7 @@ where "Setup not found on disk, generating new setup for max_log_n={}", max_log_n ); - let prover_setup = ProverSetup::new(rng, max_log_n); + let prover_setup = ProverSetup::new(max_log_n); let verifier_setup = prover_setup.to_verifier_setup(); // Save to disk @@ -178,7 +186,7 @@ where { tracing::info!("Generating new setup for max_log_n={}", max_log_n); - let prover_setup = ProverSetup::new(rng, max_log_n); + let prover_setup = ProverSetup::new(max_log_n); let verifier_setup = prover_setup.to_verifier_setup(); (prover_setup, verifier_setup) @@ -195,7 +203,6 @@ where /// or when you suspect the saved setup file is corrupted). /// /// # Parameters -/// - `rng`: Random number generator for setup generation /// - `max_log_n`: Maximum log₂ of polynomial size /// /// # Returns @@ -204,17 +211,14 @@ where /// # Availability /// This function is only available when the `disk-persistence` feature is enabled. 
#[cfg(all(feature = "disk-persistence", not(target_arch = "wasm32")))] -pub fn generate_urs( - rng: &mut R, - max_log_n: usize, -) -> (ProverSetup, VerifierSetup) +pub fn generate_urs(max_log_n: usize) -> (ProverSetup, VerifierSetup) where ProverSetup: DorySerialize + DoryDeserialize, VerifierSetup: DorySerialize + DoryDeserialize, { tracing::info!("Force-generating new setup for max_log_n={}", max_log_n); - let prover_setup = ProverSetup::new(rng, max_log_n); + let prover_setup = ProverSetup::new(max_log_n); let verifier_setup = prover_setup.to_verifier_setup(); // Overwrites existing @@ -229,14 +233,15 @@ where /// tier-1 commitments (row commitments). /// /// # Workflow -/// 1. Call `polynomial.commit(nu, sigma, setup)` to get `(tier_2, row_commitments)` -/// 2. Call this function with the `row_commitments` to create the proof +/// 1. Call `polynomial.commit(nu, sigma, setup)` to get `(tier_2, row_commitments, commit_blind)` +/// 2. Call this function with the `row_commitments` and `commit_blind` to create the proof /// 3. 
Use `tier_2` for verification via the `verify()` function /// /// # Parameters /// - `polynomial`: Polynomial implementing MultilinearLagrange trait /// - `point`: Evaluation point (length must equal nu + sigma) /// - `row_commitments`: Tier-1 commitments (row commitments in G1) from `polynomial.commit()` +/// - `commit_blind`: GT-level blinding scalar from `polynomial.commit()` (zero in Transparent mode) /// - `nu`: Log₂ of number of rows (constraint: nu ≤ sigma for non-square matrices) /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup @@ -261,16 +266,18 @@ where /// - Polynomial size doesn't match 2^(nu + sigma) /// - Number of row commitments doesn't match 2^nu #[allow(clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, name = "prove")] -pub fn prove( +pub fn prove( polynomial: &P, point: &[F], row_commitments: Vec, + commit_blind: F, nu: usize, sigma: usize, setup: &ProverSetup, transcript: &mut T, -) -> Result, DoryError> +) -> Result<(DoryProof, Option), DoryError> where F: Field, E: PairingCurve, @@ -281,12 +288,13 @@ where M2: DoryRoutines, P: MultilinearLagrange, T: primitives::transcript::Transcript, + Mo: Mode, { - // Create evaluation proof using row_commitments - evaluation_proof::create_evaluation_proof::( + evaluation_proof::create_evaluation_proof::( polynomial, point, Some(row_commitments), + commit_blind, nu, sigma, setup, diff --git a/src/messages.rs b/src/messages.rs index dd877ee..49ae1ea 100644 --- a/src/messages.rs +++ b/src/messages.rs @@ -64,3 +64,40 @@ pub struct ScalarProductMessage { /// E₂ - final G2 element pub e2: G2, } + +/// Σ-protocol 1: proves E2 and y_com commit to the same y. +#[cfg(feature = "zk")] +#[derive(Clone, Debug)] +#[allow(missing_docs)] +pub struct Sigma1Proof { + pub a1: G2, + pub a2: G1, + pub z1: F, + pub z2: F, + pub z3: F, +} + +/// Σ-protocol 2: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2). 
+#[cfg(feature = "zk")] +#[derive(Clone, Debug)] +#[allow(missing_docs)] +pub struct Sigma2Proof { + pub a: GT, + pub z1: F, + pub z2: F, +} + +/// ZK scalar product proof: proves (C, D1, D2) are consistent with blinded v1, v2. +#[derive(Clone, Debug)] +#[allow(missing_docs)] +pub struct ScalarProductProof { + pub p1: GT, + pub p2: GT, + pub q: GT, + pub r: GT, + pub e1: G1, + pub e2: G2, + pub r1: F, + pub r2: F, + pub r3: F, +} diff --git a/src/mode.rs b/src/mode.rs new file mode 100644 index 0000000..9ebe95a --- /dev/null +++ b/src/mode.rs @@ -0,0 +1,38 @@ +//! Mode trait for transparent vs zero-knowledge proofs. +use crate::primitives::arithmetic::{Field, Group}; + +/// Determines whether protocol messages are blinded (ZK) or unblinded (transparent). +pub trait Mode: 'static { + /// Whether this mode produces blinding values that callers must retain. + const BLINDING: bool; + /// Sample a blinding scalar: zero in Transparent mode, random in ZK mode. + fn sample() -> F; + /// Mask a group element: identity in Transparent mode, `value + base * blind` in ZK mode. + fn mask(value: G, base: &G, blind: &G::Scalar) -> G; +} + +/// Transparent mode: no blinding, non-hiding proofs. +pub struct Transparent; +impl Mode for Transparent { + const BLINDING: bool = false; + fn sample() -> F { + F::zero() + } + fn mask(value: G, _base: &G, _blind: &G::Scalar) -> G { + value + } +} + +/// Zero-knowledge mode: samples blinds from RNG for hiding proofs. 
+#[cfg(feature = "zk")] +pub struct ZK; +#[cfg(feature = "zk")] +impl Mode for ZK { + const BLINDING: bool = true; + fn sample() -> F { + F::random() + } + fn mask(value: G, base: &G, blind: &G::Scalar) -> G { + value + base.scale(blind) + } +} diff --git a/src/primitives/arithmetic.rs b/src/primitives/arithmetic.rs index 94dc54c..494682f 100644 --- a/src/primitives/arithmetic.rs +++ b/src/primitives/arithmetic.rs @@ -1,7 +1,6 @@ #![allow(missing_docs)] use super::{DoryDeserialize, DorySerialize}; -use rand_core::RngCore; pub trait Field: Sized @@ -30,7 +29,7 @@ pub trait Field: fn inv(self) -> Option; - fn random(rng: &mut R) -> Self; + fn random() -> Self; fn from_u64(val: u64) -> Self; fn from_i64(val: i64) -> Self; @@ -60,7 +59,7 @@ pub trait Group: fn neg(&self) -> Self; fn scale(&self, k: &Self::Scalar) -> Self; - fn random(rng: &mut R) -> Self; + fn random() -> Self; } pub trait PairingCurve: Clone { diff --git a/src/primitives/poly.rs b/src/primitives/poly.rs index 4ac5118..959025a 100644 --- a/src/primitives/poly.rs +++ b/src/primitives/poly.rs @@ -4,6 +4,7 @@ use crate::error::DoryError; use crate::setup::ProverSetup; use super::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; +use crate::mode::Mode; /// Trait for multilinear Lagrange polynomial operations pub trait MultilinearLagrange: Polynomial { @@ -56,38 +57,47 @@ pub trait Polynomial { /// Polynomial evaluation result fn evaluate(&self, point: &[F]) -> F; - /// Commit to polynomial using Dory's 2-tier (AFGHO) homomorphic commitment + /// Commit to polynomial using Dory's 2-tier (AFGHO) homomorphic commitment. /// /// The polynomial coefficients are arranged as a 2D matrix with 2^nu rows and 2^sigma columns. /// /// # Tier 1 (Row Commitments) /// For each row i: `row_commit[i] = MSM(g1_generators[0..2^sigma], row_coefficients[i])` /// + /// Row commitments are always unblinded (internal to the prover, never exposed). 
+ /// /// # Tier 2 (Final Commitment) /// `commitment = Σ e(row_commit[i], g2_generators[i])` for i in 0..2^nu /// + /// In ZK mode (`Mo = ZK`), the tier-2 commitment is blinded with a single GT-level blind: + /// `commitment += r_d1 * HT` where `r_d1` is a fresh random scalar and `HT = e(H₁, H₂)`. + /// /// # Parameters /// - `nu`: Log₂ of number of rows /// - `sigma`: Log₂ of number of columns /// - `setup`: Prover setup containing generators /// /// # Returns - /// `(commitment, row_commitments)` where: - /// - `commitment`: Final commitment in GT - /// - `row_commitments`: Intermediate row commitments in G1 (used in opening proof) + /// `(commitment, row_commitments, commit_blind)` where: + /// - `commitment`: Final commitment in GT (blinded in ZK mode) + /// - `row_commitments`: Intermediate unblinded row commitments in G1 (used in opening proof) + /// - `commit_blind`: GT-level blinding scalar (`r_d1`); zero in Transparent mode /// /// # Errors /// Returns error if coefficient length doesn't match 2^(nu + sigma) or if setup is insufficient. - fn commit( + #[allow(clippy::type_complexity)] + fn commit( &self, nu: usize, sigma: usize, setup: &ProverSetup, - ) -> Result<(E::GT, Vec), DoryError> + ) -> Result<(E::GT, Vec, F), DoryError> where E: PairingCurve, + Mo: Mode, M1: DoryRoutines, - E::G1: Group; + E::G1: Group, + E::GT: Group; } /// Compute multilinear Lagrange basis evaluations at a point diff --git a/src/proof.rs b/src/proof.rs index cffd36f..230ba5d 100644 --- a/src/proof.rs +++ b/src/proof.rs @@ -6,6 +6,7 @@ //! - Final scalar product message use crate::messages::*; +use crate::primitives::arithmetic::Group; /// A complete Dory evaluation proof /// @@ -16,7 +17,8 @@ use crate::messages::*; /// The proof includes the matrix dimensions (nu, sigma) used during proof generation, /// which the verifier uses to ensure consistency with the evaluation point. 
#[derive(Clone, Debug)] -pub struct DoryProof { +#[allow(missing_docs)] +pub struct DoryProof { /// Vector-Matrix-Vector message for PCS transformation pub vmv_message: VMVMessage, @@ -34,4 +36,15 @@ pub struct DoryProof { /// Log₂ of number of columns in the coefficient matrix pub sigma: usize, + + #[cfg(feature = "zk")] + pub e2: Option, + #[cfg(feature = "zk")] + pub y_com: Option, + #[cfg(feature = "zk")] + pub sigma1_proof: Option>, + #[cfg(feature = "zk")] + pub sigma2_proof: Option>, + #[cfg(feature = "zk")] + pub scalar_product_proof: Option>, } diff --git a/src/reduce_and_fold.rs b/src/reduce_and_fold.rs index 5e68a04..e9b0fcf 100644 --- a/src/reduce_and_fold.rs +++ b/src/reduce_and_fold.rs @@ -10,14 +10,21 @@ use crate::error::DoryError; use crate::messages::*; +use crate::mode::{Mode, Transparent}; use crate::primitives::arithmetic::{DoryRoutines, Field, Group, PairingCurve}; use crate::setup::{ProverSetup, VerifierSetup}; +use std::marker::PhantomData; + +#[cfg(feature = "zk")] +use crate::primitives::transcript::Transcript; + +type Scalar = <::G1 as Group>::Scalar; /// Prover state for the Dory opening protocol /// /// Maintains the current state of the prover during the interactive protocol. /// The state consists of vectors that get folded in each round. 
-pub struct DoryProverState<'a, E: PairingCurve> { +pub struct DoryProverState<'a, E: PairingCurve, M: Mode = Transparent> { /// Current v1 vector (G1 elements) v1: Vec, @@ -25,19 +32,34 @@ pub struct DoryProverState<'a, E: PairingCurve> { v2: Vec, /// For first round only: scalars used to construct v2 from fixed base h2 - v2_scalars: Option::Scalar>>, + v2_scalars: Option>>, /// Current s1 vector (scalars) - s1: Vec<::Scalar>, + s1: Vec>, /// Current s2 vector (scalars) - s2: Vec<::Scalar>, + s2: Vec>, /// Number of rounds remaining (log₂ of vector length) num_rounds: usize, /// Reference to prover setup setup: &'a ProverSetup, + + // ZK accumulated blinds (zero in Transparent mode) + r_c: Scalar, + r_d1: Scalar, + r_d2: Scalar, + r_e1: Scalar, + r_e2: Scalar, + // Per-round blinds stored between compute and apply + round_d1: [Scalar; 2], + round_d2: [Scalar; 2], + round_c: [Scalar; 2], + round_e1: [Scalar; 2], + round_e2: [Scalar; 2], + + _mode: PhantomData, } /// Verifier state for the Dory opening protocol @@ -61,24 +83,24 @@ pub struct DoryVerifierState { e2: E::G2, /// Initial e1 from VMV message - /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, H₂) + /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, Γ₂₀) e1_init: E::G1, /// Initial d2 from VMV message - /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, H₂) + /// Used in verify_final to batch the VMV constraint: D₂_init = e(E₁_init, Γ₂₀) d2_init: E::GT, /// Accumulated scalar for s1 after folding across rounds - s1_acc: ::Scalar, + s1_acc: Scalar, /// Accumulated scalar for s2 after folding across rounds - s2_acc: ::Scalar, + s2_acc: Scalar, /// Per-round coordinates for s1 (length = num_rounds). Order matches folding order. - s1_coords: Vec<::Scalar>, + s1_coords: Vec>, /// Per-round coordinates for s2 (length = num_rounds). Order matches folding order. 
- s2_coords: Vec<::Scalar>, + s2_coords: Vec>, /// Number of rounds remaining for indexing setup arrays num_rounds: usize, @@ -87,7 +109,12 @@ pub struct DoryVerifierState { setup: VerifierSetup, } -impl<'a, E: PairingCurve> DoryProverState<'a, E> { +impl<'a, E: PairingCurve, M: Mode> DoryProverState<'a, E, M> +where + ::Scalar: Field, + E::G2: Group::Scalar>, + E::GT: Group::Scalar>, +{ /// Create new prover state /// /// # Parameters @@ -100,9 +127,9 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { pub fn new( v1: Vec, v2: Vec, - v2_scalars: Option::Scalar>>, - s1: Vec<::Scalar>, - s2: Vec<::Scalar>, + v2_scalars: Option>>, + s1: Vec>, + s2: Vec>, setup: &'a ProverSetup, ) -> Self { debug_assert_eq!(v1.len(), v2.len(), "v1 and v2 must have equal length"); @@ -117,6 +144,7 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { } let num_rounds = v1.len().trailing_zeros() as usize; + let z = Scalar::::zero(); Self { v1, @@ -126,18 +154,40 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { s2, num_rounds, setup, + r_c: z, + r_d1: z, + r_d2: z, + r_e1: z, + r_e2: z, + round_d1: [z; 2], + round_d2: [z; 2], + round_c: [z; 2], + round_e1: [z; 2], + round_e2: [z; 2], + _mode: PhantomData, } } + /// Set initial VMV blinds (r_d1, r_c, r_d2, r_e1, r_e2). + pub fn set_initial_blinds( + &mut self, + r_d1: Scalar, + r_c: Scalar, + r_d2: Scalar, + r_e1: Scalar, + r_e2: Scalar, + ) { + (self.r_d1, self.r_c, self.r_d2, self.r_e1, self.r_e2) = (r_d1, r_c, r_d2, r_e1, r_e2); + } + /// Compute first reduce message for current round /// /// Computes D1L, D1R, D2L, D2R, E1β, E2β based on current state. 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_first_message")] - pub fn compute_first_message(&self) -> FirstReduceMessage + pub fn compute_first_message(&mut self) -> FirstReduceMessage where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, { assert!( self.num_rounds > 0, @@ -154,27 +204,39 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { let g1_prime = &self.setup.g1_vec[..n2]; let g2_prime = &self.setup.g2_vec[..n2]; - // Compute D values: multi-pairings between v-vectors and generators - // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ - g2_prime is from setup, use cached version - let d1_left = E::multi_pair_g2_setup(v1_l, g2_prime); - let d1_right = E::multi_pair_g2_setup(v1_r, g2_prime); + // Sample round blinds (zero in Transparent mode) + self.round_d1 = [M::sample(), M::sample()]; + self.round_d2 = [M::sample(), M::sample()]; + + // D₁L = ⟨v₁L, Γ₂'⟩, D₁R = ⟨v₁R, Γ₂'⟩ + let ht = &self.setup.ht; + let d1_left = M::mask( + E::multi_pair_g2_setup(v1_l, g2_prime), + ht, + &self.round_d1[0], + ); + let d1_right = M::mask( + E::multi_pair_g2_setup(v1_r, g2_prime), + ht, + &self.round_d1[1], + ); // D₂L = ⟨Γ₁', v₂L⟩, D₂R = ⟨Γ₁', v₂R⟩ // If v2 was constructed as h2 * scalars (first round), compute MSM(Γ₁', scalars) then one pairing. 
- let (d2_left, d2_right) = if let Some(scalars) = self.v2_scalars.as_ref() { + let (d2_left_base, d2_right_base) = if let Some(scalars) = self.v2_scalars.as_ref() { let (s_l, s_r) = scalars.split_at(n2); let sum_left = M1::msm(g1_prime, s_l); let sum_right = M1::msm(g1_prime, s_r); - ( - E::pair(&sum_left, &self.setup.h2), - E::pair(&sum_right, &self.setup.h2), - ) + let g2_fin = &self.setup.g2_vec[0]; + (E::pair(&sum_left, g2_fin), E::pair(&sum_right, g2_fin)) } else { ( E::multi_pair_g1_setup(g1_prime, v2_l), E::multi_pair_g1_setup(g1_prime, v2_r), ) }; + let d2_left = M::mask(d2_left_base, ht, &self.round_d2[0]); + let d2_right = M::mask(d2_right_base, ht, &self.round_d2[1]); // Compute E values for extended protocol: MSMs with scalar vectors // E₁β = ⟨Γ₁, s₂⟩ @@ -197,36 +259,30 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { /// /// Updates the state by combining with generators scaled by beta. #[tracing::instrument(skip_all, name = "DoryProverState::apply_first_challenge")] - pub fn apply_first_challenge(&mut self, beta: &::Scalar) + pub fn apply_first_challenge(&mut self, beta: &Scalar) where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, - ::Scalar: Field, { - let beta_inv = (*beta).inv().expect("beta must be invertible"); - + let beta_inv = beta.inv().expect("beta must be invertible"); let n = 1 << self.num_rounds; - // Combine: v₁ ← v₁ + β·Γ₁ + // v₁ ← v₁ + β·Γ₁, v₂ ← v₂ + β⁻¹·Γ₂ M1::fixed_scalar_mul_bases_then_add(&self.setup.g1_vec[..n], &mut self.v1, beta); - - // Combine: v₂ ← v₂ + β⁻¹·Γ₂ M2::fixed_scalar_mul_bases_then_add(&self.setup.g2_vec[..n], &mut self.v2, &beta_inv); - - // After first combine, the `v2_scalars` optimization does not apply. self.v2_scalars = None; + + self.r_c = self.r_c + self.r_d2 * beta + self.r_d1 * beta_inv; } /// Compute second reduce message for current round /// /// Computes C+, C-, E1+, E1-, E2+, E2- based on current state. 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_second_message")] - pub fn compute_second_message(&self) -> SecondReduceMessage + pub fn compute_second_message(&mut self) -> SecondReduceMessage where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, { let n2 = 1 << (self.num_rounds - 1); // n/2 @@ -236,21 +292,20 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { let (s1_l, s1_r) = self.s1.split_at(n2); let (s2_l, s2_r) = self.s2.split_at(n2); - // Compute C terms: cross products of v-vectors - // C₊ = ⟨v₁L, v₂R⟩ - let c_plus = E::multi_pair(v1_l, v2_r); - // C₋ = ⟨v₁R, v₂L⟩ - let c_minus = E::multi_pair(v1_r, v2_l); + self.round_c = [M::sample(), M::sample()]; + self.round_e1 = [M::sample(), M::sample()]; + self.round_e2 = [M::sample(), M::sample()]; + + // C₊ = ⟨v₁L, v₂R⟩, C₋ = ⟨v₁R, v₂L⟩ + let ht = &self.setup.ht; + let c_plus = M::mask(E::multi_pair(v1_l, v2_r), ht, &self.round_c[0]); + let c_minus = M::mask(E::multi_pair(v1_r, v2_l), ht, &self.round_c[1]); // Compute E terms for extended protocol: cross products with scalars - // E₁₊ = ⟨v₁L, s₂R⟩ - let e1_plus = M1::msm(v1_l, s2_r); - // E₁₋ = ⟨v₁R, s₂L⟩ - let e1_minus = M1::msm(v1_r, s2_l); - // E₂₊ = ⟨s₁L, v₂R⟩ - let e2_plus = M2::msm(v2_r, s1_l); - // E₂₋ = ⟨s₁R, v₂L⟩ - let e2_minus = M2::msm(v2_l, s1_r); + let e1_plus = M::mask(M1::msm(v1_l, s2_r), &self.setup.h1, &self.round_e1[0]); + let e1_minus = M::mask(M1::msm(v1_r, s2_l), &self.setup.h1, &self.round_e1[1]); + let e2_plus = M::mask(M2::msm(v2_r, s1_l), &self.setup.h2, &self.round_e2[0]); + let e2_minus = M::mask(M2::msm(v2_l, s1_r), &self.setup.h2, &self.round_e2[1]); SecondReduceMessage { c_plus, @@ -266,14 +321,11 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { /// /// Reduces the vector size by half using the alpha challenge. 
#[tracing::instrument(skip_all, name = "DoryProverState::apply_second_challenge")] - pub fn apply_second_challenge(&mut self, alpha: &::Scalar) - where - M1: DoryRoutines, - M2: DoryRoutines, - E::G2: Group::Scalar>, - ::Scalar: Field, - { - let alpha_inv = (*alpha).inv().expect("alpha must be invertible"); + pub fn apply_second_challenge, M2: DoryRoutines>( + &mut self, + alpha: &Scalar, + ) { + let alpha_inv = alpha.inv().expect("alpha must be invertible"); let n2 = 1 << (self.num_rounds - 1); // n/2 // Fold v₁: v₁ ← α·v₁L + v₁R @@ -296,7 +348,12 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { M1::fold_field_vectors(s2_l, s2_r, &alpha_inv); self.s2.truncate(n2); - // Decrement round counter + self.r_c = self.r_c + self.round_c[0] * alpha + self.round_c[1] * alpha_inv; + self.r_d1 = self.round_d1[0] * alpha + self.round_d1[1]; + self.r_d2 = self.round_d2[0] * alpha_inv + self.round_d2[1]; + self.r_e1 = self.r_e1 + self.round_e1[0] * alpha + self.round_e1[1] * alpha_inv; + self.r_e2 = self.r_e2 + self.round_e2[0] * alpha + self.round_e2[1] * alpha_inv; + self.num_rounds -= 1; } @@ -304,57 +361,204 @@ impl<'a, E: PairingCurve> DoryProverState<'a, E> { /// /// Applies fold-scalars transformation and returns the final E1, E2 elements. /// Must be called when num_rounds=0 (vectors are size 1). + /// + /// In ZK mode, E₁ and E₂ are additionally blinded with fresh randomness so + /// that the folded vectors `v₁[0]`, `v₂[0]` cannot be recovered from the + /// proof. 
#[tracing::instrument(skip_all, name = "DoryProverState::compute_final_message")] pub fn compute_final_message( - self, - gamma: &::Scalar, + &mut self, + gamma: &Scalar, ) -> ScalarProductMessage where M1: DoryRoutines, M2: DoryRoutines, - E::G2: Group::Scalar>, - ::Scalar: Field, { debug_assert_eq!(self.num_rounds, 0, "num_rounds must be 0 for final message"); debug_assert_eq!(self.v1.len(), 1, "v1 must have length 1"); debug_assert_eq!(self.v2.len(), 1, "v2 must have length 1"); - let gamma_inv = (*gamma).inv().expect("gamma must be invertible"); + let gamma_inv = gamma.inv().expect("gamma must be invertible"); - // Apply fold-scalars transform: - // E₁ = v₁ + γ·s₁·H₁ - let gamma_s1 = *gamma * self.s1[0]; - let e1 = self.v1[0] + self.setup.h1.scale(&gamma_s1); + let r_final1: Scalar = M::sample(); + let r_final2: Scalar = M::sample(); - // E₂ = v₂ + γ⁻¹·s₂·H₂ - let gamma_inv_s2 = gamma_inv * self.s2[0]; + // E₁ = v₁ + (γ·s₁ + r_final1)·H₁ + let gamma_s1 = *gamma * self.s1[0] + r_final1; + let e1 = self.v1[0] + gamma_s1 * self.setup.h1; + + // E₂ = v₂ + (γ⁻¹·s₂ + r_final2)·H₂ + let gamma_inv_s2 = gamma_inv * self.s2[0] + r_final2; let e2 = self.v2[0] + self.setup.h2.scale(&gamma_inv_s2); + self.r_c = self.r_c + self.r_e2 * gamma + self.r_e1 * gamma_inv; + ScalarProductMessage { e1, e2 } } + + /// Generate ZK scalar product proof. Must be called BEFORE `compute_final_message`. 
+ #[cfg(feature = "zk")] + pub fn scalar_product_proof>( + &self, + transcript: &mut T, + ) -> ScalarProductProof, E::GT> { + let (v1, v2) = (self.v1[0], self.v2[0]); + let (g1, g2) = (self.setup.g1_vec[0], self.setup.g2_vec[0]); + let ht = &self.setup.ht; + let r = || Scalar::::random(); + let (sd1, sd2) = (r(), r()); + let (d1, d2) = (sd1 * g1, g2.scale(&sd2)); + let (rp1, rp2, rq, rr) = (r(), r(), r(), r()); + let p1 = E::pair(&d1, &g2) + ht.scale(&rp1); + let p2 = E::pair(&g1, &d2) + ht.scale(&rp2); + let q = E::pair(&d1, &v2) + E::pair(&v1, &d2) + ht.scale(&rq); + let rr_val = E::pair(&d1, &d2) + ht.scale(&rr); + for (label, val) in [ + (b"sigma_p1" as &[u8], &p1), + (b"sigma_p2", &p2), + (b"sigma_q", &q), + (b"sigma_r", &rr_val), + ] { + transcript.append_serde(label, val); + } + let c = transcript.challenge_scalar(b"sigma_c"); + ScalarProductProof { + p1, + p2, + q, + r: rr_val, + e1: d1 + c * v1, + e2: d2 + v2.scale(&c), + r1: rp1 + c * self.r_d1, + r2: rp2 + c * self.r_d2, + r3: rr + c * rq + c * c * self.r_c, + } + } +} + +/// Generate Sigma1 proof: proves knowledge of (y, rE2, ry). +#[cfg(feature = "zk")] +pub fn generate_sigma1_proof( + y: &Scalar, + r_e2: &Scalar, + r_y: &Scalar, + setup: &ProverSetup, + transcript: &mut T, +) -> Sigma1Proof> +where + E: PairingCurve, + T: Transcript, + Scalar: Field, + E::G2: Group>, +{ + let (g2_fin, g1_fin) = (&setup.g2_vec[0], &setup.g1_vec[0]); + let (k1, k2, k3) = ( + Scalar::::random(), + Scalar::::random(), + Scalar::::random(), + ); + let a1 = g2_fin.scale(&k1) + setup.h2.scale(&k2); + let a2 = k1 * g1_fin + k3 * setup.h1; + transcript.append_serde(b"sigma1_a1", &a1); + transcript.append_serde(b"sigma1_a2", &a2); + let c = transcript.challenge_scalar(b"sigma1_c"); + Sigma1Proof { + a1, + a2, + z1: k1 + c * y, + z2: k2 + c * r_e2, + z3: k3 + c * r_y, + } +} + +/// Verify Sigma1 proof. 
+#[cfg(feature = "zk")] +pub fn verify_sigma1_proof>( + e2: &E::G2, + y_commit: &E::G1, + proof: &Sigma1Proof>, + setup: &VerifierSetup, + transcript: &mut T, +) -> Result<(), DoryError> +where + Scalar: Field, + E::G2: Group>, +{ + transcript.append_serde(b"sigma1_a1", &proof.a1); + transcript.append_serde(b"sigma1_a2", &proof.a2); + let c = transcript.challenge_scalar(b"sigma1_c"); + if setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2) != proof.a1 + e2.scale(&c) { + return Err(DoryError::InvalidProof); + } + if proof.z1 * setup.g1_0 + proof.z3 * setup.h1 != proof.a2 + c * y_commit { + return Err(DoryError::InvalidProof); + } + Ok(()) +} + +/// Generate Sigma2 proof: proves e(E1, Γ2,fin) - D2 = e(H1, t1·Γ2,fin + t2·H2). +#[cfg(feature = "zk")] +pub fn generate_sigma2_proof( + t1: &Scalar, + t2: &Scalar, + setup: &ProverSetup, + transcript: &mut T, +) -> Sigma2Proof, E::GT> +where + E: PairingCurve, + T: Transcript, + Scalar: Field, + E::G2: Group>, + E::GT: Group>, +{ + let (k1, k2) = (Scalar::::random(), Scalar::::random()); + let a = E::pair( + &setup.h1, + &(setup.g2_vec[0].scale(&k1) + setup.h2.scale(&k2)), + ); + transcript.append_serde(b"sigma2_a", &a); + let c = transcript.challenge_scalar(b"sigma2_c"); + Sigma2Proof { + a, + z1: k1 + c * t1, + z2: k2 + c * t2, + } +} + +/// Verify Sigma2 proof. 
+#[cfg(feature = "zk")] +pub fn verify_sigma2_proof>( + e1: &E::G1, + d2: &E::GT, + proof: &Sigma2Proof, E::GT>, + setup: &VerifierSetup, + transcript: &mut T, +) -> Result<(), DoryError> +where + Scalar: Field, + E::G2: Group>, + E::GT: Group>, +{ + transcript.append_serde(b"sigma2_a", &proof.a); + let c = transcript.challenge_scalar(b"sigma2_c"); + let expected = E::pair(e1, &setup.g2_0) - *d2; + let lhs = E::pair( + &setup.h1, + &(setup.g2_0.scale(&proof.z1) + setup.h2.scale(&proof.z2)), + ); + if lhs == proof.a + expected.scale(&c) { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } } impl DoryVerifierState { - /// Create new verifier state - /// - /// # Parameters - /// - `c`: Initial inner product value - /// - `d1`: Initial d1 value (from VMV) - /// - `d2`: Initial d2 value (from VMV) - /// - `e1`: Initial e1 value - /// - `e2`: Initial e2 value - /// - /// Construct verifier state for O(1) accumulation + /// Create new verifier state for O(1) accumulation. /// - /// - `s1_coords`: Per-round coordinates for s1 (right_vec in prover) - /// - `s2_coords`: Per-round coordinates for s2 (left_vec in prover) - /// - `num_rounds`: Number of rounds - /// - `setup`: Verifier setup parameters - /// - /// Note: `e1` and `d2` are stored both as initial values (for batched VMV check) - /// and as accumulators (updated during reduce rounds) - /// this is because the VMV check happens before the folding rounds, so we need to save - /// the value for the final batched pairing check. + /// `e1` and `d2` are stored both as initial values (for batched VMV check) + /// and as accumulators (updated during reduce rounds), since the VMV check + /// is deferred to the final batched pairing. 
#[allow(clippy::too_many_arguments)] pub fn new( c: E::GT, @@ -362,8 +566,8 @@ impl DoryVerifierState { d2: E::GT, e1: E::G1, e2: E::G2, - s1_coords: Vec<::Scalar>, - s2_coords: Vec<::Scalar>, + s1_coords: Vec>, + s2_coords: Vec>, num_rounds: usize, setup: VerifierSetup, ) -> Self { @@ -378,8 +582,8 @@ impl DoryVerifierState { e2, e1_init: e1, d2_init: d2, - s1_acc: ::Scalar::one(), - s2_acc: ::Scalar::one(), + s1_acc: Scalar::::one(), + s2_acc: Scalar::::one(), s1_coords, s2_coords, num_rounds, @@ -396,83 +600,79 @@ impl DoryVerifierState { &mut self, first_msg: &FirstReduceMessage, second_msg: &SecondReduceMessage, - alpha: &::Scalar, - beta: &::Scalar, - ) where - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, - ::Scalar: Field, + alpha: &Scalar, + beta: &Scalar, + ) -> Result<(), DoryError> + where + E::G2: Group>, + E::GT: Group>, + Scalar: Field, { - assert!(self.num_rounds > 0, "No rounds remaining"); - - let alpha_inv = (*alpha).inv().expect("alpha must be invertible"); - let beta_inv = (*beta).inv().expect("beta must be invertible"); - - // Update C: C' ← C + χᵢ + β·D₂ + β⁻¹·D₁ + α·C₊ + α⁻¹·C₋ - let chi = &self.setup.chi[self.num_rounds]; - self.c = self.c + chi; - self.c = self.c + self.d2.scale(beta); - self.c = self.c + self.d1.scale(&beta_inv); - self.c = self.c + second_msg.c_plus.scale(alpha); - self.c = self.c + second_msg.c_minus.scale(&alpha_inv); - - // Update D₁: D₁' ← α·D₁L + D₁R + α·β·Δ₁L + β·Δ₁R - let delta_1l = &self.setup.delta_1l[self.num_rounds]; - let delta_1r = &self.setup.delta_1r[self.num_rounds]; - let alpha_beta = *alpha * *beta; - self.d1 = first_msg.d1_left.scale(alpha); - self.d1 = self.d1 + first_msg.d1_right; - self.d1 = self.d1 + delta_1l.scale(&alpha_beta); - self.d1 = self.d1 + delta_1r.scale(beta); - - // Update D₂: D₂' ← α⁻¹·D₂L + D₂R + α⁻¹·β⁻¹·Δ₂L + β⁻¹·Δ₂R - let delta_2l = &self.setup.delta_2l[self.num_rounds]; - let delta_2r = &self.setup.delta_2r[self.num_rounds]; + if self.num_rounds == 0 { + return 
Err(DoryError::InvalidProof); + } + + let alpha_inv = alpha.inv().ok_or(DoryError::InvalidProof)?; + let beta_inv = beta.inv().ok_or(DoryError::InvalidProof)?; + + // C' ← C + χᵢ + β·D₂ + β⁻¹·D₁ + α·C₊ + α⁻¹·C₋ + self.c = self.c + + self.setup.chi[self.num_rounds] + + self.d2.scale(beta) + + self.d1.scale(&beta_inv) + + second_msg.c_plus.scale(alpha) + + second_msg.c_minus.scale(&alpha_inv); + + // D₁' ← α·D₁L + D₁R + αβ·Δ₁L + β·Δ₁R + let alpha_beta = *alpha * beta; + self.d1 = first_msg.d1_left.scale(alpha) + + first_msg.d1_right + + self.setup.delta_1l[self.num_rounds].scale(&alpha_beta) + + self.setup.delta_1r[self.num_rounds].scale(beta); + + // D₂' ← α⁻¹·D₂L + D₂R + α⁻¹β⁻¹·Δ₂L + β⁻¹·Δ₂R let alpha_inv_beta_inv = alpha_inv * beta_inv; - self.d2 = first_msg.d2_left.scale(&alpha_inv); - self.d2 = self.d2 + first_msg.d2_right; - self.d2 = self.d2 + delta_2l.scale(&alpha_inv_beta_inv); - self.d2 = self.d2 + delta_2r.scale(&beta_inv); - - // Update E₁: E₁' ← E₁ + β·E₁β + α·E₁₊ + α⁻¹·E₁₋ - self.e1 = self.e1 + first_msg.e1_beta.scale(beta); - self.e1 = self.e1 + second_msg.e1_plus.scale(alpha); - self.e1 = self.e1 + second_msg.e1_minus.scale(&alpha_inv); - - // Update E₂: E₂' ← E₂ + β⁻¹·E₂β + α·E₂₊ + α⁻¹·E₂₋ - self.e2 = self.e2 + first_msg.e2_beta.scale(&beta_inv); - self.e2 = self.e2 + second_msg.e2_plus.scale(alpha); - self.e2 = self.e2 + second_msg.e2_minus.scale(&alpha_inv); - - // Update folded scalars in O(1): s1_acc *= (α·(1−y_t) + y_t), s2_acc *= (α⁻¹·(1−x_t) + x_t) - // Endianness note: s*_coords are stored in increasing dimension index (little-endian by dimension). - // Folding processes the most significant dimension first (MSB-first), so we index from the end: idx = num_rounds - 1. 
+ self.d2 = first_msg.d2_left.scale(&alpha_inv) + + first_msg.d2_right + + self.setup.delta_2l[self.num_rounds].scale(&alpha_inv_beta_inv) + + self.setup.delta_2r[self.num_rounds].scale(&beta_inv); + + // E₁' ← E₁ + β·E₁β + α·E₁₊ + α⁻¹·E₁₋ + self.e1 = self.e1 + + *beta * first_msg.e1_beta + + *alpha * second_msg.e1_plus + + alpha_inv * second_msg.e1_minus; + + // E₂' ← E₂ + β⁻¹·E₂β + α·E₂₊ + α⁻¹·E₂₋ + self.e2 = self.e2 + + first_msg.e2_beta.scale(&beta_inv) + + second_msg.e2_plus.scale(alpha) + + second_msg.e2_minus.scale(&alpha_inv); + + // Folded scalars: s_acc *= (α·(1−coord) + coord) indexed MSB-first let idx = self.num_rounds - 1; - let y_t = self.s1_coords[idx]; - let x_t = self.s2_coords[idx]; - let one = ::Scalar::one(); - let s1_term = (*alpha) * (one - y_t) + y_t; - let s2_term = alpha_inv * (one - x_t) + x_t; - self.s1_acc = self.s1_acc * s1_term; - self.s2_acc = self.s2_acc * s2_term; - - // Decrement round counter + let (y_t, x_t) = (self.s1_coords[idx], self.s2_coords[idx]); + let one = Scalar::::one(); + self.s1_acc = self.s1_acc * (*alpha * (one - y_t) + y_t); + self.s2_acc = self.s2_acc * (alpha_inv * (one - x_t) + x_t); + self.num_rounds -= 1; + Ok(()) } - /// Verify final scalar product message + /// Verify the final scalar product equation. /// - /// Applies fold-scalars transformation and checks the final pairing equation. - /// Must be called when num_rounds=0 after all reduce rounds are complete. + /// Must be called when `num_rounds == 0` after all reduce rounds are complete. + /// + /// When `zk_data` is `None`, performs the transparent 4-pairing check. + /// When `zk_data` is `Some((sp, sigma_c))`, performs the ZK 1-pairing check. /// /// # Non-optimized Protocol Equations /// /// ## VMV Check (batched together with the final pairing check) /// - /// The VMV protocol requires: `D₂_init = e(E₁_init, H₂)` - /// - /// This was originally checked as a standalone pairing in `verify_evaluation_proof`. 
- /// We defer it here to batch with other pairings. + /// The VMV protocol requires: `D₂_init = e(E₁_init, Γ₂₀)` + /// (proven by the Sigma₂ proof in ZK mode, deferred here for batching in transparent mode). /// /// ## Fold-Scalars Updates /// @@ -488,106 +688,134 @@ impl DoryVerifierState { /// e(E₁ + d·Γ₁₀, E₂ + d⁻¹·Γ₂₀) = C' + χ₀ + d·D₂' + d⁻¹·D₁' /// ``` /// - /// # Multi-Pairing Optimization + /// # Transparent Mode — Multi-Pairing Check (4 ML + 1 FE) /// /// ## Batching the VMV Check /// /// We use random linear combination with challenge `d²` to defer the VMV check. /// We use `d²` (not `d`) to ensure sufficient independence from the existing `d·D₂` term. /// - /// Multiplying by `d²` preserves soundness because: - /// - `d` is derived from the transcript AFTER `D₂_init` and `E₁_init` are committed - /// - If `D₂_init ≠ e(E₁_init, H₂)`, then with overwhelming probability: - /// `T + d²·D₂_init ≠ multi_pair([...]) + d²·e(E₁_init, H₂)` + /// Soundness: `d` is derived from the transcript AFTER `D₂_init` and `E₁_init` are + /// committed, so if `D₂_init ≠ e(E₁_init, Γ₂₀)`, then with overwhelming probability + /// `T + d²·D₂_init ≠ multi_pair([...]) + d²·e(E₁_init, Γ₂₀)`. /// + /// ## Final Combined Check /// - /// ## Combining Pairings - /// - /// After moving all pairings to LHS and using bilinearity: + /// The final check verifies both: + /// - (a) The fold-scalars/reduce protocol equation + /// - (b) The VMV constraint `D₂_init = e(E₁_init, Γ₂₀)` /// - /// Terms sharing H₂ (fold-scalars pairings + deferred VMV check): + /// Combined via: `(a) + d²·(b)` where `d` is the final challenge. 
/// /// ```text - /// e(E₁_acc, H₂)^(-γ⁻¹) · e((s₂·γ⁻¹)·Γ₁₀, H₂)^(-d) · e(E₁_init, H₂)^(d²) - /// = e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀) + d²·E₁_init, H₂) + /// e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) [Pair 1: scalar product] + /// · e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) [Pair 2: E₂ accumulator] + /// · e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) [Pair 3: E₁ accumulator] + /// · e(d²·E₁_init, Γ₂₀) [Pair 4: deferred VMV] + /// = C + (s₁·s₂)·HT + χ₀ + d·D₂ + d⁻¹·D₁ + d²·D₂_init /// ``` /// - /// ## Final Combined Check + /// Note: Pairs 3 and 4 cannot be combined into 3 ML because they use different + /// G2 elements (H₂ vs Γ₂₀). This differs from the original Dory construction + /// where `D₂ = e(Γ₁·v, H₂)` allowed H₂-sharing. /// - /// The final check verifies both: - /// - (a) The original fold-scalars/reduce protocol equation - /// - (b) The VMV constraint `D₂_init = e(E₁_init, H₂)` + /// # ZK Mode (1 ML + 1 FE) /// - /// Combined via: `(a) + d²·(b)` where `d` is the final challenge. + /// In ZK mode, the scalar product proof replaces the transparent check with a + /// Sigma-protocol equation proving knowledge of (v₁, v₂) opening (C, D₁, D₂). + /// E-accumulator and VMV binding are handled separately by Sigma₁/Sigma₂ proofs + /// verified earlier in the protocol. 
/// /// ```text - /// e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) - /// · e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) - /// · e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀) + d²·E₁_init, H₂) - /// = T + d²·D₂_init + /// e(sp.e₁ + d·Γ₁₀, sp.e₂ + d⁻¹·Γ₂₀) + /// = χ₀ + sp.r + c·sp.q + c²·C + /// + d·(sp.p₂ + c·D₂) + d⁻¹·(sp.p₁ + c·D₁) + /// − (sp.r₃ + d·sp.r₂ + d⁻¹·sp.r₁)·HT /// ``` - /// - /// This is 3 miller loops + 1 final exponentiation, - /// Whereas a naive check would be 6 ML + 6 FE + #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all, name = "DoryVerifierState::verify_final")] pub fn verify_final( - &mut self, + &self, msg: &ScalarProductMessage, - gamma: &::Scalar, - d: &::Scalar, + gamma: &Scalar, + d: &Scalar, + zk_data: Option<( + &ScalarProductProof, E::GT>, + &Scalar, + )>, ) -> Result<(), DoryError> where - E::G2: Group::Scalar>, - E::GT: Group::Scalar>, - ::Scalar: Field, + E::G2: Group>, + E::GT: Group>, + Scalar: Field, { debug_assert_eq!( self.num_rounds, 0, "num_rounds must be 0 for final verification" ); - let gamma_inv = (*gamma).inv().expect("gamma must be invertible"); - let d_inv = (*d).inv().expect("d must be invertible"); - let d_sq = *d * *d; - let neg_gamma = -*gamma; - let neg_gamma_inv = -gamma_inv; - - // Compute RHS (non-pairing GT terms): - // T = C + (s₁·s₂)·HT + χ₀ + d·D₂ + d⁻¹·D₁ + d²·D₂_init - // The d²·D₂_init term is the deferred VMV check contribution. - // We use d² instead of d to ensure independence from the d·D₂ term. 
- let s_product = self.s1_acc * self.s2_acc; - let mut rhs = self.c + self.setup.ht.scale(&s_product); - rhs = rhs + self.setup.chi[0]; - rhs = rhs + self.d2.scale(d); - rhs = rhs + self.d1.scale(&d_inv); - rhs = rhs + self.d2_init.scale(&d_sq); - - // Pair 1: (E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) - let p1_g1 = msg.e1 + self.setup.g1_0.scale(d); - let p1_g2 = msg.e2 + self.setup.g2_0.scale(&d_inv); - - // Pair 2: (H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) - let d_inv_s1 = d_inv * self.s1_acc; - let g2_term = self.e2 + self.setup.g2_0.scale(&d_inv_s1); - let p2_g1 = self.setup.h1; - let p2_g2 = g2_term.scale(&neg_gamma); - - // Pair 3: ((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀) + d²·E₁_init, H₂) - // The d²·E₁_init term is the deferred VMV check: d²·e(E₁_init, H₂) - // We use d² to ensure independence from other d-scaled terms. - let d_s2 = *d * self.s2_acc; - let g1_term = self.e1 + self.setup.g1_0.scale(&d_s2); - let p3_g1 = g1_term.scale(&neg_gamma_inv) + self.e1_init.scale(&d_sq); - let p3_g2 = self.setup.h2; - - // Single multi-pairing: 3 miller loops + 1 final exponentiation - let lhs = E::multi_pair(&[p1_g1, p2_g1, p3_g1], &[p1_g2, p2_g2, p3_g2]); - - if lhs == rhs { - Ok(()) + let d_inv = d.inv().ok_or(DoryError::InvalidProof)?; + + if let Some((sp, sigma_c)) = zk_data { + // ZK mode: 1 ML + 1 FE + let c = *sigma_c; + let c_sq = c * c; + + let lhs = E::pair( + &(sp.e1 + self.setup.g1_0.scale(d)), + &(sp.e2 + self.setup.g2_0.scale(&d_inv)), + ); + + let ht_scalar = sp.r3 + *d * sp.r2 + d_inv * sp.r1; + let mut rhs = self.setup.chi[0] + sp.r + sp.q.scale(&c) + self.c.scale(&c_sq); + rhs = rhs + sp.p2.scale(d) + self.d2.scale(&(*d * c)); + rhs = rhs + sp.p1.scale(&d_inv) + self.d1.scale(&(d_inv * c)); + rhs = rhs - self.setup.ht.scale(&ht_scalar); + + if lhs == rhs { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } } else { - Err(DoryError::InvalidProof) + // Transparent mode: 4 ML + 1 FE + let gamma_inv = gamma.inv().ok_or(DoryError::InvalidProof)?; + let d_sq = *d * *d; + 
let neg_gamma = -*gamma; + let neg_gamma_inv = -gamma_inv; + + let s_product = self.s1_acc * self.s2_acc; + let rhs = self.c + + self.setup.ht.scale(&s_product) + + self.setup.chi[0] + + self.d2.scale(d) + + self.d1.scale(&d_inv) + + self.d2_init.scale(&d_sq); + + // Pair 1: e(E₁_final + d·Γ₁₀, E₂_final + d⁻¹·Γ₂₀) + let p1_g1 = msg.e1 + self.setup.g1_0.scale(d); + let p1_g2 = msg.e2 + self.setup.g2_0.scale(&d_inv); + + // Pair 2: e(H₁, (-γ)·(E₂_acc + (d⁻¹·s₁)·Γ₂₀)) + let p2_g1 = self.setup.h1; + let p2_g2 = (self.e2 + self.setup.g2_0.scale(&(d_inv * self.s1_acc))).scale(&neg_gamma); + + // Pair 3: e((-γ⁻¹)·(E₁_acc + (d·s₂)·Γ₁₀), H₂) + let p3_g1 = + (self.e1 + self.setup.g1_0.scale(&(*d * self.s2_acc))).scale(&neg_gamma_inv); + let p3_g2 = self.setup.h2; + + // Pair 4: e(d²·E₁_init, Γ₂₀) — deferred VMV check + let p4_g1 = self.e1_init.scale(&d_sq); + let p4_g2 = self.setup.g2_0; + + let lhs = E::multi_pair(&[p1_g1, p2_g1, p3_g1, p4_g1], &[p1_g2, p2_g2, p3_g2, p4_g2]); + + if lhs == rhs { + Ok(()) + } else { + Err(DoryError::InvalidProof) + } } } } diff --git a/src/setup.rs b/src/setup.rs index 6a60806..602261c 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -6,7 +6,6 @@ use crate::primitives::arithmetic::{Group, PairingCurve}; use crate::primitives::serialization::{DoryDeserialize, DorySerialize}; -use rand_core::RngCore; #[cfg(all(feature = "disk-persistence", not(target_arch = "wasm32")))] use std::fs::{self, File}; @@ -87,22 +86,21 @@ impl ProverSetup { /// supporting polynomials up to 2^max_log_n coefficients arranged as n×n matrices. 
/// /// # Parameters - /// - `rng`: Random number generator /// - `max_log_n`: Maximum log₂ of polynomial size (for n×n matrix with n² = 2^max_log_n) /// /// # Returns /// A new `ProverSetup` with randomly generated parameters - pub fn new(rng: &mut R, max_log_n: usize) -> Self { + pub fn new(max_log_n: usize) -> Self { // For square matrices: n = 2^((max_log_n+1)/2) let n = 1 << max_log_n.div_ceil(2); // Generate n random G1 generators (Γ₁) - let g1_vec: Vec = (0..n).map(|_| E::G1::random(rng)).collect(); + let g1_vec: Vec = (0..n).map(|_| E::G1::random()).collect(); // Generate n random G2 generators (Γ₂) - let g2_vec: Vec = (0..n).map(|_| E::G2::random(rng)).collect(); + let g2_vec: Vec = (0..n).map(|_| E::G2::random()).collect(); // Generate blinding generators - let h1 = E::G1::random(rng); - let h2 = E::G2::random(rng); + let h1 = E::G1::random(); + let h2 = E::G2::random(); // Precompute e(h₁, h₂) let ht = E::pair(&h1, &h2); diff --git a/tests/arkworks/cache.rs b/tests/arkworks/cache.rs index a3bbdb6..e66f866 100644 --- a/tests/arkworks/cache.rs +++ b/tests/arkworks/cache.rs @@ -1,17 +1,15 @@ use dory_pcs::backends::arkworks::{ArkG1, ArkG2, ArkGT, BN254}; use dory_pcs::primitives::arithmetic::{Group, PairingCurve}; -use rand::thread_rng; #[cfg(feature = "cache")] use dory_pcs::backends::arkworks::ark_cache; #[test] fn multi_pair_correctness() { - let mut rng = thread_rng(); let n = 10; - let ps: Vec = (0..n).map(|_| ArkG1::random(&mut rng)).collect(); - let qs: Vec = (0..n).map(|_| ArkG2::random(&mut rng)).collect(); + let ps: Vec = (0..n).map(|_| ArkG1::random()).collect(); + let qs: Vec = (0..n).map(|_| ArkG2::random()).collect(); let result = BN254::multi_pair(&ps, &qs); @@ -35,10 +33,8 @@ fn multi_pair_empty() { #[test] #[should_panic(expected = "multi_pair requires equal length vectors")] fn multi_pair_length_mismatch() { - let mut rng = thread_rng(); - - let ps: Vec = (0..5).map(|_| ArkG1::random(&mut rng)).collect(); - let qs: Vec = (0..3).map(|_| 
ArkG2::random(&mut rng)).collect(); + let ps: Vec = (0..5).map(|_| ArkG1::random()).collect(); + let qs: Vec = (0..3).map(|_| ArkG2::random()).collect(); BN254::multi_pair(&ps, &qs); } @@ -46,9 +42,8 @@ fn multi_pair_length_mismatch() { #[cfg(feature = "cache")] #[test] fn cache_initialization() { - let mut rng = thread_rng(); - let g1_vec: Vec = (0..10).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_vec: Vec = (0..10).map(|_| ArkG2::random(&mut rng)).collect(); + let g1_vec: Vec = (0..10).map(|_| ArkG1::random()).collect(); + let g2_vec: Vec = (0..10).map(|_| ArkG2::random()).collect(); ark_cache::init_cache(&g1_vec, &g2_vec); @@ -61,11 +56,9 @@ fn cache_initialization() { #[cfg(feature = "cache")] #[test] fn cache_smart_reinit() { - let mut rng = thread_rng(); - // Initialize with small size - let g1_small: Vec = (0..5).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_small: Vec = (0..5).map(|_| ArkG2::random(&mut rng)).collect(); + let g1_small: Vec = (0..5).map(|_| ArkG1::random()).collect(); + let g2_small: Vec = (0..5).map(|_| ArkG2::random()).collect(); ark_cache::init_cache(&g1_small, &g2_small); let cache = ark_cache::get_prepared_cache().unwrap(); @@ -77,8 +70,8 @@ fn cache_smart_reinit() { assert_eq!(cache.g1_prepared.len(), small_len); // Re-init with larger size — should replace cache - let g1_large: Vec = (0..20).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_large: Vec = (0..20).map(|_| ArkG2::random(&mut rng)).collect(); + let g1_large: Vec = (0..20).map(|_| ArkG1::random()).collect(); + let g2_large: Vec = (0..20).map(|_| ArkG2::random()).collect(); ark_cache::init_cache(&g1_large, &g2_large); let cache = ark_cache::get_prepared_cache().unwrap(); @@ -89,11 +82,10 @@ fn cache_smart_reinit() { #[cfg(feature = "cache")] #[test] fn multi_pair_with_cache_optimization() { - let mut rng = thread_rng(); let n = 20; - let g1_vec: Vec = (0..n).map(|_| ArkG1::random(&mut rng)).collect(); - let g2_vec: Vec = (0..n).map(|_| ArkG2::random(&mut 
rng)).collect(); + let g1_vec: Vec = (0..n).map(|_| ArkG1::random()).collect(); + let g2_vec: Vec = (0..n).map(|_| ArkG2::random()).collect(); if !ark_cache::is_cached() { ark_cache::init_cache(&g1_vec, &g2_vec); diff --git a/tests/arkworks/commitment.rs b/tests/arkworks/commitment.rs index 382d4df..b7aa500 100644 --- a/tests/arkworks/commitment.rs +++ b/tests/arkworks/commitment.rs @@ -2,6 +2,7 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::Transparent; #[test] fn test_commit_small_polynomial() { @@ -11,10 +12,10 @@ fn test_commit_small_polynomial() { let nu = 2; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup); + let result = poly.commit::(nu, sigma, &setup); assert!(result.is_ok()); - let (_commitment, row_commitments) = result.unwrap(); + let (_commitment, row_commitments, _) = result.unwrap(); assert_eq!(row_commitments.len(), 1 << nu); } @@ -26,10 +27,10 @@ fn test_commit_constant_polynomial() { let nu = 2; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup); + let result = poly.commit::(nu, sigma, &setup); assert!(result.is_ok()); - let (_commitment, row_commitments) = result.unwrap(); + let (_commitment, row_commitments, _) = result.unwrap(); assert_eq!(row_commitments.len(), 1 << nu); } @@ -38,15 +39,15 @@ fn test_commit_different_sizes() { let setup = test_setup(8); let poly_4 = random_polynomial(4); - let result = poly_4.commit::(1, 1, &setup); + let result = poly_4.commit::(1, 1, &setup); assert!(result.is_ok()); let poly_16 = random_polynomial(16); - let result = poly_16.commit::(2, 2, &setup); + let result = poly_16.commit::(2, 2, &setup); assert!(result.is_ok()); let poly_64 = random_polynomial(64); - let result = poly_64.commit::(3, 3, &setup); + let result = poly_64.commit::(3, 3, &setup); assert!(result.is_ok()); } @@ -58,7 +59,7 @@ fn test_commit_invalid_size() { let nu = 3; let sigma = 2; - let result = poly.commit::(nu, sigma, &setup); + let result = poly.commit::(nu, sigma, &setup); 
assert!(result.is_err()); } @@ -70,8 +71,12 @@ fn test_commit_deterministic() { let poly1 = ArkworksPolynomial::new(coefficients.clone()); let poly2 = ArkworksPolynomial::new(coefficients); - let (comm1, _) = poly1.commit::(2, 2, &setup).unwrap(); - let (comm2, _) = poly2.commit::(2, 2, &setup).unwrap(); + let (comm1, _, _) = poly1 + .commit::(2, 2, &setup) + .unwrap(); + let (comm2, _, _) = poly2 + .commit::(2, 2, &setup) + .unwrap(); assert_eq!(comm1, comm2); } @@ -83,8 +88,12 @@ fn test_commit_different_polynomials() { let poly1 = random_polynomial(16); let poly2 = random_polynomial(16); - let (comm1, _) = poly1.commit::(2, 2, &setup).unwrap(); - let (comm2, _) = poly2.commit::(2, 2, &setup).unwrap(); + let (comm1, _, _) = poly1 + .commit::(2, 2, &setup) + .unwrap(); + let (comm2, _, _) = poly2 + .commit::(2, 2, &setup) + .unwrap(); assert_ne!(comm1, comm2); } diff --git a/tests/arkworks/evaluation.rs b/tests/arkworks/evaluation.rs index 9c4515f..0235541 100644 --- a/tests/arkworks/evaluation.rs +++ b/tests/arkworks/evaluation.rs @@ -2,7 +2,7 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, verify}; +use dory_pcs::{prove, verify, Transparent}; #[test] fn test_evaluation_proof_small() { @@ -15,15 +15,16 @@ fn test_evaluation_proof_small() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -31,7 +32,7 @@ fn test_evaluation_proof_small() { ); assert!(result.is_ok()); - let proof = result.unwrap(); + let (proof, _) = result.unwrap(); let evaluation = poly.evaluate(&point); let mut verifier_transcript = fresh_transcript(); @@ -58,15 +59,16 @@ fn 
test_evaluation_proof_with_precomputed_commitment() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -74,7 +76,7 @@ fn test_evaluation_proof_with_precomputed_commitment() { ); assert!(result.is_ok()); - let proof = result.unwrap(); + let (proof, _) = result.unwrap(); let evaluation = poly.evaluate(&point); let mut verifier_transcript = fresh_transcript(); @@ -104,15 +106,16 @@ fn test_evaluation_proof_constant_polynomial() { let expected_eval = poly.evaluate(&point); assert_eq!(expected_eval, ArkFr::from_u64(7)); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -147,15 +150,16 @@ fn test_evaluation_proof_wrong_evaluation_fails() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &setup, @@ -188,13 +192,16 @@ fn test_evaluation_proof_different_sizes() { let poly = random_polynomial(4); let point = random_point(2); - let (tier_2, tier_1) = 
poly.commit::(1, 1, &setup).unwrap(); + let (tier_2, tier_1, commit_blind) = poly + .commit::(1, 1, &setup) + .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, 1, 1, &setup, @@ -222,13 +229,16 @@ fn test_evaluation_proof_different_sizes() { let poly = random_polynomial(64); let point = random_point(6); - let (tier_2, tier_1) = poly.commit::(3, 3, &setup).unwrap(); + let (tier_2, tier_1, commit_blind) = poly + .commit::(3, 3, &setup) + .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, 3, 3, &setup, @@ -259,18 +269,19 @@ fn test_multiple_evaluations_same_commitment() { let nu = 2; let sigma = 2; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &setup) .unwrap(); for _ in 0..3 { let point = random_point(4); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1.clone(), + commit_blind, nu, sigma, &setup, diff --git a/tests/arkworks/homomorphic.rs b/tests/arkworks/homomorphic.rs index 6c3122a..6a4fb2d 100644 --- a/tests/arkworks/homomorphic.rs +++ b/tests/arkworks/homomorphic.rs @@ -4,13 +4,12 @@ use super::*; use dory_pcs::backends::arkworks::ArkG1; use dory_pcs::primitives::arithmetic::{Field, Group}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_homomorphic_combination_e2e() { - let mut rng = 
rand::thread_rng(); let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); let nu = 4; let sigma = 4; @@ -22,12 +21,12 @@ fn test_homomorphic_combination_e2e() { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup) + poly.commit::(nu, sigma, &prover_setup) .unwrap() }) .collect(); - let coeffs: Vec = (0..5).map(|_| ArkFr::random(&mut rng)).collect(); + let coeffs: Vec = (0..5).map(|_| ArkFr::random()).collect(); // Homomorphically combine commitments #[allow(clippy::op_ref)] @@ -87,10 +86,11 @@ fn test_homomorphic_combination_e2e() { // Create evaluation proof using combined commitment let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, + ArkFr::zero(), nu, sigma, &prover_setup, @@ -116,8 +116,7 @@ fn test_homomorphic_combination_e2e() { #[test] fn test_homomorphic_combination_small() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + let (prover_setup, verifier_setup) = setup::(6); let nu = 2; let sigma = 2; @@ -129,12 +128,12 @@ fn test_homomorphic_combination_small() { let commitments: Vec<_> = polys .iter() .map(|poly| { - poly.commit::(nu, sigma, &prover_setup) + poly.commit::(nu, sigma, &prover_setup) .unwrap() }) .collect(); - let coeffs: Vec = (0..5).map(|_| ArkFr::random(&mut rng)).collect(); + let coeffs: Vec = (0..5).map(|_| ArkFr::random()).collect(); #[allow(clippy::op_ref)] let mut combined_tier2 = coeffs[0] * &commitments[0].0; for i in 1..5 { @@ -177,10 +176,11 @@ fn test_homomorphic_combination_small() { let evaluation = combined_poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + 
let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &combined_poly, &point, combined_tier1, + ArkFr::zero(), nu, sigma, &prover_setup, diff --git a/tests/arkworks/integration.rs b/tests/arkworks/integration.rs index 567cf78..a8e61ba 100644 --- a/tests/arkworks/integration.rs +++ b/tests/arkworks/integration.rs @@ -3,31 +3,31 @@ use super::*; use dory_pcs::primitives::arithmetic::Field; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_full_workflow() { - let mut rng = rand::thread_rng(); let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); let poly = random_polynomial(256); let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let point = random_point(8); let expected_evaluation = poly.evaluate(&point); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -52,25 +52,25 @@ fn test_full_workflow() { #[test] fn test_workflow_without_precommitment() { - let mut rng = rand::thread_rng(); let max_log_n = 10; - let (prover_setup, verifier_setup) = setup::(&mut rng, max_log_n); + let (prover_setup, verifier_setup) = setup::(max_log_n); let poly = random_polynomial(256); let point = random_point(8); let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, 
_>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -94,25 +94,25 @@ fn test_workflow_without_precommitment() { #[test] fn test_batched_proofs() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let poly = random_polynomial(256); let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); for i in 0..5 { let point = random_point(8); let mut prover_transcript = Blake2bTranscript::new(format!("test-{i}").as_bytes()); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1.clone(), + commit_blind, nu, sigma, &prover_setup, @@ -137,8 +137,7 @@ fn test_batched_proofs() { #[test] fn test_linear_polynomial() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let coefficients: Vec = (0..256).map(|i| ArkFr::from_u64(i as u64)).collect(); let poly = ArkworksPolynomial::new(coefficients); @@ -157,15 +156,16 @@ fn test_linear_polynomial() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -192,8 +192,7 @@ fn test_linear_polynomial() { #[test] fn test_zero_polynomial() { - let mut rng = rand::thread_rng(); - let 
(prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let poly = constant_polynomial(0, 8); let point = random_point(8); @@ -201,15 +200,16 @@ fn test_zero_polynomial() { let nu = 4; let sigma = 4; - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -235,8 +235,7 @@ fn test_zero_polynomial() { #[test] fn test_soundness_wrong_commitment() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); let poly1 = random_polynomial(256); let poly2 = random_polynomial(256); @@ -245,19 +244,20 @@ fn test_soundness_wrong_commitment() { let nu = 4; let sigma = 4; - let (commitment1, _) = poly1 - .commit::(nu, sigma, &prover_setup) + let (commitment1, _, _commit_blind) = poly1 + .commit::(nu, sigma, &prover_setup) .unwrap(); - let (_, tier_1_poly2) = poly2 - .commit::(nu, sigma, &prover_setup) + let (_, tier_1_poly2, commit_blind) = poly2 + .commit::(nu, sigma, &prover_setup) .unwrap(); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly2, &point, tier_1_poly2, + commit_blind, nu, sigma, &prover_setup, diff --git a/tests/arkworks/mod.rs b/tests/arkworks/mod.rs index e235c47..bb7042c 100644 --- a/tests/arkworks/mod.rs +++ b/tests/arkworks/mod.rs @@ -8,7 +8,6 @@ use dory_pcs::backends::arkworks::{ use dory_pcs::primitives::arithmetic::Field; use dory_pcs::proof::DoryProof; use 
dory_pcs::setup::{ProverSetup, VerifierSetup}; -use rand::thread_rng; pub mod cache; pub mod commitment; @@ -16,12 +15,14 @@ pub mod evaluation; pub mod homomorphic; pub mod integration; pub mod non_square; +pub mod serialization; pub mod setup; pub mod soundness; +#[cfg(feature = "zk")] +pub mod zk; pub fn random_polynomial(size: usize) -> ArkworksPolynomial { - let mut rng = thread_rng(); - let coefficients: Vec = (0..size).map(|_| ArkFr::random(&mut rng)).collect(); + let coefficients: Vec = (0..size).map(|_| ArkFr::random()).collect(); ArkworksPolynomial::new(coefficients) } @@ -33,13 +34,11 @@ pub fn constant_polynomial(value: u64, num_vars: usize) -> ArkworksPolynomial { } pub fn random_point(num_vars: usize) -> Vec { - let mut rng = thread_rng(); - (0..num_vars).map(|_| ArkFr::random(&mut rng)).collect() + (0..num_vars).map(|_| ArkFr::random()).collect() } pub fn test_setup(max_log_n: usize) -> ProverSetup { - let mut rng = thread_rng(); - ProverSetup::new(&mut rng, max_log_n) + ProverSetup::new(max_log_n) } pub fn test_setup_pair(max_log_n: usize) -> (ProverSetup, VerifierSetup) { diff --git a/tests/arkworks/non_square.rs b/tests/arkworks/non_square.rs index 8ce8d57..2254f6c 100644 --- a/tests/arkworks/non_square.rs +++ b/tests/arkworks/non_square.rs @@ -2,12 +2,11 @@ use super::*; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, setup, verify}; +use dory_pcs::{prove, setup, verify, Transparent}; #[test] fn test_non_square_matrix_nu_eq_sigma_minus_1() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); // nu = 3, sigma = 4 => 2^3 x 2^4 = 8 rows x 16 columns = 128 coefficients let nu = 3; @@ -18,15 +17,16 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly 
+ .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -51,8 +51,7 @@ fn test_non_square_matrix_nu_eq_sigma_minus_1() { #[test] fn test_non_square_matrix_nu_greater_than_sigma_rejected() { - let mut rng = rand::thread_rng(); - let (prover_setup, _verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, _verifier_setup) = setup::(10); // nu = 4, sigma = 3 => This should be rejected let nu = 4; @@ -63,15 +62,16 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (_, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (_, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof_result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let proof_result = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -86,8 +86,7 @@ fn test_non_square_matrix_nu_greater_than_sigma_rejected() { #[test] fn test_non_square_matrix_small() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 6); + let (prover_setup, verifier_setup) = setup::(6); // nu = 2, sigma = 3 => 2^2 x 2^3 = 4 rows x 8 columns = 32 coefficients let nu = 2; @@ -98,15 +97,16 @@ fn test_non_square_matrix_small() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let 
mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, @@ -134,8 +134,7 @@ fn test_non_square_matrix_small() { #[test] fn test_non_square_matrix_very_rectangular() { - let mut rng = rand::thread_rng(); - let (prover_setup, verifier_setup) = setup::(&mut rng, 10); + let (prover_setup, verifier_setup) = setup::(10); // nu = 2, sigma = 5 => 2^2 x 2^5 = 4 rows x 32 columns = 128 coefficients // This is much "less square" than nu = sigma - 1 @@ -147,15 +146,16 @@ fn test_non_square_matrix_very_rectangular() { let poly = random_polynomial(poly_size); let point = random_point(num_vars); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .expect("Commitment should succeed"); let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, diff --git a/tests/arkworks/serialization.rs b/tests/arkworks/serialization.rs new file mode 100644 index 0000000..4e454a3 --- /dev/null +++ b/tests/arkworks/serialization.rs @@ -0,0 +1,245 @@ +//! 
Proof serialization round-trip tests + +use super::*; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use dory_pcs::backends::arkworks::ArkDoryProof; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{prove, verify, Transparent}; + +fn make_transparent_proof() -> ( + ArkDoryProof, + dory_pcs::backends::arkworks::ArkGT, + Vec, +) { + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1, commit_blind) = poly + .commit::(2, 2, &setup) + .unwrap(); + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( + &poly, + &point, + tier_1, + commit_blind, + 2, + 2, + &setup, + &mut transcript, + ) + .unwrap(); + + // Sanity: verify before serialization + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + eval, + &point, + &proof, + verifier_setup, + &mut vt, + ) + .unwrap(); + + (proof, tier_2, point) +} + +#[test] +fn test_transparent_proof_roundtrip_compressed() { + let (proof, _, _) = make_transparent_proof(); + + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + assert_eq!(buf.len(), proof.compressed_size()); + + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + + assert_eq!(proof.nu, decoded.nu); + assert_eq!(proof.sigma, decoded.sigma); + assert_eq!(proof.first_messages.len(), decoded.first_messages.len()); + assert_eq!(proof.second_messages.len(), decoded.second_messages.len()); +} + +#[test] +fn test_transparent_proof_roundtrip_uncompressed() { + let (proof, _, _) = make_transparent_proof(); + + let mut buf = Vec::new(); + proof.serialize_uncompressed(&mut buf).unwrap(); + assert_eq!(buf.len(), proof.uncompressed_size()); + + let decoded = ArkDoryProof::deserialize_uncompressed(&buf[..]).unwrap(); + assert_eq!(proof.nu, decoded.nu); + 
assert_eq!(proof.sigma, decoded.sigma); +} + +#[test] +fn test_transparent_proof_roundtrip_verifies() { + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1, commit_blind) = poly + .commit::(2, 2, &setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( + &poly, + &point, + tier_1, + commit_blind, + 2, + 2, + &setup, + &mut transcript, + ) + .unwrap(); + + // Round-trip through serialization + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + + // Verify the deserialized proof + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + eval, + &point, + &decoded, + verifier_setup, + &mut vt, + ) + .unwrap(); +} + +#[cfg(feature = "zk")] +mod zk_roundtrip { + use super::*; + use dory_pcs::{prove, verify, ZK}; + + fn make_zk_proof() -> ( + ArkDoryProof, + dory_pcs::backends::arkworks::ArkGT, + Vec, + ) { + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1, commit_blind) = poly + .commit::(2, 2, &setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + 2, + 2, + &setup, + &mut transcript, + ) + .unwrap(); + + // Sanity: ZK fields must be populated + assert!(proof.e2.is_some()); + assert!(proof.y_com.is_some()); + assert!(proof.sigma1_proof.is_some()); + assert!(proof.sigma2_proof.is_some()); + assert!(proof.scalar_product_proof.is_some()); + + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + eval, 
+ &point, + &proof, + verifier_setup, + &mut vt, + ) + .unwrap(); + + (proof, tier_2, point) + } + + #[test] + fn test_zk_proof_roundtrip_compressed() { + let (proof, _, _) = make_zk_proof(); + + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + assert_eq!(buf.len(), proof.compressed_size()); + + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + assert_eq!(proof.nu, decoded.nu); + assert_eq!(proof.sigma, decoded.sigma); + assert!(decoded.e2.is_some()); + assert!(decoded.y_com.is_some()); + assert!(decoded.sigma1_proof.is_some()); + assert!(decoded.sigma2_proof.is_some()); + assert!(decoded.scalar_product_proof.is_some()); + } + + #[test] + fn test_zk_proof_roundtrip_verifies() { + let (setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(16); + let point = random_point(4); + let (tier_2, tier_1, commit_blind) = poly + .commit::(2, 2, &setup) + .unwrap(); + + let mut transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + 2, + 2, + &setup, + &mut transcript, + ) + .unwrap(); + + let mut buf = Vec::new(); + proof.serialize_compressed(&mut buf).unwrap(); + let decoded = ArkDoryProof::deserialize_compressed(&buf[..]).unwrap(); + + let eval = poly.evaluate(&point); + let mut vt = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + eval, + &point, + &decoded, + verifier_setup, + &mut vt, + ) + .unwrap(); + } + + #[test] + fn test_zk_proof_larger_size_than_transparent() { + let (zk_proof, _, _) = make_zk_proof(); + let (transparent_proof, _, _) = super::make_transparent_proof(); + + let zk_size = zk_proof.compressed_size(); + let transparent_size = transparent_proof.compressed_size(); + + assert!( + zk_size > transparent_size, + "ZK proof ({zk_size}) should be larger than transparent ({transparent_size})" + ); + } +} diff --git a/tests/arkworks/setup.rs 
b/tests/arkworks/setup.rs index bb02021..4bcbcfd 100644 --- a/tests/arkworks/setup.rs +++ b/tests/arkworks/setup.rs @@ -57,6 +57,7 @@ fn test_setup_consistency() { } #[test] +#[cfg(feature = "disk-persistence")] fn test_setup_disk_persistence() { use dory_pcs::backends::arkworks::BN254; use dory_pcs::setup::{load_setup, save_setup}; @@ -88,18 +89,16 @@ fn test_setup_disk_persistence() { } #[test] +#[cfg(feature = "disk-persistence")] fn test_setup_function_uses_disk() { use dory_pcs::backends::arkworks::BN254; use dory_pcs::{generate_urs, setup}; - use rand::thread_rng; - - let mut rng = thread_rng(); let max_log_n = 11; - let (prover1, verifier1) = generate_urs::(&mut rng, max_log_n); + let (prover1, verifier1) = generate_urs::(max_log_n); - let (prover2, verifier2) = setup::(&mut rng, max_log_n); + let (prover2, verifier2) = setup::(max_log_n); assert_eq!(prover1.g1_vec[0], prover2.g1_vec[0]); assert_eq!(prover1.g2_vec[0], prover2.g2_vec[0]); @@ -110,12 +109,10 @@ fn test_setup_function_uses_disk() { fn test_arkworks_setup_canonical_serialization() { use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use dory_pcs::backends::arkworks::{ArkworksProverSetup, ArkworksVerifierSetup}; - use rand::thread_rng; - let mut rng = thread_rng(); let max_log_n = 6; - let prover = ArkworksProverSetup::new(&mut rng, max_log_n); + let prover = ArkworksProverSetup::new(max_log_n); let verifier = prover.to_verifier_setup(); let mut prover_bytes = Vec::new(); @@ -150,9 +147,7 @@ fn test_arkworks_setup_canonical_serialization() { fn test_arkworks_setup_new_from_urs() { use dory_pcs::backends::arkworks::ArkworksProverSetup; use dory_pcs::{backends::arkworks::BN254, generate_urs}; - use rand::thread_rng; - let mut rng = thread_rng(); let max_log_n = 14; // Clean up any existing cache file first @@ -194,9 +189,9 @@ fn test_arkworks_setup_new_from_urs() { let _ = std::fs::remove_file(&cache_file); } - let (prover1, _) = generate_urs::(&mut rng, max_log_n); + let (prover1, _) = 
generate_urs::(max_log_n); - let prover2 = ArkworksProverSetup::new_from_urs(&mut rng, max_log_n); + let prover2 = ArkworksProverSetup::new_from_urs(max_log_n); // Verify they match (proving it loaded from disk) assert_eq!( diff --git a/tests/arkworks/soundness.rs b/tests/arkworks/soundness.rs index 0078615..27a1097 100644 --- a/tests/arkworks/soundness.rs +++ b/tests/arkworks/soundness.rs @@ -5,7 +5,7 @@ use ark_bn254::{Fq12, Fr, G1Projective, G2Projective}; use ark_ff::UniformRand; use dory_pcs::backends::arkworks::{ArkFr, ArkG1, ArkG2, ArkGT}; use dory_pcs::primitives::poly::Polynomial; -use dory_pcs::{prove, verify}; +use dory_pcs::{prove, verify, Transparent}; use std::mem::swap; #[allow(clippy::type_complexity)] @@ -27,15 +27,15 @@ fn create_valid_proof_components( let poly = random_polynomial(size); let point = random_point(nu + sigma); - let (tier_2, tier_1) = poly - .commit::(nu, sigma, &prover_setup) + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) .unwrap(); - let mut prover_transcript = fresh_transcript(); - let proof = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _>( + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, Transparent>( &poly, &point, tier_1, + commit_blind, nu, sigma, &prover_setup, diff --git a/tests/arkworks/zk.rs b/tests/arkworks/zk.rs new file mode 100644 index 0000000..9915a6d --- /dev/null +++ b/tests/arkworks/zk.rs @@ -0,0 +1,611 @@ +//! 
Zero-knowledge mode tests for Dory PCS + +use super::*; +use ark_bn254::{Fq12, Fr, G1Projective, G2Projective}; +use ark_ff::UniformRand; +use dory_pcs::backends::arkworks::{ArkFr, ArkG1, ArkG2, ArkGT}; +use dory_pcs::primitives::poly::Polynomial; +use dory_pcs::{create_evaluation_proof, prove, setup, verify, ZK}; + +#[test] +fn test_zk_full_workflow() { + let max_log_n = 10; + + let (prover_setup, verifier_setup) = setup::(max_log_n); + + let poly = random_polynomial(256); + let nu = 4; + let sigma = 4; + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(8); + let expected_evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + let evaluation = poly.evaluate(&point); + assert_eq!(evaluation, expected_evaluation); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!(result.is_ok(), "ZK proof verification failed: {:?}", result); +} + +#[test] +fn test_zk_small_polynomial() { + let (prover_setup, verifier_setup) = test_setup_pair(4); + + let poly = random_polynomial(4); + let nu = 1; + let sigma = 1; + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(2); + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = 
verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK small polynomial test failed: {:?}", + result + ); +} + +#[test] +fn test_zk_larger_polynomial() { + let (prover_setup, verifier_setup) = setup::(12); + + let poly = random_polynomial(1024); + let nu = 5; + let sigma = 5; + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(10); + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK larger polynomial test failed: {:?}", + result + ); +} + +#[test] +fn test_zk_non_square_matrix() { + let (prover_setup, verifier_setup) = setup::(10); + + // Non-square: nu=3, sigma=4 (8 rows, 16 columns = 128 coefficients) + let poly = random_polynomial(128); + let nu = 3; + let sigma = 4; + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(7); // nu + sigma = 7 + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + 
verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK non-square matrix test failed: {:?}", + result + ); +} + +#[test] +fn test_zk_hidden_evaluation() { + let (prover_setup, verifier_setup) = test_setup_pair(6); + + let poly = random_polynomial(16); + let nu = 2; + let sigma = 2; + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(4); + let evaluation = poly.evaluate(&point); + + // Create ZK proof using unified API with ZK mode + let mut prover_transcript = fresh_transcript(); + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + Some(tier_1), + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + + assert!(proof.y_com.is_some(), "ZK proof should contain y_com"); + assert!(proof.e2.is_some(), "ZK proof should contain e2"); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK hidden evaluation proof verification failed: {:?}", + result + ); +} + +/// Test that tampered e2 in proof is rejected +#[test] +fn test_zk_tampered_e2_rejected() { + use dory_pcs::primitives::arithmetic::Group; + + let (prover_setup, verifier_setup) = test_setup_pair(6); + + let poly = random_polynomial(16); + let nu = 2; + let sigma = 2; + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(4); + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let (mut proof, _) = + create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + Some(tier_1), + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + + 
if let Some(ref mut e2) = proof.e2 { + *e2 = *e2 + prover_setup.h2.scale(&ArkFr::from_u64(42)); + } + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!(result.is_err(), "Verification should fail with tampered e2"); +} + +/// Test full ZK with larger polynomial +#[test] +fn test_zk_hidden_evaluation_larger() { + let (prover_setup, verifier_setup) = setup::(10); + + let poly = random_polynomial(256); + let nu = 4; + let sigma = 4; + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + + let point = random_point(8); + let evaluation = poly.evaluate(&point); + + let mut prover_transcript = fresh_transcript(); + let (proof, _) = create_evaluation_proof::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + Some(tier_1), + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + + let mut verifier_transcript = fresh_transcript(); + let result = verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + tier_2, + evaluation, + &point, + &proof, + verifier_setup, + &mut verifier_transcript, + ); + + assert!( + result.is_ok(), + "ZK hidden evaluation (larger) failed: {:?}", + result + ); +} + +// --------------------------------------------------------------------------- +// ZK Soundness Tests +// --------------------------------------------------------------------------- + +#[allow(clippy::type_complexity)] +fn create_valid_zk_proof_components( + size: usize, + nu: usize, + sigma: usize, +) -> ( + VerifierSetup, + Vec, + ArkGT, + ArkFr, + DoryProof, +) { + let (prover_setup, verifier_setup) = test_setup_pair(nu + sigma + 2); + + let poly = random_polynomial(size); + let point = random_point(nu + sigma); + + let (tier_2, tier_1, commit_blind) = poly + .commit::(nu, sigma, &prover_setup) + .unwrap(); + 
let mut prover_transcript = fresh_transcript(); + let (proof, _) = prove::<_, BN254, TestG1Routines, TestG2Routines, _, _, ZK>( + &poly, + &point, + tier_1, + commit_blind, + nu, + sigma, + &prover_setup, + &mut prover_transcript, + ) + .unwrap(); + let evaluation = poly.evaluate(&point); + + (verifier_setup, point, tier_2, evaluation, proof) +} + +fn verify_tampered_zk_proof( + commitment: ArkGT, + evaluation: ArkFr, + point: &[ArkFr], + proof: &DoryProof, + verifier_setup: VerifierSetup, +) -> Result<(), dory_pcs::DoryError> { + let mut verifier_transcript = fresh_transcript(); + verify::<_, BN254, TestG1Routines, TestG2Routines, _>( + commitment, + evaluation, + point, + proof, + verifier_setup, + &mut verifier_transcript, + ) +} + +#[test] +fn test_zk_soundness_missing_sigma1_proof() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.sigma1_proof = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with missing sigma1_proof"); +} + +#[test] +fn test_zk_soundness_missing_sigma2_proof() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.sigma2_proof = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with missing sigma2_proof"); +} + +#[test] +fn test_zk_soundness_missing_scalar_product_proof() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.scalar_product_proof = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with missing scalar_product_proof" + ); +} + +#[test] +fn test_zk_soundness_partial_zk_e2_only() { + let (verifier_setup, point, commitment, 
evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.y_com = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with partial ZK fields (e2 only)" + ); +} + +#[test] +fn test_zk_soundness_partial_zk_ycom_only() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.e2 = None; + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with partial ZK fields (y_com only)" + ); +} + +#[test] +fn test_zk_soundness_tampered_sigma1_z1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut s) = proof.sigma1_proof { + s.z1 = ArkFr(Fr::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma1 z1"); +} + +#[test] +fn test_zk_soundness_tampered_sigma1_a1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut s) = proof.sigma1_proof { + s.a1 = ArkG2(G2Projective::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma1 a1"); +} + +#[test] +fn test_zk_soundness_tampered_sigma2_z1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut s) = proof.sigma2_proof { + s.z1 = ArkFr(Fr::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma2 
z1"); +} + +#[test] +fn test_zk_soundness_tampered_sigma2_a() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut s) = proof.sigma2_proof { + s.a = ArkGT(Fq12::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered sigma2 a"); +} + +#[test] +fn test_zk_soundness_tampered_sp_e1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut sp) = proof.scalar_product_proof { + sp.e1 = ArkG1(G1Projective::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with tampered scalar product e1" + ); +} + +#[test] +fn test_zk_soundness_tampered_sp_p1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut sp) = proof.scalar_product_proof { + sp.p1 = ArkGT(Fq12::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with tampered scalar product p1" + ); +} + +#[test] +fn test_zk_soundness_tampered_sp_r3() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + if let Some(ref mut sp) = proof.scalar_product_proof { + sp.r3 = ArkFr(Fr::rand(&mut rand::thread_rng())); + } + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!( + result.is_err(), + "Should fail with tampered scalar product r3" + ); +} + +#[test] +fn test_zk_soundness_tampered_vmv_c() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + 
create_valid_zk_proof_components(256, 4, 4); + + proof.vmv_message.c = ArkGT(Fq12::rand(&mut rand::thread_rng())); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered VMV c in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_vmv_d2() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.vmv_message.d2 = ArkGT(Fq12::rand(&mut rand::thread_rng())); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered VMV d2 in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_vmv_e1() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.vmv_message.e1 = ArkG1(G1Projective::rand(&mut rand::thread_rng())); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered VMV e1 in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_e2() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.e2 = Some(ArkG2(G2Projective::rand(&mut rand::thread_rng()))); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered e2 in ZK"); +} + +#[test] +fn test_zk_soundness_tampered_y_com() { + let (verifier_setup, point, commitment, evaluation, mut proof) = + create_valid_zk_proof_components(256, 4, 4); + + proof.y_com = Some(ArkG1(G1Projective::rand(&mut rand::thread_rng()))); + + let result = verify_tampered_zk_proof(commitment, evaluation, &point, &proof, verifier_setup); + assert!(result.is_err(), "Should fail with tampered y_com in ZK"); +}