Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 15 additions & 5 deletions jolt-core/src/poly/commitment/dory/dory_globals.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,10 @@ use allocative::Allocative;
use dory::backends::arkworks::{init_cache, ArkG1, ArkG2};
use std::sync::{
atomic::{AtomicU8, Ordering},
RwLock,
Mutex, MutexGuard, OnceLock, RwLock,
};
#[cfg(test)]
use std::{
sync::OnceLock,
time::{SystemTime, UNIX_EPOCH},
};
use std::time::{SystemTime, UNIX_EPOCH};

/// Dory matrix layout for OneHot polynomials.
///
Expand Down Expand Up @@ -185,6 +182,12 @@ pub struct DoryContextGuard {
previous_context: DoryContext,
}

/// RAII guard serializing access to the global Dory runtime.
///
/// Obtained via `DoryGlobals::acquire_runtime_guard`; the underlying
/// global mutex is held for the guard's lifetime and released on drop.
pub struct DoryRuntimeGuard {
// Holds the lock on the process-wide runtime mutex; never read, only dropped.
_guard: MutexGuard<'static, ()>,
}

/// Lazily-initialized global mutex backing [`DoryRuntimeGuard`].
static DORY_RUNTIME_GUARD: OnceLock<Mutex<()>> = OnceLock::new();

impl Drop for DoryContextGuard {
fn drop(&mut self) {
CURRENT_CONTEXT.store(self.previous_context as u8, Ordering::SeqCst);
Expand All @@ -195,6 +198,13 @@ impl Drop for DoryContextGuard {
pub struct DoryGlobals;

impl DoryGlobals {
/// Acquires exclusive access to the global Dory runtime.
///
/// Blocks until no other thread holds a [`DoryRuntimeGuard`]; the lock is
/// released when the returned guard is dropped.
pub fn acquire_runtime_guard() -> DoryRuntimeGuard {
    let mutex = DORY_RUNTIME_GUARD.get_or_init(|| Mutex::new(()));
    DoryRuntimeGuard {
        // Recover from a poisoned mutex rather than panicking: a panic
        // while a guard was held indicates one failed proving attempt,
        // but the protected state is a unit value with no invariants, so
        // poisoning must not permanently brick every later acquisition.
        _guard: mutex.lock().unwrap_or_else(|poisoned| poisoned.into_inner()),
    }
}

#[cfg(test)]
pub(crate) fn configure_test_cache_root() {
static TEST_CACHE_ROOT: OnceLock<()> = OnceLock::new();
Expand Down
95 changes: 72 additions & 23 deletions jolt-core/src/poly/opening_proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ use crate::{
};
use allocative::Allocative;
use num_derive::FromPrimitive;
#[cfg(test)]
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
Expand All @@ -22,6 +21,7 @@ use super::{
use crate::{
field::JoltField,
transcripts::Transcript,
utils::errors::ProofVerifyError,
zkvm::witness::{CommittedPolynomial, VirtualPolynomial},
};

Expand Down Expand Up @@ -212,9 +212,7 @@ where
opening_ids_by_poly: BTreeMap<PolynomialId, Vec<OpeningId>>,
/// Maps an `OpeningId` that was deduplicated to its canonical representative.
pub aliases: BTreeMap<OpeningId, OpeningId>,
#[cfg(test)]
pub appended_virtual_openings: RefCell<Vec<OpeningId>>,
#[cfg(test)]
pub appended_committed_openings: RefCell<Vec<OpeningId>>,
pub log_T: usize,
#[allocative(skip)]
Expand Down Expand Up @@ -245,6 +243,8 @@ where
pub zk_mode: bool,
pending_claims: Vec<F>,
pending_claim_ids: Vec<OpeningId>,
missing_opening: RefCell<Option<OpeningId>>,
malformed_proof: RefCell<Option<String>>,
}

pub trait OpeningAccumulator<F: JoltField> {
Expand Down Expand Up @@ -343,7 +343,6 @@ impl<F: JoltField> OpeningAccumulator<F> for ProverOpeningAccumulator<F> {
.openings
.get(&key)
.unwrap_or_else(|| panic!("opening for {sumcheck:?} {polynomial:?} not found"));
#[cfg(test)]
{
let mut virtual_openings = self.appended_virtual_openings.borrow_mut();
if let Some(index) = virtual_openings.iter().position(|id| id == &key) {
Expand All @@ -364,7 +363,6 @@ impl<F: JoltField> OpeningAccumulator<F> for ProverOpeningAccumulator<F> {
.openings
.get(&key)
.unwrap_or_else(|| panic!("opening for {sumcheck:?} {polynomial:?} not found"));
#[cfg(test)]
{
let mut committed_openings = self.appended_committed_openings.borrow_mut();
if let Some(index) = committed_openings.iter().position(|id| id == &key) {
Expand All @@ -385,7 +383,6 @@ impl<F: JoltField> OpeningAccumulator<F> for ProverOpeningAccumulator<F> {
};
let key = self.resolve_alias(opening_id);
let (point, claim) = self.openings.get(&key)?;
#[cfg(test)]
{
let mut committed_openings = self.appended_committed_openings.borrow_mut();
if let Some(index) = committed_openings.iter().position(|id| id == &key) {
Expand All @@ -405,9 +402,7 @@ where
openings: BTreeMap::new(),
opening_ids_by_poly: BTreeMap::new(),
aliases: BTreeMap::new(),
#[cfg(test)]
appended_virtual_openings: std::cell::RefCell::new(vec![]),
#[cfg(test)]
appended_committed_openings: std::cell::RefCell::new(vec![]),
log_T,
pending_claims: Vec::new(),
Expand Down Expand Up @@ -465,7 +460,6 @@ where
point: OpeningPoint<BIG_ENDIAN, F>,
claim: F,
) -> bool {
#[cfg(test)]
let should_track_committed =
matches!(underlying_polynomial_id(key), PolynomialId::Committed(_))
&& !point.r.is_empty();
Expand All @@ -488,7 +482,6 @@ where
self.pending_claim_ids.push(key);
self.openings.insert(key, (point, claim));
self.index_opening_id(key);
#[cfg(test)]
if should_track_committed {
self.appended_committed_openings.borrow_mut().push(key);
}
Expand Down Expand Up @@ -538,7 +531,6 @@ where
) {
let key = OpeningId::virt(polynomial, sumcheck);
if self.insert_or_alias_opening(key, opening_point, claim) {
#[cfg(test)]
self.appended_virtual_openings.borrow_mut().push(key);
}
}
Expand Down Expand Up @@ -577,6 +569,15 @@ where
pub fn take_pending_claim_ids(&mut self) -> Vec<OpeningId> {
std::mem::take(&mut self.pending_claim_ids)
}

/// Panics if any appended opening (virtual or committed) was never
/// consumed by an opening proof.
pub fn assert_all_openings_consumed(&self) {
    let missing_virtual = self.appended_virtual_openings.borrow();
    let missing_committed = self.appended_committed_openings.borrow();
    // Both tracking lists must have been fully drained by the prover.
    if !missing_virtual.is_empty() || !missing_committed.is_empty() {
        panic!(
            "Not all openings have been proven. Missing virtual: {missing_virtual:?}. Missing committed: {missing_committed:?}",
        );
    }
}
}

impl<F> Default for VerifierOpeningAccumulator<F>
Expand All @@ -596,11 +597,10 @@ impl<F: JoltField> OpeningAccumulator<F> for VerifierOpeningAccumulator<F> {
) -> (OpeningPoint<BIG_ENDIAN, F>, F) {
let requested = OpeningId::Polynomial(PolynomialId::Virtual(polynomial), sumcheck);
let key = self.resolve_alias(requested);
let (point, claim) = self
.openings
self.openings
.get(&key)
.unwrap_or_else(|| panic!("No opening found for {sumcheck:?} {polynomial:?}"));
(point.clone(), *claim)
.map(|(point, claim)| (point.clone(), *claim))
.unwrap_or_else(|| self.record_missing_opening(key))
}

fn get_committed_polynomial_opening(
Expand All @@ -610,11 +610,10 @@ impl<F: JoltField> OpeningAccumulator<F> for VerifierOpeningAccumulator<F> {
) -> (OpeningPoint<BIG_ENDIAN, F>, F) {
let requested = OpeningId::Polynomial(PolynomialId::Committed(polynomial), sumcheck);
let key = self.resolve_alias(requested);
let (point, claim) = self
.openings
self.openings
.get(&key)
.unwrap_or_else(|| panic!("No opening found for {sumcheck:?} {polynomial:?}"));
(point.clone(), *claim)
.map(|(point, claim)| (point.clone(), *claim))
.unwrap_or_else(|| self.record_missing_opening(key))
}

fn get_advice_opening(
Expand Down Expand Up @@ -647,7 +646,31 @@ where
zk_mode,
pending_claims: Vec::new(),
pending_claim_ids: Vec::new(),
missing_opening: RefCell::new(None),
malformed_proof: RefCell::new(None),
}
}

fn record_missing_opening(&self, key: OpeningId) -> (OpeningPoint<BIG_ENDIAN, F>, F) {
let mut missing = self.missing_opening.borrow_mut();
if missing.is_none() {
*missing = Some(key);
}
let dummy_len = self.log_T.max(1) + 128;
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[Score 25/100 — Low] Magic number 128 in dummy point length.

The + 128 ensures the dummy point is long enough for any downstream split_at(log_K) call (since LOG_K for RV64 is at most 128). Consider documenting this derivation or using a named constant to make the intent clear and prevent it from silently becoming wrong if max address-space sizes change.

(
OpeningPoint::new(vec![F::Challenge::default(); dummy_len]),
F::zero(),
)
}

/// Drains any deferred verification failure recorded while processing the
/// current stage. A malformed-proof condition takes precedence over a
/// missing opening; returns `Ok(())` when neither was recorded.
pub fn take_missing_opening_error(&self) -> Result<(), ProofVerifyError> {
    match self.malformed_proof.borrow_mut().take() {
        Some(message) => Err(ProofVerifyError::MalformedProof(message)),
        None => match self.missing_opening.borrow_mut().take() {
            Some(opening_id) => {
                Err(ProofVerifyError::MissingOpening(format!("{opening_id:?}")))
            }
            None => Ok(()),
        },
    }
}

fn resolve_alias(&self, mut key: OpeningId) -> OpeningId {
Expand Down Expand Up @@ -702,10 +725,15 @@ where
self.find_existing_opening_at_point(underlying_polynomial_id(key), &point)
{
if existing_id != key {
assert_eq!(
*claim, existing_claim,
"Inconsistent duplicate opening claims: {key:?} vs {existing_id:?}"
);
if *claim != existing_claim {
let mut malformed = self.malformed_proof.borrow_mut();
if malformed.is_none() {
*malformed = Some(format!(
"inconsistent duplicate opening claims: {key:?} vs {existing_id:?}"
));
}
return;
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[Score 50/100 — Medium] Defense-in-depth: inconsistent opening leaves stale value accessible.

When inconsistent duplicate claims are detected, this records the MalformedProof error and returns without inserting or aliasing the new key. The original opening (at existing_id) remains in the accumulator with its first-seen claim value.

This is sound — take_missing_opening_error() catches the MalformedProof after the stage, and sumcheck/Fiat-Shamir binding independently rejects. However, as defense-in-depth, consider also poisoning the existing opening so that any subsequent get_* for either key triggers record_missing_opening:

// After recording malformed error:
self.openings.remove(&existing_id);

This ensures the verifier never uses a claim value from a proof with internally inconsistent openings, even within a single stage.

}
self.aliases.insert(key, existing_id);
return;
}
Expand Down Expand Up @@ -847,3 +875,24 @@ pub fn compute_advice_lagrange_factor<F: JoltField>(
})
.product()
}

#[cfg(test)]
mod tests {
    use super::*;
    use ark_bn254::Fr;

    /// Requesting an opening that was never populated must not panic;
    /// instead the accumulator records the miss and reports it as a
    /// typed `MissingOpening` error naming the offending polynomial.
    #[test]
    fn verifier_accumulator_reports_missing_opening() {
        let acc = VerifierOpeningAccumulator::<Fr>::new(4, false);

        // Never-populated opening: returns a dummy value internally.
        let _ = acc.get_committed_polynomial_opening(
            CommittedPolynomial::InstructionRa(0),
            SumcheckId::HammingWeightClaimReduction,
        );

        let result = acc.take_missing_opening_error();
        assert!(matches!(
            result,
            Err(ProofVerifyError::MissingOpening(message))
            if message.contains("InstructionRa(0)")
        ));
    }
}
10 changes: 10 additions & 0 deletions jolt-core/src/utils/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,22 @@ pub enum ProofVerifyError {
InvalidKeyLength(usize),
#[error("Invalid opening proof -- the proof failed to verify")]
InvalidOpeningProof,
#[error("Invalid trace_length: {0}")]
InvalidTraceLength(usize),
#[error("trace_length {0} exceeds preprocessing maximum {1}")]
TraceLengthTooLarge(usize, usize),
#[error("Invalid read-write checking configuration: {0}")]
InvalidReadWriteConfig(String),
#[error("Invalid one-hot configuration: {0}")]
InvalidOneHotConfig(String),
#[error("Invalid ram_K: got {0}, minimum required {1}")]
InvalidRamK(usize, usize),
#[error("Invalid ram_K: got {0}, maximum supported {1}")]
RamKTooLarge(usize, usize),
#[error("Malformed proof: {0}")]
MalformedProof(String),
#[error("Missing opening: {0}")]
MissingOpening(String),
#[error("Dory proof verification failed: {0}")]
DoryError(String),
#[error("Sumcheck verification failed")]
Expand Down
4 changes: 2 additions & 2 deletions jolt-core/src/zkvm/claim_reductions/ram_ra.rs
Original file line number Diff line number Diff line change
Expand Up @@ -629,8 +629,8 @@ impl<F: JoltField> RaReductionParams<F> {
let (r_address_val, r_cycle_val) = r_val.split_at_r(log_K);

// Verify unified address (these should hold by construction after Stage 2 alignment).
debug_assert_eq!(r_address_raf, r_address_rw);
debug_assert_eq!(r_address_raf, r_address_val);
assert_eq!(r_address_raf, r_address_rw);
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[Score 50/100 — Medium] Interaction with record_missing_opening dummy values.

Upgrading debug_assert_eq!assert_eq! is the right call for release-mode safety. However, record_missing_opening (opening_proof.rs:654) now returns dummy all-zero points when an opening is missing. If a malformed proof is missing one of the three RamRa openings (but not all three), the dummy's all-zero r_address won't match the real openings' r_address, causing this assert_eq! to panic before take_missing_opening_error() gets a chance to surface the typed MissingOpening error.

The verifier still safely rejects (panic = rejection), but this violates the intent of the deferred error pattern introduced in this PR. In practice, the three RamRa openings are populated by stages 2-4, and take_missing_opening_error() after each stage would catch them first — so this is only reachable if sumcheck verification in stages 2-4 somehow succeeds with dummy values.

Consider either:

  • Guarding with a fallible check (if r_address_raf != r_address_rw { ... }) that returns a dummy Self so take_missing_opening_error() can surface the real error, or
  • Adding a comment that this path is unreachable due to inter-stage error checks

assert_eq!(r_address_raf, r_address_val);

// Sample γ for combining claims
let gamma: F = transcript.challenge_scalar();
Expand Down
28 changes: 28 additions & 0 deletions jolt-core/src/zkvm/mod.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
use std::fs::File;

use crate::poly::commitment::dory::DoryLayout;
use crate::zkvm::config::OneHotParams;
use crate::zkvm::config::{OneHotConfig, ReadWriteConfig};
use crate::zkvm::witness::CommittedPolynomial;
use crate::{
curve::Bn254Curve,
Expand Down Expand Up @@ -166,11 +168,15 @@ where
}

/// Absorb public instance data into the transcript for Fiat-Shamir.
#[allow(clippy::too_many_arguments)]
pub fn fiat_shamir_preamble(
program_io: &JoltDevice,
ram_K: usize,
trace_length: usize,
entry_address: u64,
rw_config: &ReadWriteConfig,
one_hot_config: &OneHotConfig,
dory_layout: DoryLayout,
transcript: &mut impl Transcript,
) {
transcript.append_u64(b"max_input_size", program_io.memory_layout.max_input_size);
Expand All @@ -182,6 +188,28 @@ pub fn fiat_shamir_preamble(
transcript.append_u64(b"ram_K", ram_K as u64);
transcript.append_u64(b"trace_length", trace_length as u64);
transcript.append_u64(b"entry_address", entry_address);
transcript.append_u64(
b"ram_rw_phase1_num_rounds",
rw_config.ram_rw_phase1_num_rounds as u64,
);
transcript.append_u64(
b"ram_rw_phase2_num_rounds",
rw_config.ram_rw_phase2_num_rounds as u64,
);
transcript.append_u64(
b"registers_rw_phase1_num_rounds",
rw_config.registers_rw_phase1_num_rounds as u64,
);
transcript.append_u64(
b"registers_rw_phase2_num_rounds",
rw_config.registers_rw_phase2_num_rounds as u64,
);
transcript.append_u64(b"log_k_chunk", one_hot_config.log_k_chunk as u64);
transcript.append_u64(
b"lookups_ra_virtual_log_k_chunk",
one_hot_config.lookups_ra_virtual_log_k_chunk as u64,
);
transcript.append_u64(b"dory_layout", u8::from(dory_layout) as u64);
}

#[cfg(feature = "prover")]
Expand Down
Loading