From a4741134cab77332d1c86aae24afa8f388879494 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 30 Mar 2026 10:38:01 +0200 Subject: [PATCH 1/6] Merge next --- CHANGELOG.md | 1 + Cargo.lock | 1 + .../block-producer/src/batch_builder/mod.rs | 8 +- crates/block-producer/src/domain/batch.rs | 4 - .../block-producer/src/domain/transaction.rs | 14 +- crates/block-producer/src/errors.rs | 23 +-- crates/block-producer/src/mempool/budget.rs | 3 + .../block-producer/src/mempool/graph/batch.rs | 20 +- .../block-producer/src/mempool/graph/dag.rs | 53 +----- .../src/mempool/graph/transaction.rs | 174 +++++++++++++++--- crates/block-producer/src/mempool/mod.rs | 111 ++++++----- crates/block-producer/src/mempool/tests.rs | 22 +-- .../src/mempool/tests/add_transaction.rs | 26 +-- .../src/mempool/tests/add_user_batch.rs | 107 +++++++++++ crates/block-producer/src/server/mod.rs | 59 +++--- crates/rpc/Cargo.toml | 1 + crates/rpc/src/server/api.rs | 89 ++++++--- proto/proto/internal/block_producer.proto | 19 +- proto/proto/rpc.proto | 18 +- proto/proto/types/transaction.proto | 13 +- 20 files changed, 504 insertions(+), 262 deletions(-) create mode 100644 crates/block-producer/src/mempool/tests/add_user_batch.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 9926c5631d..8dd8f56216 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ - Added per-IP gRPC rate limiting across services as well as global concurrent connection limit ([#1746](https://github.com/0xMiden/node/issues/1746)). - Added limit to execution cycles for a transaction network, configurable through CLI args (`--ntx-builder.max-tx-cycles`) ([#1801](https://github.com/0xMiden/node/issues/1801)). - Added monitor version and network name to the network monitor dashboard, network name is configurable via `--network-name` / `MIDEN_MONITOR_NETWORK_NAME` ([#1838](https://github.com/0xMiden/node/pull/1838)). 
+- Users can now submit atomic transaction batches via `SubmitBatch` gRPC endpoint ([#1846](https://github.com/0xMiden/node/pull/1846)). ### Changes diff --git a/Cargo.lock b/Cargo.lock index 98018786d3..57c2845232 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3294,6 +3294,7 @@ dependencies = [ "miden-protocol", "miden-standards", "miden-tx", + "miden-tx-batch-prover", "reqwest", "rstest", "semver 1.0.27", diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index 34dab83a3f..549d76261f 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -200,12 +200,12 @@ impl BatchJob { batch: SelectedBatch, ) -> Result<(SelectedBatch, BatchInputs), BuildBatchError> { let block_references = batch - .txs() + .transactions() .iter() .map(Deref::deref) .map(AuthenticatedTransaction::reference_block); let unauthenticated_notes = batch - .txs() + .transactions() .iter() .map(Deref::deref) .flat_map(AuthenticatedTransaction::unauthenticated_note_commitments); @@ -325,10 +325,10 @@ impl BatchProver { impl TelemetryInjectorExt for SelectedBatch { fn inject_telemetry(&self) { Span::current().set_attribute("batch.id", self.id()); - Span::current().set_attribute("transactions.count", self.txs().len()); + Span::current().set_attribute("transactions.count", self.transactions().len()); // Accumulate all telemetry based on transactions. 
let (tx_ids, input_notes_count, output_notes_count, unauth_notes_count) = - self.txs().iter().fold( + self.transactions().iter().fold( (vec![], 0, 0, 0), |( mut tx_ids, diff --git a/crates/block-producer/src/domain/batch.rs b/crates/block-producer/src/domain/batch.rs index 0fd2029bfc..9c3cbb7e17 100644 --- a/crates/block-producer/src/domain/batch.rs +++ b/crates/block-producer/src/domain/batch.rs @@ -34,10 +34,6 @@ impl SelectedBatch { self.id } - pub(crate) fn txs(&self) -> &[Arc] { - &self.txs - } - pub(crate) fn into_transactions(self) -> Vec> { self.txs } diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index c67fb64291..e9580f3c72 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -46,7 +46,7 @@ impl AuthenticatedTransaction { /// /// Returns an error if any of the transaction's nullifiers are marked as spent by the inputs. pub fn new_unchecked( - tx: ProvenTransaction, + tx: Arc, inputs: TransactionInputs, ) -> Result { let nullifiers_already_spent = tx @@ -58,7 +58,7 @@ impl AuthenticatedTransaction { } Ok(AuthenticatedTransaction { - inner: Arc::new(tx), + inner: tx, notes_authenticated_by_store: inputs.found_unauthenticated_notes, authentication_height: inputs.current_block_height, store_account_state: inputs.account_commitment, @@ -128,6 +128,10 @@ impl AuthenticatedTransaction { pub fn expires_at(&self) -> BlockNumber { self.inner.expiration_block_num() } + + pub fn raw_proven_transaction(&self) -> &ProvenTransaction { + &self.inner + } } #[cfg(test)] @@ -151,7 +155,7 @@ impl AuthenticatedTransaction { current_block_height: 0.into(), }; // SAFETY: nullifiers were set to None aka are definitely unspent. - Self::new_unchecked(inner, inputs).unwrap() + Self::new_unchecked(Arc::new(inner), inputs).unwrap() } /// Overrides the authentication height with the given value. 
@@ -171,8 +175,4 @@ impl AuthenticatedTransaction { self.store_account_state = None; self } - - pub fn raw_proven_transaction(&self) -> &ProvenTransaction { - &self.inner - } } diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 2c4fc3745a..b008f30b06 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -35,12 +35,12 @@ pub enum BlockProducerError { }, } -// Transaction adding errors +// Add transaction and add user batch errors // ================================================================================================= #[derive(Debug, Error, GrpcError)] -pub enum AddTransactionError { - #[error("failed to retrieve transaction inputs from the store")] +pub enum MempoolSubmissionError { + #[error("failed to retrieve inputs from the store")] #[grpc(internal)] StoreConnectionFailed(#[source] StoreError), @@ -56,8 +56,8 @@ pub enum AddTransactionError { stale_limit: BlockNumber, }, - #[error("transaction deserialization failed")] - TransactionDeserializationFailed(#[source] DeserializationError), + #[error("request deserialization failed")] + DeserializationFailed(#[source] DeserializationError), #[error( "transaction expired at block height {expired_at} but the block height limit was {limit}" @@ -74,8 +74,9 @@ pub enum AddTransactionError { CapacityExceeded, } -// Submitted transaction conflicts with current state +// Mempool submission conflicts with current state // ================================================================================================= + #[derive(Debug, Error, PartialEq, Eq)] pub enum StateConflict { #[error("nullifiers already exist: {0:?}")] @@ -94,16 +95,6 @@ pub enum StateConflict { }, } -// Submit proven batch by user errors -// ================================================================================================= - -#[derive(Debug, Error, GrpcError)] -#[grpc(internal)] -pub enum SubmitProvenBatchError { - #[error("batch 
deserialization failed")] - Deserialization(#[source] DeserializationError), -} - // Batch building errors // ================================================================================================= diff --git a/crates/block-producer/src/mempool/budget.rs b/crates/block-producer/src/mempool/budget.rs index a4c9c2167d..27b04e6050 100644 --- a/crates/block-producer/src/mempool/budget.rs +++ b/crates/block-producer/src/mempool/budget.rs @@ -18,6 +18,9 @@ pub struct BatchBudget { /// Maximum number of output notes allowed. pub output_notes: usize, /// Maximum number of updated accounts. + /// + /// Authenticated transactions are assumed to update at most one account; this field enforces + /// how many such single-account updates can fit into a batch. pub accounts: usize, } diff --git a/crates/block-producer/src/mempool/graph/batch.rs b/crates/block-producer/src/mempool/graph/batch.rs index 678ce05902..39f50ee47a 100644 --- a/crates/block-producer/src/mempool/graph/batch.rs +++ b/crates/block-producer/src/mempool/graph/batch.rs @@ -21,15 +21,15 @@ impl GraphNode for SelectedBatch { type Id = BatchId; fn nullifiers(&self) -> Box + '_> { - Box::new(self.txs().iter().flat_map(|tx| tx.nullifiers())) + Box::new(self.transactions().iter().flat_map(|tx| tx.nullifiers())) } fn output_notes(&self) -> Box + '_> { - Box::new(self.txs().iter().flat_map(|tx| tx.output_note_commitments())) + Box::new(self.transactions().iter().flat_map(|tx| tx.output_note_commitments())) } fn unauthenticated_notes(&self) -> Box + '_> { - Box::new(self.txs().iter().flat_map(|tx| tx.unauthenticated_note_commitments())) + Box::new(self.transactions().iter().flat_map(|tx| tx.unauthenticated_note_commitments())) } fn account_updates( @@ -98,10 +98,14 @@ impl BatchGraph { /// /// Batches are returned in reverse-chronological order. 
pub fn revert_expired(&mut self, chain_tip: BlockNumber) -> Vec { - let reverted = self.inner.revert_expired_unselected(chain_tip); + // We only revert transactions which are _not_ included in batches. + let mut to_revert = self.inner.expired(chain_tip); + to_revert.retain(|batch| !self.inner.is_selected(batch)); - for batch in &reverted { - self.proven.remove(&batch.id()); + let mut reverted = Vec::with_capacity(to_revert.len()); + + for batch in to_revert { + reverted.extend_from_slice(&self.revert_batch_and_descendants(batch)); } reverted @@ -150,9 +154,9 @@ impl BatchGraph { /// /// Panics if the batch does not exist, or has existing ancestors in the batch /// graph. - pub fn prune(&mut self, batch: BatchId) { - self.inner.prune(batch); + pub fn prune(&mut self, batch: BatchId) -> SelectedBatch { self.proven.remove(&batch); + self.inner.prune(batch) } pub fn proven_count(&self) -> usize { diff --git a/crates/block-producer/src/mempool/graph/dag.rs b/crates/block-producer/src/mempool/graph/dag.rs index 2e10ef05a5..2d1cfc44a9 100644 --- a/crates/block-producer/src/mempool/graph/dag.rs +++ b/crates/block-producer/src/mempool/graph/dag.rs @@ -199,28 +199,13 @@ where reverted } - /// Reverts nodes (and their descendants) which have expired and which are _not_ selected. - /// - /// Returns the reverted nodes in **reverse** chronological order. - pub fn revert_expired_unselected(&mut self, chain_tip: BlockNumber) -> Vec { - let mut reverted = Vec::default(); - - let expired = self - .nodes + /// Returns the set of nodes that expired at the given block height. + pub fn expired(&self, chain_tip: BlockNumber) -> HashSet { + self.nodes .iter() - .filter(|(id, _)| !self.is_selected(id)) .filter_map(|(id, node)| (node.expires_at() <= chain_tip).then_some(id)) .copied() - .collect::>(); - - for id in expired { - // Its possible the node is already reverted by a previous loop iteration. 
- if self.contains(&id) { - reverted.extend(self.revert_node_and_descendants(id)); - } - } - - reverted + .collect() } /// Returns `true` if the given node is a leaf node aka has no children. @@ -233,14 +218,14 @@ where /// # Panics /// /// Panics if this node has any ancestor nodes, or if this node was not selected. - pub fn prune(&mut self, id: N::Id) { + pub fn prune(&mut self, id: N::Id) -> N { assert!( self.edges.parents_of(&id).is_empty(), "Cannot prune node {id} as it still has ancestors", ); assert!(self.selected.contains(&id), "Cannot prune node {id} as it was not selected"); - self.remove(id); + self.remove(id) } /// Unconditionally removes the given node from the graph, deleting its edges and state. @@ -331,30 +316,4 @@ mod tests { graph.selection_candidates().keys().map(|id| **id).collect(); assert_eq!(candidates_after_parent, vec![2]); } - - #[test] - fn revert_expired_unselected_removes_descendants() { - let mut graph = Graph::::default(); - - graph - .append( - TestNode::new(1).with_output_notes([1]).with_expires_at(BlockNumber::from(2u32)), - ) - .unwrap(); - graph - .append( - TestNode::new(2) - .with_output_notes([2]) - .with_unauthenticated_notes([1]) - .with_expires_at(BlockNumber::from(3u32)), - ) - .unwrap(); - - let reverted = graph.revert_expired_unselected(BlockNumber::from(3u32)); - let reverted_ids: Vec = reverted.into_iter().map(|node| node.id).collect(); - - assert_eq!(reverted_ids, vec![2, 1]); - assert_eq!(graph.node_count(), 0); - assert_eq!(graph.selection_candidates().len(), 0); - } } diff --git a/crates/block-producer/src/mempool/graph/transaction.rs b/crates/block-producer/src/mempool/graph/transaction.rs index 9e8d21a5e3..e6fe4593bc 100644 --- a/crates/block-producer/src/mempool/graph/transaction.rs +++ b/crates/block-producer/src/mempool/graph/transaction.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use miden_protocol::Word; use miden_protocol::account::AccountId; +use miden_protocol::batch::BatchId; use 
miden_protocol::block::BlockNumber; use miden_protocol::note::Nullifier; use miden_protocol::transaction::TransactionId; @@ -75,6 +76,9 @@ pub struct TransactionGraph { /// These are batch or block proving errors in which the transaction was a part of. This is /// used to identify potentially buggy transactions that should be evicted. failures: HashMap, + + user_batch_txs: HashMap>, + txs_user_batch: HashMap, } impl TransactionGraph { @@ -85,16 +89,100 @@ impl TransactionGraph { self.inner.append(tx) } - pub fn select_batch(&mut self, mut budget: BatchBudget) -> Option { + pub fn append_user_batch( + &mut self, + batch: &[Arc], + ) -> Result<(), StateConflict> { + let batch_id = + BatchId::from_transactions(batch.iter().map(|tx| tx.raw_proven_transaction())); + + // Append each transaction, but revert atomically on error. + for (idx, tx) in batch.iter().enumerate() { + if let Err(err) = self.append(Arc::clone(tx)) { + // We revert in reverse order because inner.revert panics if the node doesn't exist. + for tx in batch.iter().take(idx).rev() { + let reverted = self.inner.revert_node_and_descendants(tx.id()); + assert_eq!(reverted.len(), 1); + assert_eq!(&reverted[0], tx); + } + + return Err(err); + } + } + + let txs = batch.iter().map(GraphNode::id).collect::>(); + for tx in &txs { + self.txs_user_batch.insert(*tx, batch_id); + } + self.user_batch_txs.insert(batch_id, txs); + + Ok(()) + } + + pub fn select_batch(&mut self, budget: BatchBudget) -> Option { + self.select_user_batch().or_else(|| self.select_conventional_batch(budget)) + } + + fn select_user_batch(&mut self) -> Option { + // Comb through all user batch candidates. 
+ let candidate_batches = self + .inner + .selection_candidates() + .values() + .filter_map(|tx| self.txs_user_batch.get(&tx.id())) + .copied() + .collect::>(); + + 'outer: for candidate in candidate_batches { + let mut selected = SelectedBatch::builder(); + + let txs = self + .user_batch_txs + .get(&candidate) + .cloned() + .expect("bi-directional mapping should be coherent"); + + for tx in txs { + let Some(tx) = self.inner.selection_candidates().get(&tx).copied() else { + // Rollback this batch selection since it cannot complete. + for tx in selected.txs.into_iter().rev() { + self.inner.deselect(tx.id()); + } + + continue 'outer; + }; + let tx = Arc::clone(tx); + + self.inner.select_candidate(tx.id()); + selected.push(tx); + } + + assert!(!selected.is_empty(), "User batch should not be empty"); + return Some(selected.build()); + } + + None + } + + fn select_conventional_batch(&mut self, mut budget: BatchBudget) -> Option { let mut selected = SelectedBatch::builder(); - while let Some((id, tx)) = self.inner.selection_candidates().pop_first() { - if budget.check_then_subtract(tx) == BudgetStatus::Exceeded { + loop { + // Select arbitrary candidate which is _not_ part of a user batch. + let candidates = self.inner.selection_candidates(); + let Some(candidate) = + candidates.values().find(|tx| !self.txs_user_batch.contains_key(&tx.id())) + else { + break; + }; + + if budget.check_then_subtract(candidate) == BudgetStatus::Exceeded { break; } - selected.push(Arc::clone(tx)); - self.inner.select_candidate(*id); + let candidate = Arc::clone(candidate); + self.inner.select_candidate(candidate.id()); + selected.push(candidate); } if selected.is_empty() { @@ -112,39 +200,63 @@ impl TransactionGraph { /// /// Returns the identifiers of transactions that were removed from the graph. 
pub fn revert_expired(&mut self, chain_tip: BlockNumber) -> HashSet { - self.inner - .revert_expired_unselected(chain_tip) - .into_iter() - .map(|tx| tx.id()) - .collect() + // We only revert transactions which are _not_ included in batches. + let mut to_revert = self.inner.expired(chain_tip); + to_revert.retain(|tx| !self.inner.is_selected(tx)); + + let mut reverted = HashSet::with_capacity(to_revert.len()); + + for tx in to_revert { + reverted.extend(&self.revert_tx_and_descendants(tx)); + } + + reverted } /// Reverts the given transaction and _all_ its descendants _IFF_ it is present in the graph. /// /// This includes batches that have been marked as proven. /// - /// Returns the reverted batches in the _reverse_ chronological order they were appended in. + /// Returns the reverted transactions in the _reverse_ chronological order they were appended + /// in. pub fn revert_tx_and_descendants(&mut self, transaction: TransactionId) -> Vec { - // We need this check because `inner.revert..` panics if the node is unknown. - if !self.inner.contains(&transaction) { - return Vec::default(); - } + // This is a bit more involved because we also need to atomically revert user batches. + let mut to_revert = vec![transaction]; + let mut reverted = Vec::new(); + + while let Some(revert) = to_revert.pop() { + // We need this check because `inner.revert..` panics if the node is unknown. + // + // And this transaction might already have been reverted as part of descendants in a + // prior loop. + if !self.inner.contains(&revert) { + continue; + } - let reverted = self - .inner - .revert_node_and_descendants(transaction) - .into_iter() - .map(|tx| tx.id()) - .collect(); + let reverted_now = self.inner.revert_node_and_descendants(revert); + + // Clean up bookkeeping and also revert transactions from the same user batch, if any. + for tx in &reverted_now { + self.failures.remove(&tx.id()); + + // Note that this is a pretty roughshod approach. 
We just dump the entire batch of + // transactions in, which will result in at least the current + // transaction being duplicated in `to_revert`. This isn't a concern + // though since we skip already processed transactions at the top of the loop. + if let Some(batch) = self.txs_user_batch.remove(&tx.id()) { + if let Some(batch) = self.user_batch_txs.remove(&batch) { + to_revert.extend(batch); + } + } + } - for tx in &reverted { - self.failures.remove(tx); + reverted.extend(reverted_now.into_iter().map(|tx| tx.id())); } reverted } - /// Marks the batch's transactions are ready for selection again. + /// Marks the batch's transactions as ready for selection again. /// /// # Panics /// @@ -183,15 +295,19 @@ impl TransactionGraph { reverted } - /// Prunes the given transaction. + /// Prunes the given batch's transactions. /// /// # Panics /// - /// Panics if the transaction does not exist, or has existing ancestors in the transaction + /// Panics if the transactions do not exist, or have existing ancestors in the transaction /// graph. - pub fn prune(&mut self, transaction: TransactionId) { - self.inner.prune(transaction); - self.failures.remove(&transaction); + pub fn prune(&mut self, batch: &SelectedBatch) { + for tx in batch.transactions() { + self.inner.prune(tx.id()); + self.failures.remove(&tx.id()); + self.txs_user_batch.remove(&tx.id()); + } + self.user_batch_txs.remove(&batch.id()); } /// Number of transactions which have not been selected for inclusion in a batch. 
diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index db1fb79fe1..eff9952a9e 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -66,7 +66,8 @@ use tracing::instrument; use crate::block_builder::SelectedBlock; use crate::domain::batch::SelectedBatch; use crate::domain::transaction::AuthenticatedTransaction; -use crate::errors::{AddTransactionError, StateConflict}; +use crate::errors::{MempoolSubmissionError, StateConflict}; +use crate::mempool::budget::BudgetStatus; use crate::{ COMPONENT, DEFAULT_MEMPOOL_TX_CAPACITY, @@ -230,22 +231,61 @@ impl Mempool { pub fn add_transaction( &mut self, tx: Arc, - ) -> Result { + ) -> Result { if self.unbatched_transactions_count() >= self.config.tx_capacity.get() { - return Err(AddTransactionError::CapacityExceeded); + return Err(MempoolSubmissionError::CapacityExceeded); } self.authentication_staleness_check(tx.authentication_height())?; self.expiration_check(tx.expires_at())?; + + // Insert the transaction node. self.transactions .append(Arc::clone(&tx)) - .map_err(AddTransactionError::StateConflict)?; + .map_err(MempoolSubmissionError::StateConflict)?; self.subscription.transaction_added(&tx); self.inject_telemetry(); Ok(self.chain_tip) } + #[instrument(target = COMPONENT, name = "mempool.add_user_batch", skip_all)] + pub fn add_user_batch( + &mut self, + txs: &[Arc], + ) -> Result { + assert!(!txs.is_empty(), "Cannot have a batch with no transactions"); + + if self.unbatched_transactions_count() + txs.len() > self.config.tx_capacity.get() { + return Err(MempoolSubmissionError::CapacityExceeded); + } + + // Ensure the batch doesn't exceed the mempool budget for batches. + let mut budget = self.config.batch_budget; + for tx in txs { + if budget.check_then_subtract(tx) == BudgetStatus::Exceeded { + // TODO: better error plox. 
+ return Err(MempoolSubmissionError::CapacityExceeded); + } + } + + for tx in txs { + self.authentication_staleness_check(tx.authentication_height())?; + self.expiration_check(tx.expires_at())?; + } + + self.transactions + .append_user_batch(txs) + .map_err(MempoolSubmissionError::StateConflict)?; + + for tx in txs { + self.subscription.transaction_added(tx); + } + self.inject_telemetry(); + + Ok(self.chain_tip) + } + /// Returns a set of transactions for the next batch. /// /// Transactions are returned in a valid execution ordering. @@ -281,7 +321,7 @@ impl Mempool { self.transactions.requeue_transactions(reverted); } - // Find rolled back batch to mark its txs as failed. + // Find rolled back batch to mark the txs as failed. // // Note that its possible it doesn't exist, since this batch could have already been // reverted as part of a separate rollback. @@ -334,13 +374,13 @@ impl Mempool { /// Sends a [`MempoolEvent::BlockCommitted`] event to subscribers, as well as a /// [`MempoolEvent::TransactionsReverted`] for transactions that are now considered expired. /// - /// On success the internal state is updated in place: the chain tip advances, expired data - /// is pruned, and subscribers are notified about the committed block and any - /// reverted transactions. + /// On success the internal state is updated in place: the chain tip advances, expired data is + /// pruned, and subscribers are notified about the committed block and any reverted + /// transactions. /// /// # Panics /// - /// Panics if there is no block in flight. + /// Panics if there is no matching block in flight. #[instrument(target = COMPONENT, name = "mempool.commit_block", skip_all)] pub fn commit_block(&mut self, block_header: BlockHeader) { assert_eq!(self.chain_tip.child(), block_header.block_num()); @@ -369,31 +409,28 @@ impl Mempool { /// Notify the pool that construction of the in flight block failed. /// - /// The pool will purge the block and all of its contents from the pool. 
+ /// The block's batches are reverted and transactions are requeued for batch selection. + /// Additionally, the transactions from this block have their failure count incremented, + /// potentially reverting them if they exceed the failure limit. /// /// Sends a [`MempoolEvent::TransactionsReverted`] event to subscribers. /// - /// The in-flight block state and all related transactions are discarded, and subscribers - /// are notified about the reverted transactions. - /// /// # Panics /// - /// Panics if there is no block in flight. + /// Panics if there is no matching block in flight. #[instrument(target = COMPONENT, name = "mempool.rollback_block", skip_all)] pub fn rollback_block(&mut self, block: BlockNumber) { - // Only revert if the given block is actually inflight. - // // FIXME: We should consider a more robust check here to identify the block by a hash. // If multiple jobs are possible, then so are multiple variants with the same // block number. - if self.pending_block.as_ref().is_none_or(|pending| pending.block_number != block) { - return; - } + let block = self + .pending_block + .take_if(|pending| pending.block_number == block) + .expect("pending block must match block to rollback"); // Revert the batches, and requeue the transactions for batch selection. // // Transactions which have failed excessively are also reverted. - let block = self.pending_block.take().expect("we just checked it is some"); for batch in &block.batches { let reverted = self.batches.revert_batch_and_descendants(batch.id()); @@ -471,9 +508,6 @@ impl Mempool { span.set_attribute("mempool.output_notes", self.transactions.output_note_count()); } - /// Prunes the oldest locally retained block if the number of blocks exceeds the configured - /// limit. - /// /// This includes pruning the block's batches and transactions from their graphs. 
fn prune_oldest_block(&mut self) { if self.committed_blocks.len() <= self.config.state_retention.get() { @@ -490,16 +524,8 @@ impl Mempool { // // The same logic follows for transactions. for batch in block.batches.iter().map(|batch| batch.id()) { - self.batches.prune(batch); - } - - for tx in block - .batches - .iter() - .flat_map(|batch| batch.transactions().as_slice()) - .map(TransactionHeader::id) - { - self.transactions.prune(tx); + let batch = self.batches.prune(batch); + self.transactions.prune(&batch); } } @@ -507,8 +533,8 @@ impl Mempool { /// /// Expired batch descendants are also reverted since these are now invalid. /// - /// Transactions from batches are requeued. Expired transactions and their descendants are - /// then reverted as well. + /// Transactions from batches are requeued. Expired transactions and their descendants are then + /// reverted as well. fn revert_expired(&mut self) -> HashSet { let batches = self.batches.revert_expired(self.chain_tip); for batch in batches { @@ -520,9 +546,8 @@ impl Mempool { /// Rejects authentication heights that fall outside the overlap guaranteed by the locally /// retained state. /// - /// The acceptable window is `[chain_tip - state_retention + 1, chain_tip]`; values below - /// this range are rejected as stale because the mempool no longer tracks the - /// intermediate history. + /// The acceptable window is `[chain_tip - state_retention + 1, chain_tip]`; values below this + /// range are rejected as stale because the mempool no longer tracks the intermediate history. 
/// /// # Panics /// @@ -532,14 +557,14 @@ impl Mempool { fn authentication_staleness_check( &self, authentication_height: BlockNumber, - ) -> Result<(), AddTransactionError> { + ) -> Result<(), MempoolSubmissionError> { let limit = self .chain_tip .checked_sub(self.committed_blocks.len() as u32) - .expect("amount of committed blocks cannot exceed the chain tip"); + .expect("number of committed blocks cannot exceed the chain tip"); if authentication_height < limit { - return Err(AddTransactionError::StaleInputs { + return Err(MempoolSubmissionError::StaleInputs { input_block: authentication_height, stale_limit: limit, }); @@ -554,10 +579,10 @@ impl Mempool { Ok(()) } - fn expiration_check(&self, expired_at: BlockNumber) -> Result<(), AddTransactionError> { + fn expiration_check(&self, expired_at: BlockNumber) -> Result<(), MempoolSubmissionError> { let limit = self.chain_tip + self.config.expiration_slack; if expired_at <= limit { - return Err(AddTransactionError::Expired { expired_at, limit }); + return Err(MempoolSubmissionError::Expired { expired_at, limit }); } Ok(()) diff --git a/crates/block-producer/src/mempool/tests.rs b/crates/block-producer/src/mempool/tests.rs index 0d5001fba9..4d3f04eb5a 100644 --- a/crates/block-producer/src/mempool/tests.rs +++ b/crates/block-producer/src/mempool/tests.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use miden_protocol::Word; use miden_protocol::block::{BlockHeader, BlockNumber}; -use miden_protocol::transaction::TransactionHeader; use pretty_assertions::assert_eq; use serial_test::serial; @@ -12,6 +11,7 @@ use crate::test_utils::MockProvenTxBuilder; use crate::test_utils::batch::TransactionBatchConstructor; mod add_transaction; +mod add_user_batch; impl Mempool { /// Returns an empty [`Mempool`] and a perfect clone intended for use as the Unit Under Test and @@ -71,15 +71,15 @@ fn children_of_failed_batches_are_ignored() { let (mut uut, _) = Mempool::for_tests(); uut.add_transaction(txs[0].clone()).unwrap(); let parent_batch = 
uut.select_batch().unwrap(); - assert_eq!(parent_batch.txs(), vec![txs[0].clone()]); + assert_eq!(parent_batch.transactions(), vec![txs[0].clone()]); uut.add_transaction(txs[1].clone()).unwrap(); let child_batch_a = uut.select_batch().unwrap(); - assert_eq!(child_batch_a.txs(), vec![txs[1].clone()]); + assert_eq!(child_batch_a.transactions(), vec![txs[1].clone()]); uut.add_transaction(txs[2].clone()).unwrap(); let next_batch = uut.select_batch().unwrap(); - assert_eq!(next_batch.txs(), vec![txs[2].clone()]); + assert_eq!(next_batch.transactions(), vec![txs[2].clone()]); // Child batch jobs are now dangling. uut.rollback_batch(parent_batch.id()); @@ -118,7 +118,7 @@ fn failed_batch_transactions_are_requeued() { reference.add_transaction(txs[2].clone()).unwrap(); reference .transactions - .increment_failure_count(failed_batch.txs().iter().map(|tx| tx.id())); + .increment_failure_count(failed_batch.transactions().iter().map(|tx| tx.id())); assert_eq!(uut, reference); } @@ -350,9 +350,9 @@ fn pass_through_txs_on_an_empty_account() { // Ensure the batch contains a,b and final. Final should also be the last tx since its order // is required. - assert!(batch.txs().contains(&tx_pass_through_a)); - assert!(batch.txs().contains(&tx_pass_through_b)); - assert_eq!(batch.txs().last().unwrap(), &tx_final); + assert!(batch.transactions().contains(&tx_pass_through_a)); + assert!(batch.transactions().contains(&tx_pass_through_b)); + assert_eq!(batch.transactions().last().unwrap(), &tx_final); } /// Tests that pass through transactions retain parent-child relations based on notes, even though @@ -390,11 +390,11 @@ fn pass_through_txs_with_note_dependencies() { // relationship was correctly inferred by the mempool. 
uut.add_transaction(tx_pass_through_a.clone()).unwrap(); let batch_a = uut.select_batch().unwrap(); - assert_eq!(batch_a.txs(), std::slice::from_ref(&tx_pass_through_a)); + assert_eq!(batch_a.transactions(), std::slice::from_ref(&tx_pass_through_a)); uut.add_transaction(tx_pass_through_b.clone()).unwrap(); let batch_b = uut.select_batch().unwrap(); - assert_eq!(batch_b.txs(), std::slice::from_ref(&tx_pass_through_b)); + assert_eq!(batch_b.transactions(), std::slice::from_ref(&tx_pass_through_b)); // Rollback (a) and check that (b) also reverted by comparing to the reference. uut.rollback_batch(batch_a.id()); @@ -402,7 +402,7 @@ fn pass_through_txs_with_note_dependencies() { reference.add_transaction(tx_pass_through_b).unwrap(); reference .transactions - .increment_failure_count(batch_a.txs().iter().map(|tx| tx.id())); + .increment_failure_count(batch_a.transactions().iter().map(|tx| tx.id())); assert_eq!(uut, reference); } diff --git a/crates/block-producer/src/mempool/tests/add_transaction.rs b/crates/block-producer/src/mempool/tests/add_transaction.rs index 3f45a28438..dc73f317dc 100644 --- a/crates/block-producer/src/mempool/tests/add_transaction.rs +++ b/crates/block-producer/src/mempool/tests/add_transaction.rs @@ -5,7 +5,7 @@ use miden_protocol::Word; use miden_protocol::block::BlockHeader; use crate::domain::transaction::AuthenticatedTransaction; -use crate::errors::{AddTransactionError, StateConflict}; +use crate::errors::{MempoolSubmissionError, StateConflict}; use crate::mempool::Mempool; use crate::test_utils::{MockProvenTxBuilder, mock_account_id}; @@ -105,7 +105,7 @@ mod tx_expiration { assert_matches!( result, - Err(AddTransactionError::Expired { .. }), + Err(MempoolSubmissionError::Expired { .. }), "Failed run with expiration {i} and limit {limit}" ); } @@ -121,7 +121,7 @@ mod tx_expiration { let tx = Arc::new(tx); let result = uut.add_transaction(tx); - assert_matches!(result, Err(AddTransactionError::Expired { .. 
})); + assert_matches!(result, Err(MempoolSubmissionError::Expired { .. })); } } @@ -235,7 +235,7 @@ fn duplicate_nullifiers_are_rejected() { // We overlap with one nullifier. assert_matches!( result, - Err(AddTransactionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) + Err(MempoolSubmissionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) ); } @@ -268,7 +268,9 @@ fn duplicate_output_notes_are_rejected() { assert_matches!( result, - Err(AddTransactionError::StateConflict(StateConflict::OutputNotesAlreadyExist(..))) + Err(MempoolSubmissionError::StateConflict(StateConflict::OutputNotesAlreadyExist( + .. + ))) ); } @@ -301,9 +303,9 @@ fn unknown_unauthenticated_notes_are_rejected() { assert_matches!( result, - Err(AddTransactionError::StateConflict(StateConflict::UnauthenticatedNotesMissing( - .. - ))) + Err(MempoolSubmissionError::StateConflict( + StateConflict::UnauthenticatedNotesMissing(..) + )) ); } @@ -403,7 +405,7 @@ mod account_state { assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. } )) ); @@ -433,7 +435,7 @@ mod account_state { let result = uut.add_transaction(tx); assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. } )) ); @@ -483,7 +485,7 @@ mod new_account { let result = uut.add_transaction(tx); assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. } )) ); @@ -509,7 +511,7 @@ mod new_account { let result = uut.add_transaction(tx); assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. 
} )) ); diff --git a/crates/block-producer/src/mempool/tests/add_user_batch.rs b/crates/block-producer/src/mempool/tests/add_user_batch.rs new file mode 100644 index 0000000000..6d4aa3aeec --- /dev/null +++ b/crates/block-producer/src/mempool/tests/add_user_batch.rs @@ -0,0 +1,107 @@ +use std::sync::Arc; + +use assert_matches::assert_matches; +use miden_protocol::batch::BatchId; +use pretty_assertions::assert_eq; + +use crate::domain::transaction::AuthenticatedTransaction; +use crate::errors::{MempoolSubmissionError, StateConflict}; +use crate::mempool::Mempool; +use crate::test_utils::MockProvenTxBuilder; + +/// This checks that transactions from a user batch remain as the same batch upon selection. +/// +/// Since the selection process is random, it's difficult to test this directly, but this at +/// least acts as a smoke test. We select two batches and check that one of them is the user +/// batch. +#[test] +fn user_batch_is_isolated_from_other_transactions() { + let (mut uut, _) = Mempool::for_tests(); + + let conventional_a = build_tx(MockProvenTxBuilder::with_account_index(200)); + let conventional_b = build_tx(MockProvenTxBuilder::with_account_index(201)); + + uut.add_transaction(conventional_a.clone()).unwrap(); + uut.add_transaction(conventional_b.clone()).unwrap(); + + let user_batch_txs = MockProvenTxBuilder::sequential(); + let user_batch_id = + BatchId::from_transactions(user_batch_txs.iter().map(|tx| tx.raw_proven_transaction())); + uut.add_user_batch(&user_batch_txs).unwrap(); + + let batch_a = uut.select_batch().unwrap(); + let batch_b = uut.select_batch().unwrap(); + + let (user, conventional) = if batch_a.id() == user_batch_id { + (batch_a, batch_b) + } else { + (batch_b, batch_a) + }; + + assert_eq!(user.id(), user_batch_id); + assert_eq!(user.transactions(), user_batch_txs.as_slice()); + + assert_eq!(conventional.transactions().len(), 2); + assert!(conventional.transactions().contains(&conventional_a)); + 
assert!(conventional.transactions().contains(&conventional_b)); +} + +#[test] +fn user_batch_respects_batch_budget() { + let (mut uut, _) = Mempool::for_tests(); + uut.config.batch_budget.transactions = 1; + + let user_batch_txs = MockProvenTxBuilder::sequential(); + let result = uut.add_user_batch(&user_batch_txs[..2]); + + assert_matches!(result, Err(MempoolSubmissionError::CapacityExceeded)); +} + +#[test] +fn user_batch_with_internal_state_conflicts_are_rejected() { + let (mut uut, reference) = Mempool::for_tests(); + + let conflicting_a = tx_with_nullifiers(10, 0..1); + let conflicting_b = tx_with_nullifiers(11, 0..1); + + let result = uut.add_user_batch(&[conflicting_a.clone(), conflicting_b.clone()]); + + assert_matches!( + result, + Err(MempoolSubmissionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) + ); + + assert_eq!(uut, reference); +} + +#[test] +fn user_batch_conflicts_with_existing_state_are_rejected() { + let (mut uut, mut reference) = Mempool::for_tests(); + + let existing = tx_with_nullifiers(20, 5..6); + uut.add_transaction(existing.clone()).unwrap(); + reference.add_transaction(existing.clone()).unwrap(); + + let conflicting = tx_with_nullifiers(21, 5..6); + let companion = tx_with_nullifiers(22, 6..7); + + let result = uut.add_user_batch(&[conflicting.clone(), companion.clone()]); + + assert_matches!( + result, + Err(MempoolSubmissionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) + ); + + assert_eq!(uut, reference); +} + +fn build_tx(builder: MockProvenTxBuilder) -> Arc { + Arc::new(AuthenticatedTransaction::from_inner(builder.build())) +} + +fn tx_with_nullifiers( + account_index: u32, + range: std::ops::Range, +) -> Arc { + build_tx(MockProvenTxBuilder::with_account_index(account_index).nullifiers_range(range)) +} diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 98e642338a..d18791f81c 100644 --- a/crates/block-producer/src/server/mod.rs +++ 
b/crates/block-producer/src/server/mod.rs @@ -14,7 +14,7 @@ use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::formatting::{format_input_notes, format_output_notes}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_protocol::batch::ProvenBatch; +use miden_protocol::batch::ProposedBatch; use miden_protocol::block::BlockNumber; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::serde::Deserializable; @@ -29,7 +29,7 @@ use url::Url; use crate::batch_builder::BatchBuilder; use crate::block_builder::BlockBuilder; use crate::domain::transaction::AuthenticatedTransaction; -use crate::errors::{AddTransactionError, BlockProducerError, StoreError, SubmitProvenBatchError}; +use crate::errors::{BlockProducerError, MempoolSubmissionError, StoreError}; use crate::mempool::{BatchBudget, BlockBudget, Mempool, MempoolConfig, SharedMempool}; use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; @@ -304,11 +304,11 @@ impl BlockProducerRpcServer { async fn submit_proven_transaction( &self, request: proto::transaction::ProvenTransaction, - ) -> Result { + ) -> Result { debug!(target: COMPONENT, ?request); let tx = ProvenTransaction::read_from_bytes(&request.transaction) - .map_err(AddTransactionError::TransactionDeserializationFailed)?; + .map_err(MempoolSubmissionError::DeserializationFailed)?; let tx_id = tx.id(); @@ -329,20 +329,14 @@ impl BlockProducerRpcServer { .store .get_tx_inputs(&tx) .await - .map_err(AddTransactionError::StoreConnectionFailed)?; + .map_err(MempoolSubmissionError::StoreConnectionFailed)?; // SAFETY: we assume that the rpc component has verified the transaction proof already. 
- let tx = AuthenticatedTransaction::new_unchecked(tx, inputs) + let tx = AuthenticatedTransaction::new_unchecked(Arc::new(tx), inputs) .map(Arc::new) - .map_err(AddTransactionError::StateConflict)?; + .map_err(MempoolSubmissionError::StateConflict)?; - self.mempool - .lock() - .await - .lock() - .await - .add_transaction(tx) - .map(|block_height| proto::blockchain::BlockNumber { block_num: block_height.as_u32() }) + self.mempool.lock().await.lock().await.add_transaction(tx).map(Into::into) } #[instrument( @@ -351,14 +345,33 @@ impl BlockProducerRpcServer { skip_all, err )] - async fn submit_proven_batch( + async fn submit_batch( &self, - request: proto::transaction::ProvenTransactionBatch, - ) -> Result { - let _batch = ProvenBatch::read_from_bytes(&request.encoded) - .map_err(SubmitProvenBatchError::Deserialization)?; + request: proto::transaction::TransactionBatch, + ) -> Result { + let batch = ProposedBatch::read_from_bytes(&request.proposed_batch) + .map_err(MempoolSubmissionError::DeserializationFailed)?; + + // We assume that the rpc component has verified everything, including the transaction + // proofs. + + let mut txs = Vec::with_capacity(batch.transactions().len()); + for tx in batch.transactions() { + let inputs = self + .store + .get_tx_inputs(tx) + .await + .map_err(MempoolSubmissionError::StoreConnectionFailed)?; + + // SAFETY: We assume that the rpc component has verified the transaction proofs, as well + // as the batch integrity itself. 
+ let tx = AuthenticatedTransaction::new_unchecked(Arc::clone(tx), inputs) + .map(Arc::new) + .map_err(MempoolSubmissionError::StateConflict)?; + txs.push(tx); + } - todo!(); + self.mempool.lock().await.lock().await.add_user_batch(&txs).map(Into::into) } } @@ -377,11 +390,11 @@ impl api_server::Api for BlockProducerRpcServer { .map_err(Into::into) } - async fn submit_proven_batch( + async fn submit_batch( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, Status> { - self.submit_proven_batch(request.into_inner()) + self.submit_batch(request.into_inner()) .await .map(tonic::Response::new) // This Status::from mapping takes care of hiding internal errors. diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 60e48c3ded..52b9661e27 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -24,6 +24,7 @@ miden-node-proto-build = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } miden-tx = { default-features = true, workspace = true } +miden-tx-batch-prover = { workspace = true } semver = { version = "1.0" } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index f819054a25..401f57c0df 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -23,7 +23,7 @@ use miden_node_utils::limiter::{ QueryParamNullifierLimit, QueryParamStorageMapKeyTotalLimit, }; -use miden_protocol::batch::ProvenBatch; +use miden_protocol::batch::ProposedBatch; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::transaction::{ OutputNote, @@ -34,6 +34,7 @@ use miden_protocol::transaction::{ use miden_protocol::utils::serde::{Deserializable, Serializable}; use miden_protocol::{MIN_PROOF_SECURITY_LEVEL, Word}; use miden_tx::TransactionVerifier; +use miden_tx_batch_prover::LocalBatchProver; use 
tonic::{IntoRequest, Request, Response, Status}; use tracing::{debug, info}; use url::Url; @@ -425,47 +426,81 @@ impl api_server::Api for RpcService { /// Deserializes the batch, strips MAST decorators from full output note scripts, rebuilds /// the batch, then forwards it to the block producer. - async fn submit_proven_batch( + async fn submit_batch( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, Status> { let Some(block_producer) = &self.block_producer else { return Err(Status::unavailable("Batch submission not available in read-only mode")); }; - let mut request = request.into_inner(); - - let batch = ProvenBatch::read_from_bytes(&request.encoded) - .map_err(|err| Status::invalid_argument(err.as_report_context("invalid batch")))?; - - // Build a new batch with output notes' decorators removed - let stripped_outputs: Vec = - strip_output_note_decorators(batch.output_notes().iter()).collect(); + let request = request.into_inner(); - let rebuilt_batch = ProvenBatch::new( - batch.id(), - batch.reference_block_commitment(), - batch.reference_block_num(), - batch.account_updates().clone(), - batch.input_notes().clone(), - stripped_outputs, - batch.batch_expiration_block_num(), - batch.transactions().clone(), - ) - .map_err(|e| Status::invalid_argument(e.to_string()))?; + let batch = ProposedBatch::read_from_bytes(&request.proposed_batch).map_err(|err| { + Status::invalid_argument(err.as_report_context("invalid proposed_batch")) + })?; - request.encoded = rebuilt_batch.to_bytes(); + // Perform this check here since it's cheap. If this passes we can safely zip inputs and + // transactions. 
+ if request.transaction_inputs.len() != batch.transactions().len() { + return Err(Status::invalid_argument(format!( + "Number of inputs {} does not match number of transactions {} in batch", + request.transaction_inputs.len(), + batch.transactions().len() + ))); + } - // Only allow deployment transactions for new network accounts - for tx in batch.transactions().as_slice() { - if tx.account_id().is_network() && !tx.initial_state_commitment().is_empty() { + // Only allow deployment transactions for new network accounts. + for tx in batch.transactions() { + if tx.account_id().is_network() + && !tx.account_update().initial_state_commitment().is_empty() + { return Err(Status::invalid_argument( "Network transactions may not be submitted by users yet", )); } } - block_producer.clone().submit_proven_batch(request).await + // Verify batch transaction proofs. + let proof = LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL).prove(batch.clone()).map_err( + |err| Status::invalid_argument(err.as_report_context("proposed batch proof failed")), + )?; + // Verify the reference header matches the canonical chain. + let reference_header = self + .get_block_header_by_number(Request::new(proto::rpc::BlockHeaderByNumberRequest { + block_num: proof.reference_block_num().as_u32().into(), + include_mmr_proof: false.into(), + })) + .await? + .into_inner() + .block_header + .expect("store should always send block header"); + let reference_commitment: Word = reference_header + .chain_commitment + .expect("store should always fill block header") + .try_into() + .expect("store Word should be okay"); + if reference_commitment != proof.reference_block_commitment() { + return Err(Status::invalid_argument(format!( + "batch reference commitment {} at block {} does not match canonical chain's commitemnt of {}", + proof.reference_block_num(), + proof.reference_block_commitment(), + reference_commitment + ))); + } + + // Submit each transaction to the validator. 
+ // + // SAFETY: We checked earlier that the two iterators are the same length. + for (tx, inputs) in batch.transactions().iter().zip(&request.transaction_inputs) { + let request = proto::transaction::ProvenTransaction { + transaction: tx.to_bytes(), + transaction_inputs: inputs.clone().into(), + }; + self.validator.clone().submit_proven_transaction(request).await?; + } + + block_producer.clone().submit_batch(request).await } // -- Status & utility endpoints ---------------------------------------------------------- diff --git a/proto/proto/internal/block_producer.proto b/proto/proto/internal/block_producer.proto index 6e34083561..5be040a7eb 100644 --- a/proto/proto/internal/block_producer.proto +++ b/proto/proto/internal/block_producer.proto @@ -2,12 +2,12 @@ syntax = "proto3"; package block_producer; +import "google/protobuf/empty.proto"; import "rpc.proto"; -import "types/note.proto"; import "types/blockchain.proto"; +import "types/note.proto"; import "types/primitives.proto"; import "types/transaction.proto"; -import "google/protobuf/empty.proto"; // BLOCK PRODUCER SERVICE // ================================================================================================ @@ -19,19 +19,12 @@ service Api { // Submits proven transaction to the Miden network. Returns the node's current block height. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} - // Submits a proven batch to the Miden network. + // Submits a batch of transactions to the Miden network. // - // The batch may include transactions which were are: - // - // - already in the mempool i.e. previously successfully submitted - // - will be submitted to the mempool in the future - // - won't be submitted to the mempool at all - // - // All transactions in the batch but not in the mempool must build on the current mempool - // state following normal transaction submission rules. 
+ // All transactions in this batch will be considered atomic, and be committed together or not at all. // // Returns the node's current block height. - rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} + rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} // Subscribe to mempool events. // @@ -87,5 +80,5 @@ message MempoolEvent { TransactionAdded transaction_added = 1; BlockCommitted block_committed = 2; TransactionsReverted transactions_reverted = 3; - }; + } } diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index 2823083d5d..a6d4bd30a5 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -2,12 +2,12 @@ syntax = "proto3"; package rpc; +import "google/protobuf/empty.proto"; import "types/account.proto"; import "types/blockchain.proto"; import "types/note.proto"; import "types/primitives.proto"; import "types/transaction.proto"; -import "google/protobuf/empty.proto"; // RPC API // ================================================================================================ @@ -59,19 +59,12 @@ service Api { // Submits proven transaction to the Miden network. Returns the node's current block height. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} - // Submits a proven batch of transactions to the Miden network. - // - // The batch may include transactions which were are: - // - // - already in the mempool i.e. previously successfully submitted - // - will be submitted to the mempool in the future - // - won't be submitted to the mempool at all + // Submits a batch of transactions to the Miden network. // - // All transactions in the batch but not in the mempool must build on the current mempool - // state following normal transaction submission rules. + // All transactions in this batch will be considered atomic, and be committed together or not at all. // // Returns the node's current block height. 
- rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} + rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} // STATE SYNCHRONIZATION ENDPOINTS // -------------------------------------------------------------------------------------------- @@ -134,11 +127,9 @@ message RpcStatus { BlockProducerStatus block_producer = 4; } - // BLOCK PRODUCER STATUS // ================================================================================================ - // Represents the status of the block producer. message BlockProducerStatus { // The block producer's running version. @@ -277,7 +268,6 @@ message AccountRequest { // Represents the result of getting account proof. message AccountResponse { - message AccountDetails { // Account header. account.AccountHeader header = 1; diff --git a/proto/proto/types/transaction.proto b/proto/proto/types/transaction.proto index 54f1bdbb7e..e365e2dfa9 100644 --- a/proto/proto/types/transaction.proto +++ b/proto/proto/types/transaction.proto @@ -18,10 +18,15 @@ message ProvenTransaction { optional bytes transaction_inputs = 2; } -message ProvenTransactionBatch { - // Encoded using [miden_serde_utils::Serializable] implementation for - // [miden_protocol::transaction::proven_tx::ProvenTransaction]. - bytes encoded = 1; +message TransactionBatch { + // The proposed batch of transaction encoded using [winter_utils::Serializable] implementation + // for [miden_protocol::batch::ProposedBatch]. + bytes proposed_batch = 1; + // Each transaction's inputs encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::transaction::TransactionInputs]. + // + // Order of inputs should match the transaction order in the batch. + repeated bytes transaction_inputs = 2; } // Represents a transaction ID. 
From 28920145a1e8f39a96236e669b841eb55617796f Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 30 Mar 2026 10:48:30 +0200 Subject: [PATCH 2/6] Improve comments & address review --- crates/block-producer/src/mempool/budget.rs | 3 --- .../block-producer/src/mempool/graph/batch.rs | 2 +- .../block-producer/src/mempool/graph/dag.rs | 2 +- .../src/mempool/graph/transaction.rs | 21 +++++++++++++++++-- crates/block-producer/src/mempool/mod.rs | 10 +++++++-- 5 files changed, 29 insertions(+), 9 deletions(-) diff --git a/crates/block-producer/src/mempool/budget.rs b/crates/block-producer/src/mempool/budget.rs index 27b04e6050..a4c9c2167d 100644 --- a/crates/block-producer/src/mempool/budget.rs +++ b/crates/block-producer/src/mempool/budget.rs @@ -18,9 +18,6 @@ pub struct BatchBudget { /// Maximum number of output notes allowed. pub output_notes: usize, /// Maximum number of updated accounts. - /// - /// Authenticated transactions are assumed to update at most one account; this field enforces - /// how many such single-account updates can fit into a batch. pub accounts: usize, } diff --git a/crates/block-producer/src/mempool/graph/batch.rs b/crates/block-producer/src/mempool/graph/batch.rs index 39f50ee47a..be00bd0983 100644 --- a/crates/block-producer/src/mempool/graph/batch.rs +++ b/crates/block-producer/src/mempool/graph/batch.rs @@ -148,7 +148,7 @@ impl BatchGraph { selected } - /// Prunes the given batch. + /// Prunes the given batch and returns it. /// /// # Panics /// diff --git a/crates/block-producer/src/mempool/graph/dag.rs b/crates/block-producer/src/mempool/graph/dag.rs index 2d1cfc44a9..b1277ceb0b 100644 --- a/crates/block-producer/src/mempool/graph/dag.rs +++ b/crates/block-producer/src/mempool/graph/dag.rs @@ -213,7 +213,7 @@ where self.edges.children_of(id).is_empty() } - /// Removes the node _IFF_ it has no ancestor nodes. 
+ /// Removes the node _IFF_ it has no ancestor nodes and returns the pruned node. /// /// # Panics /// diff --git a/crates/block-producer/src/mempool/graph/transaction.rs b/crates/block-producer/src/mempool/graph/transaction.rs index e6fe4593bc..219e87a372 100644 --- a/crates/block-producer/src/mempool/graph/transaction.rs +++ b/crates/block-producer/src/mempool/graph/transaction.rs @@ -77,7 +77,11 @@ pub struct TransactionGraph { /// used to identify potentially buggy transactions that should be evicted. failures: HashMap, + /// Defines the transactions that belong to a user batch. user_batch_txs: HashMap>, + /// A mapping of transactions to their user batch (if any). + /// + /// Inverse map of `user_batch_txs`. txs_user_batch: HashMap, } @@ -195,10 +199,19 @@ impl TransactionGraph { /// Reverts expired transactions and their descendants. /// - /// Only unselected transactions are considered; selected transactions are assumed to be in - /// committed blocks and should not be reverted. + /// This is because we don't distinguish between committed and selected transactions. If we + /// didn't ignore selected transactions here, we would revert committed ones as well, which + /// breaks the state. /// /// Returns the identifiers of transactions that were removed from the graph. + /// + /// # Note + /// + /// Since this _ignores_ selected transactions, and the purpose is to revert expired + /// transactions after a block is committed, the caller **must** ensure that selected + /// transactions from expired batches (and therefore not committed) are deselected + /// _before_ calling this function. i.e. first revert expired batches and deselect their + /// transactions, then call this. pub fn revert_expired(&mut self, chain_tip: BlockNumber) -> HashSet { // We only revert transactions which are _not_ included in batches. 
        let mut to_revert = self.inner.expired(chain_tip);
@@ -272,6 +285,10 @@ impl TransactionGraph {
     ///
     /// This weeds out transactions which participate in batch and block failures, and might be the
     /// root cause.
+    ///
+    /// # Returns
+    ///
+    /// Returns the set of reverted transactions.
     pub fn increment_failure_count(
         &mut self,
         txs: impl Iterator,
diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs
index eff9952a9e..734cb9204d 100644
--- a/crates/block-producer/src/mempool/mod.rs
+++ b/crates/block-producer/src/mempool/mod.rs
@@ -303,7 +303,9 @@ impl Mempool {
 
     /// Drops the proposed batch and all of its descendants.
     ///
-    /// Transactions are re-queued.
+    /// The transactions are re-queued for inclusion in a batch. Additionally, the batch's
+    /// transactions have their failure count incremented, reverting them if they now exceed the
+    /// failure limit.
     #[instrument(target = COMPONENT, name = "mempool.rollback_batch", skip_all)]
     pub fn rollback_batch(&mut self, batch: BatchId) {
         // Guards against bugs in the proof scheduler where a retry results in multiple results
@@ -323,8 +325,12 @@ impl Mempool {
 
         // Find rolled back batch to mark the txs as failed.
        //
-        // Note that its possible it doesn't exist, since this batch could have already been
+        // Note that it's possible it doesn't exist, since this batch could have already been
         // reverted as part of a separate rollback.
+        //
+        // This could occur if this batch is the descendant of a separate batch or block rollback.
+        // The batch and transaction graphs already ignore unknown reversions; alternatively we
+        // could check this precondition above.
if let Some(batch) = reverted_batches.iter().find(|reverted| reverted.id() == batch) { let failed_txs = batch.transactions().iter().map(|tx| tx.id()); let reverted_txs = self.transactions.increment_failure_count(failed_txs); From 149f3ab944e7600c42f13c80e13d4c04a2251902 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 30 Mar 2026 11:05:49 +0200 Subject: [PATCH 3/6] Fix reference header commitment check --- crates/rpc/src/server/api.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 401f57c0df..fe45d7e45a 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -474,18 +474,20 @@ impl api_server::Api for RpcService { .await? .into_inner() .block_header - .expect("store should always send block header"); - let reference_commitment: Word = reference_header - .chain_commitment - .expect("store should always fill block header") - .try_into() - .expect("store Word should be okay"); - if reference_commitment != proof.reference_block_commitment() { + .map(BlockHeader::try_from) + .transpose()? 
+ .ok_or_else(|| { + Status::invalid_argument(format!( + "unknown reference block {}", + proof.reference_block_num() + )) + })?; + if reference_header.commitment() != proof.reference_block_commitment() { return Err(Status::invalid_argument(format!( "batch reference commitment {} at block {} does not match canonical chain's commitemnt of {}", proof.reference_block_num(), proof.reference_block_commitment(), - reference_commitment + reference_header.commitment() ))); } From e528296f7e9c699cfa9114d5dcbd41b8e428c65a Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 30 Mar 2026 14:47:40 +0200 Subject: [PATCH 4/6] Require ProvenBatch in gRPC --- crates/block-producer/src/server/mod.rs | 11 +-- crates/rpc/src/server/api.rs | 54 +++++++++----- proto/proto/internal/block_producer.proto | 2 +- proto/proto/rpc.proto | 2 +- proto/proto/types/transaction.proto | 88 ++++++++++++----------- 5 files changed, 94 insertions(+), 63 deletions(-) diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index d18791f81c..f0266c98aa 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -345,11 +345,14 @@ impl BlockProducerRpcServer { skip_all, err )] - async fn submit_batch( + async fn submit_proven_batch( &self, request: proto::transaction::TransactionBatch, ) -> Result { - let batch = ProposedBatch::read_from_bytes(&request.proposed_batch) + let proposed = request + .proposed_batch + .expect("proposed batch existence is enforced by RPC component"); + let batch = ProposedBatch::read_from_bytes(&proposed) .map_err(MempoolSubmissionError::DeserializationFailed)?; // We assume that the rpc component has verified everything, including the transaction @@ -390,11 +393,11 @@ impl api_server::Api for BlockProducerRpcServer { .map_err(Into::into) } - async fn submit_batch( + async fn submit_proven_batch( &self, request: tonic::Request, ) -> Result, 
Status> { - self.submit_batch(request.into_inner()) + self.submit_proven_batch(request.into_inner()) .await .map(tonic::Response::new) // This Status::from mapping takes care of hiding internal errors. diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index fe45d7e45a..9d99cd580e 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -23,7 +23,7 @@ use miden_node_utils::limiter::{ QueryParamNullifierLimit, QueryParamStorageMapKeyTotalLimit, }; -use miden_protocol::batch::ProposedBatch; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::transaction::{ OutputNote, @@ -426,7 +426,7 @@ impl api_server::Api for RpcService { /// Deserializes the batch, strips MAST decorators from full output note scripts, rebuilds /// the batch, then forwards it to the block producer. - async fn submit_batch( + async fn submit_proven_batch( &self, request: tonic::Request, ) -> Result, Status> { @@ -436,22 +436,32 @@ impl api_server::Api for RpcService { let request = request.into_inner(); - let batch = ProposedBatch::read_from_bytes(&request.proposed_batch).map_err(|err| { - Status::invalid_argument(err.as_report_context("invalid proposed_batch")) + let proven_batch = ProvenBatch::read_from_bytes(&request.batch_proof).map_err(|err| { + Status::invalid_argument(err.as_report_context("invalid proven_batch")) })?; + let proposed_batch = request + .proposed_batch + .as_deref() + .map(ProposedBatch::read_from_bytes) + .transpose() + .map_err(|err| { + Status::invalid_argument(err.as_report_context("invalid proposed_batch")) + })? + .ok_or(Status::invalid_argument("missing `proposed_batch` field"))?; + // Perform this check here since its cheap. If this passes we can safely zip inputs and // transactions. 
- if request.transaction_inputs.len() != batch.transactions().len() { + if request.transaction_inputs.len() != proposed_batch.transactions().len() { return Err(Status::invalid_argument(format!( "Number of inputs {} does not match number of transaction {} in batch", request.transaction_inputs.len(), - batch.transactions().len() + proposed_batch.transactions().len() ))); } // Only allow deployment transactions for new network accounts. - for tx in batch.transactions() { + for tx in proposed_batch.transactions() { if tx.account_id().is_network() && !tx.account_update().initial_state_commitment().is_empty() { @@ -462,13 +472,23 @@ impl api_server::Api for RpcService { } // Verify batch transaction proofs. - let proof = LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL).prove(batch.clone()).map_err( - |err| Status::invalid_argument(err.as_report_context("proposed block proof failed")), - )?; + // + // Need to do this because ProvenBatch has no real kernel yet, so we can only + // really check that the calculated proof matches the one given in the request. + let expected_proof = LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL) + .prove(proposed_batch.clone()) + .map_err(|err| { + Status::invalid_argument(err.as_report_context("proposed block proof failed")) + })?; + + if expected_proof != proven_batch { + return Err(Status::invalid_argument("batch proof did not match proposed batch")); + } + // Verify the reference header matches the canonical chain. let reference_header = self .get_block_header_by_number(Request::new(proto::rpc::BlockHeaderByNumberRequest { - block_num: proof.reference_block_num().as_u32().into(), + block_num: expected_proof.reference_block_num().as_u32().into(), include_mmr_proof: false.into(), })) .await? 
@@ -479,14 +499,14 @@ impl api_server::Api for RpcService { .ok_or_else(|| { Status::invalid_argument(format!( "unknown reference block {}", - proof.reference_block_num() + expected_proof.reference_block_num() )) })?; - if reference_header.commitment() != proof.reference_block_commitment() { + if reference_header.commitment() != expected_proof.reference_block_commitment() { return Err(Status::invalid_argument(format!( "batch reference commitment {} at block {} does not match canonical chain's commitemnt of {}", - proof.reference_block_num(), - proof.reference_block_commitment(), + expected_proof.reference_block_num(), + expected_proof.reference_block_commitment(), reference_header.commitment() ))); } @@ -494,7 +514,7 @@ impl api_server::Api for RpcService { // Submit each transaction to the validator. // // SAFETY: We checked earlier that the two iterators are the same length. - for (tx, inputs) in batch.transactions().iter().zip(&request.transaction_inputs) { + for (tx, inputs) in proposed_batch.transactions().iter().zip(&request.transaction_inputs) { let request = proto::transaction::ProvenTransaction { transaction: tx.to_bytes(), transaction_inputs: inputs.clone().into(), @@ -502,7 +522,7 @@ impl api_server::Api for RpcService { self.validator.clone().submit_proven_transaction(request).await?; } - block_producer.clone().submit_batch(request).await + block_producer.clone().submit_proven_batch(request).await } // -- Status & utility endpoints ---------------------------------------------------------- diff --git a/proto/proto/internal/block_producer.proto b/proto/proto/internal/block_producer.proto index 5be040a7eb..02ed21afb0 100644 --- a/proto/proto/internal/block_producer.proto +++ b/proto/proto/internal/block_producer.proto @@ -24,7 +24,7 @@ service Api { // All transactions in this batch will be considered atomic, and be committed together or not all. // // Returns the node's current block height. 
- rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} + rpc SubmitProvenBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} // Subscribe to mempool events. // diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index a6d4bd30a5..aeab0dacc8 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -64,7 +64,7 @@ service Api { // All transactions in this batch will be considered atomic, and be committed together or not all. // // Returns the node's current block height. - rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} + rpc SubmitProvenBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} // STATE SYNCHRONIZATION ENDPOINTS // -------------------------------------------------------------------------------------------- diff --git a/proto/proto/types/transaction.proto b/proto/proto/types/transaction.proto index e365e2dfa9..66504d67a1 100644 --- a/proto/proto/types/transaction.proto +++ b/proto/proto/types/transaction.proto @@ -10,41 +10,49 @@ import "types/primitives.proto"; // Submits proven transaction to the Miden network. message ProvenTransaction { - // Transaction encoded using [miden_serde_utils::Serializable] implementation for - // [miden_protocol::transaction::proven_tx::ProvenTransaction]. - bytes transaction = 1; - // Transaction inputs encoded using [miden_serde_utils::Serializable] implementation for - // [miden_protocol::transaction::TransactionInputs]. - optional bytes transaction_inputs = 2; + // The transaction proof. + // + // Encoded using [miden_protocol::transaction::ProvenTransaction::to_bytes]. + bytes transaction = 1; + // The inputs used for the transaction proof. + // + // Encoded using [miden_protocol::transaction::TransactionInputs::to_bytes]. 
+ optional bytes transaction_inputs = 2; } message TransactionBatch { - // The proposed batch of transaction encoded using [winter_utils::Serializable] implementation - // for [miden_protocol::batch::ProposedBatch]. - bytes proposed_batch = 1; - // Each transaction's inputs encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::transaction::TransactionInputs]. - // - // Order of inputs should match the transaction order in the batch. - repeated bytes transaction_inputs = 2; + // The batch proof. + // + // Encoded using [miden_protocol::batch::ProvenBatch::to_bytes]. + bytes batch_proof = 1; + // The batch contents of the given proof. + // + // Encoded using [miden_protocol::batch::ProposedBatch::to_bytes]. + optional bytes proposed_batch = 2; + // The transaction inputs for each transaction in the batch. + // + // Must match the transaction ordering in the batch. + // + // Encoded using [miden_protocol::transaction::TransactionInputs::to_bytes]. + repeated bytes transaction_inputs = 3; } // Represents a transaction ID. message TransactionId { - // The transaction ID. - primitives.Digest id = 1; + // The transaction ID. + primitives.Digest id = 1; } // Represents a transaction summary. message TransactionSummary { - // A unique 32-byte identifier of a transaction. - TransactionId transaction_id = 1; + // A unique 32-byte identifier of a transaction. + TransactionId transaction_id = 1; - // The block number in which the transaction was executed. - fixed32 block_num = 2; + // The block number in which the transaction was executed. + fixed32 block_num = 2; - // The ID of the account affected by the transaction. - account.AccountId account_id = 3; + // The ID of the account affected by the transaction. + account.AccountId account_id = 3; } // Represents a commitment to an input note of a transaction. @@ -52,33 +60,33 @@ message TransactionSummary { // For authenticated notes, only the nullifier is present. 
// For unauthenticated notes, the note header is also included. message InputNoteCommitment { - // The nullifier of the input note. - primitives.Digest nullifier = 1; + // The nullifier of the input note. + primitives.Digest nullifier = 1; - // The note header, present only for unauthenticated input notes. - optional note.NoteHeader header = 2; + // The note header, present only for unauthenticated input notes. + optional note.NoteHeader header = 2; } // Represents a transaction header. message TransactionHeader { - // The unique identifier of the transaction. - TransactionId transaction_id = 1; + // The unique identifier of the transaction. + TransactionId transaction_id = 1; - // ID of the account against which the transaction was executed. - account.AccountId account_id = 2; + // ID of the account against which the transaction was executed. + account.AccountId account_id = 2; - // State commitment of the account before the transaction was executed. - primitives.Digest initial_state_commitment = 3; + // State commitment of the account before the transaction was executed. + primitives.Digest initial_state_commitment = 3; - // State commitment of the account after the transaction was executed. - primitives.Digest final_state_commitment = 4; + // State commitment of the account after the transaction was executed. + primitives.Digest final_state_commitment = 4; - // Input notes of the transaction. - repeated InputNoteCommitment input_notes = 5; + // Input notes of the transaction. + repeated InputNoteCommitment input_notes = 5; - // Output notes of the transaction. - repeated note.NoteHeader output_notes = 6; + // Output notes of the transaction. + repeated note.NoteHeader output_notes = 6; - // The fee paid by the transaction. - primitives.Asset fee = 7; + // The fee paid by the transaction. 
+ primitives.Asset fee = 7; } From 7f2bce1051719d0583421c063371e474ce12cffa Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 31 Mar 2026 08:08:12 +0200 Subject: [PATCH 5/6] Fix typo --- crates/rpc/src/server/api.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 9d99cd580e..1083fcd5cc 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -504,7 +504,7 @@ impl api_server::Api for RpcService { })?; if reference_header.commitment() != expected_proof.reference_block_commitment() { return Err(Status::invalid_argument(format!( - "batch reference commitment {} at block {} does not match canonical chain's commitemnt of {}", + "batch reference commitment {} at block {} does not match canonical chain's commitment of {}", expected_proof.reference_block_num(), expected_proof.reference_block_commitment(), reference_header.commitment() From 74b397a9f5aea2d9258ff8bc9dce6a6f31929734 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare <43513081+bobbinth@users.noreply.github.com> Date: Tue, 31 Mar 2026 16:44:18 -0700 Subject: [PATCH 6/6] Apply suggestions from code review Co-authored-by: Philipp Gackstatter --- crates/rpc/src/server/api.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 1083fcd5cc..be5af9c184 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -505,8 +505,8 @@ impl api_server::Api for RpcService { if reference_header.commitment() != expected_proof.reference_block_commitment() { return Err(Status::invalid_argument(format!( "batch reference commitment {} at block {} does not match canonical chain's commitment of {}", - expected_proof.reference_block_num(), expected_proof.reference_block_commitment(), + expected_proof.reference_block_num(), reference_header.commitment() ))); }