From 02b13892c79503661219edf7367dbca9bcea48fb Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Apr 2026 19:36:56 +0200 Subject: [PATCH 01/17] fix!: enforce merkle payment amount verification against quoted prices The merkle payment verifier only checked that paid amounts were non-zero, not that they met the candidate's quoted price. A malicious client could submit fake low prices in PoolCommitment candidates while keeping the real poolHash, causing the contract to charge almost nothing while nodes still accepted the proof. Replace `paid_amount.is_zero()` with `paid_amount < node.price` so each paid candidate must receive at least their ML-DSA-65 signed quoted price. Also fix existing unit tests that were missing the Amount field in paid_node_addresses tuples, and add test_merkle_underpayment_rejected. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/payment/verifier.rs | 202 +++++++++++++++++++++------------------- 1 file changed, 105 insertions(+), 97 deletions(-) diff --git a/src/payment/verifier.rs b/src/payment/verifier.rs index d134c26..407250e 100644 --- a/src/payment/verifier.rs +++ b/src/payment/verifier.rs @@ -10,9 +10,8 @@ use crate::payment::proof::{ deserialize_merkle_proof, deserialize_proof, detect_proof_type, ProofType, }; use crate::payment::quote::{verify_quote_content, verify_quote_signature}; -use evmlib::contract::merkle_payment_vault; -use evmlib::merkle_batch_payment::PoolHash; -use evmlib::merkle_payments::OnChainPaymentInfo; +use evmlib::contract::payment_vault; +use evmlib::merkle_batch_payment::{OnChainPaymentInfo, PoolHash}; use evmlib::Network as EvmNetwork; use evmlib::ProofOfPayment; use evmlib::RewardsAddress; @@ -329,74 +328,67 @@ impl PaymentVerifier { .await .map_err(|e| Error::Payment(format!("Signature verification task failed: {e}")))??; - // Verify on-chain payment. + // Verify on-chain payment via the contract's verifyPayment function. 
// // The SingleNode payment model pays only the median-priced quote (at 3x) - // and sends Amount::ZERO for the other 4. evmlib's pay_for_quotes() - // filters out zero-amount payments, so only 1 quote has an on-chain - // record. The contract's verifyPayment() returns amountPaid=0 and - // isValid=false for unpaid quotes, which is expected. - // - // We use the amountPaid field to distinguish paid from unpaid results: - // - At least one quote must have been paid (amountPaid > 0) - // - ALL paid quotes must be valid (isValid=true) - // - Unpaid quotes (amountPaid=0) are allowed to be invalid - // - // This matches autonomi's strict verification model (all paid must be - // valid) while accommodating payment models that don't pay every quote. + // and sends Amount::ZERO for the other 4. The contract's verifyPayment + // checks that each payment's amount and address match what was recorded + // on-chain. We submit all quotes and require at least one to be valid + // with a non-zero amount. 
let payment_digest = payment.digest(); if payment_digest.is_empty() { return Err(Error::Payment("Payment has no quotes".to_string())); } - let payment_verifications: Vec<_> = payment_digest - .into_iter() - .map( - evmlib::contract::payment_vault::interface::IPaymentVault::PaymentVerification::from, - ) - .collect(); - let provider = evmlib::utils::http_provider(self.config.evm.network.rpc_url().clone()); - let handler = evmlib::contract::payment_vault::handler::PaymentVaultHandler::new( - *self.config.evm.network.data_payments_address(), - provider, - ); + let vault_address = *self.config.evm.network.payment_vault_address(); + let contract = + evmlib::contract::payment_vault::interface::IPaymentVault::new(vault_address, provider); - let results = handler - .verify_payment(payment_verifications) + // Build DataPayment entries for the contract's verifyPayment call + let data_payments: Vec<_> = payment_digest + .iter() + .map(|(quote_hash, amount, rewards_address)| { + evmlib::contract::payment_vault::interface::IPaymentVault::DataPayment { + rewardsAddress: *rewards_address, + amount: *amount, + quoteHash: *quote_hash, + } + }) + .collect(); + + let results = contract + .verifyPayment(data_payments) + .call() .await .map_err(|e| { let xorname_hex = hex::encode(xorname); - Error::Payment(format!("EVM verification error for {xorname_hex}: {e}")) + Error::Payment(format!( + "EVM verifyPayment call failed for {xorname_hex}: {e}" + )) })?; - let paid_results: Vec<_> = results - .iter() - .filter(|r| r.amountPaid > evmlib::common::U256::ZERO) - .collect(); + let total_quotes = payment_digest.len(); + let mut valid_paid_count: usize = 0; + + for result in &results { + if result.isValid && result.amountPaid > evmlib::common::Amount::ZERO { + valid_paid_count += 1; + } + } - if paid_results.is_empty() { + if valid_paid_count == 0 { let xorname_hex = hex::encode(xorname); return Err(Error::Payment(format!( - "Payment verification failed on-chain for {xorname_hex} (no paid 
quotes found)" + "Payment verification failed on-chain for {xorname_hex}: \ + no valid paid quotes found ({total_quotes} checked)" ))); } - for result in &paid_results { - if !result.isValid { - let xorname_hex = hex::encode(xorname); - return Err(Error::Payment(format!( - "Payment verification failed on-chain for {xorname_hex} (paid quote is invalid)" - ))); - } - } - if tracing::enabled!(tracing::Level::INFO) { - let valid_count = paid_results.len(); - let total_results = results.len(); let xorname_hex = hex::encode(xorname); info!( - "EVM payment verified for {xorname_hex} ({valid_count} paid and valid, {total_results} total results)" + "EVM payment verified for {xorname_hex} ({valid_paid_count} valid, {total_quotes} total quotes)" ); } Ok(()) @@ -532,9 +524,9 @@ impl PaymentVerifier { debug!("Pool cache hit for hash {}", hex::encode(pool_hash)); info } else { - // Query on-chain for payment info + // Query on-chain for completed merkle payment let info = - merkle_payment_vault::get_merkle_payment_info(&self.config.evm.network, pool_hash) + payment_vault::get_completed_merkle_payment(&self.config.evm.network, pool_hash) .await .map_err(|e| { let pool_hex = hex::encode(pool_hash); @@ -546,7 +538,7 @@ impl PaymentVerifier { let paid_node_addresses: Vec<_> = info .paidNodeAddresses .iter() - .map(|pna| (pna.rewardsAddress, usize::from(pna.poolIndex))) + .map(|pna| (pna.rewardsAddress, usize::from(pna.poolIndex), pna.amount)) .collect(); let on_chain_info = OnChainPaymentInfo { @@ -625,7 +617,12 @@ impl PaymentVerifier { ))); } - // Verify paid node indices are valid within the candidate pool. + // Verify paid node indices, addresses, and amounts against the candidate pool. + // + // Each paid node must: + // 1. Have a valid index within the candidate pool + // 2. Match the expected reward address at that index + // 3. 
Have been paid at least the candidate's quoted price // // Note: unlike single-node payments, merkle proofs are NOT bound to a // specific storing node. The contract pays `depth` random nodes from the @@ -634,7 +631,7 @@ impl PaymentVerifier { // any node that can verify the merkle proof is allowed to store the chunk. // Replay protection comes from the per-address proof binding (each proof // is for a specific XorName in the paid tree). - for (addr, idx) in &payment_info.paid_node_addresses { + for (addr, idx, paid_amount) in &payment_info.paid_node_addresses { let node = merkle_proof .winner_pool .candidate_nodes @@ -651,6 +648,13 @@ impl PaymentVerifier { node.reward_address ))); } + if *paid_amount < node.price { + return Err(Error::Payment(format!( + "Underpayment for node at index {idx}: paid {paid_amount}, \ + candidate quoted {}", + node.price + ))); + } } if tracing::enabled!(tracing::Level::INFO) { @@ -684,6 +688,7 @@ impl PaymentVerifier { #[allow(clippy::expect_used)] mod tests { use super::*; + use evmlib::common::Amount; /// Create a verifier for unit tests. EVM is always on, but tests can /// pre-populate the cache to bypass on-chain verification. 
@@ -992,7 +997,6 @@ mod tests { #[tokio::test] async fn test_content_address_mismatch_rejected() { use crate::payment::proof::{serialize_single_node_proof, PaymentProof}; - use evmlib::quoting_metrics::QuotingMetrics; use evmlib::{EncodedPeerId, PaymentQuote, RewardsAddress}; use std::time::SystemTime; @@ -1006,17 +1010,7 @@ mod tests { let quote = PaymentQuote { content: xor_name::XorName(wrong_xorname), timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 1000, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }, + price: Amount::from(1u64), rewards_address: RewardsAddress::new([1u8; 20]), pub_key: vec![0u8; 64], signature: vec![0u8; 64], @@ -1053,23 +1047,12 @@ mod tests { timestamp: SystemTime, rewards_address: RewardsAddress, ) -> evmlib::PaymentQuote { - use evmlib::quoting_metrics::QuotingMetrics; use evmlib::PaymentQuote; PaymentQuote { content: xor_name::XorName(xorname), timestamp, - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 1000, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }, + price: Amount::from(1u64), rewards_address, pub_key: vec![0u8; 64], signature: vec![0u8; 64], @@ -1465,27 +1448,16 @@ mod tests { std::array::from_fn::<_, CANDIDATES_PER_POOL, _>(|i| { let ml_dsa = MlDsa65::new(); let (pub_key, secret_key) = ml_dsa.generate_keypair().expect("keygen"); - let metrics = evmlib::quoting_metrics::QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: i * 10, - records_per_type: vec![], - max_records: 500, - received_payment_count: 0, - live_time: 100, - network_density: None, - network_size: None, - }; + let price = evmlib::common::Amount::from(1024u64); #[allow(clippy::cast_possible_truncation)] let reward_address = 
RewardsAddress::new([i as u8; 20]); - let msg = - MerklePaymentCandidateNode::bytes_to_sign(&metrics, &reward_address, timestamp); + let msg = MerklePaymentCandidateNode::bytes_to_sign(&price, &reward_address, timestamp); let sk = MlDsaSecretKey::from_bytes(secret_key.as_bytes()).expect("sk"); let signature = ml_dsa.sign(&sk, &msg).expect("sign").as_bytes().to_vec(); MerklePaymentCandidateNode { pub_key: pub_key.as_bytes().to_vec(), - quoting_metrics: metrics, + price, reward_address, merkle_payment_timestamp: timestamp, signature, @@ -1814,10 +1786,10 @@ mod tests { depth: 2, merkle_payment_timestamp: ts, paid_node_addresses: vec![ - // First paid node: valid (matches candidate 0) - (RewardsAddress::new([0u8; 20]), 0), + // First paid node: valid (matches candidate 0, price >= 1024) + (RewardsAddress::new([0u8; 20]), 0, Amount::from(1024u64)), // Second paid node: index 999 is way beyond CANDIDATES_PER_POOL (16) - (RewardsAddress::new([1u8; 20]), 999), + (RewardsAddress::new([1u8; 20]), 999, Amount::from(1024u64)), ], }; verifier.pool_cache.lock().put(pool_hash, info); @@ -1849,9 +1821,9 @@ mod tests { merkle_payment_timestamp: ts, paid_node_addresses: vec![ // Index 0 with matching address [0x00; 20] - (RewardsAddress::new([0u8; 20]), 0), + (RewardsAddress::new([0u8; 20]), 0, Amount::from(1024u64)), // Index 1 with WRONG address — candidate 1's address is [0x01; 20] - (RewardsAddress::new([0xFF; 20]), 1), + (RewardsAddress::new([0xFF; 20]), 1, Amount::from(1024u64)), ], }; verifier.pool_cache.lock().put(pool_hash, info); @@ -1878,7 +1850,11 @@ mod tests { let info = evmlib::merkle_payments::OnChainPaymentInfo { depth: 3, merkle_payment_timestamp: ts, - paid_node_addresses: vec![(RewardsAddress::new([0u8; 20]), 0)], + paid_node_addresses: vec![( + RewardsAddress::new([0u8; 20]), + 0, + Amount::from(1024u64), + )], }; verifier.pool_cache.lock().put(pool_hash, info); } @@ -1896,4 +1872,36 @@ mod tests { "Error should mention depth/count mismatch: {err_msg}" ); } + + 
#[tokio::test] + async fn test_merkle_underpayment_rejected() { + let verifier = create_test_verifier(); + let (xorname, tagged_proof, pool_hash, ts) = make_valid_merkle_proof_bytes(); + + // Tree depth=2, so 2 paid nodes required. Candidates have price=1024. + // Pay only 1 wei per node — far below the candidate's quoted price. + { + let info = evmlib::merkle_payments::OnChainPaymentInfo { + depth: 2, + merkle_payment_timestamp: ts, + paid_node_addresses: vec![ + (RewardsAddress::new([0u8; 20]), 0, Amount::from(1u64)), + (RewardsAddress::new([1u8; 20]), 1, Amount::from(1u64)), + ], + }; + verifier.pool_cache.lock().put(pool_hash, info); + } + + let result = verifier.verify_payment(&xorname, Some(&tagged_proof)).await; + + assert!( + result.is_err(), + "Should reject merkle payment where paid amount < candidate's quoted price" + ); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("Underpayment"), + "Error should mention underpayment: {err_msg}" + ); + } } From 94116139d8e6c04322cc525c5c6e938e457b8f39 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Apr 2026 20:48:40 +0200 Subject: [PATCH 02/17] feat!: adapt to evmlib PaymentVault API and simplify pricing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Migrate from the old QuotingMetrics-based pricing and split DataPayments/MerklePayments contracts to the unified PaymentVault API in evmlib. 
Key changes: - Replace QuotingMetrics with a single `price: Amount` field on quotes - Replace logarithmic pricing with simple quadratic formula (n/6000)² - Unify data_payments_address + merkle_payments_address into payment_vault_address - Verify payments via completedPayments mapping instead of verify_data_payment batch call Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 2 - Cargo.toml | 2 +- src/bin/ant-devnet/main.rs | 10 +- src/devnet.rs | 7 +- src/payment/pricing.rs | 336 +++++++++++------------------------- src/payment/proof.rs | 30 +--- src/payment/quote.rs | 116 ++++--------- src/payment/single_node.rs | 232 +++++++++---------------- src/storage/handler.rs | 4 +- tests/e2e/merkle_payment.rs | 18 +- 10 files changed, 238 insertions(+), 519 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb387cf..cfc5f6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2285,8 +2285,6 @@ dependencies = [ [[package]] name = "evmlib" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d608fcd0976beee509fef7fa391735571cb2fffd715ddca174322180300b6615" dependencies = [ "alloy", "ant-merkle", diff --git a/Cargo.toml b/Cargo.toml index d6e2e2c..9185bee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ saorsa-core = "0.21.0" saorsa-pqc = "0.5" # Payment verification - autonomi network lookup + EVM payment -evmlib = "0.5.0" +evmlib = { path = "../evmlib" } xor_name = "5" # Caching - LRU cache for verified XorNames diff --git a/src/bin/ant-devnet/main.rs b/src/bin/ant-devnet/main.rs index b3326b0..3103902 100644 --- a/src/bin/ant-devnet/main.rs +++ b/src/bin/ant-devnet/main.rs @@ -64,14 +64,11 @@ async fn main() -> color_eyre::Result<()> { .default_wallet_private_key() .map_err(|e| color_eyre::eyre::eyre!("Failed to get wallet key: {e}"))?; - let (rpc_url, token_addr, payments_addr, merkle_addr) = match &network { + let (rpc_url, token_addr, vault_addr) = match &network { evmlib::Network::Custom(custom) => ( 
custom.rpc_url_http.to_string(), format!("{:?}", custom.payment_token_address), - format!("{:?}", custom.data_payments_address), - custom - .merkle_payments_address - .map(|addr| format!("{addr:?}")), + format!("{:?}", custom.payment_vault_address), ), _ => { return Err(color_eyre::eyre::eyre!( @@ -93,8 +90,7 @@ async fn main() -> color_eyre::Result<()> { rpc_url, wallet_private_key: wallet_key, payment_token_address: token_addr, - data_payments_address: payments_addr, - merkle_payments_address: merkle_addr, + payment_vault_address: vault_addr, }) } else { None diff --git a/src/devnet.rs b/src/devnet.rs index 9bd0a1b..3b6da0c 100644 --- a/src/devnet.rs +++ b/src/devnet.rs @@ -244,11 +244,8 @@ pub struct DevnetEvmInfo { pub wallet_private_key: String, /// Payment token contract address. pub payment_token_address: String, - /// Data payments contract address. - pub data_payments_address: String, - /// Merkle payments contract address (for batch payments). - #[serde(default, skip_serializing_if = "Option::is_none")] - pub merkle_payments_address: Option, + /// Unified payment vault contract address (handles both single-node and merkle payments). + pub payment_vault_address: String, } /// Network state for devnet startup lifecycle. diff --git a/src/payment/pricing.rs b/src/payment/pricing.rs index 2290d5c..503661f 100644 --- a/src/payment/pricing.rs +++ b/src/payment/pricing.rs @@ -1,297 +1,159 @@ -//! Local fullness-based pricing algorithm for ant-node. +//! Simple quadratic pricing algorithm for ant-node. //! -//! Mirrors the logarithmic pricing curve from autonomi's `MerklePaymentVault` contract: -//! - Empty node → price ≈ `MIN_PRICE` (floor) -//! - Filling up → price increases logarithmically -//! - Nearly full → price spikes (ln(x) as x→0) -//! - At capacity → returns `u64::MAX` (effectively refuses new data) +//! Uses the formula `(close_records_stored / 6000)^2` to calculate storage price. +//! 
Integer division means nodes with fewer than 6000 records get a ratio of 0, +//! but a minimum floor of 1 prevents free storage. //! -//! ## Design Rationale: Capacity-Based Pricing +//! ## Design Rationale //! -//! Pricing is based on node **fullness** (percentage of storage capacity used), -//! not on a fixed cost-per-byte. This design mirrors the autonomi -//! `MerklePaymentVault` on-chain contract and creates natural load balancing: -//! -//! - **Empty nodes** charge the minimum floor price, attracting new data -//! - **Nearly full nodes** charge exponentially more via the logarithmic curve -//! - **This pushes clients toward emptier nodes**, distributing data across the network -//! -//! A flat cost-per-byte model would not incentivize distribution — all nodes would -//! charge the same regardless of remaining capacity. The logarithmic curve ensures -//! the network self-balances as nodes fill up. +//! The quadratic curve creates natural load balancing: +//! - **Lightly loaded nodes** (< 6000 records) charge the minimum floor price +//! - **Moderately loaded nodes** charge proportionally more as records grow +//! - **Heavily loaded nodes** charge quadratically more, pushing clients elsewhere use evmlib::common::Amount; -use evmlib::quoting_metrics::QuotingMetrics; -/// Minimum price floor (matches contract's `minPrice = 3`). -const MIN_PRICE: u64 = 3; +/// Divisor for the pricing formula. +const PRICING_DIVISOR: u64 = 6000; -/// Scaling factor for the logarithmic pricing curve. -/// In the contract this is 1e18; we normalize to 1.0 for f64 arithmetic. -const SCALING_FACTOR: f64 = 1.0; +/// PRICING_DIVISOR², precomputed to avoid repeated multiplication. +const DIVISOR_SQUARED: u64 = PRICING_DIVISOR * PRICING_DIVISOR; -/// ANT price constant (normalized to 1.0, matching contract's 1e18/1e18 ratio). -const ANT_PRICE: f64 = 1.0; +/// 1 token = 10^18 wei. 
+const WEI_PER_TOKEN: u128 = 1_000_000_000_000_000_000; -/// Calculate a local price estimate from node quoting metrics. +/// Minimum price in wei (1 wei) to prevent free storage. +const MIN_PRICE_WEI: u128 = 1; + +/// Calculate storage price in wei from the number of close records stored. /// -/// Implements the autonomi pricing formula: -/// ```text -/// price = (-s/ANT) * (ln|rUpper - 1| - ln|rLower - 1|) + pMin*(rUpper - rLower) - (rUpper - rLower)/ANT -/// ``` +/// Formula: `price_wei = n² × 10¹⁸ / 6000²` /// -/// where: -/// - `rLower = total_cost_units / max_cost_units` (current fullness ratio) -/// - `rUpper = (total_cost_units + cost_unit) / max_cost_units` (fullness after storing) -/// - `s` = scaling factor, `ANT` = ANT price, `pMin` = minimum price -#[allow( - clippy::cast_precision_loss, - clippy::cast_possible_truncation, - clippy::cast_sign_loss -)] +/// This is equivalent to `(n / 6000)²` in tokens, converted to wei, but +/// preserves sub-token precision by scaling before dividing. U256 arithmetic +/// prevents overflow for large record counts. #[must_use] -pub fn calculate_price(metrics: &QuotingMetrics) -> Amount { - let min_price = Amount::from(MIN_PRICE); - - // Edge case: zero or very small capacity - if metrics.max_records == 0 { - return min_price; +pub fn calculate_price(close_records_stored: usize) -> Amount { + let n = Amount::from(close_records_stored); + let n_squared = n.saturating_mul(n); + let price_wei = + n_squared.saturating_mul(Amount::from(WEI_PER_TOKEN)) / Amount::from(DIVISOR_SQUARED); + if price_wei.is_zero() { + Amount::from(MIN_PRICE_WEI) + } else { + price_wei } +} - // Use close_records_stored as the authoritative record count for pricing. 
- let total_records = metrics.close_records_stored as u64; - - let max_records = metrics.max_records as f64; - - // Normalize to [0, 1) range (matching contract's _getBound) - let r_lower = total_records as f64 / max_records; - // Adding one record (cost_unit = 1 normalized) - let r_upper = (total_records + 1) as f64 / max_records; - - // At capacity: return maximum price to effectively refuse new data - if r_lower >= 1.0 || r_upper >= 1.0 { - return Amount::from(u64::MAX); - } - if (r_upper - r_lower).abs() < f64::EPSILON { - return min_price; - } +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used)] +mod tests { + use super::*; - // Calculate |r - 1| for logarithm inputs - let upper_diff = (r_upper - 1.0).abs(); - let lower_diff = (r_lower - 1.0).abs(); + const WEI: u128 = WEI_PER_TOKEN; - // Avoid log(0) - if upper_diff < f64::EPSILON || lower_diff < f64::EPSILON { - return min_price; + /// Helper: expected price for n records = n² * 10^18 / 6000² + fn expected_price(n: u64) -> Amount { + let n = Amount::from(n); + n * n * Amount::from(WEI) / Amount::from(DIVISOR_SQUARED) } - let log_upper = upper_diff.ln(); - let log_lower = lower_diff.ln(); - let log_diff = log_upper - log_lower; - - let linear_part = r_upper - r_lower; - - // Formula: price = (-s/ANT) * logDiff + pMin * linearPart - linearPart/ANT - let part_one = (-SCALING_FACTOR / ANT_PRICE) * log_diff; - let part_two = MIN_PRICE as f64 * linear_part; - let part_three = linear_part / ANT_PRICE; - - let price = part_one + part_two - part_three; - - if price <= 0.0 || !price.is_finite() { - return min_price; + #[test] + fn test_zero_records_gets_min_price() { + let price = calculate_price(0); + assert_eq!(price, Amount::from(MIN_PRICE_WEI)); } - // Scale by data_size (larger data costs proportionally more) - let data_size_factor = metrics.data_size.max(1) as f64; - let scaled_price = price * data_size_factor; - - if !scaled_price.is_finite() { - return min_price; + #[test] + fn 
test_one_record_nonzero() { + // 1² * 1e18 / 36e6 = 1e18 / 36e6 ≈ 27_777_777_777 + let price = calculate_price(1); + assert_eq!(price, expected_price(1)); + assert!(price > Amount::ZERO); } - // Convert to Amount (U256), floor at MIN_PRICE - let price_u64 = if scaled_price > u64::MAX as f64 { - u64::MAX - } else { - (scaled_price as u64).max(MIN_PRICE) - }; - - Amount::from(price_u64) -} - -#[cfg(test)] -#[allow(clippy::unwrap_used, clippy::expect_used)] -mod tests { - use super::*; - - fn make_metrics( - records_stored: usize, - max_records: usize, - data_size: usize, - data_type: u32, - ) -> QuotingMetrics { - let records_per_type = if records_stored > 0 { - vec![(data_type, u32::try_from(records_stored).unwrap_or(u32::MAX))] - } else { - vec![] - }; - QuotingMetrics { - data_type, - data_size, - close_records_stored: records_stored, - records_per_type, - max_records, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: Some(500), - } + #[test] + fn test_at_divisor_gets_one_token() { + // 6000² * 1e18 / 6000² = 1e18 + let price = calculate_price(6000); + assert_eq!(price, Amount::from(WEI)); } #[test] - fn test_empty_node_gets_min_price() { - let metrics = make_metrics(0, 1000, 1, 0); - let price = calculate_price(&metrics); - // Empty node should return approximately MIN_PRICE - assert_eq!(price, Amount::from(MIN_PRICE)); + fn test_double_divisor_gets_four_tokens() { + // 12000² * 1e18 / 6000² = 4e18 + let price = calculate_price(12000); + assert_eq!(price, Amount::from(4 * WEI)); } #[test] - fn test_half_full_node_costs_more() { - let empty = make_metrics(0, 1000, 1024, 0); - let half = make_metrics(500, 1000, 1024, 0); - let price_empty = calculate_price(&empty); - let price_half = calculate_price(&half); - assert!( - price_half > price_empty, - "Half-full price ({price_half}) should exceed empty price ({price_empty})" - ); + fn test_triple_divisor_gets_nine_tokens() { + // 18000² * 1e18 / 6000² = 9e18 + let price = 
calculate_price(18000); + assert_eq!(price, Amount::from(9 * WEI)); } #[test] - fn test_nearly_full_node_costs_much_more() { - let half = make_metrics(500, 1000, 1024, 0); - let nearly_full = make_metrics(900, 1000, 1024, 0); - let price_half = calculate_price(&half); - let price_nearly_full = calculate_price(&nearly_full); + fn test_smooth_pricing_no_staircase() { + // With the old integer-division approach, 6000 and 11999 gave the same price. + // Now 11999 should give a higher price than 6000. + let price_6k = calculate_price(6000); + let price_11k = calculate_price(11999); assert!( - price_nearly_full > price_half, - "Nearly-full price ({price_nearly_full}) should far exceed half-full price ({price_half})" + price_11k > price_6k, + "11999 records ({price_11k}) should cost more than 6000 ({price_6k})" ); } #[test] - fn test_full_node_returns_max_price() { - // At capacity (r_lower >= 1.0), effectively refuse new data with max price - let metrics = make_metrics(1000, 1000, 1024, 0); - let price = calculate_price(&metrics); - assert_eq!(price, Amount::from(u64::MAX)); + fn test_price_increases_with_records() { + let price_low = calculate_price(6000); + let price_mid = calculate_price(12000); + let price_high = calculate_price(18000); + assert!(price_mid > price_low); + assert!(price_high > price_mid); } #[test] fn test_price_increases_monotonically() { - let max_records = 1000; - let data_size = 1024; let mut prev_price = Amount::ZERO; - - // Check from 0% to 99% full - for pct in 0..100 { - let records = pct * max_records / 100; - let metrics = make_metrics(records, max_records, data_size, 0); - let price = calculate_price(&metrics); + for records in (0..60000).step_by(100) { + let price = calculate_price(records); assert!( price >= prev_price, - "Price at {pct}% ({price}) should be >= price at previous step ({prev_price})" + "Price at {records} records ({price}) should be >= previous ({prev_price})" ); prev_price = price; } } #[test] - fn 
test_zero_max_records_returns_min_price() { - let metrics = make_metrics(0, 0, 1024, 0); - let price = calculate_price(&metrics); - assert_eq!(price, Amount::from(MIN_PRICE)); + fn test_large_value_no_overflow() { + let price = calculate_price(usize::MAX); + assert!(price > Amount::ZERO); } #[test] - fn test_different_data_sizes_same_fullness() { - let small = make_metrics(500, 1000, 100, 0); - let large = make_metrics(500, 1000, 10000, 0); - let price_small = calculate_price(&small); - let price_large = calculate_price(&large); - assert!( - price_large > price_small, - "Larger data ({price_large}) should cost more than smaller data ({price_small})" - ); - } - - #[test] - fn test_price_with_multiple_record_types() { - // 300 type-0 records + 200 type-1 records = 500 total out of 1000 - let metrics = QuotingMetrics { - data_type: 0, - data_size: 1024, - close_records_stored: 500, - records_per_type: vec![(0, 300), (1, 200)], - max_records: 1000, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: Some(500), - }; - let price_multi = calculate_price(&metrics); - - // Compare with single-type equivalent (500 of type 0) - let metrics_single = make_metrics(500, 1000, 1024, 0); - let price_single = calculate_price(&metrics_single); - - // Same total records → same price - assert_eq!(price_multi, price_single); - } - - #[test] - fn test_price_at_95_percent() { - let metrics = make_metrics(950, 1000, 1024, 0); - let price = calculate_price(&metrics); - let min = Amount::from(MIN_PRICE); - assert!( - price > min, - "Price at 95% should be above minimum, got {price}" - ); - } - - #[test] - fn test_price_at_99_percent() { - let metrics = make_metrics(990, 1000, 1024, 0); - let price = calculate_price(&metrics); - let price_95 = calculate_price(&make_metrics(950, 1000, 1024, 0)); - assert!( - price > price_95, - "Price at 99% ({price}) should exceed price at 95% ({price_95})" - ); + fn test_price_deterministic() { + let price1 = 
calculate_price(12000); + let price2 = calculate_price(12000); + assert_eq!(price1, price2); } #[test] - fn test_over_capacity_returns_max_price() { - // 1100 records stored but max is 1000 — over capacity - let metrics = make_metrics(1100, 1000, 1024, 0); - let price = calculate_price(&metrics); - assert_eq!( - price, - Amount::from(u64::MAX), - "Over-capacity should return max price" - ); + fn test_quadratic_growth() { + // price at 4x records should be 16x price at 1x + let price_1x = calculate_price(6000); + let price_4x = calculate_price(24000); + assert_eq!(price_1x, Amount::from(WEI)); + assert_eq!(price_4x, Amount::from(16 * WEI)); } #[test] - fn test_price_deterministic() { - let metrics = make_metrics(500, 1000, 1024, 0); - let price1 = calculate_price(&metrics); - let price2 = calculate_price(&metrics); - let price3 = calculate_price(&metrics); - assert_eq!(price1, price2); - assert_eq!(price2, price3); + fn test_small_record_counts_are_cheap() { + // 100 records: 100² * 1e18 / 36e6 ≈ 277_777_777_777_777 wei ≈ 0.000278 tokens + let price = calculate_price(100); + assert_eq!(price, expected_price(100)); + assert!(price < Amount::from(WEI)); // well below 1 token } } diff --git a/src/payment/proof.rs b/src/payment/proof.rs index 03dfc8f..0925bd5 100644 --- a/src/payment/proof.rs +++ b/src/payment/proof.rs @@ -116,11 +116,11 @@ pub fn deserialize_merkle_proof(bytes: &[u8]) -> std::result::Result QuotingMetrics { - self.metrics_tracker.get_metrics(0, 0) + pub fn records_stored(&self) -> usize { + self.metrics_tracker.records_stored() } /// Record a payment received (delegates to metrics tracker). 
@@ -214,11 +210,11 @@ impl QuoteGenerator {
             .as_ref()
             .ok_or_else(|| Error::Payment("Quote signing not configured".to_string()))?;
 
-        let quoting_metrics = self.metrics_tracker.get_metrics(data_size, data_type);
+        let price = calculate_price(self.metrics_tracker.records_stored());
 
         // Compute the same bytes_to_sign used by the upstream library
         let msg = MerklePaymentCandidateNode::bytes_to_sign(
-            &quoting_metrics,
+            &price,
             &self.rewards_address,
             merkle_payment_timestamp,
         );
@@ -233,7 +229,7 @@ impl QuoteGenerator {
 
         let candidate = MerklePaymentCandidateNode {
             pub_key: self.pub_key.clone(),
-            quoting_metrics,
+            price,
             reward_address: self.rewards_address,
             merkle_payment_timestamp,
             signature,
@@ -355,7 +351,7 @@ pub fn verify_merkle_candidate_signature(candidate: &MerklePaymentCandidateNode)
     };
 
     let msg = MerklePaymentCandidateNode::bytes_to_sign(
-        &candidate.quoting_metrics,
+        &candidate.price,
         &candidate.reward_address,
         candidate.merkle_payment_timestamp,
     );
@@ -414,6 +410,7 @@ pub fn wire_ml_dsa_signer(
 mod tests {
     use super::*;
     use crate::payment::metrics::QuotingMetricsTracker;
+    use evmlib::common::Amount;
     use saorsa_pqc::pqc::types::MlDsaSecretKey;
 
     fn create_test_generator() -> QuoteGenerator {
@@ -532,29 +529,12 @@ mod tests {
     }
 
     #[test]
-    fn test_current_metrics() {
+    fn test_records_stored() {
         let rewards_address = RewardsAddress::new([1u8; 20]);
         let metrics_tracker = QuotingMetricsTracker::new(500, 50);
         let generator = QuoteGenerator::new(rewards_address, metrics_tracker);
 
-        let metrics = generator.current_metrics();
-        assert_eq!(metrics.max_records, 500);
-        assert_eq!(metrics.close_records_stored, 50);
-        assert_eq!(metrics.data_size, 0);
-        assert_eq!(metrics.data_type, 0);
-    }
-
-    #[test]
-    fn test_record_payment_delegation() {
-        let rewards_address = RewardsAddress::new([1u8; 20]);
-        let metrics_tracker = QuotingMetricsTracker::new(1000, 0);
-        let generator = QuoteGenerator::new(rewards_address, metrics_tracker);
-
-        generator.record_payment();
-        
generator.record_payment(); - - let metrics = generator.current_metrics(); - assert_eq!(metrics.received_payment_count, 2); + assert_eq!(generator.records_stored(), 50); } #[test] @@ -567,8 +547,7 @@ mod tests { generator.record_store(1); generator.record_store(0); - let metrics = generator.current_metrics(); - assert_eq!(metrics.close_records_stored, 3); + assert_eq!(generator.records_stored(), 3); } #[test] @@ -576,17 +555,15 @@ mod tests { let generator = create_test_generator(); let content = [10u8; 32]; - // Data type 0 (chunk) + // All data types produce the same price (price depends on records_stored, not data_type) let q0 = generator.create_quote(content, 1024, 0).expect("type 0"); - assert_eq!(q0.quoting_metrics.data_type, 0); - - // Data type 1 let q1 = generator.create_quote(content, 512, 1).expect("type 1"); - assert_eq!(q1.quoting_metrics.data_type, 1); - - // Data type 2 let q2 = generator.create_quote(content, 256, 2).expect("type 2"); - assert_eq!(q2.quoting_metrics.data_type, 2); + + // All quotes should have a valid price (minimum floor of 1) + assert!(q0.price >= Amount::from(1u64)); + assert!(q1.price >= Amount::from(1u64)); + assert!(q2.price >= Amount::from(1u64)); } #[test] @@ -594,8 +571,9 @@ mod tests { let generator = create_test_generator(); let content = [11u8; 32]; + // Price depends on records_stored, not data size let quote = generator.create_quote(content, 0, 0).expect("zero size"); - assert_eq!(quote.quoting_metrics.data_size, 0); + assert!(quote.price >= Amount::from(1u64)); } #[test] @@ -603,10 +581,11 @@ mod tests { let generator = create_test_generator(); let content = [12u8; 32]; + // Price depends on records_stored, not data size let quote = generator .create_quote(content, 10_000_000, 0) .expect("large size"); - assert_eq!(quote.quoting_metrics.data_size, 10_000_000); + assert!(quote.price >= Amount::from(1u64)); } #[test] @@ -614,17 +593,7 @@ mod tests { let quote = PaymentQuote { content: xor_name::XorName([0u8; 32]), 
timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 0, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 0, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }, + price: Amount::from(1u64), rewards_address: RewardsAddress::new([0u8; 20]), pub_key: vec![], signature: vec![], @@ -722,11 +691,8 @@ mod tests { // Verify the timestamp was set correctly assert_eq!(candidate.merkle_payment_timestamp, timestamp); - // Verify metrics match what the tracker would produce - assert_eq!(candidate.quoting_metrics.data_size, 2048); - assert_eq!(candidate.quoting_metrics.data_type, 0); - assert_eq!(candidate.quoting_metrics.max_records, 800); - assert_eq!(candidate.quoting_metrics.close_records_stored, 50); + // Verify price was calculated from records_stored using the pricing formula + assert_eq!(candidate.price, calculate_price(50)); // Verify the public key is the ML-DSA-65 public key (not ed25519) assert_eq!( @@ -763,25 +729,15 @@ mod tests { .duration_since(std::time::UNIX_EPOCH) .expect("system time") .as_secs(); - let metrics = QuotingMetrics { - data_size: 4096, - data_type: 0, - close_records_stored: 10, - records_per_type: vec![], - max_records: 500, - received_payment_count: 3, - live_time: 600, - network_density: None, - network_size: None, - }; + let price = Amount::from(42u64); - let msg = MerklePaymentCandidateNode::bytes_to_sign(&metrics, &rewards_address, timestamp); + let msg = MerklePaymentCandidateNode::bytes_to_sign(&price, &rewards_address, timestamp); let sk = MlDsaSecretKey::from_bytes(secret_key.as_bytes()).expect("sk"); let signature = ml_dsa.sign(&sk, &msg).expect("sign").as_bytes().to_vec(); MerklePaymentCandidateNode { pub_key: public_key.as_bytes().to_vec(), - quoting_metrics: metrics, + price, reward_address: rewards_address, merkle_payment_timestamp: timestamp, signature, @@ -821,12 +777,12 @@ mod tests { } #[test] - fn 
test_verify_merkle_candidate_tampered_metrics() { + fn test_verify_merkle_candidate_tampered_price() { let mut candidate = make_valid_merkle_candidate(); - candidate.quoting_metrics.data_size = 999_999; + candidate.price = Amount::from(999_999u64); assert!( !verify_merkle_candidate_signature(&candidate), - "Tampered quoting_metrics must invalidate the signature" + "Tampered price must invalidate the signature" ); } diff --git a/src/payment/single_node.rs b/src/payment/single_node.rs index 627e1ff..2b22e65 100644 --- a/src/payment/single_node.rs +++ b/src/payment/single_node.rs @@ -13,31 +13,12 @@ use crate::ant_protocol::CLOSE_GROUP_SIZE; use crate::error::{Error, Result}; use evmlib::common::{Amount, QuoteHash}; -use evmlib::contract::payment_vault; -use evmlib::quoting_metrics::QuotingMetrics; use evmlib::wallet::Wallet; use evmlib::Network as EvmNetwork; use evmlib::PaymentQuote; use evmlib::RewardsAddress; use tracing::info; -/// Create zero-valued `QuotingMetrics` for payment verification. -/// -/// The contract doesn't validate metric values, so we use zeroes. -fn zero_quoting_metrics() -> QuotingMetrics { - QuotingMetrics { - data_size: 0, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 0, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - } -} - /// Index of the median-priced node after sorting, derived from `CLOSE_GROUP_SIZE`. 
const MEDIAN_INDEX: usize = CLOSE_GROUP_SIZE / 2; @@ -63,8 +44,6 @@ pub struct QuotePaymentInfo { pub rewards_address: RewardsAddress, /// The amount to pay (3x for median, 0 for others) pub amount: Amount, - /// The quoting metrics - pub quoting_metrics: QuotingMetrics, } impl SingleNodePayment { @@ -120,7 +99,6 @@ impl SingleNodePayment { } else { Amount::ZERO }, - quoting_metrics: quote.quoting_metrics, }) .collect(); @@ -220,49 +198,65 @@ impl SingleNodePayment { network: &EvmNetwork, owned_quote_hash: Option, ) -> Result { - // Build payment digest for all 5 quotes - // Each quote needs an owned QuotingMetrics (tuple requires ownership) - let payment_digest: Vec<_> = self - .quotes - .iter() - .map(|q| (q.quote_hash, zero_quoting_metrics(), q.rewards_address)) - .collect(); - - // Mark owned quotes - let owned_quote_hashes = owned_quote_hash.map_or_else(Vec::new, |hash| vec![hash]); - info!( - "Verifying {} payments (owned: {})", - payment_digest.len(), - owned_quote_hashes.len() + "Verifying {} payments via completedPayments mapping", + self.quotes.len() ); - let verified_amount = - payment_vault::verify_data_payment(network, owned_quote_hashes.clone(), payment_digest) + let provider = evmlib::utils::http_provider(network.rpc_url().clone()); + let vault_address = *network.payment_vault_address(); + let contract = + evmlib::contract::payment_vault::interface::IPaymentVault::new(vault_address, provider); + + let mut total_verified = Amount::ZERO; + let mut owned_on_chain = Amount::ZERO; + + for quote_info in &self.quotes { + let result = contract + .completedPayments(quote_info.quote_hash) + .call() .await - .map_err(|e| Error::Payment(format!("Payment verification failed: {e}")))?; + .map_err(|e| Error::Payment(format!("completedPayments lookup failed: {e}")))?; - if owned_quote_hashes.is_empty() { - info!("Payment verified as valid on-chain"); - } else { - // If we own a quote, verify the amount matches + let on_chain_amount = Amount::from(result.amount); + 
if on_chain_amount > Amount::ZERO { + total_verified = total_verified.checked_add(on_chain_amount).ok_or_else(|| { + Error::Payment("Overflow summing verified amounts".to_string()) + })?; + + if owned_quote_hash == Some(quote_info.quote_hash) { + owned_on_chain = on_chain_amount; + } + } + } + + if total_verified == Amount::ZERO { + return Err(Error::Payment( + "No payments found on-chain for any quote".to_string(), + )); + } + + // If we own a quote, verify the amount matches + if let Some(owned_hash) = owned_quote_hash { let expected = self .quotes .iter() - .find(|q| Some(q.quote_hash) == owned_quote_hash) + .find(|q| q.quote_hash == owned_hash) .ok_or_else(|| Error::Payment("Owned quote hash not found in payment".to_string()))? .amount; - if verified_amount != expected { + if owned_on_chain != expected { return Err(Error::Payment(format!( - "Payment amount mismatch: expected {expected}, verified {verified_amount}" + "Payment amount mismatch: expected {expected}, on-chain {owned_on_chain}" ))); } - info!("Payment verified: {verified_amount} atto received"); + info!("Payment verified: {owned_on_chain} atto received"); + } else { + info!("Payment verified as valid on-chain"); } - Ok(verified_amount) + Ok(total_verified) } } @@ -270,9 +264,7 @@ impl SingleNodePayment { mod tests { use super::*; use alloy::node_bindings::{Anvil, AnvilInstance}; - use evmlib::contract::payment_vault::interface; - use evmlib::quoting_metrics::QuotingMetrics; - use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, Testnet}; + use evmlib::testnet::{deploy_network_token_contract, deploy_payment_vault_contract, Testnet}; use evmlib::transaction_config::TransactionConfig; use evmlib::utils::{dummy_address, dummy_hash}; use evmlib::wallet::Wallet; @@ -285,17 +277,7 @@ mod tests { PaymentQuote { content: XorName::random(&mut rand::thread_rng()), timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - 
close_records_stored: 0,
-                records_per_type: vec![],
-                max_records: 1000,
-                received_payment_count: 0,
-                live_time: 0,
-                network_density: None,
-                network_size: None,
-            },
+            price: Amount::from(1u64),
             rewards_address: RewardsAddress::new([rewards_addr_seed; 20]),
             pub_key: vec![],
             signature: vec![],
@@ -337,7 +319,7 @@ mod tests {
             .await
             .expect("deploy network token");
         let mut payment_vault =
-            deploy_data_payments_contract(&rpc_url, &node, *network_token.contract.address())
+            deploy_payment_vault_contract(&rpc_url, &node, *network_token.contract.address())
                 .await
                 .expect("deploy data payments");
 
@@ -375,23 +357,20 @@ mod tests {
         assert!(result.is_ok(), "Payment failed: {:?}", result.err());
         println!("✓ Paid for {} quotes", quote_payments.len());
 
-        // Verify payments using handler directly
-        let payment_verifications: Vec<_> = quote_payments
-            .into_iter()
-            .map(|v| interface::IPaymentVault::PaymentVerification {
-                metrics: zero_quoting_metrics().into(),
-                rewardsAddress: v.1,
-                quoteHash: v.0,
-            })
-            .collect();
-
-        let results = payment_vault
-            .verify_payment(payment_verifications)
-            .await
-            .expect("Verify payment failed");
+        // Verify payments via completedPayments mapping
+        for (quote_hash, _reward_address, amount) in &quote_payments {
+            let result = payment_vault
+                .contract
+                .completedPayments(*quote_hash)
+                .call()
+                .await
+                .expect("completedPayments lookup failed");
 
-        for result in results {
-            assert!(result.isValid, "Payment verification should be valid");
+            let on_chain_amount = result.amount;
+            assert!(
+                on_chain_amount >= u128::try_from(*amount).expect("amount fits u128"),
+                "On-chain amount should be >= paid amount"
+            );
         }
 
         println!("✓ All 5 payments verified successfully");
@@ -408,7 +387,7 @@ mod tests {
             .await
             .expect("deploy network token");
         let mut payment_vault =
-            deploy_data_payments_contract(&rpc_url, &node, *network_token.contract.address())
+            deploy_payment_vault_contract(&rpc_url, &node, *network_token.contract.address())
                 .await
                 
.expect("deploy data payments"); @@ -452,31 +431,32 @@ mod tests { assert!(result.is_ok(), "Payment failed: {:?}", result.err()); println!("✓ Paid: 1 real (3 atto) + 4 dummy (0 atto)"); - // Verify all 5 payments - let payment_verifications: Vec<_> = quote_payments - .into_iter() - .map(|v| interface::IPaymentVault::PaymentVerification { - metrics: zero_quoting_metrics().into(), - rewardsAddress: v.1, - quoteHash: v.0, - }) - .collect(); + // Verify via completedPayments mapping - let results = payment_vault - .verify_payment(payment_verifications) + // Check that real payment is recorded on-chain + let real_result = payment_vault + .contract + .completedPayments(real_quote_hash) + .call() .await - .expect("Verify payment failed"); + .expect("completedPayments lookup failed"); - // Check that real payment is valid assert!( - results.first().is_some_and(|r| r.isValid), - "Real payment should be valid" + real_result.amount > 0, + "Real payment should have non-zero amount on-chain" ); println!("✓ Real payment verified (3 atto)"); - // Check dummy payments - for (i, result) in results.iter().skip(1).enumerate() { - println!(" Dummy payment {}: valid={}", i + 1, result.isValid); + // Check dummy payments (should have 0 amount) + for (i, (hash, _, _)) in quote_payments.iter().skip(1).enumerate() { + let result = payment_vault + .contract + .completedPayments(*hash) + .call() + .await + .expect("completedPayments lookup failed"); + + println!(" Dummy payment {}: amount={}", i + 1, result.amount); } println!("\n✅ SingleNode payment strategy works!"); @@ -492,17 +472,7 @@ mod tests { let quote = PaymentQuote { content: XorName::random(&mut rand::thread_rng()), timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![(0, 10)], - max_records: 1000, - received_payment_count: 5, - live_time: 3600, - network_density: None, - network_size: Some(100), - }, + price: Amount::from(*price), 
rewards_address: RewardsAddress::new([1u8; 20]), pub_key: vec![], signature: vec![], @@ -633,63 +603,33 @@ mod tests { // Approve tokens wallet - .approve_to_spend_tokens(*network.data_payments_address(), evmlib::common::U256::MAX) + .approve_to_spend_tokens(*network.payment_vault_address(), evmlib::common::U256::MAX) .await .map_err(|e| Error::Payment(format!("Failed to approve tokens: {e}")))?; println!("✓ Approved tokens"); - // Create 5 quotes with real prices from contract + // Create 5 quotes with prices calculated from record counts let chunk_xor = XorName::random(&mut rand::thread_rng()); - let chunk_size = 1024usize; let mut quotes_with_prices = Vec::new(); for i in 0..CLOSE_GROUP_SIZE { - let quoting_metrics = QuotingMetrics { - data_size: chunk_size, - data_type: 0, - close_records_stored: 10 + i, - records_per_type: vec![( - 0, - u32::try_from(10 + i) - .map_err(|e| Error::Payment(format!("Invalid record count: {e}")))?, - )], - max_records: 1000, - received_payment_count: 5, - live_time: 3600, - network_density: None, - network_size: Some(100), - }; - - // Get market price for this quote - // PERF-004: Clone required - payment_vault::get_market_price (external API from evmlib) - // takes ownership of Vec. We need quoting_metrics again below for - // PaymentQuote construction, so the clone is unavoidable. 
- let prices = payment_vault::get_market_price(&network, vec![quoting_metrics.clone()]) - .await - .map_err(|e| Error::Payment(format!("Failed to get market price: {e}")))?; - - let price = prices.first().ok_or_else(|| { - Error::Payment(format!( - "Empty price list from get_market_price for quote {}: expected at least 1 price but got {} elements", - i, - prices.len() - )) - })?; + let records_stored = 10 + i; + let price = crate::payment::pricing::calculate_price(records_stored); let quote = PaymentQuote { content: chunk_xor, timestamp: SystemTime::now(), - quoting_metrics, + price, rewards_address: wallet.address(), pub_key: vec![], signature: vec![], }; - quotes_with_prices.push((quote, *price)); + quotes_with_prices.push((quote, price)); } - println!("✓ Got 5 real quotes from contract"); + println!("✓ Got 5 quotes with calculated prices"); // Create SingleNode payment (will sort internally and select median) let payment = SingleNodePayment::from_quotes(quotes_with_prices)?; diff --git a/src/storage/handler.rs b/src/storage/handler.rs index 038f6c0..68a0358 100644 --- a/src/storage/handler.rs +++ b/src/storage/handler.rs @@ -827,8 +827,8 @@ mod tests { ); assert_eq!(candidate.merkle_payment_timestamp, timestamp); - assert_eq!(candidate.quoting_metrics.data_size, 4096); - assert_eq!(candidate.quoting_metrics.data_type, DATA_TYPE_CHUNK); + // Node-calculated price based on records stored + assert!(candidate.price >= evmlib::common::Amount::ZERO); } other => panic!("expected MerkleCandidateQuoteResponse::Success, got: {other:?}"), } diff --git a/tests/e2e/merkle_payment.rs b/tests/e2e/merkle_payment.rs index ee59522..e9bf6bb 100644 --- a/tests/e2e/merkle_payment.rs +++ b/tests/e2e/merkle_payment.rs @@ -22,11 +22,11 @@ use ant_node::compute_address; use ant_node::payment::{ serialize_merkle_proof, MAX_PAYMENT_PROOF_SIZE_BYTES, MIN_PAYMENT_PROOF_SIZE_BYTES, }; +use evmlib::common::Amount; use evmlib::merkle_payments::{ MerklePaymentCandidateNode, 
MerklePaymentCandidatePool, MerklePaymentProof, MerkleTree, CANDIDATES_PER_POOL, }; -use evmlib::quoting_metrics::QuotingMetrics; use evmlib::testnet::Testnet; use evmlib::RewardsAddress; use rand::Rng; @@ -178,26 +178,16 @@ fn build_candidate_nodes(timestamp: u64) -> [MerklePaymentCandidateNode; CANDIDA std::array::from_fn(|i| { let ml_dsa = MlDsa65::new(); let (pub_key, secret_key) = ml_dsa.generate_keypair().expect("keygen"); - let metrics = QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: i * 10, - records_per_type: vec![], - max_records: 500, - received_payment_count: 0, - live_time: 100, - network_density: None, - network_size: None, - }; + let price = Amount::from(1024u64); #[allow(clippy::cast_possible_truncation)] let reward_address = RewardsAddress::new([i as u8; 20]); - let msg = MerklePaymentCandidateNode::bytes_to_sign(&metrics, &reward_address, timestamp); + let msg = MerklePaymentCandidateNode::bytes_to_sign(&price, &reward_address, timestamp); let sk = MlDsaSecretKey::from_bytes(secret_key.as_bytes()).expect("sk"); let signature = ml_dsa.sign(&sk, &msg).expect("sign").as_bytes().to_vec(); MerklePaymentCandidateNode { pub_key: pub_key.as_bytes().to_vec(), - quoting_metrics: metrics, + price, reward_address, merkle_payment_timestamp: timestamp, signature, From 08bb16f5e240d0c963c36afebd1e1b6c43368d81 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Apr 2026 22:42:11 +0200 Subject: [PATCH 03/17] chore: switch evmlib dependency to Git-based source on refactor/unify-payment-vault-v2 branch --- Cargo.lock | 1 + Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index cfc5f6f..ca3700c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2285,6 +2285,7 @@ dependencies = [ [[package]] name = "evmlib" version = "0.5.0" +source = "git+https://github.com/WithAutonomi/evmlib/?branch=refactor/unify-payment-vault-v2#f4cdd45ea76a98ced0a416794c92b9b4bc2da224" dependencies = [ "alloy", 
"ant-merkle", diff --git a/Cargo.toml b/Cargo.toml index 9185bee..a7a65b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ saorsa-core = "0.21.0" saorsa-pqc = "0.5" # Payment verification - autonomi network lookup + EVM payment -evmlib = { path = "../evmlib" } +evmlib = { git = "https://github.com/WithAutonomi/evmlib/", branch = "refactor/unify-payment-vault-v2" } xor_name = "5" # Caching - LRU cache for verified XorNames From 7998ab42594e0be73cf3196f64854ce5af119192 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Apr 2026 23:49:26 +0200 Subject: [PATCH 04/17] fix: verify merkle payment amounts against contract formula The verifier checked `paid_amount >= node.price` (individual quote) but the contract pays each winner `median16(quotes) * 2^depth / depth`. A winner quoting above the median could be paid less than their quote, causing the node to incorrectly reject a valid payment. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/payment/verifier.rs | 52 ++++++++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/src/payment/verifier.rs b/src/payment/verifier.rs index 407250e..b2acab5 100644 --- a/src/payment/verifier.rs +++ b/src/payment/verifier.rs @@ -10,6 +10,7 @@ use crate::payment::proof::{ deserialize_merkle_proof, deserialize_proof, detect_proof_type, ProofType, }; use crate::payment::quote::{verify_quote_content, verify_quote_signature}; +use evmlib::common::Amount; use evmlib::contract::payment_vault; use evmlib::merkle_batch_payment::{OnChainPaymentInfo, PoolHash}; use evmlib::Network as EvmNetwork; @@ -372,7 +373,7 @@ impl PaymentVerifier { let mut valid_paid_count: usize = 0; for result in &results { - if result.isValid && result.amountPaid > evmlib::common::Amount::ZERO { + if result.isValid && result.amountPaid > Amount::ZERO { valid_paid_count += 1; } } @@ -617,12 +618,32 @@ impl PaymentVerifier { ))); } + // Compute expected per-node payment using the contract formula: + // totalAmount 
= median16(candidate_prices) * (1 << depth) + // amountPerNode = totalAmount / depth + let expected_per_node = if payment_info.depth > 0 { + let mut candidate_prices: Vec = merkle_proof + .winner_pool + .candidate_nodes + .iter() + .map(|c| c.price) + .collect(); + candidate_prices.sort_unstable(); // ascending + // Upper median (index 8 of 16) — matches Solidity's median16 (k = 8) + let median_price = candidate_prices[candidate_prices.len() / 2]; + let total_amount = median_price * Amount::from(1u64 << payment_info.depth); + total_amount / Amount::from(u64::from(payment_info.depth)) + } else { + Amount::ZERO + }; + // Verify paid node indices, addresses, and amounts against the candidate pool. // // Each paid node must: // 1. Have a valid index within the candidate pool // 2. Match the expected reward address at that index - // 3. Have been paid at least the candidate's quoted price + // 3. Have been paid at least the expected per-node amount from the + // contract formula: median16(prices) * 2^depth / depth // // Note: unlike single-node payments, merkle proofs are NOT bound to a // specific storing node. The contract pays `depth` random nodes from the @@ -648,11 +669,12 @@ impl PaymentVerifier { node.reward_address ))); } - if *paid_amount < node.price { + if *paid_amount < expected_per_node { return Err(Error::Payment(format!( "Underpayment for node at index {idx}: paid {paid_amount}, \ - candidate quoted {}", - node.price + expected at least {expected_per_node} \ + (median16 formula, depth={})", + payment_info.depth ))); } } @@ -688,7 +710,6 @@ impl PaymentVerifier { #[allow(clippy::expect_used)] mod tests { use super::*; - use evmlib::common::Amount; /// Create a verifier for unit tests. EVM is always on, but tests can /// pre-populate the cache to bypass on-chain verification. 
@@ -1786,10 +1807,11 @@ mod tests { depth: 2, merkle_payment_timestamp: ts, paid_node_addresses: vec![ - // First paid node: valid (matches candidate 0, price >= 1024) - (RewardsAddress::new([0u8; 20]), 0, Amount::from(1024u64)), + // First paid node: valid (matches candidate 0, amount matches formula) + // Expected per-node: median(1024) * 2^2 / 2 = 2048 + (RewardsAddress::new([0u8; 20]), 0, Amount::from(2048u64)), // Second paid node: index 999 is way beyond CANDIDATES_PER_POOL (16) - (RewardsAddress::new([1u8; 20]), 999, Amount::from(1024u64)), + (RewardsAddress::new([1u8; 20]), 999, Amount::from(2048u64)), ], }; verifier.pool_cache.lock().put(pool_hash, info); @@ -1821,9 +1843,10 @@ mod tests { merkle_payment_timestamp: ts, paid_node_addresses: vec![ // Index 0 with matching address [0x00; 20] - (RewardsAddress::new([0u8; 20]), 0, Amount::from(1024u64)), + // Expected per-node: median(1024) * 2^2 / 2 = 2048 + (RewardsAddress::new([0u8; 20]), 0, Amount::from(2048u64)), // Index 1 with WRONG address — candidate 1's address is [0x01; 20] - (RewardsAddress::new([0xFF; 20]), 1, Amount::from(1024u64)), + (RewardsAddress::new([0xFF; 20]), 1, Amount::from(2048u64)), ], }; verifier.pool_cache.lock().put(pool_hash, info); @@ -1878,8 +1901,9 @@ mod tests { let verifier = create_test_verifier(); let (xorname, tagged_proof, pool_hash, ts) = make_valid_merkle_proof_bytes(); - // Tree depth=2, so 2 paid nodes required. Candidates have price=1024. - // Pay only 1 wei per node — far below the candidate's quoted price. + // Tree depth=2, so 2 paid nodes required. Candidates all quote price=1024. + // Expected per-node: median(1024) * 2^2 / 2 = 2048. + // Pay only 1 wei per node — far below the expected amount. 
{ let info = evmlib::merkle_payments::OnChainPaymentInfo { depth: 2, @@ -1896,7 +1920,7 @@ mod tests { assert!( result.is_err(), - "Should reject merkle payment where paid amount < candidate's quoted price" + "Should reject merkle payment where paid amount < expected per-node amount" ); let err_msg = format!("{}", result.expect_err("should fail")); assert!( From b224ad3ed59e8a80c629264669cf5b3f4c1574f7 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 00:16:54 +0200 Subject: [PATCH 05/17] fix: add backticks in doc comment to fix clippy::doc_markdown warning Co-Authored-By: Claude Opus 4.6 (1M context) --- src/payment/pricing.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/payment/pricing.rs b/src/payment/pricing.rs index 503661f..96c5bd8 100644 --- a/src/payment/pricing.rs +++ b/src/payment/pricing.rs @@ -16,7 +16,7 @@ use evmlib::common::Amount; /// Divisor for the pricing formula. const PRICING_DIVISOR: u64 = 6000; -/// PRICING_DIVISOR², precomputed to avoid repeated multiplication. +/// `PRICING_DIVISOR²`, precomputed to avoid repeated multiplication. const DIVISOR_SQUARED: u64 = PRICING_DIVISOR * PRICING_DIVISOR; /// 1 token = 10^18 wei. From f283b9ac333b1a4a9251a3ece57c9302b24078f9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 00:51:21 +0200 Subject: [PATCH 06/17] fix: use SingleNodePayment to reconstruct paid amounts for verification Integrate `SingleNodePayment::from_quotes` to derive correct on-chain payment amounts. This ensures exact-match checks in the contract's `verifyPayment` function pass by reconstructing amounts as used by the client. 
--- src/payment/verifier.rs | 47 +++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/src/payment/verifier.rs b/src/payment/verifier.rs index b2acab5..92d51c3 100644 --- a/src/payment/verifier.rs +++ b/src/payment/verifier.rs @@ -10,6 +10,7 @@ use crate::payment::proof::{ deserialize_merkle_proof, deserialize_proof, detect_proof_type, ProofType, }; use crate::payment::quote::{verify_quote_content, verify_quote_signature}; +use crate::payment::single_node::SingleNodePayment; use evmlib::common::Amount; use evmlib::contract::payment_vault; use evmlib::merkle_batch_payment::{OnChainPaymentInfo, PoolHash}; @@ -332,30 +333,40 @@ impl PaymentVerifier { // Verify on-chain payment via the contract's verifyPayment function. // // The SingleNode payment model pays only the median-priced quote (at 3x) - // and sends Amount::ZERO for the other 4. The contract's verifyPayment - // checks that each payment's amount and address match what was recorded - // on-chain. We submit all quotes and require at least one to be valid - // with a non-zero amount. - let payment_digest = payment.digest(); - if payment_digest.is_empty() { - return Err(Error::Payment("Payment has no quotes".to_string())); - } + // and sends Amount::ZERO for the other 4. We must reconstruct the same + // payment amounts the client used so the contract's exact-match check + // (`completedPayments[hash].amount == expected`) passes. + // + // ProofOfPayment::digest() returns raw quote prices, NOT the actual paid + // amounts. We use SingleNodePayment::from_quotes() — the same function + // the client uses — to derive the correct on-chain amounts. 
+ let quotes_with_prices: Vec<_> = payment + .peer_quotes + .iter() + .map(|(_, quote)| (quote.clone(), quote.price)) + .collect(); + let single_payment = SingleNodePayment::from_quotes(quotes_with_prices).map_err(|e| { + Error::Payment(format!( + "Failed to reconstruct payment for verification: {e}" + )) + })?; let provider = evmlib::utils::http_provider(self.config.evm.network.rpc_url().clone()); let vault_address = *self.config.evm.network.payment_vault_address(); let contract = evmlib::contract::payment_vault::interface::IPaymentVault::new(vault_address, provider); - // Build DataPayment entries for the contract's verifyPayment call - let data_payments: Vec<_> = payment_digest + // Build DataPayment entries with the actual paid amounts (3x median, 0 others) + let data_payments: Vec<_> = single_payment + .quotes .iter() - .map(|(quote_hash, amount, rewards_address)| { - evmlib::contract::payment_vault::interface::IPaymentVault::DataPayment { - rewardsAddress: *rewards_address, - amount: *amount, - quoteHash: *quote_hash, - } - }) + .map( + |q| evmlib::contract::payment_vault::interface::IPaymentVault::DataPayment { + rewardsAddress: q.rewards_address, + amount: q.amount, + quoteHash: q.quote_hash, + }, + ) .collect(); let results = contract @@ -369,7 +380,7 @@ impl PaymentVerifier { )) })?; - let total_quotes = payment_digest.len(); + let total_quotes = single_payment.quotes.len(); let mut valid_paid_count: usize = 0; for result in &results { From 3900c3cc88360a5262c19554ffaef6804c4a9bbc Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 09:45:09 +0200 Subject: [PATCH 07/17] refactor!: remove max_chunks storage limit cap Nodes should store as many chunks as disk allows. The LMDB map size (db_size_gb) remains as the only storage bound. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- config/production.toml | 3 -- src/config.rs | 7 --- src/devnet.rs | 1 - src/node.rs | 1 - src/storage/handler.rs | 1 - src/storage/lmdb.rs | 82 +++-------------------------------- tests/e2e/data_types/chunk.rs | 1 - tests/e2e/testnet.rs | 1 - 8 files changed, 5 insertions(+), 92 deletions(-) diff --git a/config/production.toml b/config/production.toml index bbcb1a2..ce44e01 100644 --- a/config/production.toml +++ b/config/production.toml @@ -43,9 +43,6 @@ metrics_port = 9100 [storage] enabled = true -# Maximum number of chunks to store (0 = unlimited) -max_chunks = 0 - # Verify content hash on read verify_on_read = true diff --git a/src/config.rs b/src/config.rs index 47cfbeb..260cf3d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -430,7 +430,6 @@ const fn default_bootstrap_stale_days() -> u64 { /// /// Controls how chunks are stored, including: /// - Whether storage is enabled -/// - Maximum chunks to store (for capacity management) /// - Content verification on read #[derive(Debug, Clone, Serialize, Deserialize)] pub struct StorageConfig { @@ -439,11 +438,6 @@ pub struct StorageConfig { #[serde(default = "default_storage_enabled")] pub enabled: bool, - /// Maximum number of chunks to store (0 = unlimited). - /// Default: 0 (unlimited) - #[serde(default)] - pub max_chunks: usize, - /// Verify content hash matches address on read. 
/// Default: true #[serde(default = "default_storage_verify_on_read")] @@ -460,7 +454,6 @@ impl Default for StorageConfig { fn default() -> Self { Self { enabled: default_storage_enabled(), - max_chunks: 0, verify_on_read: default_storage_verify_on_read(), db_size_gb: 0, } diff --git a/src/devnet.rs b/src/devnet.rs index 3b6da0c..f7520c5 100644 --- a/src/devnet.rs +++ b/src/devnet.rs @@ -566,7 +566,6 @@ impl Devnet { let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), verify_on_read: true, - max_chunks: 0, max_map_size: 0, }; let storage = LmdbStorage::new(storage_config) diff --git a/src/node.rs b/src/node.rs index 2a2e724..b0b233d 100644 --- a/src/node.rs +++ b/src/node.rs @@ -330,7 +330,6 @@ impl NodeBuilder { let storage_config = LmdbStorageConfig { root_dir: config.root_dir.clone(), verify_on_read: config.storage.verify_on_read, - max_chunks: config.storage.max_chunks, max_map_size: config.storage.db_size_gb.saturating_mul(1_073_741_824), }; let storage = LmdbStorage::new(storage_config) diff --git a/src/storage/handler.rs b/src/storage/handler.rs index 68a0358..b059eee 100644 --- a/src/storage/handler.rs +++ b/src/storage/handler.rs @@ -402,7 +402,6 @@ mod tests { let storage_config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - max_chunks: 0, max_map_size: 0, }; let storage = Arc::new( diff --git a/src/storage/lmdb.rs b/src/storage/lmdb.rs index 8b60b92..eef7e3c 100644 --- a/src/storage/lmdb.rs +++ b/src/storage/lmdb.rs @@ -27,8 +27,6 @@ pub struct LmdbStorageConfig { pub root_dir: PathBuf, /// Whether to verify content on read (compares hash to address). pub verify_on_read: bool, - /// Maximum number of chunks to store (0 = unlimited). - pub max_chunks: usize, /// Maximum LMDB map size in bytes (0 = use default of 32 GiB). 
pub max_map_size: usize, } @@ -38,7 +36,6 @@ impl Default for LmdbStorageConfig { Self { root_dir: PathBuf::from(".ant/chunks"), verify_on_read: true, - max_chunks: 0, max_map_size: 0, } } @@ -185,11 +182,10 @@ impl LmdbStorage { let value = content.to_vec(); let env = self.env.clone(); let db = self.db; - let max_chunks = self.config.max_chunks; - // Existence check, capacity enforcement, and write all happen atomically - // inside a single write transaction. LMDB serializes write transactions, - // so there are no TOCTOU races or counter-drift issues. + // Existence check and write happen atomically inside a single write + // transaction. LMDB serializes write transactions, so there are no + // TOCTOU races. let was_new = spawn_blocking(move || -> Result { let mut wtxn = env .write_txn() @@ -204,19 +200,6 @@ impl LmdbStorage { return Ok(false); } - // Enforce capacity limit (0 = unlimited) - if max_chunks > 0 { - let current = db - .stat(&wtxn) - .map_err(|e| Error::Storage(format!("Failed to read db stats: {e}")))? 
- .entries; - if current >= max_chunks { - return Err(Error::Storage(format!( - "Storage capacity reached: {current} chunks stored, max is {max_chunks}" - ))); - } - } - db.put(&mut wtxn, &key, &value) .map_err(|e| Error::Storage(format!("Failed to put chunk: {e}")))?; wtxn.commit() @@ -421,7 +404,6 @@ mod tests { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - max_chunks: 0, max_map_size: 0, }; let storage = LmdbStorage::new(config).await.expect("create storage"); @@ -509,34 +491,6 @@ mod tests { assert!(!deleted2); } - #[tokio::test] - async fn test_max_chunks_enforced() { - let temp_dir = TempDir::new().expect("create temp dir"); - let config = LmdbStorageConfig { - root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 2, - max_map_size: 0, - }; - let storage = LmdbStorage::new(config).await.expect("create storage"); - - let content1 = b"chunk one"; - let content2 = b"chunk two"; - let content3 = b"chunk three"; - let addr1 = LmdbStorage::compute_address(content1); - let addr2 = LmdbStorage::compute_address(content2); - let addr3 = LmdbStorage::compute_address(content3); - - // First two should succeed - assert!(storage.put(&addr1, content1).await.is_ok()); - assert!(storage.put(&addr2, content2).await.is_ok()); - - // Third should be rejected - let result = storage.put(&addr3, content3).await; - assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("capacity reached")); - } - #[tokio::test] async fn test_address_mismatch() { let (storage, _temp) = create_test_storage().await; @@ -586,32 +540,6 @@ mod tests { assert_eq!(stats.current_chunks, 2); } - #[tokio::test] - async fn test_capacity_recovers_after_delete() { - let temp_dir = TempDir::new().expect("create temp dir"); - let config = LmdbStorageConfig { - root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 1, - max_map_size: 0, - }; - let storage = 
LmdbStorage::new(config).await.expect("create storage"); - - let first = b"first chunk"; - let second = b"second chunk"; - let addr1 = LmdbStorage::compute_address(first); - let addr2 = LmdbStorage::compute_address(second); - - storage.put(&addr1, first).await.expect("put first"); - storage.delete(&addr1).await.expect("delete first"); - - // Should succeed because delete freed capacity. - storage.put(&addr2, second).await.expect("put second"); - - let stats = storage.stats(); - assert_eq!(stats.current_chunks, 1); - } - #[tokio::test] async fn test_persistence_across_reopen() { let temp_dir = TempDir::new().expect("create temp dir"); @@ -623,7 +551,7 @@ mod tests { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - max_chunks: 0, + max_map_size: 0, }; let storage = LmdbStorage::new(config).await.expect("create storage"); @@ -635,7 +563,7 @@ mod tests { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - max_chunks: 0, + max_map_size: 0, }; let storage = LmdbStorage::new(config).await.expect("reopen storage"); diff --git a/tests/e2e/data_types/chunk.rs b/tests/e2e/data_types/chunk.rs index 557892a..7bbb476 100644 --- a/tests/e2e/data_types/chunk.rs +++ b/tests/e2e/data_types/chunk.rs @@ -432,7 +432,6 @@ mod tests { let storage = LmdbStorage::new(LmdbStorageConfig { root_dir: temp_dir.clone(), verify_on_read: true, - max_chunks: 0, max_map_size: 0, }) .await?; diff --git a/tests/e2e/testnet.rs b/tests/e2e/testnet.rs index ced13a3..6a1a95b 100644 --- a/tests/e2e/testnet.rs +++ b/tests/e2e/testnet.rs @@ -1060,7 +1060,6 @@ impl TestNetwork { let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), verify_on_read: true, - max_chunks: 0, // Unlimited for tests max_map_size: 0, }; let storage = LmdbStorage::new(storage_config) From f320a04bd8acdcf9adc3fb198a73ac18e9b94a40 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 10:13:36 +0200 Subject: [PATCH 
08/17] refactor!: remove max_records and NODE_STORAGE_LIMIT_BYTES Nodes no longer advertise or track a maximum record count. The QuotingMetricsTracker API is simplified to only take initial_records. The evmlib QuotingMetrics::max_records field is set to 0 since we cannot remove it from the external type. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/devnet.rs | 6 +---- src/node.rs | 15 ++--------- src/payment/metrics.rs | 50 +++++++++++++---------------------- src/payment/quote.rs | 22 +++++++-------- src/payment/verifier.rs | 2 +- src/storage/handler.rs | 2 +- tests/e2e/data_types/chunk.rs | 2 +- tests/e2e/testnet.rs | 6 +---- 8 files changed, 36 insertions(+), 69 deletions(-) diff --git a/src/devnet.rs b/src/devnet.rs index f7520c5..9177cec 100644 --- a/src/devnet.rs +++ b/src/devnet.rs @@ -79,9 +79,6 @@ const DEVNET_PAYMENT_CACHE_CAPACITY: usize = 1000; /// Devnet rewards address (20 bytes, all 0x01). const DEVNET_REWARDS_ADDRESS: [u8; 20] = [0x01; 20]; -/// Max records for quoting metrics (devnet value). -const DEVNET_MAX_RECORDS: usize = 100_000; - /// Initial records for quoting metrics (devnet value). const DEVNET_INITIAL_RECORDS: usize = 1000; @@ -586,8 +583,7 @@ impl Devnet { local_rewards_address: rewards_address, }; let payment_verifier = PaymentVerifier::new(payment_config); - let metrics_tracker = - QuotingMetricsTracker::new(DEVNET_MAX_RECORDS, DEVNET_INITIAL_RECORDS); + let metrics_tracker = QuotingMetricsTracker::new(DEVNET_INITIAL_RECORDS); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing from the devnet node's identity diff --git a/src/node.rs b/src/node.rs index b0b233d..e76b028 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,6 +1,6 @@ //! Node implementation - thin wrapper around saorsa-core's `P2PNode`. 
-use crate::ant_protocol::{CHUNK_PROTOCOL_ID, MAX_CHUNK_SIZE}; +use crate::ant_protocol::CHUNK_PROTOCOL_ID; use crate::config::{ default_nodes_dir, default_root_dir, EvmNetworkConfig, NetworkMode, NodeConfig, NODE_IDENTITY_FILENAME, @@ -30,14 +30,6 @@ use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, warn}; -/// Node storage capacity limit (5 GB). -/// -/// Used to derive `max_records` for the quoting metrics pricing curve. -/// A node advertises `NODE_STORAGE_LIMIT_BYTES / MAX_CHUNK_SIZE` as -/// its maximum record count, giving the pricing algorithm a meaningful -/// fullness ratio instead of a hardcoded constant. -pub const NODE_STORAGE_LIMIT_BYTES: u64 = 5 * 1024 * 1024 * 1024; - #[cfg(unix)] use tokio::signal::unix::{signal, SignalKind}; @@ -359,10 +351,7 @@ impl NodeBuilder { local_rewards_address: rewards_address, }; let payment_verifier = PaymentVerifier::new(payment_config); - // Safe: 5GB fits in usize on all supported 64-bit platforms. - #[allow(clippy::cast_possible_truncation)] - let max_records = (NODE_STORAGE_LIMIT_BYTES as usize) / MAX_CHUNK_SIZE; - let metrics_tracker = QuotingMetricsTracker::new(max_records, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing from node identity. diff --git a/src/payment/metrics.rs b/src/payment/metrics.rs index c14e4bb..d2f6528 100644 --- a/src/payment/metrics.rs +++ b/src/payment/metrics.rs @@ -18,13 +18,11 @@ const PERSIST_INTERVAL: usize = 10; /// Tracker for quoting metrics. /// /// Maintains state that influences quote pricing, including payment history, -/// storage capacity, and network information. +/// storage usage, and network information. #[derive(Debug)] pub struct QuotingMetricsTracker { /// Number of payments received by this node. received_payment_count: AtomicUsize, - /// Maximum records the node can store. 
- max_records: usize, /// Number of records currently stored. close_records_stored: AtomicUsize, /// Records stored by type: `Vec<(data_type_index, count)>`. @@ -44,13 +42,11 @@ impl QuotingMetricsTracker { /// /// # Arguments /// - /// * `max_records` - Maximum number of records this node can store /// * `initial_records` - Initial number of records stored #[must_use] - pub fn new(max_records: usize, initial_records: usize) -> Self { + pub fn new(initial_records: usize) -> Self { Self { received_payment_count: AtomicUsize::new(0), - max_records, close_records_stored: AtomicUsize::new(initial_records), records_per_type: RwLock::new(Vec::new()), start_time: Instant::now(), @@ -64,11 +60,10 @@ impl QuotingMetricsTracker { /// /// # Arguments /// - /// * `max_records` - Maximum number of records /// * `persist_path` - Path to persist metrics to disk #[must_use] - pub fn with_persistence(max_records: usize, persist_path: &std::path::Path) -> Self { - let mut tracker = Self::new(max_records, 0); + pub fn with_persistence(persist_path: &std::path::Path) -> Self { + let mut tracker = Self::new(0); tracker.persist_path = Some(persist_path.to_path_buf()); // Try to load existing metrics @@ -153,7 +148,6 @@ impl QuotingMetricsTracker { data_size, close_records_stored: self.close_records_stored.load(Ordering::SeqCst), records_per_type: self.records_per_type.read().clone(), - max_records: self.max_records, received_payment_count: self.received_payment_count.load(Ordering::SeqCst), live_time: self.live_time_hours(), network_density: None, // Not used in pricing; reserved for future DHT range filtering @@ -215,14 +209,14 @@ mod tests { #[test] fn test_new_tracker() { - let tracker = QuotingMetricsTracker::new(1000, 50); + let tracker = QuotingMetricsTracker::new(50); assert_eq!(tracker.payment_count(), 0); assert_eq!(tracker.records_stored(), 50); } #[test] fn test_record_payment() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = 
QuotingMetricsTracker::new(0); assert_eq!(tracker.payment_count(), 0); tracker.record_payment(); @@ -234,7 +228,7 @@ mod tests { #[test] fn test_record_store() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); assert_eq!(tracker.records_stored(), 0); tracker.record_store(0); // Chunk type @@ -250,14 +244,13 @@ mod tests { #[test] fn test_get_metrics() { - let tracker = QuotingMetricsTracker::new(1000, 100); + let tracker = QuotingMetricsTracker::new(100); tracker.record_payment(); tracker.record_payment(); let metrics = tracker.get_metrics(2048, 0); assert_eq!(metrics.data_size, 2048); assert_eq!(metrics.data_type, 0); - assert_eq!(metrics.max_records, 1000); assert_eq!(metrics.close_records_stored, 100); assert_eq!(metrics.received_payment_count, 2); } @@ -269,28 +262,28 @@ mod tests { // Create and populate tracker { - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); tracker.record_payment(); tracker.record_payment(); tracker.record_store(0); } // Load from disk - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); assert_eq!(tracker.payment_count(), 2); assert_eq!(tracker.records_stored(), 1); } #[test] fn test_live_time_hours() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); // Just started, so live_time should be 0 hours assert_eq!(tracker.live_time_hours(), 0); } #[test] fn test_set_network_size() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); tracker.set_network_size(1000); let metrics = tracker.get_metrics(0, 0); @@ -299,7 +292,7 @@ mod tests { #[test] fn test_records_per_type_multiple_types() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); tracker.record_store(0); 
tracker.record_store(0); @@ -326,14 +319,14 @@ mod tests { let path = dir.path().join("metrics_types.bin"); { - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); tracker.record_store(0); tracker.record_store(0); tracker.record_store(1); tracker.record_payment(); } - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); assert_eq!(tracker.payment_count(), 1); assert_eq!(tracker.records_stored(), 3); // 2 type-0 + 1 type-1 @@ -347,21 +340,14 @@ mod tests { let path = dir.path().join("nonexistent_subdir").join("metrics.bin"); // Should not panic — just starts with defaults - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); assert_eq!(tracker.payment_count(), 0); assert_eq!(tracker.records_stored(), 0); } - #[test] - fn test_max_records_zero() { - let tracker = QuotingMetricsTracker::new(0, 0); - let metrics = tracker.get_metrics(1024, 0); - assert_eq!(metrics.max_records, 0); - } - #[test] fn test_get_metrics_passes_data_params() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); let metrics = tracker.get_metrics(4096, 3); assert_eq!(metrics.data_size, 4096); assert_eq!(metrics.data_type, 3); @@ -369,7 +355,7 @@ mod tests { #[test] fn test_default_network_size() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); let metrics = tracker.get_metrics(0, 0); assert_eq!(metrics.network_size, Some(500)); } diff --git a/src/payment/quote.rs b/src/payment/quote.rs index a5c2b10..4d6e616 100644 --- a/src/payment/quote.rs +++ b/src/payment/quote.rs @@ -415,7 +415,7 @@ mod tests { fn create_test_generator() -> QuoteGenerator { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); 
+ let metrics_tracker = QuotingMetricsTracker::new(100); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); @@ -462,7 +462,7 @@ mod tests { #[test] fn test_generator_without_signer() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); assert!(!generator.can_sign()); @@ -478,7 +478,7 @@ mod tests { let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keypair generation"); let rewards_address = RewardsAddress::new([2u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); let pub_key_bytes = public_key.as_bytes().to_vec(); @@ -522,7 +522,7 @@ mod tests { #[test] fn test_rewards_address_getter() { let addr = RewardsAddress::new([42u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let generator = QuoteGenerator::new(addr, metrics_tracker); assert_eq!(*generator.rewards_address(), addr); @@ -531,7 +531,7 @@ mod tests { #[test] fn test_records_stored() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(500, 50); + let metrics_tracker = QuotingMetricsTracker::new(50); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); assert_eq!(generator.records_stored(), 50); @@ -540,7 +540,7 @@ mod tests { #[test] fn test_record_store_delegation() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); generator.record_store(0); @@ -606,7 +606,7 @@ mod tests { #[test] fn 
test_can_sign_after_set_signer() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); assert!(!generator.can_sign()); @@ -620,7 +620,7 @@ mod tests { fn test_wire_ml_dsa_signer_returns_ok_with_valid_identity() { let identity = saorsa_core::identity::NodeIdentity::generate().expect("keypair generation"); let rewards_address = RewardsAddress::new([3u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); let result = wire_ml_dsa_signer(&mut generator, &identity); @@ -634,7 +634,7 @@ mod tests { #[test] fn test_probe_signer_fails_without_signer() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); let result = generator.probe_signer(); @@ -644,7 +644,7 @@ mod tests { #[test] fn test_probe_signer_fails_with_empty_signature() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); generator.set_signer(vec![0u8; 32], |_| vec![]); @@ -659,7 +659,7 @@ mod tests { let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keypair generation"); let rewards_address = RewardsAddress::new([0x42u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(800, 50); + let metrics_tracker = QuotingMetricsTracker::new(50); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing (same as production nodes) diff --git 
a/src/payment/verifier.rs b/src/payment/verifier.rs index 92d51c3..42d109f 100644 --- a/src/payment/verifier.rs +++ b/src/payment/verifier.rs @@ -986,7 +986,7 @@ mod tests { let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keygen"); let rewards_address = RewardsAddress::new([i; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); let pub_key_bytes = public_key.as_bytes().to_vec(); diff --git a/src/storage/handler.rs b/src/storage/handler.rs index b059eee..27f8528 100644 --- a/src/storage/handler.rs +++ b/src/storage/handler.rs @@ -417,7 +417,7 @@ mod tests { local_rewards_address: rewards_address, }; let payment_verifier = Arc::new(PaymentVerifier::new(payment_config)); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing so quote requests succeed diff --git a/tests/e2e/data_types/chunk.rs b/tests/e2e/data_types/chunk.rs index 7bbb476..114b6ad 100644 --- a/tests/e2e/data_types/chunk.rs +++ b/tests/e2e/data_types/chunk.rs @@ -442,7 +442,7 @@ mod tests { cache_capacity: 100, local_rewards_address: rewards_address, }); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); let protocol = AntProtocol::new( diff --git a/tests/e2e/testnet.rs b/tests/e2e/testnet.rs index 6a1a95b..b1c7695 100644 --- a/tests/e2e/testnet.rs +++ b/tests/e2e/testnet.rs @@ -105,10 +105,6 @@ const TEST_PAYMENT_CACHE_CAPACITY: usize = 1000; /// Test rewards address (20 bytes, all 0x01). const TEST_REWARDS_ADDRESS: [u8; 20] = [0x01; 20]; -/// Max records for quoting metrics (derived from node storage limit / max chunk size). 
-/// 5 GB / 4 MB = 1280 records. -const TEST_MAX_RECORDS: usize = 1280; - /// Initial records for quoting metrics (test value). const TEST_INITIAL_RECORDS: usize = 1000; @@ -1080,7 +1076,7 @@ impl TestNetwork { let payment_verifier = PaymentVerifier::new(payment_config); // Create quote generator with ML-DSA-65 signing from the test node's identity - let metrics_tracker = QuotingMetricsTracker::new(TEST_MAX_RECORDS, TEST_INITIAL_RECORDS); + let metrics_tracker = QuotingMetricsTracker::new(TEST_INITIAL_RECORDS); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing so quotes are properly signed and verifiable From 2e48d9ee02f976de5e931619ec53141db4bb0a9a Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 10:47:05 +0200 Subject: [PATCH 09/17] chore: update evmlib to 779b996 (removes max_records from QuotingMetrics) Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 69 +++++++----------------------------------------------- 1 file changed, 8 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ca3700c..1ce74f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2285,7 +2285,7 @@ dependencies = [ [[package]] name = "evmlib" version = "0.5.0" -source = "git+https://github.com/WithAutonomi/evmlib/?branch=refactor/unify-payment-vault-v2#f4cdd45ea76a98ced0a416794c92b9b4bc2da224" +source = "git+https://github.com/WithAutonomi/evmlib/?branch=refactor/unify-payment-vault-v2#779b996b9b02e381e67efb9d0770bec7ba0adcd2" dependencies = [ "alloy", "ant-merkle", @@ -2939,7 +2939,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core", ] [[package]] @@ -6230,7 +6230,7 @@ version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" dependencies = [ - "windows-core 0.58.0", + "windows-core", "windows-targets 0.52.6", ] @@ -6240,26 +6240,13 @@ version = 
"0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ - "windows-implement 0.58.0", - "windows-interface 0.58.0", - "windows-result 0.2.0", - "windows-strings 0.1.0", + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", "windows-targets 0.52.6", ] -[[package]] -name = "windows-core" -version = "0.62.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" -dependencies = [ - "windows-implement 0.60.2", - "windows-interface 0.59.3", - "windows-link", - "windows-result 0.4.1", - "windows-strings 0.5.1", -] - [[package]] name = "windows-implement" version = "0.58.0" @@ -6271,17 +6258,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "windows-implement" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "windows-interface" version = "0.58.0" @@ -6293,17 +6269,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "windows-interface" -version = "0.59.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "windows-link" version = "0.2.1" @@ -6319,34 +6284,16 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-result" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result 0.2.0", + "windows-result", "windows-targets 0.52.6", ] -[[package]] -name = "windows-strings" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-sys" version = "0.45.0" From 19b0d1138a379dbc00e11f148b97c11a21de7972 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 12:31:40 +0200 Subject: [PATCH 10/17] fix: verify median quote payment from all nodes, not just the median MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, verify() checked each node's own on-chain payment amount. Non-median nodes expected 0 and saw 0, so 4/5 nodes couldn't detect underpayment. Now all nodes check the single median quote's on-chain amount against the expected 3× price. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/payment/single_node.rs | 107 ++++++++++--------------------------- 1 file changed, 29 insertions(+), 78 deletions(-) diff --git a/src/payment/single_node.rs b/src/payment/single_node.rs index 2b22e65..f243e65 100644 --- a/src/payment/single_node.rs +++ b/src/payment/single_node.rs @@ -176,87 +176,48 @@ impl SingleNodePayment { Ok(result_hashes) } - /// Verify all payments on-chain. + /// Verify that the median quote was paid at least 3× its price on-chain. /// - /// This checks that all 5 payments were recorded on the blockchain. - /// The contract requires exactly 5 payment verifications. 
- /// - /// # Arguments - /// - /// * `network` - The EVM network to verify on - /// * `owned_quote_hash` - Optional quote hash that this node owns (expects to receive payment) + /// Every node in the close group runs this same check: look up the median + /// quote's on-chain payment amount and confirm it meets the 3× threshold. + /// This ensures all 5 nodes can independently detect underpayment, not + /// just the median node. /// /// # Returns /// - /// The total verified payment amount received by owned quotes. + /// The on-chain payment amount for the median quote. /// /// # Errors /// - /// Returns an error if verification fails or payment is invalid. - pub async fn verify( - &self, - network: &EvmNetwork, - owned_quote_hash: Option, - ) -> Result { - info!( - "Verifying {} payments via completedPayments mapping", - self.quotes.len() - ); + /// Returns an error if the on-chain lookup fails or the median quote + /// was paid less than 3× its price. + pub async fn verify(&self, network: &EvmNetwork) -> Result { + let median = &self.quotes[MEDIAN_INDEX]; + let expected_amount = median.amount; + + info!("Verifying median quote payment: expected at least {expected_amount} atto"); let provider = evmlib::utils::http_provider(network.rpc_url().clone()); let vault_address = *network.payment_vault_address(); let contract = evmlib::contract::payment_vault::interface::IPaymentVault::new(vault_address, provider); - let mut total_verified = Amount::ZERO; - let mut owned_on_chain = Amount::ZERO; - - for quote_info in &self.quotes { - let result = contract - .completedPayments(quote_info.quote_hash) - .call() - .await - .map_err(|e| Error::Payment(format!("completedPayments lookup failed: {e}")))?; - - let on_chain_amount = Amount::from(result.amount); - if on_chain_amount > Amount::ZERO { - total_verified = total_verified.checked_add(on_chain_amount).ok_or_else(|| { - Error::Payment("Overflow summing verified amounts".to_string()) - })?; - - if owned_quote_hash == 
Some(quote_info.quote_hash) { - owned_on_chain = on_chain_amount; - } - } - } - - if total_verified == Amount::ZERO { - return Err(Error::Payment( - "No payments found on-chain for any quote".to_string(), - )); - } + let result = contract + .completedPayments(median.quote_hash) + .call() + .await + .map_err(|e| Error::Payment(format!("completedPayments lookup failed: {e}")))?; - // If we own a quote, verify the amount matches - if let Some(owned_hash) = owned_quote_hash { - let expected = self - .quotes - .iter() - .find(|q| q.quote_hash == owned_hash) - .ok_or_else(|| Error::Payment("Owned quote hash not found in payment".to_string()))? - .amount; - - if owned_on_chain != expected { - return Err(Error::Payment(format!( - "Payment amount mismatch: expected {expected}, on-chain {owned_on_chain}" - ))); - } + let on_chain_amount = Amount::from(result.amount); - info!("Payment verified: {owned_on_chain} atto received"); - } else { - info!("Payment verified as valid on-chain"); + if on_chain_amount < expected_amount { + return Err(Error::Payment(format!( + "Median quote underpaid: on-chain {on_chain_amount}, expected at least {expected_amount}" + ))); } - Ok(total_verified) + info!("Payment verified: {on_chain_amount} atto paid for median quote"); + Ok(on_chain_amount) } } @@ -669,22 +630,12 @@ mod tests { let tx_hashes = payment.pay(&wallet).await?; println!("✓ Payment successful: {} transactions", tx_hashes.len()); - // Verify payment (as owner of median quote) - let median_quote = payment - .quotes - .get(MEDIAN_INDEX) - .ok_or_else(|| { - Error::Payment(format!( - "Index out of bounds: tried to access median index {} but quotes array has {} elements", - MEDIAN_INDEX, - payment.quotes.len() - )) - })?; - let median_quote_hash = median_quote.quote_hash; - let verified_amount = payment.verify(&network, Some(median_quote_hash)).await?; + // Verify median quote payment — all nodes run this same check + let verified_amount = payment.verify(&network).await?; + let 
expected_median_amount = payment.quotes[MEDIAN_INDEX].amount; assert_eq!( - verified_amount, median_quote.amount, + verified_amount, expected_median_amount, "Verified amount should match median payment" ); From 26d1fe6d3ccb1e917c4259d4f46ec51f02b11df9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 12:54:52 +0200 Subject: [PATCH 11/17] feat: auto-scale LMDB map size to available disk space MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the fixed 32 GiB LMDB map ceiling with dynamic sizing that adapts to the storage partition. Nodes can now use all available disk space for chunks without an artificial cap. - Startup: map_size = current_db_size + available_space − reserve - Resize-on-demand: when MDB_MAP_FULL is hit (e.g. operator added storage), re-query disk space, resize map, retry the write - Disk-space guard: refuse writes when available space drops below a configurable reserve (default 1 GiB), with a 5-second TTL cache to avoid a statvfs syscall on every put - Safety: resize never shrinks below env.info().map_size so existing data is never at risk - New config: storage.disk_reserve_gb (default 1) - Operator override: storage.db_size_gb > 0 still imposes a hard cap Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 1 + Cargo.toml | 3 + src/config.rs | 21 ++- src/devnet.rs | 2 +- src/node.rs | 6 +- src/storage/handler.rs | 2 +- src/storage/lmdb.rs | 336 ++++++++++++++++++++++++++++------ tests/e2e/data_types/chunk.rs | 2 +- tests/e2e/testnet.rs | 2 +- 9 files changed, 314 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ce74f1..20a85d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -856,6 +856,7 @@ dependencies = [ "lru", "objc2", "objc2-foundation", + "page_size", "parking_lot", "postcard", "proptest", diff --git a/Cargo.toml b/Cargo.toml index a7a65b8..6bdd4e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,6 +93,9 @@ sha2 = "0.10" # Cross-platform file locking 
for upgrade caches fs2 = "0.4" +# System page size (for LMDB map alignment during resize) +page_size = "0.6" + # Protocol serialization postcard = { version = "1.1.3", features = ["use-std"] } diff --git a/src/config.rs b/src/config.rs index 260cf3d..4079787 100644 --- a/src/config.rs +++ b/src/config.rs @@ -431,6 +431,7 @@ const fn default_bootstrap_stale_days() -> u64 { /// Controls how chunks are stored, including: /// - Whether storage is enabled /// - Content verification on read +/// - Database size limits (auto-scales with available disk by default) #[derive(Debug, Clone, Serialize, Deserialize)] pub struct StorageConfig { /// Enable chunk storage. @@ -443,11 +444,20 @@ pub struct StorageConfig { #[serde(default = "default_storage_verify_on_read")] pub verify_on_read: bool, - /// Maximum LMDB database size in GiB (0 = use default of 32 GiB). - /// On Unix the mmap is a lazy reservation and costs nothing until pages - /// are faulted in. + /// Explicit LMDB database size cap in GiB. + /// + /// When set to 0 (default), the map size is computed automatically from + /// available disk space at startup and grows on demand when the operator + /// adds storage. Set a non-zero value to impose a hard cap. #[serde(default)] pub db_size_gb: usize, + + /// Minimum free disk space (in GiB) to preserve on the storage partition. + /// + /// Writes are refused when available space drops below this threshold, + /// preventing the node from filling the disk completely. Default: 1 GiB. 
+ #[serde(default = "default_disk_reserve_gb")] + pub disk_reserve_gb: u64, } impl Default for StorageConfig { @@ -456,10 +466,15 @@ impl Default for StorageConfig { enabled: default_storage_enabled(), verify_on_read: default_storage_verify_on_read(), db_size_gb: 0, + disk_reserve_gb: default_disk_reserve_gb(), } } } +const fn default_disk_reserve_gb() -> u64 { + 1 +} + const fn default_storage_enabled() -> bool { true } diff --git a/src/devnet.rs b/src/devnet.rs index 9177cec..a702ce5 100644 --- a/src/devnet.rs +++ b/src/devnet.rs @@ -563,7 +563,7 @@ impl Devnet { let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), verify_on_read: true, - max_map_size: 0, + ..LmdbStorageConfig::default() }; let storage = LmdbStorage::new(storage_config) .await diff --git a/src/node.rs b/src/node.rs index e76b028..bcfc02d 100644 --- a/src/node.rs +++ b/src/node.rs @@ -322,7 +322,11 @@ impl NodeBuilder { let storage_config = LmdbStorageConfig { root_dir: config.root_dir.clone(), verify_on_read: config.storage.verify_on_read, - max_map_size: config.storage.db_size_gb.saturating_mul(1_073_741_824), + max_map_size: config.storage.db_size_gb.saturating_mul(1024 * 1024 * 1024), + disk_reserve: config + .storage + .disk_reserve_gb + .saturating_mul(1024 * 1024 * 1024), }; let storage = LmdbStorage::new(storage_config) .await diff --git a/src/storage/handler.rs b/src/storage/handler.rs index 27f8528..95c1262 100644 --- a/src/storage/handler.rs +++ b/src/storage/handler.rs @@ -402,7 +402,7 @@ mod tests { let storage_config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - max_map_size: 0, + ..LmdbStorageConfig::default() }; let storage = Arc::new( LmdbStorage::new(storage_config) diff --git a/src/storage/lmdb.rs b/src/storage/lmdb.rs index eef7e3c..e040f03 100644 --- a/src/storage/lmdb.rs +++ b/src/storage/lmdb.rs @@ -10,15 +10,33 @@ use crate::ant_protocol::XorName; use crate::error::{Error, Result}; use heed::types::Bytes; -use 
heed::{Database, Env, EnvOpenOptions}; +use heed::{Database, Env, EnvOpenOptions, MdbError}; use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Instant; use tokio::task::spawn_blocking; -use tracing::{debug, trace, warn}; +use tracing::{debug, info, trace, warn}; -/// Default LMDB map size: 32 GiB. +/// Bytes in one GiB. +const GIB: u64 = 1024 * 1024 * 1024; + +/// Convert a byte count to GiB for human-readable log messages. +#[allow(clippy::cast_precision_loss)] // display only — sub-byte precision is irrelevant +fn bytes_to_gib(bytes: u64) -> f64 { + bytes as f64 / GIB as f64 +} + +/// Absolute minimum LMDB map size. /// -/// Node operators can override this via `storage.db_size_gb` in `config.toml`. -const DEFAULT_MAX_MAP_SIZE: usize = 32 * 1_073_741_824; // 32 GiB +/// Even on a nearly-full disk the database must be able to open. +/// Set to 256 MiB — enough for millions of LMDB pages. +const MIN_MAP_SIZE: usize = 256 * 1024 * 1024; + +/// How often to re-query available disk space (in seconds). +/// +/// Between checks the cached result is trusted. Disk space changes slowly +/// relative to chunk-write throughput, so a multi-second window is safe. +const DISK_CHECK_INTERVAL_SECS: u64 = 5; /// Configuration for LMDB storage. #[derive(Debug, Clone)] @@ -27,8 +45,15 @@ pub struct LmdbStorageConfig { pub root_dir: PathBuf, /// Whether to verify content on read (compares hash to address). pub verify_on_read: bool, - /// Maximum LMDB map size in bytes (0 = use default of 32 GiB). + /// Explicit LMDB map size cap in bytes. + /// + /// When 0 (default), the map size is computed automatically from available + /// disk space and grows on demand when more storage becomes available. pub max_map_size: usize, + /// Minimum free disk space (in bytes) to preserve on the storage partition. + /// + /// Writes are refused when available space drops below this threshold. 
+    pub disk_reserve: u64,
 }
 
 impl Default for LmdbStorageConfig {
@@ -37,6 +62,7 @@ impl Default for LmdbStorageConfig {
             root_dir: PathBuf::from(".ant/chunks"),
             verify_on_read: true,
             max_map_size: 0,
+            disk_reserve: GIB,
         }
     }
 }
@@ -71,8 +97,21 @@ pub struct LmdbStorage {
     db: Database<Bytes, Bytes>,
     /// Storage configuration.
     config: LmdbStorageConfig,
+    /// Path to the LMDB environment directory (for disk-space queries).
+    env_dir: PathBuf,
     /// Operation statistics.
     stats: parking_lot::RwLock<StorageStats>,
+    /// Serialises access to the LMDB environment during a map resize.
+    ///
+    /// Normal read/write operations acquire a **shared** lock. The rare
+    /// resize path acquires an **exclusive** lock, ensuring no transactions
+    /// are active when `env.resize()` is called (an LMDB safety requirement).
+    env_lock: Arc<parking_lot::RwLock<()>>,
+    /// Timestamp of the last successful disk-space check.
+    ///
+    /// `None` means "never checked — check on next write". Updated only
+    /// after a passing check, so a low-space result is always rechecked.
+    last_disk_ok: parking_lot::Mutex<Option<Instant>>,
 }
 
 impl LmdbStorage {
@@ -80,6 +119,12 @@ impl LmdbStorage {
     ///
     /// Opens (or creates) an LMDB environment at `{root_dir}/chunks.mdb/`.
     ///
+    /// When `config.max_map_size` is 0 (the default) the map size is derived
+    /// from the available disk space on the partition that hosts the database,
+    /// minus `config.disk_reserve`. This allows a node to use all available
+    /// storage without a fixed cap. If the operator adds more storage later
+    /// the map is resized on demand (see [`Self::put`]).
+    ///
     /// # Errors
     ///
     /// Returns an error if the LMDB environment cannot be opened.
@@ -92,9 +137,17 @@ impl LmdbStorage {
             .map_err(|e| Error::Storage(format!("Failed to create LMDB directory: {e}")))?;
 
         let map_size = if config.max_map_size > 0 {
+            // Operator provided an explicit cap.
            config.max_map_size
         } else {
-            DEFAULT_MAX_MAP_SIZE
+            // Auto-scale: current DB footprint + available space − reserve. 
+ let computed = compute_map_size(&env_dir, config.disk_reserve)?; + info!( + "Auto-computed LMDB map size: {:.2} GiB (available disk minus {:.2} GiB reserve)", + bytes_to_gib(computed as u64), + bytes_to_gib(config.disk_reserve), + ); + computed }; let env_dir_clone = env_dir.clone(); @@ -132,12 +185,15 @@ impl LmdbStorage { env, db, config, + env_dir, stats: parking_lot::RwLock::new(StorageStats::default()), + env_lock: Arc::new(parking_lot::RwLock::new(())), + last_disk_ok: parking_lot::Mutex::new(None), }; debug!( "Initialized LMDB storage at {:?} ({} existing chunks)", - env_dir, + storage.env_dir, storage.current_chunks()? ); @@ -146,10 +202,10 @@ impl LmdbStorage { /// Store a chunk. /// - /// # Arguments - /// - /// * `address` - Content address (should be BLAKE3 of content) - /// * `content` - Chunk data + /// Before writing, verifies that available disk space exceeds the + /// configured reserve. If the LMDB map is full but more disk space + /// exists (e.g. the operator added storage), the map is resized + /// automatically and the write is retried. /// /// # Returns /// @@ -157,7 +213,8 @@ impl LmdbStorage { /// /// # Errors /// - /// Returns an error if the write fails or content doesn't match address. + /// Returns an error if the write fails, content doesn't match address, + /// or the disk is too full to accept new chunks. pub async fn put(&self, address: &XorName, content: &[u8]) -> Result { // Verify content address let computed = Self::compute_address(content); @@ -169,6 +226,9 @@ impl LmdbStorage { ))); } + // ── Disk-space guard (cached — at most one syscall per interval) ─ + self.check_disk_space_cached()?; + // Fast-path duplicate check (read-only, no write lock needed). // This is an optimistic hint — the authoritative check happens inside // the write transaction below to prevent TOCTOU races. 
@@ -178,15 +238,63 @@
         return Ok(false);
     }
 
+        // ── Write (with resize-on-demand) ───────────────────────────────
+        match self.try_put(address, content).await? {
+            PutOutcome::New => {}
+            PutOutcome::Duplicate => {
+                trace!("Chunk {} already exists", hex::encode(address));
+                self.stats.write().duplicates += 1;
+                return Ok(false);
+            }
+            PutOutcome::MapFull => {
+                // The map ceiling was reached but there may be more disk space
+                // available (e.g. operator added storage).
+                self.try_resize().await?;
+                // Retry once after resize.
+                match self.try_put(address, content).await? {
+                    PutOutcome::New => {}
+                    PutOutcome::Duplicate => {
+                        self.stats.write().duplicates += 1;
+                        return Ok(false);
+                    }
+                    PutOutcome::MapFull => {
+                        return Err(Error::Storage(
+                            "LMDB map full after resize — disk may be at capacity".into(),
+                        ));
+                    }
+                }
+            }
+        }
+
+        {
+            let mut stats = self.stats.write();
+            stats.chunks_stored += 1;
+            stats.bytes_stored += content.len() as u64;
+        }
+
+        debug!(
+            "Stored chunk {} ({} bytes)",
+            hex::encode(address),
+            content.len()
+        );
+
+        Ok(true)
+    }
+
+    /// Attempt a single put inside a write transaction.
+    ///
+    /// Returns [`PutOutcome::MapFull`] instead of an error when the LMDB map
+    /// ceiling is reached, so the caller can resize and retry.
+    async fn try_put(&self, address: &XorName, content: &[u8]) -> Result<PutOutcome> {
         let key = *address;
         let value = content.to_vec();
         let env = self.env.clone();
         let db = self.db;
+        let lock = Arc::clone(&self.env_lock);
+
+        spawn_blocking(move || -> Result<PutOutcome> {
+            let _guard = lock.read();
 
-        // Existence check and write happen atomically inside a single write
-        // transaction. LMDB serializes write transactions, so there are no
-        // TOCTOU races. 
-        let was_new = spawn_blocking(move || -> Result<bool> {
             let mut wtxn = env
                 .write_txn()
                 .map_err(|e| Error::Storage(format!("Failed to create write txn: {e}")))?;
@@ -197,45 +305,29 @@
             if db
                 .get(&wtxn, &key)
                 .map_err(|e| Error::Storage(format!("Failed to check existence: {e}")))?
                 .is_some()
             {
-                return Ok(false);
+                return Ok(PutOutcome::Duplicate);
             }
 
-            db.put(&mut wtxn, &key, &value)
-                .map_err(|e| Error::Storage(format!("Failed to put chunk: {e}")))?;
-            wtxn.commit()
-                .map_err(|e| Error::Storage(format!("Failed to commit put: {e}")))?;
-            Ok(true)
+            match db.put(&mut wtxn, &key, &value) {
+                Ok(()) => {}
+                Err(heed::Error::Mdb(MdbError::MapFull)) => return Ok(PutOutcome::MapFull),
+                Err(e) => {
+                    return Err(Error::Storage(format!("Failed to put chunk: {e}")));
+                }
+            }
+
+            match wtxn.commit() {
+                Ok(()) => Ok(PutOutcome::New),
+                Err(heed::Error::Mdb(MdbError::MapFull)) => Ok(PutOutcome::MapFull),
+                Err(e) => Err(Error::Storage(format!("Failed to commit put: {e}"))),
+            }
         })
         .await
-        .map_err(|e| Error::Storage(format!("LMDB put task failed: {e}")))??;
-
-        if !was_new {
-            trace!("Chunk {} already exists", hex::encode(address));
-            self.stats.write().duplicates += 1;
-            return Ok(false);
-        }
-
-        {
-            let mut stats = self.stats.write();
-            stats.chunks_stored += 1;
-            stats.bytes_stored += content.len() as u64;
-        }
-
-        debug!(
-            "Stored chunk {} ({} bytes)",
-            hex::encode(address),
-            content.len()
-        );
-
-        Ok(true)
+        .map_err(|e| Error::Storage(format!("LMDB put task failed: {e}")))?
     }
 
     /// Retrieve a chunk.
     ///
-    /// # Arguments
-    ///
-    /// * `address` - Content address to retrieve
-    ///
     /// # Returns
     ///
     /// Returns `Some(content)` if found, `None` if not found. 
@@ -247,8 +339,10 @@
         let key = *address;
         let env = self.env.clone();
         let db = self.db;
+        let lock = Arc::clone(&self.env_lock);
 
         let content = spawn_blocking(move || -> Result<Option<Vec<u8>>> {
+            let _guard = lock.read();
             let rtxn = env
                 .read_txn()
                 .map_err(|e| Error::Storage(format!("Failed to create read txn: {e}")))?;
@@ -303,6 +397,7 @@
     ///
     /// Returns an error if the LMDB read transaction fails.
     pub fn exists(&self, address: &XorName) -> Result<bool> {
+        let _guard = self.env_lock.read();
         let rtxn = self
             .env
             .read_txn()
@@ -324,8 +419,10 @@
         let key = *address;
         let env = self.env.clone();
         let db = self.db;
+        let lock = Arc::clone(&self.env_lock);
 
         let deleted = spawn_blocking(move || -> Result<bool> {
+            let _guard = lock.read();
             let mut wtxn = env
                 .write_txn()
                 .map_err(|e| Error::Storage(format!("Failed to create write txn: {e}")))?;
@@ -368,6 +465,7 @@
     ///
     /// Returns an error if the LMDB read transaction fails.
     pub fn current_chunks(&self) -> Result<u64> {
+        let _guard = self.env_lock.read();
         let rtxn = self
             .env
             .read_txn()
@@ -391,6 +489,140 @@
     pub fn root_dir(&self) -> &Path {
         &self.config.root_dir
     }
+
+    /// Check available disk space, skipping the syscall if a recent check passed.
+    ///
+    /// Only caches *passing* results — a low-space condition is always
+    /// rechecked so we detect freed space promptly.
+    fn check_disk_space_cached(&self) -> Result<()> {
+        {
+            let last = self.last_disk_ok.lock();
+            if let Some(t) = *last {
+                if t.elapsed().as_secs() < DISK_CHECK_INTERVAL_SECS {
+                    return Ok(());
+                }
+            }
+        }
+        // Cache miss or stale — perform the actual statvfs check.
+        check_disk_space(&self.env_dir, self.config.disk_reserve)?;
+        // Passed — update the cache timestamp.
+        *self.last_disk_ok.lock() = Some(Instant::now());
+        Ok(())
+    }
+
+    /// Grow the LMDB map to match currently available disk space.
+    ///
+    /// The new size is the **larger** of:
+    /// 1. 
the current map size (so existing data is never truncated), and + /// 2. `current_db_file_size + available_space − reserve` + /// (so all reachable disk space can be used). + /// + /// Acquires an **exclusive** lock on `env_lock` so that no read or write + /// transactions are active when the underlying `mdb_env_set_mapsize` is + /// called (an LMDB safety requirement). + #[allow(unsafe_code)] + async fn try_resize(&self) -> Result<()> { + let from_disk = compute_map_size(&self.env_dir, self.config.disk_reserve)?; + let env = self.env.clone(); + let lock = Arc::clone(&self.env_lock); + + spawn_blocking(move || -> Result<()> { + // Exclusive lock guarantees no concurrent transactions. + let _guard = lock.write(); + + // Never shrink below the current map — existing data must remain + // addressable regardless of what the disk-space calculation says. + let current_map = env.info().map_size; + let new_size = from_disk.max(current_map); + + if new_size <= current_map { + debug!("LMDB map resize skipped — no additional disk space available"); + return Ok(()); + } + + // SAFETY: We hold an exclusive lock, so no transactions are active. + unsafe { + env.resize(new_size) + .map_err(|e| Error::Storage(format!("Failed to resize LMDB map: {e}")))?; + } + + info!( + "Resized LMDB map to {:.2} GiB (was {:.2} GiB)", + bytes_to_gib(new_size as u64), + bytes_to_gib(current_map as u64), + ); + Ok(()) + }) + .await + .map_err(|e| Error::Storage(format!("LMDB resize task failed: {e}")))? + } +} + +// ──────────────────────────────────────────────────────────────────────────── +// Helpers +// ──────────────────────────────────────────────────────────────────────────── + +/// Outcome of a single `try_put` attempt. +enum PutOutcome { + /// Chunk was newly stored. + New, + /// Chunk already existed (idempotent). + Duplicate, + /// The LMDB map ceiling was reached — caller should resize and retry. + MapFull, +} + +/// Compute the LMDB map size from the disk hosting `db_dir`. 
+///
+/// The result covers **all existing data** plus all remaining usable disk
+/// space:
+///
+/// ```text
+/// map_size = current_db_file_size + max(0, available_space − reserve)
+/// ```
+///
+/// `available_space` (from `statvfs`) reports only the *free* bytes on the
+/// partition — the DB file's own footprint is **not** included, so adding
+/// it back ensures the map is always large enough for the data already
+/// stored.
+///
+/// The result is page-aligned and never falls below [`MIN_MAP_SIZE`].
+fn compute_map_size(db_dir: &Path, reserve: u64) -> Result<usize> {
+    let available = fs2::available_space(db_dir)
+        .map_err(|e| Error::Storage(format!("Failed to query available disk space: {e}")))?;
+
+    // The MDB data file may not exist yet on first run.
+    let mdb_file = db_dir.join("data.mdb");
+    let current_db_bytes = std::fs::metadata(&mdb_file).map(|m| m.len()).unwrap_or(0);
+
+    // available_space excludes the DB file, so we add it back to get the
+    // total space the DB could occupy while still leaving `reserve` free.
+    let growth_room = available.saturating_sub(reserve);
+    let target = current_db_bytes.saturating_add(growth_room);
+
+    // Align up to system page size (required by heed's resize).
+    let page = page_size::get() as u64;
+    let aligned = target.div_ceil(page) * page;
+
+    let result = usize::try_from(aligned).unwrap_or(usize::MAX);
+    Ok(result.max(MIN_MAP_SIZE))
+}
+
+/// Reject the write early if available disk space is below `reserve`.
+fn check_disk_space(db_dir: &Path, reserve: u64) -> Result<()> {
+    let available = fs2::available_space(db_dir)
+        .map_err(|e| Error::Storage(format!("Failed to query available disk space: {e}")))?;
+
+    if available < reserve {
+        return Err(Error::Storage(format!(
+            "Insufficient disk space: {:.2} GiB available, {:.2} GiB reserve required. 
\ + Free disk space or increase the partition to continue storing chunks.", + bytes_to_gib(available), + bytes_to_gib(reserve), + ))); + } + + Ok(()) } #[cfg(test)] @@ -404,7 +636,7 @@ mod tests { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - max_map_size: 0, + ..LmdbStorageConfig::default() }; let storage = LmdbStorage::new(config).await.expect("create storage"); (storage, temp_dir) @@ -551,8 +783,7 @@ mod tests { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - - max_map_size: 0, + ..LmdbStorageConfig::default() }; let storage = LmdbStorage::new(config).await.expect("create storage"); storage.put(&address, content).await.expect("put"); @@ -563,8 +794,7 @@ mod tests { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), verify_on_read: true, - - max_map_size: 0, + ..LmdbStorageConfig::default() }; let storage = LmdbStorage::new(config).await.expect("reopen storage"); assert_eq!(storage.current_chunks().expect("current_chunks"), 1); diff --git a/tests/e2e/data_types/chunk.rs b/tests/e2e/data_types/chunk.rs index 114b6ad..09fd7cd 100644 --- a/tests/e2e/data_types/chunk.rs +++ b/tests/e2e/data_types/chunk.rs @@ -432,7 +432,7 @@ mod tests { let storage = LmdbStorage::new(LmdbStorageConfig { root_dir: temp_dir.clone(), verify_on_read: true, - max_map_size: 0, + ..LmdbStorageConfig::default() }) .await?; diff --git a/tests/e2e/testnet.rs b/tests/e2e/testnet.rs index b1c7695..22792c4 100644 --- a/tests/e2e/testnet.rs +++ b/tests/e2e/testnet.rs @@ -1056,7 +1056,7 @@ impl TestNetwork { let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), verify_on_read: true, - max_map_size: 0, + ..LmdbStorageConfig::default() }; let storage = LmdbStorage::new(storage_config) .await From 109f184bd7017291f52be6985a2df55004f05640 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 15:19:10 +0200 Subject: [PATCH 12/17] refactor: simplify 
single-node payment verification to median-only check Replace the batch verifyPayment(DataPayment[5]) contract call with a single completedPayments(quoteHash) lookup on just the median quote, verifying it was paid at least 3x its price on-chain. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/payment/verifier.rs | 78 ++++++++++------------------------------- 1 file changed, 18 insertions(+), 60 deletions(-) diff --git a/src/payment/verifier.rs b/src/payment/verifier.rs index 42d109f..0a778a9 100644 --- a/src/payment/verifier.rs +++ b/src/payment/verifier.rs @@ -289,15 +289,17 @@ impl PaymentVerifier { self.cache.insert(xorname); } - /// Verify an EVM payment proof. + /// Verify a single-node EVM payment proof. /// - /// This verification ALWAYS validates payment proofs on-chain. - /// It verifies that: - /// 1. All quotes target the correct content address (xorname binding) - /// 2. All quote ML-DSA-65 signatures are valid (offloaded to a blocking - /// thread via `spawn_blocking` since post-quantum signature verification - /// is CPU-intensive) - /// 3. The payment was made on-chain via the EVM payment vault contract + /// Verification steps: + /// 1. Exactly `CLOSE_GROUP_SIZE` quotes are present + /// 2. All quotes target the correct content address (xorname binding) + /// 3. Quote timestamps are fresh (not expired or future-dated) + /// 4. Peer ID bindings match the ML-DSA-65 public keys + /// 5. This node is among the quoted recipients + /// 6. All ML-DSA-65 signatures are valid (offloaded to `spawn_blocking`) + /// 7. 
The median-priced quote was paid at least 3x its price on-chain + /// (looked up via `completedPayments(quoteHash)` on the payment vault) /// /// For unit tests that don't need on-chain verification, pre-populate /// the cache so `verify_payment` returns `CachedAsVerified` before @@ -330,16 +332,8 @@ impl PaymentVerifier { .await .map_err(|e| Error::Payment(format!("Signature verification task failed: {e}")))??; - // Verify on-chain payment via the contract's verifyPayment function. - // - // The SingleNode payment model pays only the median-priced quote (at 3x) - // and sends Amount::ZERO for the other 4. We must reconstruct the same - // payment amounts the client used so the contract's exact-match check - // (`completedPayments[hash].amount == expected`) passes. - // - // ProofOfPayment::digest() returns raw quote prices, NOT the actual paid - // amounts. We use SingleNodePayment::from_quotes() — the same function - // the client uses — to derive the correct on-chain amounts. + // Reconstruct the SingleNodePayment to identify the median quote. + // from_quotes() sorts by price and marks the median for 3x payment. 
let quotes_with_prices: Vec<_> = payment .peer_quotes .iter() @@ -351,57 +345,21 @@ impl PaymentVerifier { )) })?; - let provider = evmlib::utils::http_provider(self.config.evm.network.rpc_url().clone()); - let vault_address = *self.config.evm.network.payment_vault_address(); - let contract = - evmlib::contract::payment_vault::interface::IPaymentVault::new(vault_address, provider); - - // Build DataPayment entries with the actual paid amounts (3x median, 0 others) - let data_payments: Vec<_> = single_payment - .quotes - .iter() - .map( - |q| evmlib::contract::payment_vault::interface::IPaymentVault::DataPayment { - rewardsAddress: q.rewards_address, - amount: q.amount, - quoteHash: q.quote_hash, - }, - ) - .collect(); - - let results = contract - .verifyPayment(data_payments) - .call() + // Verify the median quote was paid at least 3x its price on-chain + // via completedPayments(quoteHash) on the payment vault contract. + let verified_amount = single_payment + .verify(&self.config.evm.network) .await .map_err(|e| { let xorname_hex = hex::encode(xorname); Error::Payment(format!( - "EVM verifyPayment call failed for {xorname_hex}: {e}" + "Median quote payment verification failed for {xorname_hex}: {e}" )) })?; - let total_quotes = single_payment.quotes.len(); - let mut valid_paid_count: usize = 0; - - for result in &results { - if result.isValid && result.amountPaid > Amount::ZERO { - valid_paid_count += 1; - } - } - - if valid_paid_count == 0 { - let xorname_hex = hex::encode(xorname); - return Err(Error::Payment(format!( - "Payment verification failed on-chain for {xorname_hex}: \ - no valid paid quotes found ({total_quotes} checked)" - ))); - } - if tracing::enabled!(tracing::Level::INFO) { let xorname_hex = hex::encode(xorname); - info!( - "EVM payment verified for {xorname_hex} ({valid_paid_count} valid, {total_quotes} total quotes)" - ); + info!("EVM payment verified for {xorname_hex} (median paid {verified_amount} atto)"); } Ok(()) } From 
1ca840ced9f97c9f1a54474319a9995d8b0d26c9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 15:30:35 +0200 Subject: [PATCH 13/17] fix: check all tied median quotes during single-node payment verification When multiple quotes share the median price, stable sort ordering may differ between client and verifier, causing the verifier to check the wrong quote hash on-chain. Now checks all tied quotes and accepts if any one was paid correctly. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/payment/single_node.rs | 96 +++++++++++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 22 deletions(-) diff --git a/src/payment/single_node.rs b/src/payment/single_node.rs index f243e65..a200332 100644 --- a/src/payment/single_node.rs +++ b/src/payment/single_node.rs @@ -44,6 +44,8 @@ pub struct QuotePaymentInfo { pub rewards_address: RewardsAddress, /// The amount to pay (3x for median, 0 for others) pub amount: Amount, + /// The original quoted price (before 3x multiplier) + pub price: Amount, } impl SingleNodePayment { @@ -91,7 +93,7 @@ impl SingleNodePayment { let quotes_vec: Vec = quotes_with_prices .into_iter() .enumerate() - .map(|(idx, (quote, _))| QuotePaymentInfo { + .map(|(idx, (quote, price))| QuotePaymentInfo { quote_hash: quote.hash(), rewards_address: quote.rewards_address, amount: if idx == MEDIAN_INDEX { @@ -99,6 +101,7 @@ impl SingleNodePayment { } else { Amount::ZERO }, + price, }) .collect(); @@ -176,48 +179,63 @@ impl SingleNodePayment { Ok(result_hashes) } - /// Verify that the median quote was paid at least 3× its price on-chain. + /// Verify that a median-priced quote was paid at least 3× its price on-chain. /// - /// Every node in the close group runs this same check: look up the median - /// quote's on-chain payment amount and confirm it meets the 3× threshold. - /// This ensures all 5 nodes can independently detect underpayment, not - /// just the median node. 
+ /// When multiple quotes share the median price (a tie), the client and + /// verifier may sort them in different order. This method checks all + /// quotes tied at the median price and accepts the payment if any one + /// of them was paid the correct amount. /// /// # Returns /// - /// The on-chain payment amount for the median quote. + /// The on-chain payment amount for the verified quote. /// /// # Errors /// - /// Returns an error if the on-chain lookup fails or the median quote - /// was paid less than 3× its price. + /// Returns an error if the on-chain lookup fails or none of the + /// median-priced quotes were paid at least 3× the median price. pub async fn verify(&self, network: &EvmNetwork) -> Result { let median = &self.quotes[MEDIAN_INDEX]; + let median_price = median.price; let expected_amount = median.amount; - info!("Verifying median quote payment: expected at least {expected_amount} atto"); + // Collect all quotes tied at the median price + let tied_quotes: Vec<&QuotePaymentInfo> = self + .quotes + .iter() + .filter(|q| q.price == median_price) + .collect(); + + info!( + "Verifying median quote payment: expected at least {expected_amount} atto, {} quote(s) tied at median price", + tied_quotes.len() + ); let provider = evmlib::utils::http_provider(network.rpc_url().clone()); let vault_address = *network.payment_vault_address(); let contract = evmlib::contract::payment_vault::interface::IPaymentVault::new(vault_address, provider); - let result = contract - .completedPayments(median.quote_hash) - .call() - .await - .map_err(|e| Error::Payment(format!("completedPayments lookup failed: {e}")))?; + // Check each tied quote — accept if any one was paid correctly + for candidate in &tied_quotes { + let result = contract + .completedPayments(candidate.quote_hash) + .call() + .await + .map_err(|e| Error::Payment(format!("completedPayments lookup failed: {e}")))?; - let on_chain_amount = Amount::from(result.amount); + let on_chain_amount = 
Amount::from(result.amount); - if on_chain_amount < expected_amount { - return Err(Error::Payment(format!( - "Median quote underpaid: on-chain {on_chain_amount}, expected at least {expected_amount}" - ))); + if on_chain_amount >= expected_amount { + info!("Payment verified: {on_chain_amount} atto paid for median-priced quote"); + return Ok(on_chain_amount); + } } - info!("Payment verified: {on_chain_amount} atto paid for median quote"); - Ok(on_chain_amount) + Err(Error::Payment(format!( + "No median-priced quote was paid enough: expected at least {expected_amount}, checked {} tied quote(s)", + tied_quotes.len() + ))) } } @@ -531,6 +549,40 @@ mod tests { assert_eq!(addresses.len(), CLOSE_GROUP_SIZE); } + #[test] + #[allow(clippy::unwrap_used)] + fn test_tied_median_prices_all_share_median_price() { + // Prices: 10, 30, 30, 30, 50 — three quotes tied at median price 30 + let prices = [10u64, 30, 30, 30, 50]; + let mut quotes_with_prices = Vec::new(); + + for (i, price) in prices.iter().enumerate() { + let quote = PaymentQuote { + content: XorName::random(&mut rand::thread_rng()), + timestamp: SystemTime::now(), + price: Amount::from(*price), + rewards_address: RewardsAddress::new([i as u8 + 1; 20]), + pub_key: vec![], + signature: vec![], + }; + quotes_with_prices.push((quote, Amount::from(*price))); + } + + let payment = SingleNodePayment::from_quotes(quotes_with_prices).unwrap(); + + // All three tied quotes should have price == 30 + let tied_count = payment + .quotes + .iter() + .filter(|q| q.price == Amount::from(30u64)) + .count(); + assert_eq!(tied_count, 3, "Should have 3 quotes tied at median price"); + + // Only the median index gets the 3x amount + assert_eq!(payment.quotes[MEDIAN_INDEX].amount, Amount::from(90u64)); + assert_eq!(payment.total_amount(), Amount::from(90u64)); + } + #[test] #[allow(clippy::unwrap_used)] fn test_total_amount_equals_3x_median() { From 2eb6fb22369b118ecb918fe1a5311a1231aa26b3 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 
2 Apr 2026 16:21:48 +0200 Subject: [PATCH 14/17] fix: suppress clippy::cast_possible_truncation lint in single-node rewards address generation Add `#[allow(clippy::cast_possible_truncation)]` to mark the cast as intentionally safe: `i` is always < 5, so `i as u8` cannot truncate. --- src/payment/single_node.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/payment/single_node.rs b/src/payment/single_node.rs index a200332..4edebc9 100644 --- a/src/payment/single_node.rs +++ b/src/payment/single_node.rs @@ -561,6 +561,7 @@ mod tests { content: XorName::random(&mut rand::thread_rng()), timestamp: SystemTime::now(), price: Amount::from(*price), + #[allow(clippy::cast_possible_truncation)] // i is always < 5 rewards_address: RewardsAddress::new([i as u8 + 1; 20]), pub_key: vec![], signature: vec![], From 347a5ac5f7e5a3d6a897e1672daec393fbd3a742 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 17:33:24 +0200 Subject: [PATCH 15/17] fix: use zero disk_reserve in test configs to avoid CI flakes on constrained runners LmdbStorageConfig::default() sets disk_reserve to 1 GiB, which makes tests depend on the host having that much free space. Add a test_default() constructor with disk_reserve=0 and use it in all test/e2e configs. NOTE(review): switching to `..LmdbStorageConfig::test_default()` also drops the explicit `verify_on_read: true` overrides from those configs, leaving `verify_on_read` at whatever `Default` provides — confirm that is intended. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- src/storage/handler.rs | 3 +-- src/storage/lmdb.rs | 22 ++++++++++++++++------ tests/e2e/data_types/chunk.rs | 3 +-- tests/e2e/testnet.rs | 3 +-- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/storage/handler.rs b/src/storage/handler.rs index 95c1262..5bed0ab 100644 --- a/src/storage/handler.rs +++ b/src/storage/handler.rs @@ -401,8 +401,7 @@ mod tests { let storage_config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - ..LmdbStorageConfig::default() + ..LmdbStorageConfig::test_default() }; let storage = Arc::new( LmdbStorage::new(storage_config) diff --git a/src/storage/lmdb.rs b/src/storage/lmdb.rs index e040f03..5f756cd 100644 --- a/src/storage/lmdb.rs +++ b/src/storage/lmdb.rs @@ -67,6 +67,19 @@ impl Default for LmdbStorageConfig { } } +impl LmdbStorageConfig { + /// A test-friendly default with `disk_reserve` set to 0 so unit tests + /// don't depend on the host having >= 1 GiB free disk space. + #[cfg(any(test, feature = "test-utils"))] + #[must_use] + pub fn test_default() -> Self { + Self { + disk_reserve: 0, + ..Self::default() + } + } +} + /// Statistics about storage operations. 
#[derive(Debug, Clone, Default)] pub struct StorageStats { @@ -635,8 +648,7 @@ mod tests { let temp_dir = TempDir::new().expect("create temp dir"); let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - ..LmdbStorageConfig::default() + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(config).await.expect("create storage"); (storage, temp_dir) @@ -782,8 +794,7 @@ mod tests { { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - ..LmdbStorageConfig::default() + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(config).await.expect("create storage"); storage.put(&address, content).await.expect("put"); @@ -793,8 +804,7 @@ mod tests { { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - ..LmdbStorageConfig::default() + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(config).await.expect("reopen storage"); assert_eq!(storage.current_chunks().expect("current_chunks"), 1); diff --git a/tests/e2e/data_types/chunk.rs b/tests/e2e/data_types/chunk.rs index 09fd7cd..c4a2d70 100644 --- a/tests/e2e/data_types/chunk.rs +++ b/tests/e2e/data_types/chunk.rs @@ -431,8 +431,7 @@ mod tests { let storage = LmdbStorage::new(LmdbStorageConfig { root_dir: temp_dir.clone(), - verify_on_read: true, - ..LmdbStorageConfig::default() + ..LmdbStorageConfig::test_default() }) .await?; diff --git a/tests/e2e/testnet.rs b/tests/e2e/testnet.rs index 22792c4..7fbb22f 100644 --- a/tests/e2e/testnet.rs +++ b/tests/e2e/testnet.rs @@ -1055,8 +1055,7 @@ impl TestNetwork { // Create LMDB storage let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), - verify_on_read: true, - ..LmdbStorageConfig::default() + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(storage_config) .await From 41511ab2339a0e2bd3a1904c50e5cc63de9283bc Mon Sep 17 00:00:00 2001 From: Warm Beer 
Date: Thu, 2 Apr 2026 17:35:46 +0200 Subject: [PATCH 16/17] chore: update evmlib to 0.7.0 from crates.io for payment verification --- Cargo.lock | 5 +++-- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20a85d4..88a5f64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2285,8 +2285,9 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.5.0" -source = "git+https://github.com/WithAutonomi/evmlib/?branch=refactor/unify-payment-vault-v2#779b996b9b02e381e67efb9d0770bec7ba0adcd2" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af67fe5494790b75d91fed4c5dd3215098e5adf071f73f40a238d199116c75ac" dependencies = [ "alloy", "ant-merkle", diff --git a/Cargo.toml b/Cargo.toml index 6bdd4e2..203d89e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ saorsa-core = "0.21.0" saorsa-pqc = "0.5" # Payment verification - autonomi network lookup + EVM payment -evmlib = { git = "https://github.com/WithAutonomi/evmlib/", branch = "refactor/unify-payment-vault-v2" } +evmlib = "0.7" xor_name = "5" # Caching - LRU cache for verified XorNames From 432c9a522421451210cb82978cdcca96d0d5f38c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Apr 2026 18:23:44 +0200 Subject: [PATCH 17/17] fix: move disk-space guard after duplicate check to preserve put() idempotency Storing an already-present chunk could fail with "Insufficient disk space" because the reserve check ran before the fast-path duplicate return. Since duplicates don't write anything, the guard is unnecessary for them. Moving it after the exists() check keeps duplicate puts as harmless no-ops. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- src/storage/lmdb.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/storage/lmdb.rs b/src/storage/lmdb.rs index 5f756cd..bc22211 100644 --- a/src/storage/lmdb.rs +++ b/src/storage/lmdb.rs @@ -239,9 +239,6 @@ impl LmdbStorage { ))); } - // ── Disk-space guard (cached — at most one syscall per interval) ─ - self.check_disk_space_cached()?; - // Fast-path duplicate check (read-only, no write lock needed). // This is an optimistic hint — the authoritative check happens inside // the write transaction below to prevent TOCTOU races. @@ -251,6 +248,11 @@ impl LmdbStorage { return Ok(false); } + // ── Disk-space guard (cached — at most one syscall per interval) ─ + // Placed after the duplicate check so that re-storing an existing + // chunk remains a harmless no-op even when disk space is low. + self.check_disk_space_cached()?; + // ── Write (with resize-on-demand) ─────────────────────────────── match self.try_put(address, content).await? { PutOutcome::New => {}