diff --git a/Cargo.lock b/Cargo.lock index fb387cf..88a5f64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -856,6 +856,7 @@ dependencies = [ "lru", "objc2", "objc2-foundation", + "page_size", "parking_lot", "postcard", "proptest", @@ -2284,9 +2285,9 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d608fcd0976beee509fef7fa391735571cb2fffd715ddca174322180300b6615" +checksum = "af67fe5494790b75d91fed4c5dd3215098e5adf071f73f40a238d199116c75ac" dependencies = [ "alloy", "ant-merkle", @@ -2940,7 +2941,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core", ] [[package]] @@ -6231,7 +6232,7 @@ version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" dependencies = [ - "windows-core 0.58.0", + "windows-core", "windows-targets 0.52.6", ] @@ -6241,26 +6242,13 @@ version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ - "windows-implement 0.58.0", - "windows-interface 0.58.0", - "windows-result 0.2.0", - "windows-strings 0.1.0", + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", "windows-targets 0.52.6", ] -[[package]] -name = "windows-core" -version = "0.62.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" -dependencies = [ - "windows-implement 0.60.2", - "windows-interface 0.59.3", - "windows-link", - "windows-result 0.4.1", - "windows-strings 0.5.1", -] - [[package]] name = "windows-implement" version = "0.58.0" @@ -6272,17 +6260,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "windows-implement" -version = "0.60.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "windows-interface" version = "0.58.0" @@ -6294,17 +6271,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "windows-interface" -version = "0.59.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "windows-link" version = "0.2.1" @@ -6320,34 +6286,16 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-result" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result 0.2.0", + "windows-result", "windows-targets 0.52.6", ] -[[package]] -name = "windows-strings" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-sys" version = "0.45.0" diff --git a/Cargo.toml b/Cargo.toml index d6e2e2c..203d89e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ saorsa-core = "0.21.0" saorsa-pqc = "0.5" # Payment verification - autonomi network lookup + EVM payment -evmlib = "0.5.0" +evmlib = "0.7" xor_name = "5" # Caching - LRU cache for verified XorNames @@ -93,6 +93,9 @@ sha2 = "0.10" # Cross-platform file locking for upgrade caches fs2 = "0.4" +# System page size (for LMDB map alignment 
during resize) +page_size = "0.6" + # Protocol serialization postcard = { version = "1.1.3", features = ["use-std"] } diff --git a/config/production.toml b/config/production.toml index bbcb1a2..ce44e01 100644 --- a/config/production.toml +++ b/config/production.toml @@ -43,9 +43,6 @@ metrics_port = 9100 [storage] enabled = true -# Maximum number of chunks to store (0 = unlimited) -max_chunks = 0 - # Verify content hash on read verify_on_read = true diff --git a/src/bin/ant-devnet/main.rs b/src/bin/ant-devnet/main.rs index b3326b0..3103902 100644 --- a/src/bin/ant-devnet/main.rs +++ b/src/bin/ant-devnet/main.rs @@ -64,14 +64,11 @@ async fn main() -> color_eyre::Result<()> { .default_wallet_private_key() .map_err(|e| color_eyre::eyre::eyre!("Failed to get wallet key: {e}"))?; - let (rpc_url, token_addr, payments_addr, merkle_addr) = match &network { + let (rpc_url, token_addr, vault_addr) = match &network { evmlib::Network::Custom(custom) => ( custom.rpc_url_http.to_string(), format!("{:?}", custom.payment_token_address), - format!("{:?}", custom.data_payments_address), - custom - .merkle_payments_address - .map(|addr| format!("{addr:?}")), + format!("{:?}", custom.payment_vault_address), ), _ => { return Err(color_eyre::eyre::eyre!( @@ -93,8 +90,7 @@ async fn main() -> color_eyre::Result<()> { rpc_url, wallet_private_key: wallet_key, payment_token_address: token_addr, - data_payments_address: payments_addr, - merkle_payments_address: merkle_addr, + payment_vault_address: vault_addr, }) } else { None diff --git a/src/config.rs b/src/config.rs index 47cfbeb..4079787 100644 --- a/src/config.rs +++ b/src/config.rs @@ -430,8 +430,8 @@ const fn default_bootstrap_stale_days() -> u64 { /// /// Controls how chunks are stored, including: /// - Whether storage is enabled -/// - Maximum chunks to store (for capacity management) /// - Content verification on read +/// - Database size limits (auto-scales with available disk by default) #[derive(Debug, Clone, Serialize, 
Deserialize)] pub struct StorageConfig { /// Enable chunk storage. @@ -439,34 +439,42 @@ pub struct StorageConfig { #[serde(default = "default_storage_enabled")] pub enabled: bool, - /// Maximum number of chunks to store (0 = unlimited). - /// Default: 0 (unlimited) - #[serde(default)] - pub max_chunks: usize, - /// Verify content hash matches address on read. /// Default: true #[serde(default = "default_storage_verify_on_read")] pub verify_on_read: bool, - /// Maximum LMDB database size in GiB (0 = use default of 32 GiB). - /// On Unix the mmap is a lazy reservation and costs nothing until pages - /// are faulted in. + /// Explicit LMDB database size cap in GiB. + /// + /// When set to 0 (default), the map size is computed automatically from + /// available disk space at startup and grows on demand when the operator + /// adds storage. Set a non-zero value to impose a hard cap. #[serde(default)] pub db_size_gb: usize, + + /// Minimum free disk space (in GiB) to preserve on the storage partition. + /// + /// Writes are refused when available space drops below this threshold, + /// preventing the node from filling the disk completely. Default: 1 GiB. + #[serde(default = "default_disk_reserve_gb")] + pub disk_reserve_gb: u64, } impl Default for StorageConfig { fn default() -> Self { Self { enabled: default_storage_enabled(), - max_chunks: 0, verify_on_read: default_storage_verify_on_read(), db_size_gb: 0, + disk_reserve_gb: default_disk_reserve_gb(), } } } +const fn default_disk_reserve_gb() -> u64 { + 1 +} + const fn default_storage_enabled() -> bool { true } diff --git a/src/devnet.rs b/src/devnet.rs index 9bd0a1b..a702ce5 100644 --- a/src/devnet.rs +++ b/src/devnet.rs @@ -79,9 +79,6 @@ const DEVNET_PAYMENT_CACHE_CAPACITY: usize = 1000; /// Devnet rewards address (20 bytes, all 0x01). const DEVNET_REWARDS_ADDRESS: [u8; 20] = [0x01; 20]; -/// Max records for quoting metrics (devnet value). 
-const DEVNET_MAX_RECORDS: usize = 100_000; - /// Initial records for quoting metrics (devnet value). const DEVNET_INITIAL_RECORDS: usize = 1000; @@ -244,11 +241,8 @@ pub struct DevnetEvmInfo { pub wallet_private_key: String, /// Payment token contract address. pub payment_token_address: String, - /// Data payments contract address. - pub data_payments_address: String, - /// Merkle payments contract address (for batch payments). - #[serde(default, skip_serializing_if = "Option::is_none")] - pub merkle_payments_address: Option<String>, + /// Unified payment vault contract address (handles both single-node and merkle payments). + pub payment_vault_address: String, } /// Network state for devnet startup lifecycle. @@ -569,8 +563,7 @@ impl Devnet { let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), verify_on_read: true, - max_chunks: 0, - max_map_size: 0, + ..LmdbStorageConfig::default() }; let storage = LmdbStorage::new(storage_config) .await @@ -590,8 +583,7 @@ impl Devnet { local_rewards_address: rewards_address, }; let payment_verifier = PaymentVerifier::new(payment_config); - let metrics_tracker = - QuotingMetricsTracker::new(DEVNET_MAX_RECORDS, DEVNET_INITIAL_RECORDS); + let metrics_tracker = QuotingMetricsTracker::new(DEVNET_INITIAL_RECORDS); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing from the devnet node's identity diff --git a/src/node.rs index 2a2e724..bcfc02d 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,6 +1,6 @@ //! Node implementation - thin wrapper around saorsa-core's `P2PNode`. 
-use crate::ant_protocol::{CHUNK_PROTOCOL_ID, MAX_CHUNK_SIZE}; +use crate::ant_protocol::CHUNK_PROTOCOL_ID; use crate::config::{ default_nodes_dir, default_root_dir, EvmNetworkConfig, NetworkMode, NodeConfig, NODE_IDENTITY_FILENAME, @@ -30,14 +30,6 @@ use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, warn}; -/// Node storage capacity limit (5 GB). -/// -/// Used to derive `max_records` for the quoting metrics pricing curve. -/// A node advertises `NODE_STORAGE_LIMIT_BYTES / MAX_CHUNK_SIZE` as -/// its maximum record count, giving the pricing algorithm a meaningful -/// fullness ratio instead of a hardcoded constant. -pub const NODE_STORAGE_LIMIT_BYTES: u64 = 5 * 1024 * 1024 * 1024; - #[cfg(unix)] use tokio::signal::unix::{signal, SignalKind}; @@ -330,8 +322,11 @@ impl NodeBuilder { let storage_config = LmdbStorageConfig { root_dir: config.root_dir.clone(), verify_on_read: config.storage.verify_on_read, - max_chunks: config.storage.max_chunks, - max_map_size: config.storage.db_size_gb.saturating_mul(1_073_741_824), + max_map_size: config.storage.db_size_gb.saturating_mul(1024 * 1024 * 1024), + disk_reserve: config + .storage + .disk_reserve_gb + .saturating_mul(1024 * 1024 * 1024), }; let storage = LmdbStorage::new(storage_config) .await @@ -360,10 +355,7 @@ impl NodeBuilder { local_rewards_address: rewards_address, }; let payment_verifier = PaymentVerifier::new(payment_config); - // Safe: 5GB fits in usize on all supported 64-bit platforms. - #[allow(clippy::cast_possible_truncation)] - let max_records = (NODE_STORAGE_LIMIT_BYTES as usize) / MAX_CHUNK_SIZE; - let metrics_tracker = QuotingMetricsTracker::new(max_records, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing from node identity. 
diff --git a/src/payment/metrics.rs b/src/payment/metrics.rs index c14e4bb..d2f6528 100644 --- a/src/payment/metrics.rs +++ b/src/payment/metrics.rs @@ -18,13 +18,11 @@ const PERSIST_INTERVAL: usize = 10; /// Tracker for quoting metrics. /// /// Maintains state that influences quote pricing, including payment history, -/// storage capacity, and network information. +/// storage usage, and network information. #[derive(Debug)] pub struct QuotingMetricsTracker { /// Number of payments received by this node. received_payment_count: AtomicUsize, - /// Maximum records the node can store. - max_records: usize, /// Number of records currently stored. close_records_stored: AtomicUsize, /// Records stored by type: `Vec<(data_type_index, count)>`. @@ -44,13 +42,11 @@ impl QuotingMetricsTracker { /// /// # Arguments /// - /// * `max_records` - Maximum number of records this node can store /// * `initial_records` - Initial number of records stored #[must_use] - pub fn new(max_records: usize, initial_records: usize) -> Self { + pub fn new(initial_records: usize) -> Self { Self { received_payment_count: AtomicUsize::new(0), - max_records, close_records_stored: AtomicUsize::new(initial_records), records_per_type: RwLock::new(Vec::new()), start_time: Instant::now(), @@ -64,11 +60,10 @@ impl QuotingMetricsTracker { /// /// # Arguments /// - /// * `max_records` - Maximum number of records /// * `persist_path` - Path to persist metrics to disk #[must_use] - pub fn with_persistence(max_records: usize, persist_path: &std::path::Path) -> Self { - let mut tracker = Self::new(max_records, 0); + pub fn with_persistence(persist_path: &std::path::Path) -> Self { + let mut tracker = Self::new(0); tracker.persist_path = Some(persist_path.to_path_buf()); // Try to load existing metrics @@ -153,7 +148,6 @@ impl QuotingMetricsTracker { data_size, close_records_stored: self.close_records_stored.load(Ordering::SeqCst), records_per_type: self.records_per_type.read().clone(), - max_records: 
self.max_records, received_payment_count: self.received_payment_count.load(Ordering::SeqCst), live_time: self.live_time_hours(), network_density: None, // Not used in pricing; reserved for future DHT range filtering @@ -215,14 +209,14 @@ mod tests { #[test] fn test_new_tracker() { - let tracker = QuotingMetricsTracker::new(1000, 50); + let tracker = QuotingMetricsTracker::new(50); assert_eq!(tracker.payment_count(), 0); assert_eq!(tracker.records_stored(), 50); } #[test] fn test_record_payment() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); assert_eq!(tracker.payment_count(), 0); tracker.record_payment(); @@ -234,7 +228,7 @@ mod tests { #[test] fn test_record_store() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); assert_eq!(tracker.records_stored(), 0); tracker.record_store(0); // Chunk type @@ -250,14 +244,13 @@ mod tests { #[test] fn test_get_metrics() { - let tracker = QuotingMetricsTracker::new(1000, 100); + let tracker = QuotingMetricsTracker::new(100); tracker.record_payment(); tracker.record_payment(); let metrics = tracker.get_metrics(2048, 0); assert_eq!(metrics.data_size, 2048); assert_eq!(metrics.data_type, 0); - assert_eq!(metrics.max_records, 1000); assert_eq!(metrics.close_records_stored, 100); assert_eq!(metrics.received_payment_count, 2); } @@ -269,28 +262,28 @@ mod tests { // Create and populate tracker { - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); tracker.record_payment(); tracker.record_payment(); tracker.record_store(0); } // Load from disk - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); assert_eq!(tracker.payment_count(), 2); assert_eq!(tracker.records_stored(), 1); } #[test] fn test_live_time_hours() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let 
tracker = QuotingMetricsTracker::new(0); // Just started, so live_time should be 0 hours assert_eq!(tracker.live_time_hours(), 0); } #[test] fn test_set_network_size() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); tracker.set_network_size(1000); let metrics = tracker.get_metrics(0, 0); @@ -299,7 +292,7 @@ mod tests { #[test] fn test_records_per_type_multiple_types() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); tracker.record_store(0); tracker.record_store(0); @@ -326,14 +319,14 @@ mod tests { let path = dir.path().join("metrics_types.bin"); { - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); tracker.record_store(0); tracker.record_store(0); tracker.record_store(1); tracker.record_payment(); } - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); assert_eq!(tracker.payment_count(), 1); assert_eq!(tracker.records_stored(), 3); // 2 type-0 + 1 type-1 @@ -347,21 +340,14 @@ mod tests { let path = dir.path().join("nonexistent_subdir").join("metrics.bin"); // Should not panic — just starts with defaults - let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + let tracker = QuotingMetricsTracker::with_persistence(&path); assert_eq!(tracker.payment_count(), 0); assert_eq!(tracker.records_stored(), 0); } - #[test] - fn test_max_records_zero() { - let tracker = QuotingMetricsTracker::new(0, 0); - let metrics = tracker.get_metrics(1024, 0); - assert_eq!(metrics.max_records, 0); - } - #[test] fn test_get_metrics_passes_data_params() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); let metrics = tracker.get_metrics(4096, 3); assert_eq!(metrics.data_size, 4096); assert_eq!(metrics.data_type, 3); @@ -369,7 +355,7 @@ mod tests { #[test] fn 
test_default_network_size() { - let tracker = QuotingMetricsTracker::new(1000, 0); + let tracker = QuotingMetricsTracker::new(0); let metrics = tracker.get_metrics(0, 0); assert_eq!(metrics.network_size, Some(500)); } diff --git a/src/payment/pricing.rs b/src/payment/pricing.rs index 2290d5c..96c5bd8 100644 --- a/src/payment/pricing.rs +++ b/src/payment/pricing.rs @@ -1,297 +1,159 @@ -//! Local fullness-based pricing algorithm for ant-node. +//! Simple quadratic pricing algorithm for ant-node. //! -//! Mirrors the logarithmic pricing curve from autonomi's `MerklePaymentVault` contract: -//! - Empty node → price ≈ `MIN_PRICE` (floor) -//! - Filling up → price increases logarithmically -//! - Nearly full → price spikes (ln(x) as x→0) -//! - At capacity → returns `u64::MAX` (effectively refuses new data) +//! Uses the formula `(close_records_stored / 6000)^2` to calculate storage price. +//! Integer division means nodes with fewer than 6000 records get a ratio of 0, +//! but a minimum floor of 1 prevents free storage. //! -//! ## Design Rationale: Capacity-Based Pricing +//! ## Design Rationale //! -//! Pricing is based on node **fullness** (percentage of storage capacity used), -//! not on a fixed cost-per-byte. This design mirrors the autonomi -//! `MerklePaymentVault` on-chain contract and creates natural load balancing: -//! -//! - **Empty nodes** charge the minimum floor price, attracting new data -//! - **Nearly full nodes** charge exponentially more via the logarithmic curve -//! - **This pushes clients toward emptier nodes**, distributing data across the network -//! -//! A flat cost-per-byte model would not incentivize distribution — all nodes would -//! charge the same regardless of remaining capacity. The logarithmic curve ensures -//! the network self-balances as nodes fill up. +//! The quadratic curve creates natural load balancing: +//! - **Lightly loaded nodes** (< 6000 records) charge the minimum floor price +//! 
- **Moderately loaded nodes** charge proportionally more as records grow +//! - **Heavily loaded nodes** charge quadratically more, pushing clients elsewhere use evmlib::common::Amount; -use evmlib::quoting_metrics::QuotingMetrics; -/// Minimum price floor (matches contract's `minPrice = 3`). -const MIN_PRICE: u64 = 3; +/// Divisor for the pricing formula. +const PRICING_DIVISOR: u64 = 6000; -/// Scaling factor for the logarithmic pricing curve. -/// In the contract this is 1e18; we normalize to 1.0 for f64 arithmetic. -const SCALING_FACTOR: f64 = 1.0; +/// `PRICING_DIVISOR²`, precomputed to avoid repeated multiplication. +const DIVISOR_SQUARED: u64 = PRICING_DIVISOR * PRICING_DIVISOR; -/// ANT price constant (normalized to 1.0, matching contract's 1e18/1e18 ratio). -const ANT_PRICE: f64 = 1.0; +/// 1 token = 10^18 wei. +const WEI_PER_TOKEN: u128 = 1_000_000_000_000_000_000; -/// Calculate a local price estimate from node quoting metrics. +/// Minimum price in wei (1 wei) to prevent free storage. +const MIN_PRICE_WEI: u128 = 1; + +/// Calculate storage price in wei from the number of close records stored. /// -/// Implements the autonomi pricing formula: -/// ```text -/// price = (-s/ANT) * (ln|rUpper - 1| - ln|rLower - 1|) + pMin*(rUpper - rLower) - (rUpper - rLower)/ANT -/// ``` +/// Formula: `price_wei = n² × 10¹⁸ / 6000²` /// -/// where: -/// - `rLower = total_cost_units / max_cost_units` (current fullness ratio) -/// - `rUpper = (total_cost_units + cost_unit) / max_cost_units` (fullness after storing) -/// - `s` = scaling factor, `ANT` = ANT price, `pMin` = minimum price -#[allow( - clippy::cast_precision_loss, - clippy::cast_possible_truncation, - clippy::cast_sign_loss -)] +/// This is equivalent to `(n / 6000)²` in tokens, converted to wei, but +/// preserves sub-token precision by scaling before dividing. U256 arithmetic +/// prevents overflow for large record counts. 
#[must_use] -pub fn calculate_price(metrics: &QuotingMetrics) -> Amount { - let min_price = Amount::from(MIN_PRICE); - - // Edge case: zero or very small capacity - if metrics.max_records == 0 { - return min_price; +pub fn calculate_price(close_records_stored: usize) -> Amount { + let n = Amount::from(close_records_stored); + let n_squared = n.saturating_mul(n); + let price_wei = + n_squared.saturating_mul(Amount::from(WEI_PER_TOKEN)) / Amount::from(DIVISOR_SQUARED); + if price_wei.is_zero() { + Amount::from(MIN_PRICE_WEI) + } else { + price_wei } +} - // Use close_records_stored as the authoritative record count for pricing. - let total_records = metrics.close_records_stored as u64; - - let max_records = metrics.max_records as f64; - - // Normalize to [0, 1) range (matching contract's _getBound) - let r_lower = total_records as f64 / max_records; - // Adding one record (cost_unit = 1 normalized) - let r_upper = (total_records + 1) as f64 / max_records; - - // At capacity: return maximum price to effectively refuse new data - if r_lower >= 1.0 || r_upper >= 1.0 { - return Amount::from(u64::MAX); - } - if (r_upper - r_lower).abs() < f64::EPSILON { - return min_price; - } +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used)] +mod tests { + use super::*; - // Calculate |r - 1| for logarithm inputs - let upper_diff = (r_upper - 1.0).abs(); - let lower_diff = (r_lower - 1.0).abs(); + const WEI: u128 = WEI_PER_TOKEN; - // Avoid log(0) - if upper_diff < f64::EPSILON || lower_diff < f64::EPSILON { - return min_price; + /// Helper: expected price for n records = n² * 10^18 / 6000² + fn expected_price(n: u64) -> Amount { + let n = Amount::from(n); + n * n * Amount::from(WEI) / Amount::from(DIVISOR_SQUARED) } - let log_upper = upper_diff.ln(); - let log_lower = lower_diff.ln(); - let log_diff = log_upper - log_lower; - - let linear_part = r_upper - r_lower; - - // Formula: price = (-s/ANT) * logDiff + pMin * linearPart - linearPart/ANT - let part_one = 
(-SCALING_FACTOR / ANT_PRICE) * log_diff; - let part_two = MIN_PRICE as f64 * linear_part; - let part_three = linear_part / ANT_PRICE; - - let price = part_one + part_two - part_three; - - if price <= 0.0 || !price.is_finite() { - return min_price; + #[test] + fn test_zero_records_gets_min_price() { + let price = calculate_price(0); + assert_eq!(price, Amount::from(MIN_PRICE_WEI)); } - // Scale by data_size (larger data costs proportionally more) - let data_size_factor = metrics.data_size.max(1) as f64; - let scaled_price = price * data_size_factor; - - if !scaled_price.is_finite() { - return min_price; + #[test] + fn test_one_record_nonzero() { + // 1² * 1e18 / 36e6 = 1e18 / 36e6 ≈ 27_777_777_777 + let price = calculate_price(1); + assert_eq!(price, expected_price(1)); + assert!(price > Amount::ZERO); } - // Convert to Amount (U256), floor at MIN_PRICE - let price_u64 = if scaled_price > u64::MAX as f64 { - u64::MAX - } else { - (scaled_price as u64).max(MIN_PRICE) - }; - - Amount::from(price_u64) -} - -#[cfg(test)] -#[allow(clippy::unwrap_used, clippy::expect_used)] -mod tests { - use super::*; - - fn make_metrics( - records_stored: usize, - max_records: usize, - data_size: usize, - data_type: u32, - ) -> QuotingMetrics { - let records_per_type = if records_stored > 0 { - vec![(data_type, u32::try_from(records_stored).unwrap_or(u32::MAX))] - } else { - vec![] - }; - QuotingMetrics { - data_type, - data_size, - close_records_stored: records_stored, - records_per_type, - max_records, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: Some(500), - } + #[test] + fn test_at_divisor_gets_one_token() { + // 6000² * 1e18 / 6000² = 1e18 + let price = calculate_price(6000); + assert_eq!(price, Amount::from(WEI)); } #[test] - fn test_empty_node_gets_min_price() { - let metrics = make_metrics(0, 1000, 1, 0); - let price = calculate_price(&metrics); - // Empty node should return approximately MIN_PRICE - assert_eq!(price, 
Amount::from(MIN_PRICE)); + fn test_double_divisor_gets_four_tokens() { + // 12000² * 1e18 / 6000² = 4e18 + let price = calculate_price(12000); + assert_eq!(price, Amount::from(4 * WEI)); } #[test] - fn test_half_full_node_costs_more() { - let empty = make_metrics(0, 1000, 1024, 0); - let half = make_metrics(500, 1000, 1024, 0); - let price_empty = calculate_price(&empty); - let price_half = calculate_price(&half); - assert!( - price_half > price_empty, - "Half-full price ({price_half}) should exceed empty price ({price_empty})" - ); + fn test_triple_divisor_gets_nine_tokens() { + // 18000² * 1e18 / 6000² = 9e18 + let price = calculate_price(18000); + assert_eq!(price, Amount::from(9 * WEI)); } #[test] - fn test_nearly_full_node_costs_much_more() { - let half = make_metrics(500, 1000, 1024, 0); - let nearly_full = make_metrics(900, 1000, 1024, 0); - let price_half = calculate_price(&half); - let price_nearly_full = calculate_price(&nearly_full); + fn test_smooth_pricing_no_staircase() { + // With the old integer-division approach, 6000 and 11999 gave the same price. + // Now 11999 should give a higher price than 6000. 
+ let price_6k = calculate_price(6000); + let price_11k = calculate_price(11999); assert!( - price_nearly_full > price_half, - "Nearly-full price ({price_nearly_full}) should far exceed half-full price ({price_half})" + price_11k > price_6k, + "11999 records ({price_11k}) should cost more than 6000 ({price_6k})" ); } #[test] - fn test_full_node_returns_max_price() { - // At capacity (r_lower >= 1.0), effectively refuse new data with max price - let metrics = make_metrics(1000, 1000, 1024, 0); - let price = calculate_price(&metrics); - assert_eq!(price, Amount::from(u64::MAX)); + fn test_price_increases_with_records() { + let price_low = calculate_price(6000); + let price_mid = calculate_price(12000); + let price_high = calculate_price(18000); + assert!(price_mid > price_low); + assert!(price_high > price_mid); } #[test] fn test_price_increases_monotonically() { - let max_records = 1000; - let data_size = 1024; let mut prev_price = Amount::ZERO; - - // Check from 0% to 99% full - for pct in 0..100 { - let records = pct * max_records / 100; - let metrics = make_metrics(records, max_records, data_size, 0); - let price = calculate_price(&metrics); + for records in (0..60000).step_by(100) { + let price = calculate_price(records); assert!( price >= prev_price, - "Price at {pct}% ({price}) should be >= price at previous step ({prev_price})" + "Price at {records} records ({price}) should be >= previous ({prev_price})" ); prev_price = price; } } #[test] - fn test_zero_max_records_returns_min_price() { - let metrics = make_metrics(0, 0, 1024, 0); - let price = calculate_price(&metrics); - assert_eq!(price, Amount::from(MIN_PRICE)); + fn test_large_value_no_overflow() { + let price = calculate_price(usize::MAX); + assert!(price > Amount::ZERO); } #[test] - fn test_different_data_sizes_same_fullness() { - let small = make_metrics(500, 1000, 100, 0); - let large = make_metrics(500, 1000, 10000, 0); - let price_small = calculate_price(&small); - let price_large = 
calculate_price(&large); - assert!( - price_large > price_small, - "Larger data ({price_large}) should cost more than smaller data ({price_small})" - ); - } - - #[test] - fn test_price_with_multiple_record_types() { - // 300 type-0 records + 200 type-1 records = 500 total out of 1000 - let metrics = QuotingMetrics { - data_type: 0, - data_size: 1024, - close_records_stored: 500, - records_per_type: vec![(0, 300), (1, 200)], - max_records: 1000, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: Some(500), - }; - let price_multi = calculate_price(&metrics); - - // Compare with single-type equivalent (500 of type 0) - let metrics_single = make_metrics(500, 1000, 1024, 0); - let price_single = calculate_price(&metrics_single); - - // Same total records → same price - assert_eq!(price_multi, price_single); - } - - #[test] - fn test_price_at_95_percent() { - let metrics = make_metrics(950, 1000, 1024, 0); - let price = calculate_price(&metrics); - let min = Amount::from(MIN_PRICE); - assert!( - price > min, - "Price at 95% should be above minimum, got {price}" - ); - } - - #[test] - fn test_price_at_99_percent() { - let metrics = make_metrics(990, 1000, 1024, 0); - let price = calculate_price(&metrics); - let price_95 = calculate_price(&make_metrics(950, 1000, 1024, 0)); - assert!( - price > price_95, - "Price at 99% ({price}) should exceed price at 95% ({price_95})" - ); + fn test_price_deterministic() { + let price1 = calculate_price(12000); + let price2 = calculate_price(12000); + assert_eq!(price1, price2); } #[test] - fn test_over_capacity_returns_max_price() { - // 1100 records stored but max is 1000 — over capacity - let metrics = make_metrics(1100, 1000, 1024, 0); - let price = calculate_price(&metrics); - assert_eq!( - price, - Amount::from(u64::MAX), - "Over-capacity should return max price" - ); + fn test_quadratic_growth() { + // price at 4x records should be 16x price at 1x + let price_1x = calculate_price(6000); + let 
price_4x = calculate_price(24000); + assert_eq!(price_1x, Amount::from(WEI)); + assert_eq!(price_4x, Amount::from(16 * WEI)); } #[test] - fn test_price_deterministic() { - let metrics = make_metrics(500, 1000, 1024, 0); - let price1 = calculate_price(&metrics); - let price2 = calculate_price(&metrics); - let price3 = calculate_price(&metrics); - assert_eq!(price1, price2); - assert_eq!(price2, price3); + fn test_small_record_counts_are_cheap() { + // 100 records: 100² * 1e18 / 36e6 ≈ 277_777_777_777_777 wei ≈ 0.000278 tokens + let price = calculate_price(100); + assert_eq!(price, expected_price(100)); + assert!(price < Amount::from(WEI)); // well below 1 token } } diff --git a/src/payment/proof.rs b/src/payment/proof.rs index 03dfc8f..0925bd5 100644 --- a/src/payment/proof.rs +++ b/src/payment/proof.rs @@ -116,11 +116,11 @@ pub fn deserialize_merkle_proof(bytes: &[u8]) -> std::result::Result QuotingMetrics { - self.metrics_tracker.get_metrics(0, 0) + pub fn records_stored(&self) -> usize { + self.metrics_tracker.records_stored() } /// Record a payment received (delegates to metrics tracker). 
@@ -214,11 +210,11 @@ impl QuoteGenerator { .as_ref() .ok_or_else(|| Error::Payment("Quote signing not configured".to_string()))?; - let quoting_metrics = self.metrics_tracker.get_metrics(data_size, data_type); + let price = calculate_price(self.metrics_tracker.records_stored()); // Compute the same bytes_to_sign used by the upstream library let msg = MerklePaymentCandidateNode::bytes_to_sign( - &quoting_metrics, + &price, &self.rewards_address, merkle_payment_timestamp, ); @@ -233,7 +229,7 @@ let candidate = MerklePaymentCandidateNode { pub_key: self.pub_key.clone(), - quoting_metrics, + price, reward_address: self.rewards_address, merkle_payment_timestamp, signature, @@ -355,7 +351,7 @@ pub fn verify_merkle_candidate_signature(candidate: &MerklePaymentCandidateNode) }; let msg = MerklePaymentCandidateNode::bytes_to_sign( - &candidate.quoting_metrics, + &candidate.price, &candidate.reward_address, candidate.merkle_payment_timestamp, ); @@ -414,11 +410,12 @@ mod tests { use super::*; use crate::payment::metrics::QuotingMetricsTracker; + use evmlib::common::Amount; use saorsa_pqc::pqc::types::MlDsaSecretKey; fn create_test_generator() -> QuoteGenerator { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); @@ -465,7 +462,7 @@ mod tests { #[test] fn test_generator_without_signer() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); assert!(!generator.can_sign()); @@ -481,7 +478,7 @@ mod tests { let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keypair generation"); let rewards_address = RewardsAddress::new([2u8; 20]); - let 
metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); let pub_key_bytes = public_key.as_bytes().to_vec(); @@ -525,50 +522,32 @@ mod tests { #[test] fn test_rewards_address_getter() { let addr = RewardsAddress::new([42u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let generator = QuoteGenerator::new(addr, metrics_tracker); assert_eq!(*generator.rewards_address(), addr); } #[test] - fn test_current_metrics() { - let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(500, 50); - let generator = QuoteGenerator::new(rewards_address, metrics_tracker); - - let metrics = generator.current_metrics(); - assert_eq!(metrics.max_records, 500); - assert_eq!(metrics.close_records_stored, 50); - assert_eq!(metrics.data_size, 0); - assert_eq!(metrics.data_type, 0); - } - - #[test] - fn test_record_payment_delegation() { + fn test_records_stored() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(50); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); - generator.record_payment(); - generator.record_payment(); - - let metrics = generator.current_metrics(); - assert_eq!(metrics.received_payment_count, 2); + assert_eq!(generator.records_stored(), 50); } #[test] fn test_record_store_delegation() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); generator.record_store(0); generator.record_store(1); generator.record_store(0); - let metrics = generator.current_metrics(); - 
assert_eq!(metrics.close_records_stored, 3); + assert_eq!(generator.records_stored(), 3); } #[test] @@ -576,17 +555,15 @@ mod tests { let generator = create_test_generator(); let content = [10u8; 32]; - // Data type 0 (chunk) + // All data types produce the same price (price depends on records_stored, not data_type) let q0 = generator.create_quote(content, 1024, 0).expect("type 0"); - assert_eq!(q0.quoting_metrics.data_type, 0); - - // Data type 1 let q1 = generator.create_quote(content, 512, 1).expect("type 1"); - assert_eq!(q1.quoting_metrics.data_type, 1); - - // Data type 2 let q2 = generator.create_quote(content, 256, 2).expect("type 2"); - assert_eq!(q2.quoting_metrics.data_type, 2); + + // All quotes should have a valid price (minimum floor of 1) + assert!(q0.price >= Amount::from(1u64)); + assert!(q1.price >= Amount::from(1u64)); + assert!(q2.price >= Amount::from(1u64)); } #[test] @@ -594,8 +571,9 @@ mod tests { let generator = create_test_generator(); let content = [11u8; 32]; + // Price depends on records_stored, not data size let quote = generator.create_quote(content, 0, 0).expect("zero size"); - assert_eq!(quote.quoting_metrics.data_size, 0); + assert!(quote.price >= Amount::from(1u64)); } #[test] @@ -603,10 +581,11 @@ mod tests { let generator = create_test_generator(); let content = [12u8; 32]; + // Price depends on records_stored, not data size let quote = generator .create_quote(content, 10_000_000, 0) .expect("large size"); - assert_eq!(quote.quoting_metrics.data_size, 10_000_000); + assert!(quote.price >= Amount::from(1u64)); } #[test] @@ -614,17 +593,7 @@ mod tests { let quote = PaymentQuote { content: xor_name::XorName([0u8; 32]), timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 0, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 0, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }, + price: Amount::from(1u64), rewards_address: 
RewardsAddress::new([0u8; 20]), pub_key: vec![], signature: vec![], @@ -637,7 +606,7 @@ mod tests { #[test] fn test_can_sign_after_set_signer() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); assert!(!generator.can_sign()); @@ -651,7 +620,7 @@ mod tests { fn test_wire_ml_dsa_signer_returns_ok_with_valid_identity() { let identity = saorsa_core::identity::NodeIdentity::generate().expect("keypair generation"); let rewards_address = RewardsAddress::new([3u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); let result = wire_ml_dsa_signer(&mut generator, &identity); @@ -665,7 +634,7 @@ mod tests { #[test] fn test_probe_signer_fails_without_signer() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let generator = QuoteGenerator::new(rewards_address, metrics_tracker); let result = generator.probe_signer(); @@ -675,7 +644,7 @@ mod tests { #[test] fn test_probe_signer_fails_with_empty_signature() { let rewards_address = RewardsAddress::new([1u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); generator.set_signer(vec![0u8; 32], |_| vec![]); @@ -690,7 +659,7 @@ mod tests { let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keypair generation"); let rewards_address = RewardsAddress::new([0x42u8; 20]); - let metrics_tracker = QuotingMetricsTracker::new(800, 50); + let metrics_tracker = QuotingMetricsTracker::new(50); let mut generator = 
QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing (same as production nodes) @@ -722,11 +691,8 @@ mod tests { // Verify the timestamp was set correctly assert_eq!(candidate.merkle_payment_timestamp, timestamp); - // Verify metrics match what the tracker would produce - assert_eq!(candidate.quoting_metrics.data_size, 2048); - assert_eq!(candidate.quoting_metrics.data_type, 0); - assert_eq!(candidate.quoting_metrics.max_records, 800); - assert_eq!(candidate.quoting_metrics.close_records_stored, 50); + // Verify price was calculated from records_stored using the pricing formula + assert_eq!(candidate.price, calculate_price(50)); // Verify the public key is the ML-DSA-65 public key (not ed25519) assert_eq!( @@ -763,25 +729,15 @@ mod tests { .duration_since(std::time::UNIX_EPOCH) .expect("system time") .as_secs(); - let metrics = QuotingMetrics { - data_size: 4096, - data_type: 0, - close_records_stored: 10, - records_per_type: vec![], - max_records: 500, - received_payment_count: 3, - live_time: 600, - network_density: None, - network_size: None, - }; + let price = Amount::from(42u64); - let msg = MerklePaymentCandidateNode::bytes_to_sign(&metrics, &rewards_address, timestamp); + let msg = MerklePaymentCandidateNode::bytes_to_sign(&price, &rewards_address, timestamp); let sk = MlDsaSecretKey::from_bytes(secret_key.as_bytes()).expect("sk"); let signature = ml_dsa.sign(&sk, &msg).expect("sign").as_bytes().to_vec(); MerklePaymentCandidateNode { pub_key: public_key.as_bytes().to_vec(), - quoting_metrics: metrics, + price, reward_address: rewards_address, merkle_payment_timestamp: timestamp, signature, @@ -821,12 +777,12 @@ mod tests { } #[test] - fn test_verify_merkle_candidate_tampered_metrics() { + fn test_verify_merkle_candidate_tampered_price() { let mut candidate = make_valid_merkle_candidate(); - candidate.quoting_metrics.data_size = 999_999; + candidate.price = Amount::from(999_999u64); assert!( 
!verify_merkle_candidate_signature(&candidate), - "Tampered quoting_metrics must invalidate the signature" + "Tampered price must invalidate the signature" ); } diff --git a/src/payment/single_node.rs b/src/payment/single_node.rs index 627e1ff..4edebc9 100644 --- a/src/payment/single_node.rs +++ b/src/payment/single_node.rs @@ -13,31 +13,12 @@ use crate::ant_protocol::CLOSE_GROUP_SIZE; use crate::error::{Error, Result}; use evmlib::common::{Amount, QuoteHash}; -use evmlib::contract::payment_vault; -use evmlib::quoting_metrics::QuotingMetrics; use evmlib::wallet::Wallet; use evmlib::Network as EvmNetwork; use evmlib::PaymentQuote; use evmlib::RewardsAddress; use tracing::info; -/// Create zero-valued `QuotingMetrics` for payment verification. -/// -/// The contract doesn't validate metric values, so we use zeroes. -fn zero_quoting_metrics() -> QuotingMetrics { - QuotingMetrics { - data_size: 0, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 0, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - } -} - /// Index of the median-priced node after sorting, derived from `CLOSE_GROUP_SIZE`. 
const MEDIAN_INDEX: usize = CLOSE_GROUP_SIZE / 2; @@ -63,8 +44,8 @@ pub struct QuotePaymentInfo { pub rewards_address: RewardsAddress, /// The amount to pay (3x for median, 0 for others) pub amount: Amount, - /// The quoting metrics - pub quoting_metrics: QuotingMetrics, + /// The original quoted price (before 3x multiplier) + pub price: Amount, } impl SingleNodePayment { @@ -112,7 +93,7 @@ impl SingleNodePayment { let quotes_vec: Vec = quotes_with_prices .into_iter() .enumerate() - .map(|(idx, (quote, _))| QuotePaymentInfo { + .map(|(idx, (quote, price))| QuotePaymentInfo { quote_hash: quote.hash(), rewards_address: quote.rewards_address, amount: if idx == MEDIAN_INDEX { @@ -120,7 +101,7 @@ impl SingleNodePayment { } else { Amount::ZERO }, - quoting_metrics: quote.quoting_metrics, + price, }) .collect(); @@ -198,71 +179,63 @@ impl SingleNodePayment { Ok(result_hashes) } - /// Verify all payments on-chain. + /// Verify that a median-priced quote was paid at least 3× its price on-chain. /// - /// This checks that all 5 payments were recorded on the blockchain. - /// The contract requires exactly 5 payment verifications. - /// - /// # Arguments - /// - /// * `network` - The EVM network to verify on - /// * `owned_quote_hash` - Optional quote hash that this node owns (expects to receive payment) + /// When multiple quotes share the median price (a tie), the client and + /// verifier may sort them in different order. This method checks all + /// quotes tied at the median price and accepts the payment if any one + /// of them was paid the correct amount. /// /// # Returns /// - /// The total verified payment amount received by owned quotes. + /// The on-chain payment amount for the verified quote. /// /// # Errors /// - /// Returns an error if verification fails or payment is invalid. 
- pub async fn verify( - &self, - network: &EvmNetwork, - owned_quote_hash: Option, - ) -> Result { - // Build payment digest for all 5 quotes - // Each quote needs an owned QuotingMetrics (tuple requires ownership) - let payment_digest: Vec<_> = self + /// Returns an error if the on-chain lookup fails or none of the + /// median-priced quotes were paid at least 3× the median price. + pub async fn verify(&self, network: &EvmNetwork) -> Result { + let median = &self.quotes[MEDIAN_INDEX]; + let median_price = median.price; + let expected_amount = median.amount; + + // Collect all quotes tied at the median price + let tied_quotes: Vec<&QuotePaymentInfo> = self .quotes .iter() - .map(|q| (q.quote_hash, zero_quoting_metrics(), q.rewards_address)) + .filter(|q| q.price == median_price) .collect(); - // Mark owned quotes - let owned_quote_hashes = owned_quote_hash.map_or_else(Vec::new, |hash| vec![hash]); - info!( - "Verifying {} payments (owned: {})", - payment_digest.len(), - owned_quote_hashes.len() + "Verifying median quote payment: expected at least {expected_amount} atto, {} quote(s) tied at median price", + tied_quotes.len() ); - let verified_amount = - payment_vault::verify_data_payment(network, owned_quote_hashes.clone(), payment_digest) + let provider = evmlib::utils::http_provider(network.rpc_url().clone()); + let vault_address = *network.payment_vault_address(); + let contract = + evmlib::contract::payment_vault::interface::IPaymentVault::new(vault_address, provider); + + // Check each tied quote — accept if any one was paid correctly + for candidate in &tied_quotes { + let result = contract + .completedPayments(candidate.quote_hash) + .call() .await - .map_err(|e| Error::Payment(format!("Payment verification failed: {e}")))?; - - if owned_quote_hashes.is_empty() { - info!("Payment verified as valid on-chain"); - } else { - // If we own a quote, verify the amount matches - let expected = self - .quotes - .iter() - .find(|q| Some(q.quote_hash) == 
owned_quote_hash) - .ok_or_else(|| Error::Payment("Owned quote hash not found in payment".to_string()))? - .amount; - - if verified_amount != expected { - return Err(Error::Payment(format!( - "Payment amount mismatch: expected {expected}, verified {verified_amount}" - ))); - } + .map_err(|e| Error::Payment(format!("completedPayments lookup failed: {e}")))?; - info!("Payment verified: {verified_amount} atto received"); + let on_chain_amount = Amount::from(result.amount); + + if on_chain_amount >= expected_amount { + info!("Payment verified: {on_chain_amount} atto paid for median-priced quote"); + return Ok(on_chain_amount); + } } - Ok(verified_amount) + Err(Error::Payment(format!( + "No median-priced quote was paid enough: expected at least {expected_amount}, checked {} tied quote(s)", + tied_quotes.len() + ))) } } @@ -270,9 +243,7 @@ impl SingleNodePayment { mod tests { use super::*; use alloy::node_bindings::{Anvil, AnvilInstance}; - use evmlib::contract::payment_vault::interface; - use evmlib::quoting_metrics::QuotingMetrics; - use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, Testnet}; + use evmlib::testnet::{deploy_network_token_contract, deploy_payment_vault_contract, Testnet}; use evmlib::transaction_config::TransactionConfig; use evmlib::utils::{dummy_address, dummy_hash}; use evmlib::wallet::Wallet; @@ -285,17 +256,7 @@ mod tests { PaymentQuote { content: XorName::random(&mut rand::thread_rng()), timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 1000, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }, + price: Amount::from(1u64), rewards_address: RewardsAddress::new([rewards_addr_seed; 20]), pub_key: vec![], signature: vec![], @@ -337,7 +298,7 @@ mod tests { .await .expect("deploy network token"); let mut payment_vault = - 
deploy_data_payments_contract(&rpc_url, &node, *network_token.contract.address()) + deploy_payment_vault_contract(&rpc_url, &node, *network_token.contract.address()) .await .expect("deploy data payments"); @@ -375,23 +336,20 @@ assert!(result.is_ok(), "Payment failed: {:?}", result.err()); println!("✓ Paid for {} quotes", quote_payments.len()); - // Verify payments using handler directly - let payment_verifications: Vec<_> = quote_payments - .into_iter() - .map(|v| interface::IPaymentVault::PaymentVerification { - metrics: zero_quoting_metrics().into(), - rewardsAddress: v.1, - quoteHash: v.0, - }) - .collect(); - - let results = payment_vault - .verify_payment(payment_verifications) - .await - .expect("Verify payment failed"); + // Verify payments via completedPayments mapping + for (quote_hash, _reward_address, amount) in &quote_payments { + let result = payment_vault + .contract + .completedPayments(*quote_hash) + .call() + .await + .expect("completedPayments lookup failed"); - for result in results { - assert!(result.isValid, "Payment verification should be valid"); + let on_chain_amount = result.amount; + assert!( + on_chain_amount >= u128::try_from(*amount).expect("amount fits u128"), + "On-chain amount should be >= paid amount" + ); } println!("✓ All 5 payments verified successfully"); @@ -408,7 +366,7 @@ .await .expect("deploy network token"); let mut payment_vault = - deploy_data_payments_contract(&rpc_url, &node, *network_token.contract.address()) + deploy_payment_vault_contract(&rpc_url, &node, *network_token.contract.address()) .await .expect("deploy data payments"); @@ -452,31 +410,32 @@ assert!(result.is_ok(), "Payment failed: {:?}", result.err()); println!("✓ Paid: 1 real (3 atto) + 4 dummy (0 atto)"); - // Verify all 5 payments - let payment_verifications: Vec<_> = quote_payments - .into_iter() - .map(|v| interface::IPaymentVault::PaymentVerification { - metrics: zero_quoting_metrics().into(), - rewardsAddress: v.1, - 
quoteHash: v.0, - }) - .collect(); + // Verify via completedPayments mapping - let results = payment_vault - .verify_payment(payment_verifications) + // Check that real payment is recorded on-chain + let real_result = payment_vault + .contract + .completedPayments(real_quote_hash) + .call() .await - .expect("Verify payment failed"); + .expect("completedPayments lookup failed"); - // Check that real payment is valid assert!( - results.first().is_some_and(|r| r.isValid), - "Real payment should be valid" + real_result.amount > 0, + "Real payment should have non-zero amount on-chain" ); println!("✓ Real payment verified (3 atto)"); - // Check dummy payments - for (i, result) in results.iter().skip(1).enumerate() { - println!(" Dummy payment {}: valid={}", i + 1, result.isValid); + // Check dummy payments (should have 0 amount) + for (i, (hash, _, _)) in quote_payments.iter().skip(1).enumerate() { + let result = payment_vault + .contract + .completedPayments(*hash) + .call() + .await + .expect("completedPayments lookup failed"); + + println!(" Dummy payment {}: amount={}", i + 1, result.amount); } println!("\n✅ SingleNode payment strategy works!"); @@ -492,17 +451,7 @@ mod tests { let quote = PaymentQuote { content: XorName::random(&mut rand::thread_rng()), timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![(0, 10)], - max_records: 1000, - received_payment_count: 5, - live_time: 3600, - network_density: None, - network_size: Some(100), - }, + price: Amount::from(*price), rewards_address: RewardsAddress::new([1u8; 20]), pub_key: vec![], signature: vec![], @@ -600,6 +549,41 @@ mod tests { assert_eq!(addresses.len(), CLOSE_GROUP_SIZE); } + #[test] + #[allow(clippy::unwrap_used)] + fn test_tied_median_prices_all_share_median_price() { + // Prices: 10, 30, 30, 30, 50 — three quotes tied at median price 30 + let prices = [10u64, 30, 30, 30, 50]; + let mut quotes_with_prices = 
Vec::new(); + + for (i, price) in prices.iter().enumerate() { + let quote = PaymentQuote { + content: XorName::random(&mut rand::thread_rng()), + timestamp: SystemTime::now(), + price: Amount::from(*price), + #[allow(clippy::cast_possible_truncation)] // i is always < 5 + rewards_address: RewardsAddress::new([i as u8 + 1; 20]), + pub_key: vec![], + signature: vec![], + }; + quotes_with_prices.push((quote, Amount::from(*price))); + } + + let payment = SingleNodePayment::from_quotes(quotes_with_prices).unwrap(); + + // All three tied quotes should have price == 30 + let tied_count = payment + .quotes + .iter() + .filter(|q| q.price == Amount::from(30u64)) + .count(); + assert_eq!(tied_count, 3, "Should have 3 quotes tied at median price"); + + // Only the median index gets the 3x amount + assert_eq!(payment.quotes[MEDIAN_INDEX].amount, Amount::from(90u64)); + assert_eq!(payment.total_amount(), Amount::from(90u64)); + } + #[test] #[allow(clippy::unwrap_used)] fn test_total_amount_equals_3x_median() { @@ -633,63 +617,33 @@ mod tests { // Approve tokens wallet - .approve_to_spend_tokens(*network.data_payments_address(), evmlib::common::U256::MAX) + .approve_to_spend_tokens(*network.payment_vault_address(), evmlib::common::U256::MAX) .await .map_err(|e| Error::Payment(format!("Failed to approve tokens: {e}")))?; println!("✓ Approved tokens"); - // Create 5 quotes with real prices from contract + // Create 5 quotes with prices calculated from record counts let chunk_xor = XorName::random(&mut rand::thread_rng()); - let chunk_size = 1024usize; let mut quotes_with_prices = Vec::new(); for i in 0..CLOSE_GROUP_SIZE { - let quoting_metrics = QuotingMetrics { - data_size: chunk_size, - data_type: 0, - close_records_stored: 10 + i, - records_per_type: vec![( - 0, - u32::try_from(10 + i) - .map_err(|e| Error::Payment(format!("Invalid record count: {e}")))?, - )], - max_records: 1000, - received_payment_count: 5, - live_time: 3600, - network_density: None, - network_size: 
Some(100), - }; - - // Get market price for this quote - // PERF-004: Clone required - payment_vault::get_market_price (external API from evmlib) - // takes ownership of Vec. We need quoting_metrics again below for - // PaymentQuote construction, so the clone is unavoidable. - let prices = payment_vault::get_market_price(&network, vec![quoting_metrics.clone()]) - .await - .map_err(|e| Error::Payment(format!("Failed to get market price: {e}")))?; - - let price = prices.first().ok_or_else(|| { - Error::Payment(format!( - "Empty price list from get_market_price for quote {}: expected at least 1 price but got {} elements", - i, - prices.len() - )) - })?; + let records_stored = 10 + i; + let price = crate::payment::pricing::calculate_price(records_stored); let quote = PaymentQuote { content: chunk_xor, timestamp: SystemTime::now(), - quoting_metrics, + price, rewards_address: wallet.address(), pub_key: vec![], signature: vec![], }; - quotes_with_prices.push((quote, *price)); + quotes_with_prices.push((quote, price)); } - println!("✓ Got 5 real quotes from contract"); + println!("✓ Got 5 quotes with calculated prices"); // Create SingleNode payment (will sort internally and select median) let payment = SingleNodePayment::from_quotes(quotes_with_prices)?; @@ -729,22 +683,12 @@ mod tests { let tx_hashes = payment.pay(&wallet).await?; println!("✓ Payment successful: {} transactions", tx_hashes.len()); - // Verify payment (as owner of median quote) - let median_quote = payment - .quotes - .get(MEDIAN_INDEX) - .ok_or_else(|| { - Error::Payment(format!( - "Index out of bounds: tried to access median index {} but quotes array has {} elements", - MEDIAN_INDEX, - payment.quotes.len() - )) - })?; - let median_quote_hash = median_quote.quote_hash; - let verified_amount = payment.verify(&network, Some(median_quote_hash)).await?; + // Verify median quote payment — all nodes run this same check + let verified_amount = payment.verify(&network).await?; + let expected_median_amount = 
payment.quotes[MEDIAN_INDEX].amount; assert_eq!( - verified_amount, median_quote.amount, + verified_amount, expected_median_amount, "Verified amount should match median payment" ); diff --git a/src/payment/verifier.rs b/src/payment/verifier.rs index d134c26..0a778a9 100644 --- a/src/payment/verifier.rs +++ b/src/payment/verifier.rs @@ -10,9 +10,10 @@ use crate::payment::proof::{ deserialize_merkle_proof, deserialize_proof, detect_proof_type, ProofType, }; use crate::payment::quote::{verify_quote_content, verify_quote_signature}; -use evmlib::contract::merkle_payment_vault; -use evmlib::merkle_batch_payment::PoolHash; -use evmlib::merkle_payments::OnChainPaymentInfo; +use crate::payment::single_node::SingleNodePayment; +use evmlib::common::Amount; +use evmlib::contract::payment_vault; +use evmlib::merkle_batch_payment::{OnChainPaymentInfo, PoolHash}; use evmlib::Network as EvmNetwork; use evmlib::ProofOfPayment; use evmlib::RewardsAddress; @@ -288,15 +289,17 @@ impl PaymentVerifier { self.cache.insert(xorname); } - /// Verify an EVM payment proof. + /// Verify a single-node EVM payment proof. /// - /// This verification ALWAYS validates payment proofs on-chain. - /// It verifies that: - /// 1. All quotes target the correct content address (xorname binding) - /// 2. All quote ML-DSA-65 signatures are valid (offloaded to a blocking - /// thread via `spawn_blocking` since post-quantum signature verification - /// is CPU-intensive) - /// 3. The payment was made on-chain via the EVM payment vault contract + /// Verification steps: + /// 1. Exactly `CLOSE_GROUP_SIZE` quotes are present + /// 2. All quotes target the correct content address (xorname binding) + /// 3. Quote timestamps are fresh (not expired or future-dated) + /// 4. Peer ID bindings match the ML-DSA-65 public keys + /// 5. This node is among the quoted recipients + /// 6. All ML-DSA-65 signatures are valid (offloaded to `spawn_blocking`) + /// 7. 
The median-priced quote was paid at least 3x its price on-chain + /// (looked up via `completedPayments(quoteHash)` on the payment vault) /// /// For unit tests that don't need on-chain verification, pre-populate /// the cache so `verify_payment` returns `CachedAsVerified` before @@ -329,75 +332,34 @@ impl PaymentVerifier { .await .map_err(|e| Error::Payment(format!("Signature verification task failed: {e}")))??; - // Verify on-chain payment. - // - // The SingleNode payment model pays only the median-priced quote (at 3x) - // and sends Amount::ZERO for the other 4. evmlib's pay_for_quotes() - // filters out zero-amount payments, so only 1 quote has an on-chain - // record. The contract's verifyPayment() returns amountPaid=0 and - // isValid=false for unpaid quotes, which is expected. - // - // We use the amountPaid field to distinguish paid from unpaid results: - // - At least one quote must have been paid (amountPaid > 0) - // - ALL paid quotes must be valid (isValid=true) - // - Unpaid quotes (amountPaid=0) are allowed to be invalid - // - // This matches autonomi's strict verification model (all paid must be - // valid) while accommodating payment models that don't pay every quote. - let payment_digest = payment.digest(); - if payment_digest.is_empty() { - return Err(Error::Payment("Payment has no quotes".to_string())); - } - - let payment_verifications: Vec<_> = payment_digest - .into_iter() - .map( - evmlib::contract::payment_vault::interface::IPaymentVault::PaymentVerification::from, - ) + // Reconstruct the SingleNodePayment to identify the median quote. + // from_quotes() sorts by price and marks the median for 3x payment. 
+ let quotes_with_prices: Vec<_> = payment + .peer_quotes + .iter() + .map(|(_, quote)| (quote.clone(), quote.price)) .collect(); + let single_payment = SingleNodePayment::from_quotes(quotes_with_prices).map_err(|e| { + Error::Payment(format!( + "Failed to reconstruct payment for verification: {e}" + )) + })?; - let provider = evmlib::utils::http_provider(self.config.evm.network.rpc_url().clone()); - let handler = evmlib::contract::payment_vault::handler::PaymentVaultHandler::new( - *self.config.evm.network.data_payments_address(), - provider, - ); - - let results = handler - .verify_payment(payment_verifications) + // Verify the median quote was paid at least 3x its price on-chain + // via completedPayments(quoteHash) on the payment vault contract. + let verified_amount = single_payment + .verify(&self.config.evm.network) .await .map_err(|e| { let xorname_hex = hex::encode(xorname); - Error::Payment(format!("EVM verification error for {xorname_hex}: {e}")) + Error::Payment(format!( + "Median quote payment verification failed for {xorname_hex}: {e}" + )) })?; - let paid_results: Vec<_> = results - .iter() - .filter(|r| r.amountPaid > evmlib::common::U256::ZERO) - .collect(); - - if paid_results.is_empty() { - let xorname_hex = hex::encode(xorname); - return Err(Error::Payment(format!( - "Payment verification failed on-chain for {xorname_hex} (no paid quotes found)" - ))); - } - - for result in &paid_results { - if !result.isValid { - let xorname_hex = hex::encode(xorname); - return Err(Error::Payment(format!( - "Payment verification failed on-chain for {xorname_hex} (paid quote is invalid)" - ))); - } - } - if tracing::enabled!(tracing::Level::INFO) { - let valid_count = paid_results.len(); - let total_results = results.len(); let xorname_hex = hex::encode(xorname); - info!( - "EVM payment verified for {xorname_hex} ({valid_count} paid and valid, {total_results} total results)" - ); + info!("EVM payment verified for {xorname_hex} (median paid {verified_amount} 
atto)"); } Ok(()) } @@ -532,9 +494,9 @@ impl PaymentVerifier { debug!("Pool cache hit for hash {}", hex::encode(pool_hash)); info } else { - // Query on-chain for payment info + // Query on-chain for completed merkle payment let info = - merkle_payment_vault::get_merkle_payment_info(&self.config.evm.network, pool_hash) + payment_vault::get_completed_merkle_payment(&self.config.evm.network, pool_hash) .await .map_err(|e| { let pool_hex = hex::encode(pool_hash); @@ -546,7 +508,7 @@ impl PaymentVerifier { let paid_node_addresses: Vec<_> = info .paidNodeAddresses .iter() - .map(|pna| (pna.rewardsAddress, usize::from(pna.poolIndex))) + .map(|pna| (pna.rewardsAddress, usize::from(pna.poolIndex), pna.amount)) .collect(); let on_chain_info = OnChainPaymentInfo { @@ -625,7 +587,32 @@ impl PaymentVerifier { ))); } - // Verify paid node indices are valid within the candidate pool. + // Compute expected per-node payment using the contract formula: + // totalAmount = median16(candidate_prices) * (1 << depth) + // amountPerNode = totalAmount / depth + let expected_per_node = if payment_info.depth > 0 { + let mut candidate_prices: Vec = merkle_proof + .winner_pool + .candidate_nodes + .iter() + .map(|c| c.price) + .collect(); + candidate_prices.sort_unstable(); // ascending + // Upper median (index 8 of 16) — matches Solidity's median16 (k = 8) + let median_price = candidate_prices[candidate_prices.len() / 2]; + let total_amount = median_price * Amount::from(1u64 << payment_info.depth); + total_amount / Amount::from(u64::from(payment_info.depth)) + } else { + Amount::ZERO + }; + + // Verify paid node indices, addresses, and amounts against the candidate pool. + // + // Each paid node must: + // 1. Have a valid index within the candidate pool + // 2. Match the expected reward address at that index + // 3. 
Have been paid at least the expected per-node amount from the + // contract formula: median16(prices) * 2^depth / depth // // Note: unlike single-node payments, merkle proofs are NOT bound to a // specific storing node. The contract pays `depth` random nodes from the @@ -634,7 +621,7 @@ impl PaymentVerifier { // any node that can verify the merkle proof is allowed to store the chunk. // Replay protection comes from the per-address proof binding (each proof // is for a specific XorName in the paid tree). - for (addr, idx) in &payment_info.paid_node_addresses { + for (addr, idx, paid_amount) in &payment_info.paid_node_addresses { let node = merkle_proof .winner_pool .candidate_nodes @@ -651,6 +638,14 @@ impl PaymentVerifier { node.reward_address ))); } + if *paid_amount < expected_per_node { + return Err(Error::Payment(format!( + "Underpayment for node at index {idx}: paid {paid_amount}, \ + expected at least {expected_per_node} \ + (median16 formula, depth={})", + payment_info.depth + ))); + } } if tracing::enabled!(tracing::Level::INFO) { @@ -949,7 +944,7 @@ mod tests { let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keygen"); let rewards_address = RewardsAddress::new([i; 20]); - let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let metrics_tracker = QuotingMetricsTracker::new(0); let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); let pub_key_bytes = public_key.as_bytes().to_vec(); @@ -992,7 +987,6 @@ mod tests { #[tokio::test] async fn test_content_address_mismatch_rejected() { use crate::payment::proof::{serialize_single_node_proof, PaymentProof}; - use evmlib::quoting_metrics::QuotingMetrics; use evmlib::{EncodedPeerId, PaymentQuote, RewardsAddress}; use std::time::SystemTime; @@ -1006,17 +1000,7 @@ mod tests { let quote = PaymentQuote { content: xor_name::XorName(wrong_xorname), timestamp: SystemTime::now(), - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: 0, - 
records_per_type: vec![], - max_records: 1000, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }, + price: Amount::from(1u64), rewards_address: RewardsAddress::new([1u8; 20]), pub_key: vec![0u8; 64], signature: vec![0u8; 64], @@ -1053,23 +1037,12 @@ mod tests { timestamp: SystemTime, rewards_address: RewardsAddress, ) -> evmlib::PaymentQuote { - use evmlib::quoting_metrics::QuotingMetrics; use evmlib::PaymentQuote; PaymentQuote { content: xor_name::XorName(xorname), timestamp, - quoting_metrics: QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 1000, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }, + price: Amount::from(1u64), rewards_address, pub_key: vec![0u8; 64], signature: vec![0u8; 64], @@ -1465,27 +1438,16 @@ mod tests { std::array::from_fn::<_, CANDIDATES_PER_POOL, _>(|i| { let ml_dsa = MlDsa65::new(); let (pub_key, secret_key) = ml_dsa.generate_keypair().expect("keygen"); - let metrics = evmlib::quoting_metrics::QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: i * 10, - records_per_type: vec![], - max_records: 500, - received_payment_count: 0, - live_time: 100, - network_density: None, - network_size: None, - }; + let price = evmlib::common::Amount::from(1024u64); #[allow(clippy::cast_possible_truncation)] let reward_address = RewardsAddress::new([i as u8; 20]); - let msg = - MerklePaymentCandidateNode::bytes_to_sign(&metrics, &reward_address, timestamp); + let msg = MerklePaymentCandidateNode::bytes_to_sign(&price, &reward_address, timestamp); let sk = MlDsaSecretKey::from_bytes(secret_key.as_bytes()).expect("sk"); let signature = ml_dsa.sign(&sk, &msg).expect("sign").as_bytes().to_vec(); MerklePaymentCandidateNode { pub_key: pub_key.as_bytes().to_vec(), - quoting_metrics: metrics, + price, reward_address, merkle_payment_timestamp: timestamp, signature, @@ -1814,10 
+1776,11 @@ mod tests { depth: 2, merkle_payment_timestamp: ts, paid_node_addresses: vec![ - // First paid node: valid (matches candidate 0) - (RewardsAddress::new([0u8; 20]), 0), + // First paid node: valid (matches candidate 0, amount matches formula) + // Expected per-node: median(1024) * 2^2 / 2 = 2048 + (RewardsAddress::new([0u8; 20]), 0, Amount::from(2048u64)), // Second paid node: index 999 is way beyond CANDIDATES_PER_POOL (16) - (RewardsAddress::new([1u8; 20]), 999), + (RewardsAddress::new([1u8; 20]), 999, Amount::from(2048u64)), ], }; verifier.pool_cache.lock().put(pool_hash, info); @@ -1849,9 +1812,10 @@ mod tests { merkle_payment_timestamp: ts, paid_node_addresses: vec![ // Index 0 with matching address [0x00; 20] - (RewardsAddress::new([0u8; 20]), 0), + // Expected per-node: median(1024) * 2^2 / 2 = 2048 + (RewardsAddress::new([0u8; 20]), 0, Amount::from(2048u64)), // Index 1 with WRONG address — candidate 1's address is [0x01; 20] - (RewardsAddress::new([0xFF; 20]), 1), + (RewardsAddress::new([0xFF; 20]), 1, Amount::from(2048u64)), ], }; verifier.pool_cache.lock().put(pool_hash, info); @@ -1878,7 +1842,11 @@ mod tests { let info = evmlib::merkle_payments::OnChainPaymentInfo { depth: 3, merkle_payment_timestamp: ts, - paid_node_addresses: vec![(RewardsAddress::new([0u8; 20]), 0)], + paid_node_addresses: vec![( + RewardsAddress::new([0u8; 20]), + 0, + Amount::from(1024u64), + )], }; verifier.pool_cache.lock().put(pool_hash, info); } @@ -1896,4 +1864,37 @@ mod tests { "Error should mention depth/count mismatch: {err_msg}" ); } + + #[tokio::test] + async fn test_merkle_underpayment_rejected() { + let verifier = create_test_verifier(); + let (xorname, tagged_proof, pool_hash, ts) = make_valid_merkle_proof_bytes(); + + // Tree depth=2, so 2 paid nodes required. Candidates all quote price=1024. + // Expected per-node: median(1024) * 2^2 / 2 = 2048. + // Pay only 1 wei per node — far below the expected amount. 
+ { + let info = evmlib::merkle_payments::OnChainPaymentInfo { + depth: 2, + merkle_payment_timestamp: ts, + paid_node_addresses: vec![ + (RewardsAddress::new([0u8; 20]), 0, Amount::from(1u64)), + (RewardsAddress::new([1u8; 20]), 1, Amount::from(1u64)), + ], + }; + verifier.pool_cache.lock().put(pool_hash, info); + } + + let result = verifier.verify_payment(&xorname, Some(&tagged_proof)).await; + + assert!( + result.is_err(), + "Should reject merkle payment where paid amount < expected per-node amount" + ); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("Underpayment"), + "Error should mention underpayment: {err_msg}" + ); + } } diff --git a/src/storage/handler.rs b/src/storage/handler.rs index 038f6c0..5bed0ab 100644 --- a/src/storage/handler.rs +++ b/src/storage/handler.rs @@ -401,9 +401,7 @@ mod tests { let storage_config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 0, - max_map_size: 0, + ..LmdbStorageConfig::test_default() }; let storage = Arc::new( LmdbStorage::new(storage_config) @@ -418,7 +416,7 @@ mod tests { local_rewards_address: rewards_address, }; let payment_verifier = Arc::new(PaymentVerifier::new(payment_config)); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing so quote requests succeed @@ -827,8 +825,8 @@ mod tests { ); assert_eq!(candidate.merkle_payment_timestamp, timestamp); - assert_eq!(candidate.quoting_metrics.data_size, 4096); - assert_eq!(candidate.quoting_metrics.data_type, DATA_TYPE_CHUNK); + // Node-calculated price based on records stored + assert!(candidate.price >= evmlib::common::Amount::ZERO); } other => panic!("expected MerkleCandidateQuoteResponse::Success, got: {other:?}"), } diff --git a/src/storage/lmdb.rs b/src/storage/lmdb.rs index 8b60b92..bc22211 
100644 --- a/src/storage/lmdb.rs +++ b/src/storage/lmdb.rs @@ -10,15 +10,33 @@ use crate::ant_protocol::XorName; use crate::error::{Error, Result}; use heed::types::Bytes; -use heed::{Database, Env, EnvOpenOptions}; +use heed::{Database, Env, EnvOpenOptions, MdbError}; use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Instant; use tokio::task::spawn_blocking; -use tracing::{debug, trace, warn}; +use tracing::{debug, info, trace, warn}; -/// Default LMDB map size: 32 GiB. +/// Bytes in one GiB. +const GIB: u64 = 1024 * 1024 * 1024; + +/// Convert a byte count to GiB for human-readable log messages. +#[allow(clippy::cast_precision_loss)] // display only — sub-byte precision is irrelevant +fn bytes_to_gib(bytes: u64) -> f64 { + bytes as f64 / GIB as f64 +} + +/// Absolute minimum LMDB map size. +/// +/// Even on a nearly-full disk the database must be able to open. +/// Set to 256 MiB — enough for millions of LMDB pages. +const MIN_MAP_SIZE: usize = 256 * 1024 * 1024; + +/// How often to re-query available disk space (in seconds). /// -/// Node operators can override this via `storage.db_size_gb` in `config.toml`. -const DEFAULT_MAX_MAP_SIZE: usize = 32 * 1_073_741_824; // 32 GiB +/// Between checks the cached result is trusted. Disk space changes slowly +/// relative to chunk-write throughput, so a multi-second window is safe. +const DISK_CHECK_INTERVAL_SECS: u64 = 5; /// Configuration for LMDB storage. #[derive(Debug, Clone)] @@ -27,10 +45,15 @@ pub struct LmdbStorageConfig { pub root_dir: PathBuf, /// Whether to verify content on read (compares hash to address). pub verify_on_read: bool, - /// Maximum number of chunks to store (0 = unlimited). - pub max_chunks: usize, - /// Maximum LMDB map size in bytes (0 = use default of 32 GiB). + /// Explicit LMDB map size cap in bytes. + /// + /// When 0 (default), the map size is computed automatically from available + /// disk space and grows on demand when more storage becomes available. 
     pub max_map_size: usize,
+    /// Minimum free disk space (in bytes) to preserve on the storage partition.
+    ///
+    /// Writes are refused when available space drops below this threshold.
+    pub disk_reserve: u64,
 }

 impl Default for LmdbStorageConfig {
@@ -38,8 +61,21 @@ impl Default for LmdbStorageConfig {
         Self {
             root_dir: PathBuf::from(".ant/chunks"),
             verify_on_read: true,
-            max_chunks: 0,
             max_map_size: 0,
+            disk_reserve: GIB,
+        }
+    }
+}
+
+impl LmdbStorageConfig {
+    /// A test-friendly default with `disk_reserve` set to 0 so unit tests
+    /// don't depend on the host having >= 1 GiB free disk space.
+    #[cfg(any(test, feature = "test-utils"))]
+    #[must_use]
+    pub fn test_default() -> Self {
+        Self {
+            disk_reserve: 0,
+            ..Self::default()
         }
     }
 }
@@ -74,8 +110,21 @@ pub struct LmdbStorage {
     db: Database<Bytes, Bytes>,
     /// Storage configuration.
     config: LmdbStorageConfig,
+    /// Path to the LMDB environment directory (for disk-space queries).
+    env_dir: PathBuf,
     /// Operation statistics.
     stats: parking_lot::RwLock<StorageStats>,
+    /// Serialises access to the LMDB environment during a map resize.
+    ///
+    /// Normal read/write operations acquire a **shared** lock. The rare
+    /// resize path acquires an **exclusive** lock, ensuring no transactions
+    /// are active when `env.resize()` is called (an LMDB safety requirement).
+    env_lock: Arc<parking_lot::RwLock<()>>,
+    /// Timestamp of the last successful disk-space check.
+    ///
+    /// `None` means "never checked — check on next write". Updated only
+    /// after a passing check, so a low-space result is always rechecked.
+    last_disk_ok: parking_lot::Mutex<Option<Instant>>,
 }

 impl LmdbStorage {
@@ -83,6 +132,12 @@ impl LmdbStorage {
     ///
     /// Opens (or creates) an LMDB environment at `{root_dir}/chunks.mdb/`.
     ///
+    /// When `config.max_map_size` is 0 (the default) the map size is derived
+    /// from the available disk space on the partition that hosts the database,
+    /// minus `config.disk_reserve`. This allows a node to use all available
+    /// storage without a fixed cap.
If the operator adds more storage later + /// the map is resized on demand (see [`Self::put`]). + /// /// # Errors /// /// Returns an error if the LMDB environment cannot be opened. @@ -95,9 +150,17 @@ impl LmdbStorage { .map_err(|e| Error::Storage(format!("Failed to create LMDB directory: {e}")))?; let map_size = if config.max_map_size > 0 { + // Operator provided an explicit cap. config.max_map_size } else { - DEFAULT_MAX_MAP_SIZE + // Auto-scale: current DB footprint + available space − reserve. + let computed = compute_map_size(&env_dir, config.disk_reserve)?; + info!( + "Auto-computed LMDB map size: {:.2} GiB (available disk minus {:.2} GiB reserve)", + bytes_to_gib(computed as u64), + bytes_to_gib(config.disk_reserve), + ); + computed }; let env_dir_clone = env_dir.clone(); @@ -135,12 +198,15 @@ impl LmdbStorage { env, db, config, + env_dir, stats: parking_lot::RwLock::new(StorageStats::default()), + env_lock: Arc::new(parking_lot::RwLock::new(())), + last_disk_ok: parking_lot::Mutex::new(None), }; debug!( "Initialized LMDB storage at {:?} ({} existing chunks)", - env_dir, + storage.env_dir, storage.current_chunks()? ); @@ -149,10 +215,10 @@ impl LmdbStorage { /// Store a chunk. /// - /// # Arguments - /// - /// * `address` - Content address (should be BLAKE3 of content) - /// * `content` - Chunk data + /// Before writing, verifies that available disk space exceeds the + /// configured reserve. If the LMDB map is full but more disk space + /// exists (e.g. the operator added storage), the map is resized + /// automatically and the write is retried. /// /// # Returns /// @@ -160,7 +226,8 @@ impl LmdbStorage { /// /// # Errors /// - /// Returns an error if the write fails or content doesn't match address. + /// Returns an error if the write fails, content doesn't match address, + /// or the disk is too full to accept new chunks. 
     pub async fn put(&self, address: &XorName, content: &[u8]) -> Result<bool> {
         // Verify content address
         let computed = Self::compute_address(content);
@@ -181,16 +248,68 @@ impl LmdbStorage {
             return Ok(false);
         }

+        // ── Disk-space guard (cached — at most one syscall per interval) ─
+        // Placed after the duplicate check so that re-storing an existing
+        // chunk remains a harmless no-op even when disk space is low.
+        self.check_disk_space_cached()?;
+
+        // ── Write (with resize-on-demand) ───────────────────────────────
+        match self.try_put(address, content).await? {
+            PutOutcome::New => {}
+            PutOutcome::Duplicate => {
+                trace!("Chunk {} already exists", hex::encode(address));
+                self.stats.write().duplicates += 1;
+                return Ok(false);
+            }
+            PutOutcome::MapFull => {
+                // The map ceiling was reached but there may be more disk space
+                // available (e.g. operator expanded the partition).
+                self.try_resize().await?;
+                // Retry once after resize.
+                match self.try_put(address, content).await? {
+                    PutOutcome::New => {}
+                    PutOutcome::Duplicate => {
+                        self.stats.write().duplicates += 1;
+                        return Ok(false);
+                    }
+                    PutOutcome::MapFull => {
+                        return Err(Error::Storage(
+                            "LMDB map full after resize — disk may be at capacity".into(),
+                        ));
+                    }
+                }
+            }
+        }
+
+        {
+            let mut stats = self.stats.write();
+            stats.chunks_stored += 1;
+            stats.bytes_stored += content.len() as u64;
+        }
+
+        debug!(
+            "Stored chunk {} ({} bytes)",
+            hex::encode(address),
+            content.len()
+        );
+
+        Ok(true)
+    }
+
+    /// Attempt a single put inside a write transaction.
+    ///
+    /// Returns [`PutOutcome::MapFull`] instead of an error when the LMDB map
+    /// ceiling is reached, so the caller can resize and retry.
+    async fn try_put(&self, address: &XorName, content: &[u8]) -> Result<PutOutcome> {
         let key = *address;
         let value = content.to_vec();
         let env = self.env.clone();
         let db = self.db;
-        let max_chunks = self.config.max_chunks;
+        let lock = Arc::clone(&self.env_lock);
+
+        spawn_blocking(move || -> Result<PutOutcome> {
+            let _guard = lock.read();

-        // Existence check, capacity enforcement, and write all happen atomically
-        // inside a single write transaction. LMDB serializes write transactions,
-        // so there are no TOCTOU races or counter-drift issues.
-        let was_new = spawn_blocking(move || -> Result<bool> {
             let mut wtxn = env
                 .write_txn()
                 .map_err(|e| Error::Storage(format!("Failed to create write txn: {e}")))?;
@@ -201,58 +320,29 @@ impl LmdbStorage {
                 .map_err(|e| Error::Storage(format!("Failed to check existence: {e}")))?
                 .is_some()
             {
-                return Ok(false);
+                return Ok(PutOutcome::Duplicate);
             }

-            // Enforce capacity limit (0 = unlimited)
-            if max_chunks > 0 {
-                let current = db
-                    .stat(&wtxn)
-                    .map_err(|e| Error::Storage(format!("Failed to read db stats: {e}")))?
-                    .entries;
-                if current >= max_chunks {
-                    return Err(Error::Storage(format!(
-                        "Storage capacity reached: {current} chunks stored, max is {max_chunks}"
-                    )));
+            match db.put(&mut wtxn, &key, &value) {
+                Ok(()) => {}
+                Err(heed::Error::Mdb(MdbError::MapFull)) => return Ok(PutOutcome::MapFull),
+                Err(e) => {
+                    return Err(Error::Storage(format!("Failed to put chunk: {e}")));
                 }
             }

-            db.put(&mut wtxn, &key, &value)
-                .map_err(|e| Error::Storage(format!("Failed to put chunk: {e}")))?;
-
-            wtxn.commit()
-                .map_err(|e| Error::Storage(format!("Failed to commit put: {e}")))?;
-            Ok(true)
+            match wtxn.commit() {
+                Ok(()) => Ok(PutOutcome::New),
+                Err(heed::Error::Mdb(MdbError::MapFull)) => Ok(PutOutcome::MapFull),
+                Err(e) => Err(Error::Storage(format!("Failed to commit put: {e}"))),
+            }
         })
         .await
-        .map_err(|e| Error::Storage(format!("LMDB put task failed: {e}")))??;
-
-        if !was_new {
-            trace!("Chunk {} already exists", hex::encode(address));
-            self.stats.write().duplicates += 1;
-            return Ok(false);
-        }
-
-        {
-            let mut stats = self.stats.write();
-            stats.chunks_stored += 1;
-            stats.bytes_stored += content.len() as u64;
-        }
-
-        debug!(
-            "Stored chunk {} ({} bytes)",
-            hex::encode(address),
-            content.len()
-        );
-
-        Ok(true)
+        .map_err(|e| Error::Storage(format!("LMDB put task failed: {e}")))?
     }

     /// Retrieve a chunk.
     ///
-    /// # Arguments
-    ///
-    /// * `address` - Content address to retrieve
-    ///
     /// # Returns
     ///
     /// Returns `Some(content)` if found, `None` if not found.
@@ -264,8 +354,10 @@ impl LmdbStorage {
         let key = *address;
         let env = self.env.clone();
         let db = self.db;
+        let lock = Arc::clone(&self.env_lock);

         let content = spawn_blocking(move || -> Result<Option<Vec<u8>>> {
+            let _guard = lock.read();
             let rtxn = env
                 .read_txn()
                 .map_err(|e| Error::Storage(format!("Failed to create read txn: {e}")))?;
@@ -320,6 +412,7 @@ impl LmdbStorage {
     ///
     /// Returns an error if the LMDB read transaction fails.
     pub fn exists(&self, address: &XorName) -> Result<bool> {
+        let _guard = self.env_lock.read();
         let rtxn = self
             .env
             .read_txn()
@@ -341,8 +434,10 @@ impl LmdbStorage {
         let key = *address;
         let env = self.env.clone();
         let db = self.db;
+        let lock = Arc::clone(&self.env_lock);

         let deleted = spawn_blocking(move || -> Result<bool> {
+            let _guard = lock.read();
             let mut wtxn = env
                 .write_txn()
                 .map_err(|e| Error::Storage(format!("Failed to create write txn: {e}")))?;
@@ -385,6 +480,7 @@ impl LmdbStorage {
     ///
     /// Returns an error if the LMDB read transaction fails.
     pub fn current_chunks(&self) -> Result<usize> {
+        let _guard = self.env_lock.read();
         let rtxn = self
             .env
             .read_txn()
@@ -408,6 +504,140 @@ impl LmdbStorage {
     pub fn root_dir(&self) -> &Path {
         &self.config.root_dir
     }
+
+    /// Check available disk space, skipping the syscall if a recent check passed.
+    ///
+    /// Only caches *passing* results — a low-space condition is always
+    /// rechecked so we detect freed space promptly.
+    fn check_disk_space_cached(&self) -> Result<()> {
+        {
+            let last = self.last_disk_ok.lock();
+            if let Some(t) = *last {
+                if t.elapsed().as_secs() < DISK_CHECK_INTERVAL_SECS {
+                    return Ok(());
+                }
+            }
+        }
+        // Cache miss or stale — perform the actual statvfs check.
+        check_disk_space(&self.env_dir, self.config.disk_reserve)?;
+        // Passed — update the cache timestamp.
+        *self.last_disk_ok.lock() = Some(Instant::now());
+        Ok(())
+    }
+
+    /// Grow the LMDB map to match currently available disk space.
+    ///
+    /// The new size is the **larger** of:
+    ///   1. the current map size (so existing data is never truncated), and
+    ///   2. `current_db_file_size + available_space − reserve`
+    ///      (so all reachable disk space can be used).
+    ///
+    /// Acquires an **exclusive** lock on `env_lock` so that no read or write
+    /// transactions are active when the underlying `mdb_env_set_mapsize` is
+    /// called (an LMDB safety requirement).
+ #[allow(unsafe_code)] + async fn try_resize(&self) -> Result<()> { + let from_disk = compute_map_size(&self.env_dir, self.config.disk_reserve)?; + let env = self.env.clone(); + let lock = Arc::clone(&self.env_lock); + + spawn_blocking(move || -> Result<()> { + // Exclusive lock guarantees no concurrent transactions. + let _guard = lock.write(); + + // Never shrink below the current map — existing data must remain + // addressable regardless of what the disk-space calculation says. + let current_map = env.info().map_size; + let new_size = from_disk.max(current_map); + + if new_size <= current_map { + debug!("LMDB map resize skipped — no additional disk space available"); + return Ok(()); + } + + // SAFETY: We hold an exclusive lock, so no transactions are active. + unsafe { + env.resize(new_size) + .map_err(|e| Error::Storage(format!("Failed to resize LMDB map: {e}")))?; + } + + info!( + "Resized LMDB map to {:.2} GiB (was {:.2} GiB)", + bytes_to_gib(new_size as u64), + bytes_to_gib(current_map as u64), + ); + Ok(()) + }) + .await + .map_err(|e| Error::Storage(format!("LMDB resize task failed: {e}")))? + } +} + +// ──────────────────────────────────────────────────────────────────────────── +// Helpers +// ──────────────────────────────────────────────────────────────────────────── + +/// Outcome of a single `try_put` attempt. +enum PutOutcome { + /// Chunk was newly stored. + New, + /// Chunk already existed (idempotent). + Duplicate, + /// The LMDB map ceiling was reached — caller should resize and retry. + MapFull, +} + +/// Compute the LMDB map size from the disk hosting `db_dir`. 
+///
+/// The result covers **all existing data** plus all remaining usable disk
+/// space:
+///
+/// ```text
+/// map_size = current_db_file_size + max(0, available_space − reserve)
+/// ```
+///
+/// `available_space` (from `statvfs`) reports only the *free* bytes on the
+/// partition — the DB file's own footprint is **not** included, so adding
+/// it back ensures the map is always large enough for the data already
+/// stored.
+///
+/// The result is page-aligned and never falls below [`MIN_MAP_SIZE`].
+fn compute_map_size(db_dir: &Path, reserve: u64) -> Result<usize> {
+    let available = fs2::available_space(db_dir)
+        .map_err(|e| Error::Storage(format!("Failed to query available disk space: {e}")))?;
+
+    // The MDB data file may not exist yet on first run.
+    let mdb_file = db_dir.join("data.mdb");
+    let current_db_bytes = std::fs::metadata(&mdb_file).map(|m| m.len()).unwrap_or(0);
+
+    // available_space excludes the DB file, so we add it back to get the
+    // total space the DB could occupy while still leaving `reserve` free.
+    let growth_room = available.saturating_sub(reserve);
+    let target = current_db_bytes.saturating_add(growth_room);

+    // Align up to system page size (required by heed's resize).
+    let page = page_size::get() as u64;
+    let aligned = target.div_ceil(page) * page;
+
+    let result = usize::try_from(aligned).unwrap_or(usize::MAX);
+    Ok(result.max(MIN_MAP_SIZE))
+}
+
+/// Reject the write early if available disk space is below `reserve`.
+fn check_disk_space(db_dir: &Path, reserve: u64) -> Result<()> {
+    let available = fs2::available_space(db_dir)
+        .map_err(|e| Error::Storage(format!("Failed to query available disk space: {e}")))?;
+
+    if available < reserve {
+        return Err(Error::Storage(format!(
+            "Insufficient disk space: {:.2} GiB available, {:.2} GiB reserve required.
\ + Free disk space or increase the partition to continue storing chunks.", + bytes_to_gib(available), + bytes_to_gib(reserve), + ))); + } + + Ok(()) } #[cfg(test)] @@ -420,9 +650,7 @@ mod tests { let temp_dir = TempDir::new().expect("create temp dir"); let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 0, - max_map_size: 0, + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(config).await.expect("create storage"); (storage, temp_dir) @@ -509,34 +737,6 @@ mod tests { assert!(!deleted2); } - #[tokio::test] - async fn test_max_chunks_enforced() { - let temp_dir = TempDir::new().expect("create temp dir"); - let config = LmdbStorageConfig { - root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 2, - max_map_size: 0, - }; - let storage = LmdbStorage::new(config).await.expect("create storage"); - - let content1 = b"chunk one"; - let content2 = b"chunk two"; - let content3 = b"chunk three"; - let addr1 = LmdbStorage::compute_address(content1); - let addr2 = LmdbStorage::compute_address(content2); - let addr3 = LmdbStorage::compute_address(content3); - - // First two should succeed - assert!(storage.put(&addr1, content1).await.is_ok()); - assert!(storage.put(&addr2, content2).await.is_ok()); - - // Third should be rejected - let result = storage.put(&addr3, content3).await; - assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("capacity reached")); - } - #[tokio::test] async fn test_address_mismatch() { let (storage, _temp) = create_test_storage().await; @@ -586,32 +786,6 @@ mod tests { assert_eq!(stats.current_chunks, 2); } - #[tokio::test] - async fn test_capacity_recovers_after_delete() { - let temp_dir = TempDir::new().expect("create temp dir"); - let config = LmdbStorageConfig { - root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 1, - max_map_size: 0, - }; - let storage = 
LmdbStorage::new(config).await.expect("create storage"); - - let first = b"first chunk"; - let second = b"second chunk"; - let addr1 = LmdbStorage::compute_address(first); - let addr2 = LmdbStorage::compute_address(second); - - storage.put(&addr1, first).await.expect("put first"); - storage.delete(&addr1).await.expect("delete first"); - - // Should succeed because delete freed capacity. - storage.put(&addr2, second).await.expect("put second"); - - let stats = storage.stats(); - assert_eq!(stats.current_chunks, 1); - } - #[tokio::test] async fn test_persistence_across_reopen() { let temp_dir = TempDir::new().expect("create temp dir"); @@ -622,9 +796,7 @@ mod tests { { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 0, - max_map_size: 0, + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(config).await.expect("create storage"); storage.put(&address, content).await.expect("put"); @@ -634,9 +806,7 @@ mod tests { { let config = LmdbStorageConfig { root_dir: temp_dir.path().to_path_buf(), - verify_on_read: true, - max_chunks: 0, - max_map_size: 0, + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(config).await.expect("reopen storage"); assert_eq!(storage.current_chunks().expect("current_chunks"), 1); diff --git a/tests/e2e/data_types/chunk.rs b/tests/e2e/data_types/chunk.rs index 557892a..c4a2d70 100644 --- a/tests/e2e/data_types/chunk.rs +++ b/tests/e2e/data_types/chunk.rs @@ -431,9 +431,7 @@ mod tests { let storage = LmdbStorage::new(LmdbStorageConfig { root_dir: temp_dir.clone(), - verify_on_read: true, - max_chunks: 0, - max_map_size: 0, + ..LmdbStorageConfig::test_default() }) .await?; @@ -443,7 +441,7 @@ mod tests { cache_capacity: 100, local_rewards_address: rewards_address, }); - let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let metrics_tracker = QuotingMetricsTracker::new(100); let quote_generator = QuoteGenerator::new(rewards_address, 
metrics_tracker); let protocol = AntProtocol::new( diff --git a/tests/e2e/merkle_payment.rs b/tests/e2e/merkle_payment.rs index ee59522..e9bf6bb 100644 --- a/tests/e2e/merkle_payment.rs +++ b/tests/e2e/merkle_payment.rs @@ -22,11 +22,11 @@ use ant_node::compute_address; use ant_node::payment::{ serialize_merkle_proof, MAX_PAYMENT_PROOF_SIZE_BYTES, MIN_PAYMENT_PROOF_SIZE_BYTES, }; +use evmlib::common::Amount; use evmlib::merkle_payments::{ MerklePaymentCandidateNode, MerklePaymentCandidatePool, MerklePaymentProof, MerkleTree, CANDIDATES_PER_POOL, }; -use evmlib::quoting_metrics::QuotingMetrics; use evmlib::testnet::Testnet; use evmlib::RewardsAddress; use rand::Rng; @@ -178,26 +178,16 @@ fn build_candidate_nodes(timestamp: u64) -> [MerklePaymentCandidateNode; CANDIDA std::array::from_fn(|i| { let ml_dsa = MlDsa65::new(); let (pub_key, secret_key) = ml_dsa.generate_keypair().expect("keygen"); - let metrics = QuotingMetrics { - data_size: 1024, - data_type: 0, - close_records_stored: i * 10, - records_per_type: vec![], - max_records: 500, - received_payment_count: 0, - live_time: 100, - network_density: None, - network_size: None, - }; + let price = Amount::from(1024u64); #[allow(clippy::cast_possible_truncation)] let reward_address = RewardsAddress::new([i as u8; 20]); - let msg = MerklePaymentCandidateNode::bytes_to_sign(&metrics, &reward_address, timestamp); + let msg = MerklePaymentCandidateNode::bytes_to_sign(&price, &reward_address, timestamp); let sk = MlDsaSecretKey::from_bytes(secret_key.as_bytes()).expect("sk"); let signature = ml_dsa.sign(&sk, &msg).expect("sign").as_bytes().to_vec(); MerklePaymentCandidateNode { pub_key: pub_key.as_bytes().to_vec(), - quoting_metrics: metrics, + price, reward_address, merkle_payment_timestamp: timestamp, signature, diff --git a/tests/e2e/testnet.rs b/tests/e2e/testnet.rs index ced13a3..7fbb22f 100644 --- a/tests/e2e/testnet.rs +++ b/tests/e2e/testnet.rs @@ -105,10 +105,6 @@ const TEST_PAYMENT_CACHE_CAPACITY: usize = 1000; 
/// Test rewards address (20 bytes, all 0x01). const TEST_REWARDS_ADDRESS: [u8; 20] = [0x01; 20]; -/// Max records for quoting metrics (derived from node storage limit / max chunk size). -/// 5 GB / 4 MB = 1280 records. -const TEST_MAX_RECORDS: usize = 1280; - /// Initial records for quoting metrics (test value). const TEST_INITIAL_RECORDS: usize = 1000; @@ -1059,9 +1055,7 @@ impl TestNetwork { // Create LMDB storage let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), - verify_on_read: true, - max_chunks: 0, // Unlimited for tests - max_map_size: 0, + ..LmdbStorageConfig::test_default() }; let storage = LmdbStorage::new(storage_config) .await @@ -1081,7 +1075,7 @@ impl TestNetwork { let payment_verifier = PaymentVerifier::new(payment_config); // Create quote generator with ML-DSA-65 signing from the test node's identity - let metrics_tracker = QuotingMetricsTracker::new(TEST_MAX_RECORDS, TEST_INITIAL_RECORDS); + let metrics_tracker = QuotingMetricsTracker::new(TEST_INITIAL_RECORDS); let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); // Wire ML-DSA-65 signing so quotes are properly signed and verifiable