diff --git a/.github/workflows/claude-code-review.yml b/.github/workflows/claude-code-review.yml deleted file mode 100644 index 310277d5..00000000 --- a/.github/workflows/claude-code-review.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Claude Code Review - -on: - # Use pull_request_target to support fork PRs (has access to secrets) - pull_request_target: - types: [opened, synchronize, ready_for_review, reopened] - # Optional: Only run on specific file changes - # paths: - # - "src/**/*.ts" - # - "src/**/*.tsx" - # - "src/**/*.js" - # - "src/**/*.jsx" - -jobs: - claude-review: - # Optional: Filter by PR author - # if: | - # github.event.pull_request.user.login == 'external-contributor' || - # github.event.pull_request.user.login == 'new-developer' || - # github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' - - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: read - issues: read - id-token: write - - steps: - # Checkout the PR head (fork or same-repo) using PR ref for security - - name: Checkout PR - uses: actions/checkout@v4 - with: - ref: refs/pull/${{ github.event.pull_request.number }}/head - fetch-depth: 20 - - # Use fork with PR #614 fix until merged into main - # See: https://github.com/anthropics/claude-code-action/pull/614 - - name: Run Claude Code Review - id: claude-review - uses: keithah/claude-code-action@fork-pr-support - with: - claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} - - # Allow external contributors without write access (for fork PRs) - allowed_non_write_users: "*" - - plugin_marketplaces: 'https://github.com/anthropics/claude-code.git' - plugins: 'code-review@claude-code-plugins' - prompt: '/code-review:code-review ${{ github.repository }}/pull/${{ github.event.pull_request.number }}' - diff --git a/CLAUDE.md b/CLAUDE.md index c2a282d3..1413ba5d 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -47,6 +47,19 @@ RUST_LOG=debug cargo run --release -- --listen 0.0.0.0:10000 - No `panic!()` - Return `Result` instead - **Exception**: Test code may use these for assertions +### Payment Verification Policy +**Production nodes require payment by default.** + +- All new chunk storage requires EVM payment verification on Arbitrum +- Payment verification is **enabled by default** via `PaymentConfig::default()` +- Test environments can disable payment via: + - CLI flag: `--disable-payment-verification` + - Config: `PaymentVerifierConfig { evm: EvmVerifierConfig { enabled: false, .. }, .. }` +- Previously-paid chunks are cached and do not require re-verification +- Test utilities (e.g., `create_test_protocol()`) explicitly disable EVM verification + +See `src/payment/verifier.rs` for implementation details. + --- ## 🚨 CRITICAL: Saorsa Network Infrastructure & Port Isolation diff --git a/Cargo.toml b/Cargo.toml index 6cded9e7..a9779ef6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,18 +27,20 @@ name = "saorsa-devnet" path = "src/bin/saorsa-devnet/main.rs" [[bin]] -name = "saorsa-client" -path = "src/bin/saorsa-client/main.rs" +name = "saorsa-cli" +path = "src/bin/saorsa-cli/main.rs" [dependencies] # Core (provides EVERYTHING: networking, DHT, security, trust, storage) -saorsa-core = "0.12.1" +saorsa-core = "0.13.0" saorsa-pqc = "0.4.0" # Payment verification - autonomi network lookup + EVM payment ant-evm = "0.1.19" evmlib = "0.4.7" xor_name = "5" +libp2p = "0.56" # For PeerId in payment proofs +multihash = "0.19" # For identity multihash in PeerId construction # Caching - LRU cache for verified XorNames lru = "0.16.3" @@ -100,6 +102,7 @@ postcard = { version = "1.1.3", features = ["use-std"] } tokio-test = "0.4" proptest = "1" alloy = { version = "1", features = ["node-bindings"] } +serial_test = "3" # E2E test infrastructure [[test]] diff --git a/README.md b/README.md index 79e08fe4..9afaa85c 100644 --- a/README.md +++ b/README.md @@ -983,7 +983,7 @@ RUST_LOG=saorsa_node=debug,saorsa_core=debug ./saorsa-node |---------|-------------|------------| | **saorsa-core** | Core networking and security library | [github.com/dirvine/saorsa-core](https://github.com/dirvine/saorsa-core) | | **saorsa-pqc** | Post-quantum cryptography primitives | [github.com/dirvine/saorsa-pqc](https://github.com/dirvine/saorsa-pqc) | -| **saorsa-client** | Client library for applications | [github.com/dirvine/saorsa-client](https://github.com/dirvine/saorsa-client) | +| **saorsa-cli** | Unified CLI for file and chunk operations with EVM payments | Built into saorsa-node | --- diff --git a/config/production.toml b/config/production.toml new file mode 100644 index 00000000..67379b73 --- /dev/null +++ b/config/production.toml @@ -0,0 +1,70 @@ +# Production Configuration for saorsa-node +# +# This file matches the NodeConfig struct schema. +# See src/config.rs for all available fields and defaults. + +# Root directory for node data +root_dir = "/var/lib/saorsa-node" + +# Listening port (10000-10999 for production) +port = 10000 + +# IP version: "ipv4", "ipv6", or "dual" +ip_version = "dual" + +# Bootstrap peer addresses (socket addrs) +bootstrap = [] + +# Network mode: "production", "testnet", or "development" +network_mode = "production" + +# Log level: "trace", "debug", "info", "warn", "error" +log_level = "info" + +# Maximum application-layer message size in bytes (default: 5 MiB) +# max_message_size = 5242880 + +# --- Payment verification --- +# Production nodes require payment by default. +[payment] +# DO NOT set enabled = false in production +enabled = true + +# Cache capacity for verified content addresses +cache_capacity = 100000 + +# REQUIRED: Set to your Arbitrum wallet address before running in production. +# rewards_address = "0xYourAddressHere" + +# EVM network: "arbitrum-one" or "arbitrum-sepolia" +evm_network = "arbitrum-one" + +# Prometheus metrics port (0 to disable) +metrics_port = 9100 + +# --- Storage --- +[storage] +enabled = true + +# Maximum number of chunks to store (0 = unlimited) +max_chunks = 0 + +# Verify content hash on read +verify_on_read = true + +# Maximum LMDB database size in GiB (0 = default 32 GiB) +db_size_gb = 0 + +# --- Upgrade --- +[upgrade] +enabled = false +channel = "stable" +check_interval_hours = 1 +github_repo = "dirvine/saorsa-node" +staged_rollout_hours = 1 + +# --- Bootstrap cache --- +[bootstrap_cache] +enabled = true +max_contacts = 10000 +stale_threshold_days = 7 diff --git a/docs/DESIGN.md b/docs/DESIGN.md index 5f390e22..f6497a27 100644 --- a/docs/DESIGN.md +++ b/docs/DESIGN.md @@ -14,7 +14,7 @@ Build a **pure quantum-proof network node** (`saorsa-node`) that: **Clean separation of concerns:** - **saorsa-node** = Pure quantum-proof node (no legacy baggage) -- **saorsa-client** = Bridge layer (reads old network, writes new network) +- **saorsa-cli** = Client layer (file/chunk operations with EVM payments) - **Auto-migration** = Nodes discover and upload local ant-node data - **Dual IP DHT** = IPv4 and IPv6 close groups for resilience @@ -34,8 +34,8 @@ This avoids the complexity of bridge nodes by pushing migration logic to: ├─────────────────────────────────────────────────────────────────┤ │ │ │ ┌─────────────┐ ┌─────────────────┐ │ -│ │ ant-network │ ◄─────► │ saorsa-client │ │ -│ │ (classical) │ read │ (bridge layer) │ │ +│ │ ant-network │ ◄─────► │ saorsa-cli │ │ +│ │ (classical) │ read │ (client layer) │ │ │ └─────────────┘ └────────┬────────┘ │ │ │ write │ │ ▼ │ @@ -451,7 +451,7 @@ pub struct NodeLifecycle { ### 1. Node Architecture: Pure Quantum-Proof (No Legacy) - **No libp2p** - saorsa-node is clean, uses only ant-quic + saorsa-core -- **Client is the bridge** - saorsa-client handles reading from ant-network +- **Client is the bridge** - saorsa-cli handles reading from ant-network - **Node auto-migrates** - scans local ant-node data and uploads to network - **Rationale**: Simpler node, cleaner security model, easier maintenance diff --git a/docs/infrastructure/INFRASTRUCTURE.md b/docs/infrastructure/INFRASTRUCTURE.md index 369fc775..2d064ba7 100644 --- a/docs/infrastructure/INFRASTRUCTURE.md +++ b/docs/infrastructure/INFRASTRUCTURE.md @@ -218,6 +218,18 @@ cd /opt/communitas ./communitas-headless --listen 0.0.0.0:11000 --bootstrap ``` +## Production Configuration + +Before deploying, create `/etc/saorsa/production.toml` based on the template in `config/production.toml`: + +```bash +sudo mkdir -p /etc/saorsa +sudo cp config/production.toml /etc/saorsa/production.toml +sudo nano /etc/saorsa/production.toml # Set your rewards_address +``` + +**CRITICAL**: Ensure `payment.enabled = true` in the config file. + ## Systemd Service Templates ### ant-quic Bootstrap Service @@ -248,7 +260,8 @@ After=network.target [Service] Type=simple User=root -ExecStart=/opt/saorsa-node/saorsa-node --listen 0.0.0.0:10000 --bootstrap +ExecStart=/opt/saorsa-node/saorsa-node --config /etc/saorsa/production.toml --listen 0.0.0.0:10000 --bootstrap +# CRITICAL: DO NOT add --disable-payment-verification flag in production Restart=always RestartSec=10 diff --git a/scripts/test_e2e.sh b/scripts/test_e2e.sh new file mode 100755 index 00000000..627b668e --- /dev/null +++ b/scripts/test_e2e.sh @@ -0,0 +1,409 @@ +#!/usr/bin/env bash +# +# End-to-end integration test for saorsa-node file upload/download with EVM payments. +# +# This script: +# 1. Builds release binaries +# 2. Starts a devnet with EVM payment enforcement (Anvil + nodes) +# 3. Uploads each file in ./ugly_files/ with payment +# 4. Verifies on-chain payment via Anvil RPC +# 5. Downloads and verifies file integrity (SHA256 checksum) +# 6. Tests client-side payment rejection (CLI rejects without SECRET_KEY) +# 7. Tests server-side payment rejection (node rejects unpaid PUT) +# 8. Stops the devnet and reports results +# +# Exit 0 if ALL tests pass, non-zero otherwise. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" +UGLY_FILES_DIR="${PROJECT_DIR}/ugly_files" +TEST_RUN_ID="$$_$(date +%s)" +MANIFEST_FILE="/tmp/saorsa_e2e_manifest_${TEST_RUN_ID}.json" +DOWNLOAD_DIR="/tmp/saorsa_e2e_download_${TEST_RUN_ID}" +LOG_FILE="/tmp/saorsa_e2e_devnet_${TEST_RUN_ID}.log" +CLI_LOG="/tmp/saorsa_e2e_cli_${TEST_RUN_ID}.log" +CLI_STDOUT="/tmp/saorsa_e2e_cli_stdout_${TEST_RUN_ID}.txt" + +DEVNET_PID="" +PASS_COUNT=0 +FAIL_COUNT=0 +TOTAL_COUNT=0 + +cleanup() { + echo "" + echo "=== Cleaning up ===" + if [ -n "${DEVNET_PID}" ] && kill -0 "${DEVNET_PID}" 2>/dev/null; then + echo "Stopping devnet (PID ${DEVNET_PID})..." + kill "${DEVNET_PID}" 2>/dev/null || true + wait "${DEVNET_PID}" 2>/dev/null || true + fi + # Kill any lingering child processes + pkill -P $$ 2>/dev/null || true + echo "Cleanup complete." +} + +trap cleanup EXIT + +pass() { + local test_name="$1" + PASS_COUNT=$((PASS_COUNT + 1)) + TOTAL_COUNT=$((TOTAL_COUNT + 1)) + echo " PASS: ${test_name}" +} + +fail() { + local test_name="$1" + local reason="${2:-}" + FAIL_COUNT=$((FAIL_COUNT + 1)) + TOTAL_COUNT=$((TOTAL_COUNT + 1)) + echo " FAIL: ${test_name}" + if [ -n "${reason}" ]; then + echo " Reason: ${reason}" + fi +} + +# Strip ANSI escape codes from stdin +strip_ansi() { + sed $'s/\x1b\\[[0-9;]*m//g' +} + +# Parse a KEY=VALUE from a file, stripping ANSI codes +parse_field() { + local file="$1" + local key="$2" + grep "^${key}=" "${file}" 2>/dev/null | sed $'s/\x1b\\[[0-9;]*m//g' | head -1 | cut -d= -f2 +} + +echo "==============================================" +echo " saorsa-node E2E Integration Test" +echo "==============================================" +echo "" + +# Step 1: Build release binaries +echo "=== Step 1: Building release binaries ===" +cd "${PROJECT_DIR}" +cargo build --release 2>&1 | tail -3 +echo "Build complete." +echo "" + +SAORSA_DEVNET="${PROJECT_DIR}/target/release/saorsa-devnet" +SAORSA_CLI="${PROJECT_DIR}/target/release/saorsa-cli" + +if [ ! -f "${SAORSA_DEVNET}" ]; then + echo "ERROR: saorsa-devnet binary not found at ${SAORSA_DEVNET}" + exit 1 +fi +if [ ! -f "${SAORSA_CLI}" ]; then + echo "ERROR: saorsa-cli binary not found at ${SAORSA_CLI}" + exit 1 +fi + +# Step 2: Start devnet with EVM +DEVNET_NODES="${SAORSA_TEST_DEVNET_NODES:-5}" +BOOTSTRAP_COUNT="${SAORSA_TEST_BOOTSTRAP_COUNT:-2}" +echo "=== Step 2: Starting devnet with EVM (${DEVNET_NODES} nodes, ${BOOTSTRAP_COUNT} bootstrap) ===" +mkdir -p "${DOWNLOAD_DIR}" + +RUST_LOG=warn "${SAORSA_DEVNET}" \ + --nodes "${DEVNET_NODES}" \ + --bootstrap-count "${BOOTSTRAP_COUNT}" \ + --enable-evm \ + --manifest "${MANIFEST_FILE}" \ + --stabilization-timeout-secs 120 \ + > "${LOG_FILE}" 2>&1 & +DEVNET_PID=$! + +echo "Devnet starting (PID ${DEVNET_PID}), waiting for manifest..." + +# Wait for manifest file to appear (max 180 seconds) +WAIT_COUNT=0 +MAX_WAIT=180 +while [ ! -f "${MANIFEST_FILE}" ] && [ ${WAIT_COUNT} -lt ${MAX_WAIT} ]; do + if ! kill -0 "${DEVNET_PID}" 2>/dev/null; then + echo "ERROR: Devnet process died before producing manifest." + echo "Log output:" + tail -50 "${LOG_FILE}" 2>/dev/null || true + exit 1 + fi + sleep 1 + WAIT_COUNT=$((WAIT_COUNT + 1)) +done + +if [ ! -f "${MANIFEST_FILE}" ]; then + echo "ERROR: Manifest not created after ${MAX_WAIT} seconds." + echo "Log tail:" + tail -30 "${LOG_FILE}" 2>/dev/null || true + exit 1 +fi + +echo "Manifest created at ${MANIFEST_FILE}" + +# Extract EVM info from manifest +WALLET_KEY=$(python3 -c "import json; m=json.load(open('${MANIFEST_FILE}')); print(m['evm']['wallet_private_key'])" 2>/dev/null || true) +RPC_URL=$(python3 -c "import json; m=json.load(open('${MANIFEST_FILE}')); print(m['evm']['rpc_url'])" 2>/dev/null || true) + +if [ -z "${WALLET_KEY}" ] || [ -z "${RPC_URL}" ]; then + echo "ERROR: Could not extract EVM info from manifest." + cat "${MANIFEST_FILE}" + exit 1 +fi + +echo "Wallet key: ${WALLET_KEY:0:10}..." +echo "Anvil RPC: ${RPC_URL}" +echo "" + +# Verify Anvil is responding +BLOCK_RESPONSE=$(curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "${RPC_URL}" 2>/dev/null || echo "FAILED") +if echo "${BLOCK_RESPONSE}" | grep -q "result"; then + echo "Anvil RPC confirmed working" +else + echo "ERROR: Anvil RPC not responding at ${RPC_URL}" + echo "Response: ${BLOCK_RESPONSE}" + exit 1 +fi + +# Wait for network to stabilize +STABILIZE_SECS="${SAORSA_TEST_STABILIZE_SECS:-15}" +echo "Waiting ${STABILIZE_SECS} seconds for network stabilization..." +sleep "${STABILIZE_SECS}" +echo "" + +# Accumulate TX hashes from all uploads for on-chain verification in Step 5 +ALL_TX_HASHES="" + +# Step 3 & 4: Upload and download each file in ugly_files/ +echo "=== Step 3: File upload/download tests ===" + +# Max file size for E2E tests (default 1MB; override with SAORSA_TEST_MAX_FILE_SIZE) +MAX_FILE_SIZE="${SAORSA_TEST_MAX_FILE_SIZE:-1048576}" + +# Find test files (skip directories, .DS_Store, and files larger than MAX_FILE_SIZE) +TEST_FILES=() + +if [ -d "${UGLY_FILES_DIR}" ]; then + while IFS= read -r -d '' file; do + fsize=$(wc -c < "${file}" | tr -d ' ') + if [ "${fsize}" -le "${MAX_FILE_SIZE}" ]; then + TEST_FILES+=("${file}") + else + echo "Skipping ${file} (${fsize} bytes > ${MAX_FILE_SIZE} max)" + fi + done < <(find "${UGLY_FILES_DIR}" -maxdepth 1 -type f ! -name '.DS_Store' -print0 2>/dev/null | sort -z) +fi + +# If no ugly_files found, create a synthetic test file +if [ ${#TEST_FILES[@]} -eq 0 ]; then + echo "No test files in ${UGLY_FILES_DIR}, creating synthetic test file..." + SYNTHETIC_FILE="/tmp/saorsa_e2e_synthetic_${TEST_RUN_ID}.txt" + echo "saorsa E2E test payload: $(date -u +%Y-%m-%dT%H:%M:%SZ) run=${TEST_RUN_ID}" > "${SYNTHETIC_FILE}" + TEST_FILES+=("${SYNTHETIC_FILE}") +fi + +for filepath in "${TEST_FILES[@]}"; do + filename=$(basename "${filepath}") + filesize=$(wc -c < "${filepath}" | tr -d ' ') + echo "" + echo "--- Testing file: ${filename} (${filesize} bytes) ---" + + # Upload with payment - write stdout to file to avoid terminal ANSI leakage + echo " Uploading..." + SECRET_KEY="${WALLET_KEY}" "${SAORSA_CLI}" \ + --devnet-manifest "${MANIFEST_FILE}" \ + --evm-network local \ + --timeout-secs 120 \ + --log-level error \ + file upload "${filepath}" \ + > "${CLI_STDOUT}" 2>"${CLI_LOG}" || { + fail "${filename} upload" "Upload command failed (exit $?)" + tail -10 "${CLI_LOG}" 2>/dev/null || true + continue + } + + # Parse upload output from file (avoids terminal ANSI contamination) + FILE_ADDRESS=$(parse_field "${CLI_STDOUT}" "FILE_ADDRESS") + CHUNKS=$(parse_field "${CLI_STDOUT}" "CHUNKS") + PAYMENTS=$(parse_field "${CLI_STDOUT}" "PAYMENTS") + + if [ -z "${FILE_ADDRESS}" ]; then + fail "${filename} upload" "No FILE_ADDRESS in output" + echo " Raw output:" + cat "${CLI_STDOUT}" 2>/dev/null || true + continue + fi + + echo " Address: ${FILE_ADDRESS}" + echo " Chunks: ${CHUNKS}, Payments: ${PAYMENTS}" + + # Parse TX_HASHES from upload output + TX_HASHES=$(parse_field "${CLI_STDOUT}" "TX_HASHES") + + # Verify PAYMENTS is non-zero + if [ -n "${PAYMENTS}" ] && [ "${PAYMENTS}" -gt 0 ] 2>/dev/null; then + pass "${filename} upload (${PAYMENTS} payments)" + else + fail "${filename} upload" "PAYMENTS should be > 0, got: ${PAYMENTS}" + continue + fi + + # Collect TX hashes for on-chain verification in Step 5 + if [ -n "${TX_HASHES}" ]; then + ALL_TX_HASHES="${ALL_TX_HASHES:+${ALL_TX_HASHES},}${TX_HASHES}" + fi + + # Download and verify + DOWNLOAD_PATH="${DOWNLOAD_DIR}/${filename}" + echo " Downloading..." + SECRET_KEY="${WALLET_KEY}" "${SAORSA_CLI}" \ + --devnet-manifest "${MANIFEST_FILE}" \ + --evm-network local \ + --timeout-secs 120 \ + --log-level error \ + file download "${FILE_ADDRESS}" --output "${DOWNLOAD_PATH}" \ + > "${CLI_STDOUT}" 2>"${CLI_LOG}" || { + fail "${filename} download" "Download command failed (exit $?)" + tail -10 "${CLI_LOG}" 2>/dev/null || true + continue + } + + if [ ! -f "${DOWNLOAD_PATH}" ]; then + fail "${filename} download" "Downloaded file not found at ${DOWNLOAD_PATH}" + continue + fi + + # Compare checksums + ORIG_HASH=$(shasum -a 256 "${filepath}" | cut -d' ' -f1) + DOWN_HASH=$(shasum -a 256 "${DOWNLOAD_PATH}" | cut -d' ' -f1) + + if [ "${ORIG_HASH}" = "${DOWN_HASH}" ]; then + pass "${filename} integrity (SHA256 match)" + else + fail "${filename} integrity" "SHA256 mismatch: original=${ORIG_HASH}, downloaded=${DOWN_HASH}" + fi +done + +echo "" + +# Step 5: On-chain payment verification (verify actual TX hashes from uploads) +echo "=== Step 5: On-chain payment verification ===" + +if [ -z "${ALL_TX_HASHES}" ]; then + fail "On-chain payment verification" "No TX hashes collected from uploads" +else + # Verify each TX hash exists on Anvil via eth_getTransactionByHash + VERIFIED_TX=0 + FAILED_TX=0 + IFS=',' read -ra TX_ARRAY <<< "${ALL_TX_HASHES}" + TOTAL_TX=${#TX_ARRAY[@]} + echo " Verifying ${TOTAL_TX} transaction hash(es) on Anvil..." + + for tx_hash in "${TX_ARRAY[@]}"; do + # Strip whitespace + tx_hash=$(echo "${tx_hash}" | tr -d ' ') + if [ -z "${tx_hash}" ]; then + continue + fi + + TX_RESPONSE=$(curl -s -X POST -H "Content-Type: application/json" \ + --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionByHash\",\"params\":[\"${tx_hash}\"],\"id\":1}" \ + "${RPC_URL}" 2>/dev/null || echo "FAILED") + + # Check that result is not null (tx exists on chain) + if echo "${TX_RESPONSE}" | python3 -c "import sys,json; r=json.load(sys.stdin); assert r.get('result') is not None" 2>/dev/null; then + VERIFIED_TX=$((VERIFIED_TX + 1)) + else + FAILED_TX=$((FAILED_TX + 1)) + echo " TX not found on chain: ${tx_hash}" + fi + done + + if [ "${VERIFIED_TX}" -gt 0 ] && [ "${FAILED_TX}" -eq 0 ]; then + pass "On-chain payment verification (${VERIFIED_TX}/${TOTAL_TX} TXs verified on Anvil)" + elif [ "${VERIFIED_TX}" -gt 0 ]; then + fail "On-chain payment verification" "${FAILED_TX}/${TOTAL_TX} TXs not found on Anvil" + else + fail "On-chain payment verification" "No TXs could be verified on Anvil" + fi +fi + +echo "" + +# Step 6: Test client-side payment rejection (upload without SECRET_KEY) +echo "=== Step 6: Client-side payment rejection test ===" + +REJECTION_FILE="" +for filepath in "${TEST_FILES[@]}"; do + filesize=$(wc -c < "${filepath}" | tr -d ' ') + if [ "${filesize}" -lt 1000000 ]; then + REJECTION_FILE="${filepath}" + break + fi +done + +if [ -n "${REJECTION_FILE}" ]; then + echo " Attempting upload WITHOUT SECRET_KEY (should fail at client)..." + REJECTION_OUTPUT=$("${SAORSA_CLI}" \ + --devnet-manifest "${MANIFEST_FILE}" \ + --evm-network local \ + --timeout-secs 10 \ + --log-level error \ + file upload "${REJECTION_FILE}" 2>&1 || true) + + # Strip ANSI before matching (color-eyre embeds ANSI codes in error output) + CLEAN_REJECTION=$(echo "${REJECTION_OUTPUT}" | strip_ansi) + + if echo "${CLEAN_REJECTION}" | grep -qi "SECRET_KEY"; then + pass "Client-side payment rejection (SECRET_KEY required)" + else + fail "Client-side payment rejection" "Expected SECRET_KEY error from client" + echo " Output: ${CLEAN_REJECTION}" + fi +else + echo " WARNING: No test files available for rejection test" +fi + +echo "" + +# Step 7: Test chunk put rejection without wallet +echo "=== Step 7: Chunk put rejection without wallet ===" +echo " Attempting chunk put WITHOUT SECRET_KEY (should fail at client)..." +echo "test data for rejection e2e" > /tmp/saorsa_rejection_test_${TEST_RUN_ID}.txt +CHUNK_REJECT_OUTPUT=$("${SAORSA_CLI}" \ + --devnet-manifest "${MANIFEST_FILE}" \ + --evm-network local \ + --timeout-secs 10 \ + --log-level error \ + chunk put /tmp/saorsa_rejection_test_${TEST_RUN_ID}.txt 2>&1 || true) + +CLEAN_CHUNK_OUTPUT=$(echo "${CHUNK_REJECT_OUTPUT}" | strip_ansi) + +if echo "${CLEAN_CHUNK_OUTPUT}" | grep -qi "SECRET_KEY\|wallet\|payment"; then + pass "Chunk put rejection without wallet" +else + fail "Chunk put rejection" "Expected wallet/payment error" + echo " Output: $(echo "${CLEAN_CHUNK_OUTPUT}" | tail -5)" +fi + +echo "" + +# Step 8: Summary +echo "==============================================" +echo " E2E Test Results" +echo "==============================================" +echo " Total: ${TOTAL_COUNT}" +echo " Passed: ${PASS_COUNT}" +echo " Failed: ${FAIL_COUNT}" +echo "==============================================" + +if [ "${FAIL_COUNT}" -gt 0 ]; then + echo "" + echo "RESULT: FAILED (${FAIL_COUNT} failures)" + exit 1 +else + echo "" + echo "RESULT: ALL TESTS PASSED" + exit 0 +fi diff --git a/src/bin/saorsa-cli/cli.rs b/src/bin/saorsa-cli/cli.rs new file mode 100644 index 00000000..18cd926c --- /dev/null +++ b/src/bin/saorsa-cli/cli.rs @@ -0,0 +1,206 @@ +//! CLI definition for saorsa-cli. + +use clap::{Parser, Subcommand}; +use std::net::SocketAddr; +use std::path::PathBuf; + +/// Saorsa CLI for file upload and download with EVM payments. +#[derive(Parser, Debug)] +#[command(name = "saorsa-cli")] +#[command(author, version, about, long_about = None)] +pub struct Cli { + /// Bootstrap peer addresses. + #[arg(long, short)] + pub bootstrap: Vec, + + /// Path to devnet manifest JSON (output of saorsa-devnet). + #[arg(long)] + pub devnet_manifest: Option, + + /// Timeout for network operations (seconds). + #[arg(long, default_value_t = 60)] + pub timeout_secs: u64, + + /// Log level. + #[arg(long, default_value = "info")] + pub log_level: String, + + /// EVM network for payment processing. + #[arg(long, default_value = "local")] + pub evm_network: String, + + /// Command to run. + #[command(subcommand)] + pub command: CliCommand, +} + +/// CLI commands. +#[derive(Subcommand, Debug)] +pub enum CliCommand { + /// File operations (multi-chunk upload/download with EVM payment). + File { + #[command(subcommand)] + action: FileAction, + }, + /// Single-chunk operations (low-level put/get without file splitting). + Chunk { + #[command(subcommand)] + action: ChunkAction, + }, +} + +/// Chunk subcommands. +#[derive(Subcommand, Debug)] +pub enum ChunkAction { + /// Store a single chunk. Reads from FILE or stdin. + Put { + /// Input file (reads from stdin if omitted). + file: Option, + }, + /// Retrieve a single chunk. Writes to FILE or stdout. + Get { + /// Hex-encoded chunk address (64 hex chars). + address: String, + /// Output file (writes to stdout if omitted). + #[arg(long, short)] + output: Option, + }, +} + +/// File subcommands. +#[derive(Subcommand, Debug)] +pub enum FileAction { + /// Upload a file to the network with EVM payment. + Upload { + /// Path to the file to upload. + path: PathBuf, + }, + /// Download a file from the network. + Download { + /// Hex-encoded manifest address (returned by upload). + address: String, + /// Output file path (defaults to stdout). + #[arg(long, short)] + output: Option, + }, +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used, clippy::panic)] +mod tests { + use super::*; + + #[test] + fn test_parse_upload_command() { + let cli = Cli::try_parse_from([ + "saorsa-cli", + "--bootstrap", + "127.0.0.1:10000", + "file", + "upload", + "/tmp/test.txt", + ]) + .unwrap(); + + assert!(!cli.bootstrap.is_empty()); + assert!(matches!( + cli.command, + CliCommand::File { + action: FileAction::Upload { .. } + } + )); + } + + #[test] + fn test_parse_download_command() { + let cli = Cli::try_parse_from([ + "saorsa-cli", + "--devnet-manifest", + "/tmp/manifest.json", + "file", + "download", + "abcd1234", + "--output", + "/tmp/out.bin", + ]) + .unwrap(); + + assert!(cli.devnet_manifest.is_some()); + } + + #[test] + fn test_secret_key_from_env() { + // SECRET_KEY is read at runtime, not parsed by clap + let cli = Cli::try_parse_from([ + "saorsa-cli", + "--bootstrap", + "127.0.0.1:10000", + "file", + "upload", + "/tmp/test.txt", + ]) + .unwrap(); + + assert_eq!(cli.evm_network, "local"); + } + + #[test] + fn test_parse_chunk_put() { + let cli = Cli::try_parse_from([ + "saorsa-cli", + "--bootstrap", + "127.0.0.1:10000", + "chunk", + "put", + "/tmp/test.txt", + ]) + .unwrap(); + assert!(matches!( + cli.command, + CliCommand::Chunk { + action: ChunkAction::Put { .. } + } + )); + } + + #[test] + fn test_parse_chunk_get() { + let cli = Cli::try_parse_from([ + "saorsa-cli", + "--bootstrap", + "127.0.0.1:10000", + "chunk", + "get", + "abcd1234", + "--output", + "/tmp/out.bin", + ]) + .unwrap(); + assert!(matches!( + cli.command, + CliCommand::Chunk { + action: ChunkAction::Get { .. } + } + )); + } + + #[test] + fn test_parse_chunk_put_stdin() { + let cli = Cli::try_parse_from([ + "saorsa-cli", + "--bootstrap", + "127.0.0.1:10000", + "chunk", + "put", + ]) + .unwrap(); + if let CliCommand::Chunk { + action: ChunkAction::Put { file }, + } = cli.command + { + assert!(file.is_none()); + } else { + panic!("Expected Chunk Put"); + } + } +} diff --git a/src/bin/saorsa-cli/main.rs b/src/bin/saorsa-cli/main.rs new file mode 100644 index 00000000..5d78b1bf --- /dev/null +++ b/src/bin/saorsa-cli/main.rs @@ -0,0 +1,364 @@ +//! saorsa-cli entry point — file upload/download with EVM payments. + +mod cli; + +use bytes::Bytes; +use clap::Parser; +use cli::{ChunkAction, Cli, CliCommand, FileAction}; +use evmlib::wallet::Wallet; +use evmlib::Network as EvmNetwork; +use saorsa_core::P2PNode; +use saorsa_node::ant_protocol::MAX_WIRE_MESSAGE_SIZE; +use saorsa_node::client::{ + create_manifest, deserialize_manifest, reassemble_file, serialize_manifest, split_file, + QuantumClient, QuantumConfig, XorName, +}; +use saorsa_node::devnet::DevnetManifest; +use saorsa_node::error::Error; +use std::io::Read as _; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use tracing::info; +use tracing_subscriber::{fmt, prelude::*, EnvFilter}; + +/// Length of an `XorName` address in bytes. +const XORNAME_BYTE_LEN: usize = 32; + +#[tokio::main] +async fn main() -> color_eyre::Result<()> { + color_eyre::install()?; + + let cli = Cli::parse(); + + let filter = + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(&cli.log_level)); + + tracing_subscriber::registry() + .with(fmt::layer().with_writer(std::io::stderr)) + .with(filter) + .init(); + + info!("saorsa-cli v{}", env!("CARGO_PKG_VERSION")); + + // Resolve private key from SECRET_KEY env var (check early, before network bootstrap) + let private_key = std::env::var("SECRET_KEY").ok(); + + // Fail fast if storage operations require SECRET_KEY but it's not set + let needs_wallet = matches!( + cli.command, + CliCommand::File { + action: FileAction::Upload { .. } + } | CliCommand::Chunk { + action: ChunkAction::Put { .. } + } + ); + if needs_wallet && private_key.is_none() { + return Err(color_eyre::eyre::eyre!( + "SECRET_KEY environment variable required for storage operations (payment)" + )); + } + + let (bootstrap, manifest) = resolve_bootstrap(&cli)?; + let node = create_client_node(bootstrap).await?; + + // Build client with timeout + let mut client = QuantumClient::new(QuantumConfig { + timeout_secs: cli.timeout_secs, + replica_count: 1, + encrypt_data: false, + }) + .with_node(node); + + if let Some(ref key) = private_key { + let network = resolve_evm_network(&cli.evm_network, manifest.as_ref())?; + let wallet = Wallet::new_from_private_key(network, key) + .map_err(|e| color_eyre::eyre::eyre!("Failed to create wallet: {e}"))?; + info!("Wallet configured for EVM payments"); + client = client.with_wallet(wallet); + } + + match cli.command { + CliCommand::File { action } => match action { + FileAction::Upload { path } => { + handle_upload(&client, &path).await?; + } + FileAction::Download { address, output } => { + handle_download(&client, &address, output.as_deref()).await?; + } + }, + CliCommand::Chunk { action } => match action { + ChunkAction::Put { file } => { + handle_chunk_put(&client, file).await?; + } + ChunkAction::Get { address, output } => { + handle_chunk_get(&client, &address, output).await?; + } + }, + } + + Ok(()) +} + +async fn handle_upload(client: &QuantumClient, path: &Path) -> color_eyre::Result<()> { + let filename = path.file_name().and_then(|n| n.to_str()).map(String::from); + let file_content = std::fs::read(path)?; + let file_size = file_content.len(); + + info!("Uploading file: {} ({file_size} bytes)", path.display()); + + // Split file into chunks + let chunks = split_file(&file_content); + let chunk_count = chunks.len(); + info!("File split into {chunk_count} chunk(s)"); + + // Upload each chunk with payment, collecting tx hashes + let mut chunk_addresses: Vec<[u8; 32]> = Vec::with_capacity(chunk_count); + let mut all_tx_hashes: Vec = Vec::new(); + + for (i, chunk) in chunks.into_iter().enumerate() { + let chunk_num = i + 1; + info!( + "Uploading chunk {chunk_num}/{chunk_count} ({} bytes)", + chunk.len() + ); + let (address, tx_hashes) = client.put_chunk_with_payment(chunk).await?; + info!( + "Chunk {chunk_num}/{chunk_count} stored at {}", + hex::encode(address) + ); + chunk_addresses.push(address); + for tx in &tx_hashes { + all_tx_hashes.push(format!("{tx:?}")); + } + } + + // Create and upload manifest (also paid) + let total_size = + u64::try_from(file_size).map_err(|e| color_eyre::eyre::eyre!("File too large: {e}"))?; + let manifest = create_manifest(filename, total_size, chunk_addresses); + let manifest_bytes = serialize_manifest(&manifest)?; + let (manifest_address, manifest_tx_hashes) = + client.put_chunk_with_payment(manifest_bytes).await?; + for tx in &manifest_tx_hashes { + all_tx_hashes.push(format!("{tx:?}")); + } + + let manifest_hex = hex::encode(manifest_address); + let total_tx_count = all_tx_hashes.len(); + let tx_hashes_str = all_tx_hashes.join(","); + + // Print results to stdout + println!("FILE_ADDRESS={manifest_hex}"); + println!("CHUNKS={chunk_count}"); + println!("TOTAL_SIZE={file_size}"); + println!("PAYMENTS={total_tx_count}"); + println!("TX_HASHES={tx_hashes_str}"); + + info!( + "Upload complete: address={manifest_hex}, chunks={chunk_count}, payments={total_tx_count}" + ); + + Ok(()) +} + +async fn handle_download( + client: &QuantumClient, + address: &str, + output: Option<&Path>, +) -> color_eyre::Result<()> { + let manifest_address = parse_address(address)?; + info!("Downloading file from manifest {address}"); + + // Fetch manifest chunk + let manifest_chunk = client + .get_chunk(&manifest_address) + .await? + .ok_or_else(|| color_eyre::eyre::eyre!("Manifest chunk not found at {address}"))?; + + let manifest = deserialize_manifest(&manifest_chunk.content)?; + let chunk_count = manifest.chunk_addresses.len(); + info!( + "Manifest loaded: {} chunk(s), {} bytes total", + chunk_count, manifest.total_size + ); + + // Fetch all data chunks in order + let mut chunks = Vec::with_capacity(chunk_count); + for (i, chunk_addr) in manifest.chunk_addresses.iter().enumerate() { + let chunk_num = i + 1; + info!( + "Downloading chunk {chunk_num}/{chunk_count} ({})", + hex::encode(chunk_addr) + ); + let chunk = client.get_chunk(chunk_addr).await?.ok_or_else(|| { + color_eyre::eyre::eyre!("Data chunk not found: {}", hex::encode(chunk_addr)) + })?; + chunks.push(chunk.content); + } + + // Reassemble file + let file_content = reassemble_file(&manifest, &chunks)?; + info!("File reassembled: {} bytes", file_content.len()); + + // Write output + if let Some(path) = output { + std::fs::write(path, &file_content)?; + info!("File saved to {}", path.display()); + println!( + "Downloaded {} bytes to {}", + file_content.len(), + path.display() + ); + } else { + use std::io::Write; + std::io::stdout().write_all(&file_content)?; + } + + Ok(()) +} + +async fn handle_chunk_put(client: &QuantumClient, file: Option) -> color_eyre::Result<()> { + let content = read_input(file)?; + info!("Storing single chunk ({} bytes)", content.len()); + + let (address, tx_hashes) = client.put_chunk_with_payment(Bytes::from(content)).await?; + let hex_addr = hex::encode(address); + info!("Chunk stored at {hex_addr}"); + + println!("{hex_addr}"); + let tx_strs: Vec = tx_hashes.iter().map(|tx| format!("{tx:?}")).collect(); + println!("TX_HASHES={}", tx_strs.join(",")); + + Ok(()) +} + +async fn handle_chunk_get( + client: &QuantumClient, + address: &str, + output: Option, +) -> color_eyre::Result<()> { + let addr = parse_address(address)?; + info!("Retrieving chunk {address}"); + + let result = client.get_chunk(&addr).await?; + match result { + Some(chunk) => { + if let Some(path) = output { + std::fs::write(&path, &chunk.content)?; + info!("Chunk saved to {}", path.display()); + } else { + use std::io::Write; + std::io::stdout().write_all(&chunk.content)?; + } + } + None => { + return Err(color_eyre::eyre::eyre!( + "Chunk not found for address {address}" + )); + } + } + + Ok(()) +} + +fn read_input(file: Option) -> color_eyre::Result> { + if let Some(path) = file { + return Ok(std::fs::read(path)?); + } + let mut buf = Vec::new(); + std::io::stdin().read_to_end(&mut buf)?; + Ok(buf) +} + +fn resolve_evm_network( + evm_network: &str, + manifest: Option<&DevnetManifest>, +) -> color_eyre::Result { + match evm_network { + "arbitrum-one" => Ok(EvmNetwork::ArbitrumOne), + "arbitrum-sepolia" => Ok(EvmNetwork::ArbitrumSepoliaTest), + "local" => { + if let Some(m) = manifest { + if let Some(ref evm) = m.evm { + let rpc_url: reqwest::Url = evm + .rpc_url + .parse() + .map_err(|e| color_eyre::eyre::eyre!("Invalid RPC URL: {e}"))?; + let token_addr: evmlib::common::Address = evm + .payment_token_address + .parse() + .map_err(|e| color_eyre::eyre::eyre!("Invalid token address: {e}"))?; + let payments_addr: evmlib::common::Address = evm + .data_payments_address + .parse() + .map_err(|e| color_eyre::eyre::eyre!("Invalid payments address: {e}"))?; + return Ok(EvmNetwork::Custom(evmlib::CustomNetwork { + rpc_url_http: rpc_url, + payment_token_address: token_addr, + data_payments_address: payments_addr, + merkle_payments_address: None, + })); + } + } + Err(color_eyre::eyre::eyre!( + "EVM network 'local' requires --devnet-manifest with EVM info" + )) + } + other => Err(color_eyre::eyre::eyre!( + "Unsupported EVM network: {other}. Use 'arbitrum-one', 'arbitrum-sepolia', or 'local'." + )), + } +} + +fn resolve_bootstrap( + cli: &Cli, +) -> color_eyre::Result<(Vec, Option)> { + if !cli.bootstrap.is_empty() { + return Ok((cli.bootstrap.clone(), None)); + } + + if let Some(ref manifest_path) = cli.devnet_manifest { + let data = std::fs::read_to_string(manifest_path)?; + let manifest: DevnetManifest = serde_json::from_str(&data)?; + let bootstrap = manifest.bootstrap.clone(); + return Ok((bootstrap, Some(manifest))); + } + + Err(color_eyre::eyre::eyre!( + "No bootstrap peers provided. Use --bootstrap or --devnet-manifest." + )) +} + +async fn create_client_node(bootstrap: Vec) -> Result, Error> { + let mut core_config = saorsa_core::NodeConfig::new() + .map_err(|e| Error::Config(format!("Failed to create core config: {e}")))?; + core_config.listen_addr = "0.0.0.0:0" + .parse() + .map_err(|e| Error::Config(format!("Invalid listen addr: {e}")))?; + core_config.listen_addrs = vec![core_config.listen_addr]; + core_config.enable_ipv6 = false; + core_config.bootstrap_peers = bootstrap; + core_config.max_message_size = Some(MAX_WIRE_MESSAGE_SIZE); + + let node = P2PNode::new(core_config) + .await + .map_err(|e| Error::Network(format!("Failed to create P2P node: {e}")))?; + node.start() + .await + .map_err(|e| Error::Network(format!("Failed to start P2P node: {e}")))?; + + Ok(Arc::new(node)) +} + +fn parse_address(address: &str) -> color_eyre::Result { + let bytes = hex::decode(address)?; + if bytes.len() != XORNAME_BYTE_LEN { + return Err(color_eyre::eyre::eyre!( + "Invalid address length: expected {XORNAME_BYTE_LEN} bytes, got {}", + bytes.len() + )); + } + let mut out = [0u8; XORNAME_BYTE_LEN]; + out.copy_from_slice(&bytes); + Ok(out) +} diff --git a/src/bin/saorsa-client/cli.rs b/src/bin/saorsa-client/cli.rs deleted file mode 100644 index 2c7ef9fe..00000000 --- a/src/bin/saorsa-client/cli.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! CLI definition for saorsa-client. - -use clap::{Parser, Subcommand}; -use std::net::SocketAddr; -use std::path::PathBuf; - -/// Client CLI for chunk operations. -#[derive(Parser, Debug)] -#[command(name = "saorsa-client")] -#[command(author, version, about, long_about = None)] -pub struct Cli { - /// Bootstrap peer addresses. - #[arg(long, short)] - pub bootstrap: Vec, - - /// Path to devnet manifest JSON (output of saorsa-devnet). - #[arg(long)] - pub devnet_manifest: Option, - - /// Timeout for network operations (seconds). - #[arg(long, default_value_t = 30)] - pub timeout_secs: u64, - - /// Log level for client process. - #[arg(long, default_value = "info")] - pub log_level: String, - - /// Command to run. - #[command(subcommand)] - pub command: ClientCommand, -} - -/// Client commands. -#[derive(Subcommand, Debug)] -pub enum ClientCommand { - /// Put a chunk. Reads from --file or stdin. - Put { - /// Input file (defaults to stdin if omitted). - #[arg(long)] - file: Option, - }, - /// Get a chunk. Writes to --out or stdout. - Get { - /// Hex-encoded chunk address (64 hex chars). - address: String, - /// Output file (defaults to stdout if omitted). - #[arg(long)] - out: Option, - }, -} diff --git a/src/bin/saorsa-client/main.rs b/src/bin/saorsa-client/main.rs deleted file mode 100644 index de3370ed..00000000 --- a/src/bin/saorsa-client/main.rs +++ /dev/null @@ -1,143 +0,0 @@ -//! saorsa-client CLI entry point. - -mod cli; - -use bytes::Bytes; -use clap::Parser; -use cli::{Cli, ClientCommand}; -use saorsa_core::P2PNode; -use saorsa_node::ant_protocol::MAX_WIRE_MESSAGE_SIZE; -use saorsa_node::client::{QuantumClient, QuantumConfig, XorName}; -use saorsa_node::devnet::DevnetManifest; -use saorsa_node::error::Error; -use std::io::{Read, Write}; -use std::path::PathBuf; -use std::sync::Arc; -use tracing::info; -use tracing_subscriber::{fmt, prelude::*, EnvFilter}; - -/// Length of an `XorName` address in bytes. -const XORNAME_BYTE_LEN: usize = 32; - -/// Default replica count for client chunk operations. -const DEFAULT_CLIENT_REPLICA_COUNT: u8 = 1; - -#[tokio::main] -async fn main() -> color_eyre::Result<()> { - color_eyre::install()?; - - let cli = Cli::parse(); - - let filter = - EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(&cli.log_level)); - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(filter) - .init(); - - info!("saorsa-client v{}", env!("CARGO_PKG_VERSION")); - - let bootstrap = resolve_bootstrap(&cli)?; - let node = create_client_node(bootstrap).await?; - let client = QuantumClient::new(QuantumConfig { - timeout_secs: cli.timeout_secs, - replica_count: DEFAULT_CLIENT_REPLICA_COUNT, - encrypt_data: false, - }) - .with_node(node); - - match cli.command { - ClientCommand::Put { file } => { - let content = read_input(file)?; - let address = client.put_chunk(Bytes::from(content)).await?; - println!("{}", hex::encode(address)); - } - ClientCommand::Get { address, out } => { - let addr = parse_address(&address)?; - let result = client.get_chunk(&addr).await?; - match result { - Some(chunk) => write_output(&chunk.content, out)?, - None => { - return Err(color_eyre::eyre::eyre!( - "Chunk not found for address {address}" - )); - } - } - } - } - - Ok(()) -} - -fn resolve_bootstrap(cli: &Cli) -> color_eyre::Result> { - if !cli.bootstrap.is_empty() { - return Ok(cli.bootstrap.clone()); - } - - if let Some(ref manifest_path) = cli.devnet_manifest { - let data = std::fs::read_to_string(manifest_path)?; - let manifest: DevnetManifest = serde_json::from_str(&data)?; - return Ok(manifest.bootstrap); - } - - Err(color_eyre::eyre::eyre!( - "No bootstrap peers provided. Use --bootstrap or --devnet-manifest." - )) -} - -async fn create_client_node(bootstrap: Vec) -> Result, Error> { - let mut core_config = saorsa_core::NodeConfig::new() - .map_err(|e| Error::Config(format!("Failed to create core config: {e}")))?; - core_config.listen_addr = "0.0.0.0:0" - .parse() - .map_err(|e| Error::Config(format!("Invalid listen addr: {e}")))?; - core_config.listen_addrs = vec![core_config.listen_addr]; - core_config.enable_ipv6 = false; - core_config.bootstrap_peers = bootstrap; - core_config.max_message_size = Some(MAX_WIRE_MESSAGE_SIZE); - - let node = P2PNode::new(core_config) - .await - .map_err(|e| Error::Network(format!("Failed to create P2P node: {e}")))?; - node.start() - .await - .map_err(|e| Error::Network(format!("Failed to start P2P node: {e}")))?; - - Ok(Arc::new(node)) -} - -fn parse_address(address: &str) -> color_eyre::Result { - let bytes = hex::decode(address)?; - if bytes.len() != XORNAME_BYTE_LEN { - return Err(color_eyre::eyre::eyre!( - "Invalid address length: expected {} bytes, got {}", - XORNAME_BYTE_LEN, - bytes.len() - )); - } - let mut out = [0u8; XORNAME_BYTE_LEN]; - out.copy_from_slice(&bytes); - Ok(out) -} - -fn read_input(file: Option) -> color_eyre::Result> { - if let Some(path) = file { - return Ok(std::fs::read(path)?); - } - - let mut buf = Vec::new(); - std::io::stdin().read_to_end(&mut buf)?; - Ok(buf) -} - -fn write_output(content: &Bytes, out: Option) -> color_eyre::Result<()> { - if let Some(path) = out { - std::fs::write(path, content)?; - return Ok(()); - } - - let mut stdout = std::io::stdout(); - stdout.write_all(content)?; - Ok(()) -} diff --git a/src/bin/saorsa-devnet/cli.rs b/src/bin/saorsa-devnet/cli.rs index 9012882e..4bb5ae3a 100644 --- a/src/bin/saorsa-devnet/cli.rs +++ b/src/bin/saorsa-devnet/cli.rs @@ -47,4 +47,9 @@ pub struct Cli { /// Log level for devnet process. #[arg(long, default_value = "info")] pub log_level: String, + + /// Enable EVM payment enforcement with a local Anvil blockchain. + /// Starts Anvil, deploys contracts, and enables payment verification on all nodes. + #[arg(long)] + pub enable_evm: bool, } diff --git a/src/bin/saorsa-devnet/main.rs b/src/bin/saorsa-devnet/main.rs index 46a95744..8b94f04e 100644 --- a/src/bin/saorsa-devnet/main.rs +++ b/src/bin/saorsa-devnet/main.rs @@ -4,7 +4,7 @@ mod cli; use clap::Parser; use cli::Cli; -use saorsa_node::devnet::{Devnet, DevnetConfig, DevnetManifest}; +use saorsa_node::devnet::{Devnet, DevnetConfig, DevnetEvmInfo, DevnetManifest}; use tracing::info; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; @@ -53,6 +53,46 @@ async fn main() -> color_eyre::Result<()> { config.stabilization_timeout = std::time::Duration::from_secs(timeout_secs); } + // Start Anvil and deploy contracts if EVM is enabled + let evm_info = if cli.enable_evm { + info!("Starting local Anvil blockchain for EVM payment enforcement..."); + let testnet = evmlib::testnet::Testnet::new().await; + let network = testnet.to_network(); + let wallet_key = testnet.default_wallet_private_key(); + + let (rpc_url, token_addr, payments_addr) = match &network { + evmlib::Network::Custom(custom) => ( + custom.rpc_url_http.to_string(), + format!("{:?}", custom.payment_token_address), + format!("{:?}", custom.data_payments_address), + ), + _ => { + return Err(color_eyre::eyre::eyre!( + "Anvil testnet returned non-Custom network" + )) + } + }; + + config.enable_evm = true; + config.evm_network = Some(network); + + info!("Anvil blockchain running at {rpc_url}"); + info!("Funded wallet private key: {wallet_key}"); + + // Keep testnet alive by leaking it (it will be cleaned up on process exit) + // This is necessary because AnvilInstance stops Anvil when dropped + std::mem::forget(testnet); + + Some(DevnetEvmInfo { + rpc_url, + wallet_private_key: wallet_key, + payment_token_address: token_addr, + data_payments_address: payments_addr, + }) + } else { + None + }; + let mut devnet = Devnet::new(config).await?; devnet.start().await?; @@ -62,6 +102,7 @@ async fn main() -> color_eyre::Result<()> { bootstrap: devnet.bootstrap_addrs(), data_dir: devnet.config().data_dir.clone(), created_at: chrono::Utc::now().to_rfc3339(), + evm: evm_info, }; let json = serde_json::to_string_pretty(&manifest)?; diff --git a/src/client/chunk_protocol.rs b/src/client/chunk_protocol.rs index 988bf27e..abfe08d4 100644 --- a/src/client/chunk_protocol.rs +++ b/src/client/chunk_protocol.rs @@ -4,6 +4,7 @@ //! generic function used by both [`super::QuantumClient`] and E2E test helpers. use crate::ant_protocol::{ChunkMessage, ChunkMessageBody, CHUNK_PROTOCOL_ID}; +use saorsa_core::identity::PeerId; use saorsa_core::{P2PEvent, P2PNode}; use std::time::Duration; use tokio::sync::broadcast::error::RecvError; @@ -29,7 +30,7 @@ use tracing::{debug, warn}; #[allow(clippy::too_many_arguments)] pub async fn send_and_await_chunk_response( node: &P2PNode, - target_peer: &str, + target_peer: &PeerId, message_bytes: Vec, request_id: u64, timeout: Duration, @@ -40,7 +41,7 @@ pub async fn send_and_await_chunk_response( // Subscribe before sending so we don't miss the response let mut events = node.subscribe_events(); - let target_peer_id = target_peer.to_string(); + let target_peer_id = *target_peer; node.send_message(&target_peer_id, CHUNK_PROTOCOL_ID, message_bytes) .await @@ -53,7 +54,7 @@ pub async fn send_and_await_chunk_response( match tokio::time::timeout(remaining, events.recv()).await { Ok(Ok(P2PEvent::Message { topic, - source, + source: Some(source), data, })) if topic == CHUNK_PROTOCOL_ID && source == target_peer_id => { let response = match ChunkMessage::decode(&data) { diff --git a/src/client/file_ops.rs b/src/client/file_ops.rs new file mode 100644 index 00000000..6e4dc2a1 --- /dev/null +++ b/src/client/file_ops.rs @@ -0,0 +1,189 @@ +//! File chunking and reassembly operations. +//! +//! Files are split into chunks of up to `MAX_CHUNK_SIZE` (4 MB). A manifest +//! chunk stores the ordered list of chunk addresses and the original file +//! metadata so the file can be reconstructed from the network. + +use super::data_types::compute_address; +use crate::ant_protocol::MAX_CHUNK_SIZE; +use crate::error::{Error, Result}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; + +/// A file manifest that describes how to reassemble a file from its chunks. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FileManifest { + /// Original file name (if known). + pub filename: Option, + /// Total file size in bytes. + pub total_size: u64, + /// Ordered list of chunk addresses (SHA256 hashes). + pub chunk_addresses: Vec<[u8; 32]>, +} + +/// Split file content into chunks of at most `MAX_CHUNK_SIZE`. +/// +/// Returns a list of `Bytes` chunks in order. +#[must_use] +pub fn split_file(content: &[u8]) -> Vec { + if content.is_empty() { + return vec![Bytes::from_static(b"")]; + } + + content + .chunks(MAX_CHUNK_SIZE) + .map(Bytes::copy_from_slice) + .collect() +} + +/// Create a `FileManifest` from the file content and chunk addresses. +#[must_use] +pub fn create_manifest( + filename: Option, + total_size: u64, + chunk_addresses: Vec<[u8; 32]>, +) -> FileManifest { + FileManifest { + filename, + total_size, + chunk_addresses, + } +} + +/// Serialize a manifest to bytes suitable for storing as a chunk. +/// +/// # Errors +/// +/// Returns an error if serialization fails. +pub fn serialize_manifest(manifest: &FileManifest) -> Result { + let bytes = rmp_serde::to_vec(manifest) + .map_err(|e| Error::Serialization(format!("Failed to serialize manifest: {e}")))?; + Ok(Bytes::from(bytes)) +} + +/// Deserialize a manifest from bytes. +/// +/// # Errors +/// +/// Returns an error if deserialization fails. +pub fn deserialize_manifest(bytes: &[u8]) -> Result { + rmp_serde::from_slice(bytes) + .map_err(|e| Error::Serialization(format!("Failed to deserialize manifest: {e}"))) +} + +/// Reassemble file content from ordered chunks. +/// +/// Validates that total reassembled size matches the manifest. +/// +/// # Errors +/// +/// Returns an error if the reassembled size doesn't match the manifest. +pub fn reassemble_file(manifest: &FileManifest, chunks: &[Bytes]) -> Result { + let total: usize = chunks.iter().map(Bytes::len).sum(); + let expected = usize::try_from(manifest.total_size) + .map_err(|e| Error::InvalidChunk(format!("File size too large for platform: {e}")))?; + + if total != expected { + return Err(Error::InvalidChunk(format!( + "Reassembled size {total} does not match manifest size {expected}" + ))); + } + + let mut result = Vec::with_capacity(total); + for chunk in chunks { + result.extend_from_slice(chunk); + } + Ok(Bytes::from(result)) +} + +/// Compute the address for file content (for verification). +#[must_use] +pub fn compute_chunk_address(content: &[u8]) -> [u8; 32] { + compute_address(content) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used)] +mod tests { + use super::*; + + #[test] + fn test_split_empty_file() { + let chunks = split_file(b""); + assert_eq!(chunks.len(), 1); + assert!(chunks.first().unwrap().is_empty()); + } + + #[test] + fn test_split_small_file() { + let data = b"hello world"; + let chunks = split_file(data); + assert_eq!(chunks.len(), 1); + assert_eq!(chunks.first().unwrap().as_ref(), data); + } + + #[test] + fn test_split_exact_chunk_size() { + let data = vec![0xABu8; MAX_CHUNK_SIZE]; + let chunks = split_file(&data); + assert_eq!(chunks.len(), 1); + assert_eq!(chunks.first().unwrap().len(), MAX_CHUNK_SIZE); + } + + #[test] + fn test_split_multiple_chunks() { + let data = vec![0xCDu8; MAX_CHUNK_SIZE * 2 + 100]; + let chunks = split_file(&data); + assert_eq!(chunks.len(), 3); + assert_eq!(chunks.first().unwrap().len(), MAX_CHUNK_SIZE); + assert_eq!(chunks.get(1).unwrap().len(), MAX_CHUNK_SIZE); + assert_eq!(chunks.get(2).unwrap().len(), 100); + } + + #[test] + fn test_manifest_roundtrip() { + let manifest = create_manifest( + Some("test.txt".to_string()), + 1024, + vec![[1u8; 32], [2u8; 32]], + ); + + let bytes = serialize_manifest(&manifest).unwrap(); + let deserialized = deserialize_manifest(&bytes).unwrap(); + + assert_eq!(deserialized.filename.as_deref(), Some("test.txt")); + assert_eq!(deserialized.total_size, 1024); + assert_eq!(deserialized.chunk_addresses.len(), 2); + } + + #[test] + fn test_reassemble_file() { + let original = b"hello world, this is a test file for reassembly"; + let chunks = split_file(original); + let addresses: Vec<[u8; 32]> = chunks.iter().map(|c| compute_chunk_address(c)).collect(); + + let manifest = create_manifest(None, original.len() as u64, addresses); + let reassembled = reassemble_file(&manifest, &chunks).unwrap(); + assert_eq!(reassembled.as_ref(), original); + } + + #[test] + fn test_reassemble_size_mismatch() { + let manifest = create_manifest(None, 9999, vec![[1u8; 32]]); + let chunks = vec![Bytes::from_static(b"small")]; + let result = reassemble_file(&manifest, &chunks); + assert!(result.is_err()); + } + + #[test] + fn test_split_and_reassemble_large() { + let data = vec![0xFFu8; MAX_CHUNK_SIZE * 3 + 500]; + let chunks = split_file(&data); + assert_eq!(chunks.len(), 4); + + let addresses: Vec<[u8; 32]> = chunks.iter().map(|c| compute_chunk_address(c)).collect(); + let manifest = create_manifest(None, data.len() as u64, addresses); + let reassembled = reassemble_file(&manifest, &chunks).unwrap(); + assert_eq!(reassembled.as_ref(), data.as_slice()); + } +} diff --git a/src/client/mod.rs b/src/client/mod.rs index b41bf5e1..3604c6c1 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -55,10 +55,15 @@ mod chunk_protocol; mod data_types; +pub mod file_ops; mod quantum; pub use chunk_protocol::send_and_await_chunk_response; pub use data_types::{ compute_address, peer_id_to_xor_name, xor_distance, ChunkStats, DataChunk, XorName, }; -pub use quantum::{QuantumClient, QuantumConfig}; +pub use file_ops::{ + create_manifest, deserialize_manifest, reassemble_file, serialize_manifest, split_file, + FileManifest, +}; +pub use quantum::{hex_node_id_to_encoded_peer_id, QuantumClient, QuantumConfig}; diff --git a/src/client/quantum.rs b/src/client/quantum.rs index 100e128b..468c4b4e 100644 --- a/src/client/quantum.rs +++ b/src/client/quantum.rs @@ -8,7 +8,8 @@ //! Chunks are the only data type supported: //! - **Content-addressed**: Address = SHA256(content) //! - **Immutable**: Once stored, content cannot change -//! - **Paid**: All storage requires EVM payment on Arbitrum +//! - **Paid**: Storage requires EVM payment on Arbitrum when a wallet is configured; +//! devnets with EVM disabled accept unpaid puts //! //! ## Security Features //! @@ -17,14 +18,21 @@ //! - **ChaCha20-Poly1305**: Symmetric encryption for data at rest use super::chunk_protocol::send_and_await_chunk_response; -use super::data_types::{DataChunk, XorName}; +use super::data_types::{compute_address, DataChunk, XorName}; use crate::ant_protocol::{ ChunkGetRequest, ChunkGetResponse, ChunkMessage, ChunkMessageBody, ChunkPutRequest, - ChunkPutResponse, + ChunkPutResponse, ChunkQuoteRequest, ChunkQuoteResponse, }; use crate::error::{Error, Result}; +use crate::payment::single_node::REQUIRED_QUOTES; +use crate::payment::{calculate_price, PaymentProof, SingleNodePayment}; +use ant_evm::{Amount, EncodedPeerId, PaymentQuote, ProofOfPayment}; use bytes::Bytes; +use evmlib::wallet::Wallet; +use futures::stream::{FuturesUnordered, StreamExt}; +use saorsa_core::identity::PeerId; use saorsa_core::P2PNode; +use std::collections::HashSet; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use std::time::Duration; @@ -71,10 +79,12 @@ impl Default for QuantumConfig { /// /// Chunks are content-addressed: the address is the SHA256 hash of the content. /// This ensures data integrity - if the content matches the address, the data -/// is authentic. All chunk storage requires EVM payment on Arbitrum. +/// is authentic. When a wallet is configured, chunk storage requires EVM payment +/// on Arbitrum. Without a wallet, chunks can be stored on devnets with EVM disabled. pub struct QuantumClient { config: QuantumConfig, p2p_node: Option>, + wallet: Option>, next_request_id: AtomicU64, } @@ -86,6 +96,7 @@ impl QuantumClient { Self { config, p2p_node: None, + wallet: None, next_request_id: AtomicU64::new(1), } } @@ -103,6 +114,13 @@ impl QuantumClient { self } + /// Set the wallet for payment operations. + #[must_use] + pub fn with_wallet(mut self, wallet: Wallet) -> Self { + self.wallet = Some(Arc::new(wallet)); + self + } + /// Get a chunk from the saorsa network via ANT protocol. /// /// Sends a `ChunkGetRequest` to a connected peer and waits for the @@ -120,10 +138,10 @@ impl QuantumClient { /// /// Returns an error if the network operation fails. pub async fn get_chunk(&self, address: &XorName) -> Result> { - debug!( - "Querying saorsa network for chunk: {}", - hex::encode(address) - ); + if tracing::enabled!(tracing::Level::DEBUG) { + let addr_hex = hex::encode(address); + debug!("Querying saorsa network for chunk: {addr_hex}"); + } let Some(ref node) = self.p2p_node else { return Err(Error::Network("P2P node not configured".into())); @@ -157,39 +175,43 @@ impl QuantumClient { address: addr, content, }) => { - if addr == *address { - let computed = crate::client::compute_address(&content); - if computed == addr { - debug!( - "Found chunk {} on saorsa network ({} bytes)", + if addr != *address { + if tracing::enabled!(tracing::Level::WARN) { + warn!( + "Peer returned chunk {} but we requested {}", hex::encode(addr), - content.len() + addr_hex ); - Some(Ok(Some(DataChunk::new(addr, Bytes::from(content))))) - } else { + } + return Some(Err(Error::InvalidChunk(format!( + "Mismatched chunk address: expected {addr_hex}, got {}", + hex::encode(addr) + )))); + } + + let computed = compute_address(&content); + if computed != addr { + if tracing::enabled!(tracing::Level::WARN) { warn!( "Peer returned chunk {} with invalid content hash {}", addr_hex, hex::encode(computed) ); - Some(Err(Error::InvalidChunk(format!( - "Invalid chunk content: expected hash {}, got {}", - addr_hex, - hex::encode(computed) - )))) } - } else { - warn!( - "Peer returned chunk {} but we requested {}", + return Some(Err(Error::InvalidChunk(format!( + "Invalid chunk content: expected hash {addr_hex}, got {}", + hex::encode(computed) + )))); + } + + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Found chunk {} on saorsa network ({} bytes)", hex::encode(addr), - addr_hex + content.len() ); - Some(Err(Error::InvalidChunk(format!( - "Mismatched chunk address: expected {}, got {}", - addr_hex, - hex::encode(addr) - )))) } + Some(Ok(Some(DataChunk::new(addr, Bytes::from(content))))) } ChunkMessageBody::GetResponse(ChunkGetResponse::NotFound { .. }) => { debug!("Chunk {} not found on saorsa network", addr_hex); @@ -210,11 +232,14 @@ impl QuantumClient { .await } - /// Store a chunk on the saorsa network via ANT protocol. + /// Store a chunk on the saorsa network with full payment workflow. /// - /// The chunk address is computed as SHA256(content), ensuring content-addressing. - /// Sends a `ChunkPutRequest` to a connected peer and waits for the - /// `ChunkPutResponse`. + /// This method implements the complete payment flow: + /// 1. Request quotes from 5 closest nodes via DHT + /// 2. Sort quotes by price and select median (index 2) + /// 3. Pay median node 3x on Arbitrum, send 0 atto to other 4 + /// 4. Create `ProofOfPayment` with all 5 quotes + /// 5. Send chunk with payment proof to storage nodes /// /// # Arguments /// @@ -226,27 +251,141 @@ impl QuantumClient { /// /// # Errors /// - /// Returns an error if the store operation fails. - pub async fn put_chunk(&self, content: Bytes) -> Result { - debug!("Storing chunk on saorsa network ({} bytes)", content.len()); + /// Returns an error if: + /// - Wallet is not configured + /// - Quote collection fails + /// - Payment fails + /// - Storage operation fails + pub async fn put_chunk_with_payment( + &self, + content: Bytes, + ) -> Result<(XorName, Vec)> { + let content_len = content.len(); + info!("Storing chunk with payment ({content_len} bytes)"); let Some(ref node) = self.p2p_node else { return Err(Error::Network("P2P node not configured".into())); }; - // Compute content address using SHA-256 (before peer selection so we can route by it) - let address = crate::client::compute_address(&content); + let Some(ref wallet) = self.wallet else { + return Err(Error::Payment( + "Wallet not configured - use with_wallet() to enable payments".to_string(), + )); + }; + + // Compute content address + let address = compute_address(&content); + let content_size = content.len(); + let data_size = u64::try_from(content_size) + .map_err(|e| Error::Network(format!("Content size too large: {e}")))?; + + // Step 1: Request quotes from network nodes via DHT + let quotes_with_peers = self + .get_quotes_from_dht_for_address(&address, data_size) + .await?; + + if quotes_with_peers.len() != REQUIRED_QUOTES { + return Err(Error::Payment(format!( + "Expected {REQUIRED_QUOTES} quotes but received {}", + quotes_with_peers.len() + ))); + } + + // Step 2: Split quotes into peer_quotes (for ProofOfPayment) and + // quotes_with_prices (for SingleNodePayment) in a single pass. + let mut peer_quotes: Vec<(EncodedPeerId, PaymentQuote)> = + Vec::with_capacity(quotes_with_peers.len()); + let mut quotes_with_prices: Vec<(PaymentQuote, Amount)> = + Vec::with_capacity(quotes_with_peers.len()); + + for (peer_id, quote, price) in quotes_with_peers { + let encoded_peer_id = hex_node_id_to_encoded_peer_id(&peer_id.to_hex())?; + peer_quotes.push((encoded_peer_id, quote.clone())); + quotes_with_prices.push((quote, price)); + } + + // Step 3: Create SingleNodePayment (sorts by price, selects median, pays 3x) + let payment = SingleNodePayment::from_quotes(quotes_with_prices)?; + + info!( + "Payment prepared: {} atto total (3x median price)", + payment.total_amount() + ); + + // Step 4: Pay on-chain — capture transaction hashes + let tx_hashes = payment.pay(wallet).await?; + info!( + "Payment successful on Arbitrum ({} transactions)", + tx_hashes.len() + ); + // Step 5: Build proof AFTER payment succeeds, including tx hashes + let proof = PaymentProof { + proof_of_payment: ProofOfPayment { peer_quotes }, + tx_hashes: tx_hashes.clone(), + }; + let payment_proof = rmp_serde::to_vec(&proof) + .map_err(|e| Error::Network(format!("Failed to serialize payment proof: {e}")))?; + + // Step 6: Send chunk with payment proof to storage node let target_peer = Self::pick_target_peer(node, &address).await?; - // Create PUT request with empty payment proof - let empty_payment = rmp_serde::to_vec(&ant_evm::ProofOfPayment { - peer_quotes: vec![], - }) - .map_err(|e| Error::Network(format!("Failed to serialize payment proof: {e}")))?; + let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed); + let request = ChunkPutRequest::with_payment(address, content.to_vec(), payment_proof); + let message = ChunkMessage { + request_id, + body: ChunkMessageBody::PutRequest(request), + }; + let message_bytes = message + .encode() + .map_err(|e| Error::Network(format!("Failed to encode PUT request: {e}")))?; + + let stored_address = Self::send_put_and_await( + node, + &target_peer, + message_bytes, + request_id, + self.config.timeout_secs, + hex::encode(address), + content_size, + ) + .await?; + + Ok((stored_address, tx_hashes)) + } + + /// Store a chunk with a pre-built payment proof, skipping the internal payment flow. + /// + /// Use this when you have already obtained quotes and paid on-chain externally + /// (e.g. via [`SingleNodePayment::pay`]) and want to avoid a redundant payment cycle. + /// + /// # Arguments + /// + /// * `content` - The data to store + /// * `proof` - A serialised [`ProofOfPayment`] (msgpack bytes) + /// + /// # Returns + /// + /// The `XorName` address where the chunk was stored. + /// + /// # Errors + /// + /// Returns an error if: + /// - P2P node is not configured + /// - No remote peers found near the target address + /// - Storage operation fails + pub async fn put_chunk_with_proof(&self, content: Bytes, proof: Vec) -> Result { + let Some(ref node) = self.p2p_node else { + return Err(Error::Network("P2P node not configured".into())); + }; + + let address = compute_address(&content); + let content_size = content.len(); + + let target_peer = Self::pick_target_peer(node, &address).await?; let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed); - let request = ChunkPutRequest::with_payment(address, content.to_vec(), empty_payment); + let request = ChunkPutRequest::with_payment(address, content.to_vec(), proof); let message = ChunkMessage { request_id, body: ChunkMessageBody::PutRequest(request), @@ -255,23 +394,77 @@ impl QuantumClient { .encode() .map_err(|e| Error::Network(format!("Failed to encode PUT request: {e}")))?; - let timeout = Duration::from_secs(self.config.timeout_secs); - let content_len = content.len(); - let addr_hex = hex::encode(address); - let timeout_secs = self.config.timeout_secs; + Self::send_put_and_await( + node, + &target_peer, + message_bytes, + request_id, + self.config.timeout_secs, + hex::encode(address), + content_size, + ) + .await + } + + /// Store a chunk on the saorsa network. + /// + /// Requires a wallet to be configured. Delegates to + /// [`put_chunk_with_payment`](Self::put_chunk_with_payment) for the full + /// payment flow (quotes, on-chain payment, proof). + /// + /// # Arguments + /// + /// * `content` - The data to store + /// + /// # Returns + /// + /// The `XorName` address where the chunk was stored. + /// + /// # Errors + /// + /// Returns an error if: + /// - No wallet is configured + /// - P2P node is not configured + /// - No remote peers found near the target address + /// - The storage operation fails + pub async fn put_chunk(&self, content: Bytes) -> Result { + if self.wallet.is_some() { + let (address, _tx_hashes) = self.put_chunk_with_payment(content).await?; + return Ok(address); + } + Err(Error::Payment( + "No wallet configured — payment is required for chunk storage. \ + Use --private-key or set SECRET_KEY to provide a wallet." + .to_string(), + )) + } + + /// Send a PUT request and await the response. + /// + /// Shared helper for all three PUT methods to avoid duplicating the + /// response-matching logic. + async fn send_put_and_await( + node: &P2PNode, + target_peer: &PeerId, + message_bytes: Vec, + request_id: u64, + timeout_secs: u64, + addr_hex: String, + content_size: usize, + ) -> Result { + let timeout = Duration::from_secs(timeout_secs); send_and_await_chunk_response( node, - &target_peer, + target_peer, message_bytes, request_id, timeout, |body| match body { ChunkMessageBody::PutResponse(ChunkPutResponse::Success { address: addr }) => { info!( - "Chunk stored at address: {} ({} bytes)", + "Chunk stored at address: {} ({content_size} bytes)", hex::encode(addr), - content_len ); Some(Ok(addr)) } @@ -316,10 +509,12 @@ impl QuantumClient { /// /// Returns an error if the network operation fails. pub async fn exists(&self, address: &XorName) -> Result { - debug!( - "Checking existence on saorsa network: {}", - hex::encode(address) - ); + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Checking existence on saorsa network: {}", + hex::encode(address) + ); + } self.get_chunk(address).await.map(|opt| opt.is_some()) } @@ -327,9 +522,8 @@ impl QuantumClient { /// /// Queries the DHT for the `CLOSE_GROUP_SIZE` closest nodes to the target /// address and returns the single closest remote peer (excluding ourselves). - async fn pick_target_peer(node: &P2PNode, target: &XorName) -> Result { + async fn pick_target_peer(node: &P2PNode, target: &XorName) -> Result { let local_peer_id = node.peer_id(); - let local_transport_id = node.transport_peer_id(); let closest_nodes = node .dht() @@ -339,22 +533,289 @@ impl QuantumClient { let closest = closest_nodes .into_iter() - .find(|n| { - n.peer_id != *local_peer_id - && local_transport_id - .as_ref() - .map_or(true, |tid| n.peer_id != *tid) - }) + .find(|n| n.peer_id != *local_peer_id) .ok_or_else(|| Error::Network("No remote peers found near target address".into()))?; - debug!( - "Selected closest peer {} for target {}", - closest.peer_id, - hex::encode(target) - ); + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Selected closest peer {} for target {}", + closest.peer_id, + hex::encode(target) + ); + } Ok(closest.peer_id) } + + /// Get quotes from DHT peers for chunk storage. + /// + /// Computes the content address and requests quotes from the closest peers. + /// Collects exactly `REQUIRED_QUOTES` quotes. + /// + /// # Arguments + /// + /// * `content` - The chunk data to get quotes for + /// + /// # Returns + /// + /// A vector of (`peer_id`, `PaymentQuote`, `Amount`) tuples containing the quoting peer's ID, + /// the quote, and its price. + /// + /// # Errors + /// + /// Returns an error if: + /// - DHT lookup fails + /// - Failed to collect enough quotes + /// - Quote deserialization fails + pub async fn get_quotes_from_dht( + &self, + content: &[u8], + ) -> Result> { + let address = compute_address(content); + let data_size = u64::try_from(content.len()) + .map_err(|e| Error::Network(format!("Content size too large: {e}")))?; + self.get_quotes_from_dht_for_address(&address, data_size) + .await + } + + /// Get quotes from DHT peers for chunk storage using a pre-computed address. + /// + /// Queries the DHT for the closest peers to the chunk address and requests + /// storage quotes from them. Collects exactly `REQUIRED_QUOTES` quotes. + /// + /// # Arguments + /// + /// * `address` - The pre-computed `XorName` address for the chunk + /// * `data_size` - The size of the chunk data in bytes + /// + /// # Returns + /// + /// A vector of (`peer_id`, `PaymentQuote`, `Amount`) tuples containing the quoting peer's ID, + /// the quote, and its price. + /// + /// # Errors + /// + /// Returns an error if: + /// - DHT lookup fails + /// - Failed to collect enough quotes + /// - Quote deserialization fails + #[allow(clippy::too_many_lines)] + async fn get_quotes_from_dht_for_address( + &self, + address: &XorName, + data_size: u64, + ) -> Result> { + let Some(ref node) = self.p2p_node else { + return Err(Error::Network("P2P node not configured".into())); + }; + + if tracing::enabled!(tracing::Level::DEBUG) { + let addr_hex = hex::encode(address); + debug!( + "Requesting {REQUIRED_QUOTES} quotes from DHT for chunk {addr_hex} (size: {data_size})" + ); + } + + let local_peer_id = node.peer_id(); + + // Find closest peers via DHT + let closest_nodes = node + .dht() + .find_closest_nodes(address, CLOSE_GROUP_SIZE) + .await + .map_err(|e| Error::Network(format!("DHT closest-nodes lookup failed: {e}")))?; + + // Filter out self and collect remote peers + let mut remote_peers: Vec = closest_nodes + .into_iter() + .filter(|n| n.peer_id != *local_peer_id) + .map(|n| n.peer_id) + .collect(); + + // Fallback to connected_peers() if DHT has insufficient peers + // This handles the case where DHT routing tables are still warming up + if remote_peers.len() < REQUIRED_QUOTES { + warn!( + "DHT returned only {} peers for {}, falling back to connected_peers()", + remote_peers.len(), + hex::encode(address) + ); + + let connected = node.connected_peers().await; + debug!("Found {} connected P2P peers for fallback", connected.len()); + + // Add connected peers that aren't already in remote_peers (O(1) dedup via HashSet) + let mut existing: HashSet = remote_peers.iter().copied().collect(); + for peer_id in connected { + if existing.insert(peer_id) { + remote_peers.push(peer_id); + } + } + + if remote_peers.len() < REQUIRED_QUOTES { + return Err(Error::Network(format!( + "Insufficient peers for quotes: found {} (DHT + P2P fallback), need {}", + remote_peers.len(), + REQUIRED_QUOTES + ))); + } + + info!( + "Fallback successful: now have {} peers for quote requests", + remote_peers.len() + ); + } + + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Found {} remote peers, requesting quotes from first {}", + remote_peers.len(), + REQUIRED_QUOTES + ); + } + + // Request quotes from all peers concurrently + // Collect the first REQUIRED_QUOTES successful responses + let timeout = Duration::from_secs(self.config.timeout_secs); + + // Create futures for all quote requests concurrently + let mut quote_futures = FuturesUnordered::new(); + + for peer_id in &remote_peers { + let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed); + let request = ChunkQuoteRequest::new(*address, data_size); + let message = ChunkMessage { + request_id, + body: ChunkMessageBody::QuoteRequest(request), + }; + + let message_bytes = match message.encode() { + Ok(bytes) => bytes, + Err(e) => { + warn!("Failed to encode quote request for {peer_id}: {e}"); + continue; + } + }; + + // Clone necessary data for the async task + let peer_id_clone = *peer_id; + let node_clone = node.clone(); + + // Create a future for this quote request + let quote_future = async move { + let quote_result = send_and_await_chunk_response( + &node_clone, + &peer_id_clone, + message_bytes, + request_id, + timeout, + |body| match body { + ChunkMessageBody::QuoteResponse(ChunkQuoteResponse::Success { quote }) => { + // Deserialize the quote + match rmp_serde::from_slice::("e) { + Ok(payment_quote) => { + let price = calculate_price(&payment_quote.quoting_metrics); + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Received quote from {peer_id_clone}: price = {price}" + ); + } + Some(Ok((payment_quote, price))) + } + Err(e) => Some(Err(Error::Network(format!( + "Failed to deserialize quote from {peer_id_clone}: {e}" + )))), + } + } + ChunkMessageBody::QuoteResponse(ChunkQuoteResponse::Error(e)) => Some(Err( + Error::Network(format!("Quote error from {peer_id_clone}: {e}")), + )), + _ => None, + }, + |e| { + Error::Network(format!( + "Failed to send quote request to {peer_id_clone}: {e}" + )) + }, + || Error::Network(format!("Timeout waiting for quote from {peer_id_clone}")), + ) + .await; + + (peer_id_clone, quote_result) + }; + + quote_futures.push(quote_future); + } + + // Collect quotes as they complete, stopping once we have REQUIRED_QUOTES + let mut quotes_with_peers = Vec::with_capacity(REQUIRED_QUOTES); + + while let Some((peer_id, quote_result)) = quote_futures.next().await { + match quote_result { + Ok((quote, price)) => { + quotes_with_peers.push((peer_id, quote, price)); + + // Stop collecting once we have enough quotes + if quotes_with_peers.len() >= REQUIRED_QUOTES { + break; + } + } + Err(e) => { + warn!("Failed to get quote from {peer_id}: {e}"); + // Continue trying other peers + } + } + } + + if quotes_with_peers.len() < REQUIRED_QUOTES { + return Err(Error::Network(format!( + "Failed to collect enough quotes: got {}, need {}", + quotes_with_peers.len(), + REQUIRED_QUOTES + ))); + } + + if tracing::enabled!(tracing::Level::INFO) { + let quote_count = quotes_with_peers.len(); + let addr_hex = hex::encode(address); + info!("Collected {quote_count} quotes for chunk {addr_hex}"); + } + + Ok(quotes_with_peers) + } +} + +/// Identity multihash code (stores raw bytes without hashing). +const MULTIHASH_IDENTITY_CODE: u64 = 0x00; + +/// Convert a hex-encoded 32-byte saorsa-core node ID to an [`EncodedPeerId`]. +/// +/// Saorsa-core peer IDs are 64-character hex strings representing 32 raw bytes. +/// libp2p `PeerId` expects a multihash-encoded identity. This function bridges the two +/// formats by wrapping the raw bytes in an identity multihash (code 0x00) and then +/// converting to `EncodedPeerId` via `From`. +/// +/// # Errors +/// +/// Returns an error if the hex string is invalid or the peer ID cannot be constructed. +pub fn hex_node_id_to_encoded_peer_id(hex_id: &str) -> Result { + let raw_bytes = hex::decode(hex_id) + .map_err(|e| Error::Payment(format!("Invalid hex peer ID '{hex_id}': {e}")))?; + + let multihash = + multihash::Multihash::<64>::wrap(MULTIHASH_IDENTITY_CODE, &raw_bytes).map_err(|e| { + Error::Payment(format!( + "Failed to create multihash for peer '{hex_id}': {e}" + )) + })?; + + let peer_id = libp2p::PeerId::from_multihash(multihash).map_err(|_| { + Error::Payment(format!( + "Failed to create PeerId from multihash for peer '{hex_id}'" + )) + })?; + + Ok(EncodedPeerId::from(peer_id)) } #[cfg(test)] @@ -403,4 +864,28 @@ mod tests { let result = client.exists(&address).await; assert!(result.is_err()); } + + #[test] + fn test_hex_node_id_to_encoded_peer_id_valid() { + // A valid 32-byte hex-encoded node ID (64 hex chars) + let hex_id = "80b6427dc1b0490ffe743d39a4d4d68c252f5053f6234a9154cfb017f92a1399"; + let result = hex_node_id_to_encoded_peer_id(hex_id); + assert!( + result.is_ok(), + "Should convert valid hex node ID: {result:?}" + ); + } + + #[test] + fn test_hex_node_id_to_encoded_peer_id_invalid_hex() { + let result = hex_node_id_to_encoded_peer_id("not-valid-hex"); + assert!(result.is_err()); + } + + #[test] + fn test_hex_node_id_to_encoded_peer_id_all_zeros() { + let hex_id = "0000000000000000000000000000000000000000000000000000000000000000"; + let result = hex_node_id_to_encoded_peer_id(hex_id); + assert!(result.is_ok()); + } } diff --git a/src/config.rs b/src/config.rs index a710b1ca..008d54ba 100644 --- a/src/config.rs +++ b/src/config.rs @@ -193,11 +193,18 @@ pub enum EvmNetworkConfig { /// Payment verification configuration. /// +/// **Production nodes require payment by default.** +/// /// All new data requires EVM payment on Arbitrum. The cache stores /// previously verified payments to avoid redundant lookups. +/// +/// To disable payment verification (test/dev only): +/// - Use CLI flag: `--disable-payment-verification` +/// - Or set `enabled = false` in config file #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PaymentConfig { /// Enable payment verification. + /// **Default: true (payment required).** #[serde(default = "default_payment_enabled")] pub enabled: bool, @@ -498,3 +505,24 @@ fn default_testnet_bootstrap() -> Vec { SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(164, 92, 111, 156), 12000)), ] } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_config_requires_payment() { + let config = PaymentConfig::default(); + assert!(config.enabled, "Payment must be enabled by default"); + } + + #[test] + fn test_default_evm_verifier_enabled() { + use crate::payment::EvmVerifierConfig; + let config = EvmVerifierConfig::default(); + assert!( + config.enabled, + "EVM verification must be enabled by default" + ); + } +} diff --git a/src/devnet.rs b/src/devnet.rs index 64473923..f2491d92 100644 --- a/src/devnet.rs +++ b/src/devnet.rs @@ -11,6 +11,7 @@ use crate::payment::{ }; use crate::storage::{AntProtocol, LmdbStorage, LmdbStorageConfig}; use ant_evm::RewardsAddress; +use evmlib::Network as EvmNetwork; use rand::Rng; use saorsa_core::identity::NodeIdentity; use saorsa_core::{NodeConfig as CoreNodeConfig, P2PEvent, P2PNode}; @@ -160,6 +161,15 @@ pub struct DevnetConfig { /// Whether to remove the data directory on shutdown. pub cleanup_data_dir: bool, + + /// Enable EVM payment enforcement on all nodes. + /// When true, nodes will require valid on-chain payment proofs. + pub enable_evm: bool, + + /// Optional EVM network for payment verification. + /// When `enable_evm` is true and this is `Some`, nodes will use + /// this network (e.g. Anvil testnet) for on-chain verification. + pub evm_network: Option, } impl Default for DevnetConfig { @@ -180,6 +190,8 @@ impl Default for DevnetConfig { node_startup_timeout: Duration::from_secs(DEFAULT_NODE_STARTUP_TIMEOUT_SECS), enable_node_logging: false, cleanup_data_dir: true, + enable_evm: false, + evm_network: None, } } } @@ -221,6 +233,22 @@ pub struct DevnetManifest { pub data_dir: PathBuf, /// Creation time in RFC3339. pub created_at: String, + /// EVM configuration (present when EVM payment enforcement is enabled). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub evm: Option, +} + +/// EVM configuration info included in the devnet manifest. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevnetEvmInfo { + /// Anvil RPC URL. + pub rpc_url: String, + /// Funded wallet private key (hex-encoded with 0x prefix). + pub wallet_private_key: String, + /// Payment token contract address. + pub payment_token_address: String, + /// Data payments contract address. + pub data_payments_address: String, } /// Network state for devnet startup lifecycle. @@ -317,25 +345,23 @@ impl Devnet { )); } + let node_count = config.node_count; + let node_count_u16 = u16::try_from(node_count).map_err(|_| { + DevnetError::Config(format!("Node count {node_count} exceeds u16::MAX")) + })?; + if config.base_port == 0 { let mut rng = rand::thread_rng(); - let node_count_u16 = u16::try_from(config.node_count).map_err(|_| { - DevnetError::Config(format!("Node count {} exceeds u16::MAX", config.node_count)) - })?; let max_base_port = DEVNET_PORT_RANGE_MAX.saturating_sub(node_count_u16); config.base_port = rng.gen_range(DEVNET_PORT_RANGE_MIN..max_base_port); } - let node_count_u16 = u16::try_from(config.node_count).map_err(|_| { - DevnetError::Config(format!("Node count {} exceeds u16::MAX", config.node_count)) - })?; - let max_port = config - .base_port + let base_port = config.base_port; + let max_port = base_port .checked_add(node_count_u16) .ok_or_else(|| { DevnetError::Config(format!( - "Port range overflow: base_port {} + node_count {} exceeds u16::MAX", - config.base_port, config.node_count + "Port range overflow: base_port {base_port} + node_count {node_count} exceeds u16::MAX" )) })?; if max_port > DEVNET_PORT_RANGE_MAX { @@ -412,7 +438,7 @@ impl Devnet { shutdown_futures.push(async move { if let Some(p2p) = p2p_node { if let Err(e) = p2p.shutdown().await { - warn!("Error shutting down node {}: {}", node_index, e); + warn!("Error shutting down node {node_index}: {e}"); } } *node_state.write().await = NodeState::Stopped; @@ -422,7 +448,7 @@ impl Devnet { if self.config.cleanup_data_dir { if let Err(e) = tokio::fs::remove_dir_all(&self.config.data_dir).await { - warn!("Failed to cleanup devnet data directory: {}", e); + warn!("Failed to cleanup devnet data directory: {e}"); } } @@ -467,7 +493,16 @@ impl Devnet { let regular_count = self.config.node_count - self.config.bootstrap_count; info!("Starting {} regular nodes", regular_count); - let bootstrap_addrs: Vec = self.nodes[0..self.config.bootstrap_count] + let bootstrap_addrs: Vec = self + .nodes + .get(0..self.config.bootstrap_count) + .ok_or_else(|| { + DevnetError::Config(format!( + "Bootstrap count {} exceeds nodes length {}", + self.config.bootstrap_count, + self.nodes.len() + )) + })? .iter() .map(|n| n.address) .collect(); @@ -496,7 +531,7 @@ impl Devnet { // Generate identity first so we can use peer_id as the directory name let identity = NodeIdentity::generate() .map_err(|e| DevnetError::Core(format!("Failed to generate node identity: {e}")))?; - let peer_id = hex::encode(identity.node_id().0); + let peer_id = identity.peer_id().to_hex(); let node_id = format!("devnet_node_{index}"); let data_dir = self.config.data_dir.join(NODES_SUBDIR).join(&peer_id); @@ -507,7 +542,7 @@ impl Devnet { .await .map_err(|e| DevnetError::Core(format!("Failed to save node identity: {e}")))?; - let ant_protocol = Self::create_ant_protocol(&data_dir).await?; + let ant_protocol = Self::create_ant_protocol(&data_dir, &identity, &self.config).await?; Ok(DevnetNode { index, @@ -525,7 +560,11 @@ impl Devnet { }) } - async fn create_ant_protocol(data_dir: &std::path::Path) -> Result { + async fn create_ant_protocol( + data_dir: &std::path::Path, + identity: &NodeIdentity, + config: &DevnetConfig, + ) -> Result { let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), verify_on_read: true, @@ -536,19 +575,36 @@ impl Devnet { .await .map_err(|e| DevnetError::Core(format!("Failed to create LMDB storage: {e}")))?; - let payment_config = PaymentVerifierConfig { - evm: EvmVerifierConfig { + let evm_config = if config.enable_evm { + EvmVerifierConfig { + enabled: true, + network: config + .evm_network + .clone() + .unwrap_or(EvmNetwork::ArbitrumOne), + } + } else { + EvmVerifierConfig { enabled: false, ..Default::default() - }, + } + }; + + let payment_config = PaymentVerifierConfig { + evm: evm_config, cache_capacity: DEVNET_PAYMENT_CACHE_CAPACITY, + local_rewards_address: None, }; let payment_verifier = PaymentVerifier::new(payment_config); let rewards_address = RewardsAddress::new(DEVNET_REWARDS_ADDRESS); let metrics_tracker = QuotingMetricsTracker::new(DEVNET_MAX_RECORDS, DEVNET_INITIAL_RECORDS); - let quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); + let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + // Wire ML-DSA-65 signing from the devnet node's identity + crate::payment::wire_ml_dsa_signer(&mut quote_generator, identity) + .map_err(|e| DevnetError::Startup(format!("Failed to wire ML-DSA-65 signer: {e}")))?; Ok(AntProtocol::new( Arc::new(storage), @@ -564,7 +620,6 @@ impl Devnet { let mut core_config = CoreNodeConfig::new() .map_err(|e| DevnetError::Core(format!("Failed to create core config: {e}")))?; - core_config.peer_id = Some(node.peer_id.clone()); core_config.listen_addr = node.address; core_config.listen_addrs = vec![node.address]; core_config.enable_ipv6 = false; @@ -573,13 +628,15 @@ impl Devnet { .clone_from(&node.bootstrap_addrs); core_config.max_message_size = Some(crate::ant_protocol::MAX_WIRE_MESSAGE_SIZE); - let p2p_node = P2PNode::new(core_config).await.map_err(|e| { - DevnetError::Startup(format!("Failed to create node {}: {e}", node.index)) - })?; + let index = node.index; + let p2p_node = P2PNode::new(core_config) + .await + .map_err(|e| DevnetError::Startup(format!("Failed to create node {index}: {e}")))?; - p2p_node.start().await.map_err(|e| { - DevnetError::Startup(format!("Failed to start node {}: {e}", node.index)) - })?; + p2p_node + .start() + .await + .map_err(|e| DevnetError::Startup(format!("Failed to start node {index}: {e}")))?; node.p2p_node = Some(Arc::new(p2p_node)); *node.state.write().await = NodeState::Running; @@ -593,14 +650,13 @@ impl Devnet { while let Ok(event) = events.recv().await { if let P2PEvent::Message { topic, - source, + source: Some(source), data, } = event { if topic == CHUNK_PROTOCOL_ID { debug!( - "Node {} received chunk protocol message from {}", - node_index, source + "Node {node_index} received chunk protocol message from {source}" ); let protocol = Arc::clone(&protocol_clone); let p2p = Arc::clone(&p2p_clone); @@ -616,13 +672,12 @@ impl Devnet { .await { warn!( - "Node {} failed to send response to {}: {}", - node_index, source, e + "Node {node_index} failed to send response to {source}: {e}" ); } } Err(e) => { - warn!("Node {} protocol handler error: {}", node_index, e); + warn!("Node {node_index} protocol handler error: {e}"); } } }); @@ -642,7 +697,13 @@ impl Devnet { for i in range { while Instant::now() < deadline { - let state = self.nodes[i].state.read().await.clone(); + let node = self.nodes.get(i).ok_or_else(|| { + DevnetError::Config(format!( + "Node index {i} out of bounds (len: {})", + self.nodes.len() + )) + })?; + let state = node.state.read().await.clone(); match state { NodeState::Running | NodeState::Connected => break, NodeState::Failed(ref e) => { diff --git a/src/lib.rs b/src/lib.rs index bc671d1e..e67fed63 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -47,8 +47,6 @@ pub mod error; pub mod event; pub mod node; pub mod payment; -#[cfg(test)] -mod probe; pub mod storage; pub mod upgrade; @@ -57,11 +55,12 @@ pub use ant_protocol::{ ChunkPutResponse, ChunkQuoteRequest, ChunkQuoteResponse, CHUNK_PROTOCOL_ID, MAX_CHUNK_SIZE, }; pub use client::{ - compute_address, peer_id_to_xor_name, xor_distance, DataChunk, QuantumClient, QuantumConfig, - XorName, + compute_address, create_manifest, deserialize_manifest, peer_id_to_xor_name, reassemble_file, + serialize_manifest, split_file, xor_distance, DataChunk, FileManifest, QuantumClient, + QuantumConfig, XorName, }; pub use config::{BootstrapCacheConfig, NodeConfig, StorageConfig}; -pub use devnet::{Devnet, DevnetConfig, DevnetManifest}; +pub use devnet::{Devnet, DevnetConfig, DevnetEvmInfo, DevnetManifest}; pub use error::{Error, Result}; pub use event::{NodeEvent, NodeEventsChannel}; pub use node::{NodeBuilder, RunningNode}; diff --git a/src/node.rs b/src/node.rs index 0a82d8c3..716f726f 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,6 +1,6 @@ //! Node implementation - thin wrapper around saorsa-core's `P2PNode`. -use crate::ant_protocol::CHUNK_PROTOCOL_ID; +use crate::ant_protocol::{CHUNK_PROTOCOL_ID, MAX_CHUNK_SIZE}; use crate::config::{ default_nodes_dir, default_root_dir, EvmNetworkConfig, IpVersion, NetworkMode, NodeConfig, NODE_IDENTITY_FILENAME, @@ -9,12 +9,12 @@ use crate::error::{Error, Result}; use crate::event::{create_event_channel, NodeEvent, NodeEventsChannel, NodeEventsSender}; use crate::payment::metrics::QuotingMetricsTracker; use crate::payment::wallet::parse_rewards_address; -use crate::payment::{PaymentVerifier, PaymentVerifierConfig, QuoteGenerator}; +use crate::payment::{EvmVerifierConfig, PaymentVerifier, PaymentVerifierConfig, QuoteGenerator}; use crate::storage::{AntProtocol, LmdbStorage, LmdbStorageConfig}; use crate::upgrade::{AutoApplyUpgrader, UpgradeMonitor, UpgradeResult}; use ant_evm::RewardsAddress; use evmlib::Network as EvmNetwork; -use saorsa_core::identity::{NodeId, NodeIdentity}; +use saorsa_core::identity::NodeIdentity; use saorsa_core::{ BootstrapConfig as CoreBootstrapConfig, BootstrapManager, IPDiversityConfig as CoreDiversityConfig, NodeConfig as CoreNodeConfig, P2PEvent, P2PNode, @@ -23,12 +23,18 @@ use saorsa_core::{ use std::net::SocketAddr; use std::path::PathBuf; use std::sync::Arc; +use tokio::sync::Semaphore; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, warn}; -/// Maximum number of records for quoting metrics. -const DEFAULT_MAX_QUOTING_RECORDS: usize = 100_000; +/// Node storage capacity limit (5 GB). +/// +/// Used to derive `max_records` for the quoting metrics pricing curve. +/// A node advertises `NODE_STORAGE_LIMIT_BYTES / MAX_CHUNK_SIZE` as +/// its maximum record count, giving the pricing algorithm a meaningful +/// fullness ratio instead of a hardcoded constant. +pub const NODE_STORAGE_LIMIT_BYTES: u64 = 5 * 1024 * 1024 * 1024; /// Default rewards address when none is configured (20-byte zero address). const DEFAULT_REWARDS_ADDRESS: [u8; 20] = [0u8; 20]; @@ -56,9 +62,49 @@ impl NodeBuilder { pub async fn build(mut self) -> Result { info!("Building saorsa-node with config: {:?}", self.config); + // Validate production requirements + if self.config.network_mode == NetworkMode::Production && !self.config.payment.enabled { + return Err(Error::Config( + "CRITICAL: Payment verification is REQUIRED in production mode. \ + Remove 'enabled = false' from config or --disable-payment-verification flag." + .to_string(), + )); + } + + // Validate rewards address in production + if self.config.network_mode == NetworkMode::Production { + match self.config.payment.rewards_address { + None => { + return Err(Error::Config( + "CRITICAL: Rewards address is not configured. \ + Set payment.rewards_address in config to your Arbitrum wallet address." + .to_string(), + )); + } + Some(ref addr) if addr == "0xYOUR_ARBITRUM_ADDRESS_HERE" || addr.is_empty() => { + return Err(Error::Config( + "CRITICAL: Rewards address is not configured. \ + Set payment.rewards_address in config to your Arbitrum wallet address." + .to_string(), + )); + } + Some(_) => {} + } + } + + // Warn if payment disabled in any mode + if !self.config.payment.enabled { + let mode = self.config.network_mode; + warn!("⚠️ ⚠️ ⚠️"); + warn!("⚠️ PAYMENT VERIFICATION DISABLED (mode: {mode:?})"); + warn!("⚠️ This should ONLY be used for testing!"); + warn!("⚠️ All storage requests will be accepted for FREE"); + warn!("⚠️ ⚠️ ⚠️"); + } + // Resolve identity and root_dir (may update self.config.root_dir) - let identity = Self::resolve_identity(&mut self.config).await?; - let peer_id = node_id_to_peer_id(identity.node_id()); + let identity = Arc::new(Self::resolve_identity(&mut self.config).await?); + let peer_id = identity.peer_id().to_hex(); info!(peer_id = %peer_id, root_dir = %self.config.root_dir.display(), "Node identity resolved"); @@ -71,9 +117,11 @@ impl NodeBuilder { // Create event channel let (events_tx, events_rx) = create_event_channel(); - // Convert our config to saorsa-core's config, injecting our stable peer_id + // Convert our config to saorsa-core's config let mut core_config = Self::build_core_config(&self.config)?; - core_config.peer_id = Some(peer_id); + // Inject the ML-DSA identity so the P2PNode's transport peer ID + // matches the pub_key embedded in payment quotes. + core_config.node_identity = Some(Arc::clone(&identity)); debug!("Core config: {:?}", core_config); // Initialize saorsa-core's P2PNode @@ -99,7 +147,9 @@ impl NodeBuilder { // Initialize ANT protocol handler for chunk storage let ant_protocol = if self.config.storage.enabled { - Some(Arc::new(Self::build_ant_protocol(&self.config).await?)) + Some(Arc::new( + Self::build_ant_protocol(&self.config, &identity).await?, + )) } else { info!("Chunk storage disabled"); None @@ -123,11 +173,12 @@ impl NodeBuilder { /// Build the saorsa-core `NodeConfig` from our config. fn build_core_config(config: &NodeConfig) -> Result { // Determine listen address based on port and IP version + let port = config.port; let listen_addr: SocketAddr = match config.ip_version { - IpVersion::Ipv4 | IpVersion::Dual => format!("0.0.0.0:{}", config.port) + IpVersion::Ipv4 | IpVersion::Dual => format!("0.0.0.0:{port}") .parse() .map_err(|e| Error::Config(format!("Invalid listen address: {e}")))?, - IpVersion::Ipv6 => format!("[::]:{}", config.port) + IpVersion::Ipv6 => format!("[::]:{port}") .parse() .map_err(|e| Error::Config(format!("Invalid listen address: {e}")))?, }; @@ -210,7 +261,7 @@ impl NodeBuilder { let identity = NodeIdentity::generate().map_err(|e| { Error::Startup(format!("Failed to generate node identity: {e}")) })?; - let peer_id = node_id_to_peer_id(identity.node_id()); + let peer_id = identity.peer_id().to_hex(); let peer_dir = nodes_dir.join(&peer_id); std::fs::create_dir_all(&peer_dir)?; identity @@ -221,7 +272,9 @@ impl NodeBuilder { Ok(identity) } 1 => { - let dir = &identity_dirs[0]; + let dir = identity_dirs + .first() + .ok_or_else(|| Error::Config("No identity dirs found".to_string()))?; let identity = NodeIdentity::load_from_file(&dir.join(NODE_IDENTITY_FILENAME)) .await .map_err(|e| Error::Startup(format!("Failed to load node identity: {e}")))?; @@ -296,7 +349,11 @@ impl NodeBuilder { /// Build the ANT protocol handler from config. /// /// Initializes LMDB storage, payment verifier, and quote generator. - async fn build_ant_protocol(config: &NodeConfig) -> Result { + /// Wires ML-DSA-65 signing from the node's identity into the quote generator. + async fn build_ant_protocol( + config: &NodeConfig, + identity: &NodeIdentity, + ) -> Result { // Create LMDB storage let storage_config = LmdbStorageConfig { root_dir: config.root_dir.clone(), @@ -308,30 +365,37 @@ impl NodeBuilder { .await .map_err(|e| Error::Startup(format!("Failed to create LMDB storage: {e}")))?; + // Parse rewards address first (needed by both verifier and quote generator) + let rewards_address = match config.payment.rewards_address { + Some(ref addr) => parse_rewards_address(addr)?, + None => RewardsAddress::new(DEFAULT_REWARDS_ADDRESS), + }; + // Create payment verifier let evm_network = match config.payment.evm_network { EvmNetworkConfig::ArbitrumOne => EvmNetwork::ArbitrumOne, EvmNetworkConfig::ArbitrumSepolia => EvmNetwork::ArbitrumSepoliaTest, }; let payment_config = PaymentVerifierConfig { - evm: crate::payment::EvmVerifierConfig { + evm: EvmVerifierConfig { enabled: config.payment.enabled, network: evm_network, }, cache_capacity: config.payment.cache_capacity, + local_rewards_address: Some(rewards_address), }; let payment_verifier = PaymentVerifier::new(payment_config); + // Safe: 5GB fits in usize on all supported 64-bit platforms. + #[allow(clippy::cast_possible_truncation)] + let max_records = (NODE_STORAGE_LIMIT_BYTES as usize) / MAX_CHUNK_SIZE; + let metrics_tracker = QuotingMetricsTracker::new(max_records, 0); + let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); - // Create quote generator - let rewards_address = match config.payment.rewards_address { - Some(ref addr) => parse_rewards_address(addr)?, - None => RewardsAddress::new(DEFAULT_REWARDS_ADDRESS), - }; - let metrics_tracker = QuotingMetricsTracker::new(DEFAULT_MAX_QUOTING_RECORDS, 0); - let quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); + // Wire ML-DSA-65 signing from node identity + crate::payment::wire_ml_dsa_signer(&mut quote_generator, identity)?; info!( - "ANT protocol handler initialized (protocol={})", + "ANT protocol handler initialized with ML-DSA-65 signing (protocol={})", CHUNK_PROTOCOL_ID ); @@ -352,7 +416,7 @@ impl NodeBuilder { // Create cache directory if let Err(e) = std::fs::create_dir_all(&cache_dir) { - warn!("Failed to create bootstrap cache directory: {}", e); + warn!("Failed to create bootstrap cache directory: {e}"); return None; } @@ -371,18 +435,13 @@ impl NodeBuilder { Some(manager) } Err(e) => { - warn!("Failed to initialize bootstrap cache: {}", e); + warn!("Failed to initialize bootstrap cache: {e}"); None } } } } -/// Convert a `NodeId` to a hex-encoded `PeerId` string (full 64 hex chars). -fn node_id_to_peer_id(node_id: &NodeId) -> String { - hex::encode(node_id.0) -} - /// A running saorsa node. pub struct RunningNode { config: NodeConfig, @@ -481,13 +540,13 @@ impl RunningNode { // If we reach here, exec() failed or not supported } Ok(UpgradeResult::RolledBack { reason }) => { - warn!("Upgrade rolled back: {}", reason); + warn!("Upgrade rolled back: {reason}"); } Ok(UpgradeResult::NoUpgrade) => { debug!("No upgrade needed"); } Err(e) => { - error!("Critical upgrade error: {}", e); + error!("Critical upgrade error: {e}"); } } } @@ -514,7 +573,7 @@ impl RunningNode { ); } Err(e) => { - debug!("Failed to get bootstrap cache stats: {}", e); + debug!("Failed to get bootstrap cache stats: {e}"); } } } @@ -560,8 +619,7 @@ impl RunningNode { break; } _ = sighup.recv() => { - info!("Received SIGHUP, could reload config here"); - // TODO: Implement config reload on SIGHUP + info!("Received SIGHUP (config reload not yet supported)"); } } } @@ -599,34 +657,36 @@ impl RunningNode { let mut events = self.p2p_node.subscribe_events(); let p2p = Arc::clone(&self.p2p_node); + let semaphore = Arc::new(Semaphore::new(64)); self.protocol_task = Some(tokio::spawn(async move { while let Ok(event) = events.recv().await { if let P2PEvent::Message { topic, - source, + source: Some(source), data, } = event { if topic == CHUNK_PROTOCOL_ID { - debug!("Received chunk protocol message from {}", source); + debug!("Received chunk protocol message from {source}"); let protocol = Arc::clone(&protocol); let p2p = Arc::clone(&p2p); + let sem = semaphore.clone(); tokio::spawn(async move { + let Ok(_permit) = sem.acquire().await else { + return; + }; match protocol.handle_message(&data).await { Ok(response) => { if let Err(e) = p2p .send_message(&source, CHUNK_PROTOCOL_ID, response.to_vec()) .await { - warn!( - "Failed to send protocol response to {}: {}", - source, e - ); + warn!("Failed to send protocol response to {source}: {e}"); } } Err(e) => { - warn!("Protocol handler error: {}", e); + warn!("Protocol handler error: {e}"); } } }); @@ -758,7 +818,7 @@ mod tests { // Key file should exist assert!(tmp.path().join(NODE_IDENTITY_FILENAME).exists()); // peer_id should be derivable from the identity - let peer_id = node_id_to_peer_id(identity.node_id()); + let peer_id = identity.peer_id().to_hex(); assert_eq!(peer_id.len(), 64); // 32 bytes hex-encoded } @@ -779,14 +839,14 @@ mod tests { }; let loaded = NodeBuilder::resolve_identity(&mut config).await.unwrap(); - assert_eq!(loaded.node_id(), original.node_id()); + assert_eq!(loaded.peer_id(), original.peer_id()); } #[test] - fn test_node_id_to_peer_id_length() { - let id = NodeId::from_bytes([0x42; 32]); - let peer_id = node_id_to_peer_id(&id); - assert_eq!(peer_id.len(), 64); // 32 bytes = 64 hex chars + fn test_peer_id_hex_length() { + let id = saorsa_core::identity::PeerId::from_bytes([0x42; 32]); + let hex = id.to_hex(); + assert_eq!(hex.len(), 64); // 32 bytes = 64 hex chars } /// Simulates a node restart: first run creates identity in a scoped subdir @@ -799,7 +859,7 @@ mod tests { // First "boot": generate identity, save it in nodes/{peer_id}/ let identity1 = NodeIdentity::generate().unwrap(); - let peer_id1 = node_id_to_peer_id(identity1.node_id()); + let peer_id1 = identity1.peer_id().to_hex(); let peer_dir = nodes_dir.join(&peer_id1); std::fs::create_dir_all(&peer_dir).unwrap(); identity1 @@ -817,7 +877,7 @@ mod tests { let loaded = NodeIdentity::load_from_file(&identity_dirs[0].join(NODE_IDENTITY_FILENAME)) .await .unwrap(); - let peer_id2 = node_id_to_peer_id(loaded.node_id()); + let peer_id2 = loaded.peer_id().to_hex(); assert_eq!(peer_id1, peer_id2, "peer_id must survive restart"); assert_eq!( @@ -869,8 +929,8 @@ mod tests { let identity2 = NodeBuilder::resolve_identity(&mut config2).await.unwrap(); assert_eq!( - identity1.node_id(), - identity2.node_id(), + identity1.peer_id(), + identity2.peer_id(), "explicit --root-dir must yield stable identity" ); } diff --git a/src/payment/cache.rs b/src/payment/cache.rs index 84d53126..47a5bcf1 100644 --- a/src/payment/cache.rs +++ b/src/payment/cache.rs @@ -6,11 +6,10 @@ use lru::LruCache; use parking_lot::Mutex; use std::num::NonZeroUsize; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; -/// `XorName` type - 32-byte content hash. -/// TODO: Import from saorsa-core or ant-protocol when available. -pub type XorName = [u8; 32]; +pub use super::quote::XorName; /// Default cache capacity (100,000 entries = 3.2MB memory). const DEFAULT_CACHE_CAPACITY: usize = 100_000; @@ -22,11 +21,13 @@ const DEFAULT_CACHE_CAPACITY: usize = 100_000; #[derive(Clone)] pub struct VerifiedCache { inner: Arc>>, - stats: Arc>, + hits: Arc, + misses: Arc, + additions: Arc, } /// Cache statistics for monitoring. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, Copy)] pub struct CacheStats { /// Number of cache hits. pub hits: u64, @@ -69,7 +70,9 @@ impl VerifiedCache { let cap = NonZeroUsize::new(effective_capacity).unwrap_or(NonZeroUsize::MIN); Self { inner: Arc::new(Mutex::new(LruCache::new(cap))), - stats: Arc::new(Mutex::new(CacheStats::default())), + hits: Arc::new(AtomicU64::new(0)), + misses: Arc::new(AtomicU64::new(0)), + additions: Arc::new(AtomicU64::new(0)), } } @@ -80,13 +83,11 @@ impl VerifiedCache { pub fn contains(&self, xorname: &XorName) -> bool { let found = self.inner.lock().get(xorname).is_some(); - let mut stats = self.stats.lock(); if found { - stats.hits += 1; + self.hits.fetch_add(1, Ordering::Relaxed); } else { - stats.misses += 1; + self.misses.fetch_add(1, Ordering::Relaxed); } - drop(stats); found } @@ -96,13 +97,17 @@ impl VerifiedCache { /// This should be called after verifying that data exists on the autonomi network. pub fn insert(&self, xorname: XorName) { self.inner.lock().put(xorname, ()); - self.stats.lock().additions += 1; + self.additions.fetch_add(1, Ordering::Relaxed); } /// Get current cache statistics. #[must_use] pub fn stats(&self) -> CacheStats { - self.stats.lock().clone() + CacheStats { + hits: self.hits.load(Ordering::Relaxed), + misses: self.misses.load(Ordering::Relaxed), + additions: self.additions.load(Ordering::Relaxed), + } } /// Get the current number of entries in the cache. @@ -130,6 +135,7 @@ impl Default for VerifiedCache { } #[cfg(test)] +#[allow(clippy::expect_used)] mod tests { use super::*; @@ -214,4 +220,109 @@ mod tests { cache.clear(); assert!(cache.is_empty()); } + + #[test] + fn test_with_capacity_zero_defaults_to_one() { + let cache = VerifiedCache::with_capacity(0); + // Should be able to store at least 1 element + cache.insert([1u8; 32]); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_default_impl() { + let cache = VerifiedCache::default(); + assert!(cache.is_empty()); + cache.insert([1u8; 32]); + assert!(cache.contains(&[1u8; 32])); + } + + #[test] + fn test_hit_rate_zero_total() { + let stats = CacheStats::default(); + assert!(stats.hit_rate().abs() < f64::EPSILON); + } + + #[test] + fn test_hit_rate_all_hits() { + let stats = CacheStats { + hits: 10, + misses: 0, + additions: 0, + }; + assert!((stats.hit_rate() - 100.0).abs() < 0.01); + } + + #[test] + fn test_hit_rate_all_misses() { + let stats = CacheStats { + hits: 0, + misses: 10, + additions: 0, + }; + assert!(stats.hit_rate().abs() < f64::EPSILON); + } + + #[test] + fn test_clear_does_not_reset_stats() { + let cache = VerifiedCache::new(); + cache.insert([1u8; 32]); + let _ = cache.contains(&[1u8; 32]); // hit + let _ = cache.contains(&[2u8; 32]); // miss + + cache.clear(); + + // Stats should persist after clear + let stats = cache.stats(); + assert_eq!(stats.hits, 1); + assert_eq!(stats.misses, 1); + assert_eq!(stats.additions, 1); + } + + #[test] + fn test_concurrent_insert_and_contains() { + use std::sync::Arc; + use std::thread; + + let cache = Arc::new(VerifiedCache::with_capacity(1000)); + let mut handles = Vec::new(); + + // 10 threads inserting + for i in 0..10u8 { + let c = cache.clone(); + handles.push(thread::spawn(move || { + let xorname = [i; 32]; + c.insert(xorname); + })); + } + + // 10 threads checking + for i in 0..10u8 { + let c = cache.clone(); + handles.push(thread::spawn(move || { + let xorname = [i; 32]; + let _ = c.contains(&xorname); + })); + } + + for handle in handles { + handle.join().expect("thread panicked"); + } + + // All 10 should have been inserted + assert_eq!(cache.len(), 10); + } + + #[test] + fn test_cache_stats_copy() { + let stats = CacheStats { + hits: 5, + misses: 3, + additions: 8, + }; + let stats2 = stats; // Copy + assert_eq!(stats.hits, stats2.hits); + assert_eq!(stats.misses, stats2.misses); + assert_eq!(stats.additions, stats2.additions); + } } diff --git a/src/payment/metrics.rs b/src/payment/metrics.rs index deae6ca0..9342350e 100644 --- a/src/payment/metrics.rs +++ b/src/payment/metrics.rs @@ -12,6 +12,9 @@ use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::time::Instant; use tracing::{debug, info, warn}; +/// Number of operations between disk persists (debounce). +const PERSIST_INTERVAL: usize = 10; + /// Tracker for quoting metrics. /// /// Maintains state that influences quote pricing, including payment history, @@ -32,6 +35,8 @@ pub struct QuotingMetricsTracker { persist_path: Option, /// Estimated network size. network_size: AtomicU64, + /// Operations since last persist (for debouncing disk I/O). + ops_since_persist: AtomicUsize, } impl QuotingMetricsTracker { @@ -51,6 +56,7 @@ impl QuotingMetricsTracker { start_time: Instant::now(), persist_path: None, network_size: AtomicU64::new(500), // Conservative default + ops_since_persist: AtomicUsize::new(0), } } @@ -86,8 +92,8 @@ impl QuotingMetricsTracker { /// Record a payment received. pub fn record_payment(&self) { let count = self.received_payment_count.fetch_add(1, Ordering::SeqCst) + 1; - debug!("Payment received, total count: {}", count); - self.persist(); + debug!("Payment received, total count: {count}"); + self.maybe_persist(); } /// Record data stored. @@ -102,13 +108,13 @@ impl QuotingMetricsTracker { { let mut records = self.records_per_type.write(); if let Some(entry) = records.iter_mut().find(|(t, _)| *t == data_type) { - entry.1 += 1; + entry.1 = entry.1.saturating_add(1); } else { records.push((data_type, 1)); } } - self.persist(); + self.maybe_persist(); } /// Get the number of payments received. @@ -150,11 +156,19 @@ impl QuotingMetricsTracker { max_records: self.max_records, received_payment_count: self.received_payment_count.load(Ordering::SeqCst), live_time: self.live_time_hours(), - network_density: None, // TODO: Calculate from DHT + network_density: None, // Not used in pricing; reserved for future DHT range filtering network_size: Some(self.network_size.load(Ordering::SeqCst)), } } + /// Debounced persist: only writes to disk every `PERSIST_INTERVAL` operations. + fn maybe_persist(&self) { + let ops = self.ops_since_persist.fetch_add(1, Ordering::Relaxed); + if ops % PERSIST_INTERVAL == 0 { + self.persist(); + } + } + /// Persist metrics to disk. fn persist(&self) { if let Some(ref path) = self.persist_path { @@ -166,7 +180,7 @@ impl QuotingMetricsTracker { if let Ok(bytes) = rmp_serde::to_vec(&data) { if let Err(e) = std::fs::write(path, bytes) { - warn!("Failed to persist metrics: {}", e); + warn!("Failed to persist metrics: {e}"); } } } @@ -179,6 +193,12 @@ impl QuotingMetricsTracker { } } +impl Drop for QuotingMetricsTracker { + fn drop(&mut self) { + self.persist(); + } +} + /// Metrics persisted to disk. #[derive(Debug, serde::Serialize, serde::Deserialize)] struct PersistedMetrics { @@ -260,4 +280,97 @@ mod tests { assert_eq!(tracker.payment_count(), 2); assert_eq!(tracker.records_stored(), 1); } + + #[test] + fn test_live_time_hours() { + let tracker = QuotingMetricsTracker::new(1000, 0); + // Just started, so live_time should be 0 hours + assert_eq!(tracker.live_time_hours(), 0); + } + + #[test] + fn test_set_network_size() { + let tracker = QuotingMetricsTracker::new(1000, 0); + tracker.set_network_size(1000); + + let metrics = tracker.get_metrics(0, 0); + assert_eq!(metrics.network_size, Some(1000)); + } + + #[test] + fn test_records_per_type_multiple_types() { + let tracker = QuotingMetricsTracker::new(1000, 0); + + tracker.record_store(0); + tracker.record_store(0); + tracker.record_store(1); + tracker.record_store(2); + tracker.record_store(1); + + let metrics = tracker.get_metrics(0, 0); + assert_eq!(metrics.records_per_type.len(), 3); + + // Verify per-type counts + let type_0 = metrics.records_per_type.iter().find(|(t, _)| *t == 0); + let type_1 = metrics.records_per_type.iter().find(|(t, _)| *t == 1); + let type_2 = metrics.records_per_type.iter().find(|(t, _)| *t == 2); + + assert_eq!(type_0.expect("type 0 exists").1, 2); + assert_eq!(type_1.expect("type 1 exists").1, 2); + assert_eq!(type_2.expect("type 2 exists").1, 1); + } + + #[test] + fn test_persistence_round_trip_with_types() { + let dir = tempdir().expect("tempdir"); + let path = dir.path().join("metrics_types.bin"); + + { + let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + tracker.record_store(0); + tracker.record_store(0); + tracker.record_store(1); + tracker.record_payment(); + } + + let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + assert_eq!(tracker.payment_count(), 1); + assert_eq!(tracker.records_stored(), 3); // 2 type-0 + 1 type-1 + + let metrics = tracker.get_metrics(0, 0); + assert_eq!(metrics.records_per_type.len(), 2); + } + + #[test] + fn test_with_persistence_nonexistent_path() { + let dir = tempdir().expect("tempdir"); + let path = dir.path().join("nonexistent_subdir").join("metrics.bin"); + + // Should not panic — just starts with defaults + let tracker = QuotingMetricsTracker::with_persistence(1000, &path); + assert_eq!(tracker.payment_count(), 0); + assert_eq!(tracker.records_stored(), 0); + } + + #[test] + fn test_max_records_zero() { + let tracker = QuotingMetricsTracker::new(0, 0); + let metrics = tracker.get_metrics(1024, 0); + assert_eq!(metrics.max_records, 0); + } + + #[test] + fn test_get_metrics_passes_data_params() { + let tracker = QuotingMetricsTracker::new(1000, 0); + let metrics = tracker.get_metrics(4096, 3); + assert_eq!(metrics.data_size, 4096); + assert_eq!(metrics.data_type, 3); + } + + #[test] + fn test_default_network_size() { + let tracker = QuotingMetricsTracker::new(1000, 0); + let metrics = tracker.get_metrics(0, 0); + assert_eq!(metrics.network_size, Some(500)); + } } diff --git a/src/payment/mod.rs b/src/payment/mod.rs index 309227dd..312ec39f 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -4,6 +4,15 @@ //! 1. Check LRU cache for already-verified data //! 2. Require and verify EVM/Arbitrum payment for new data //! +//! # Default Policy +//! +//! **Production nodes require payment by default.** +//! +//! - `PaymentVerifierConfig::default()` has `evm.enabled = true` +//! - `PaymentConfig::default()` has `enabled = true` +//! - Test environments can disable via CLI flag `--disable-payment-verification` +//! - Test utilities explicitly disable EVM verification for unit tests +//! //! # Architecture //! //! ```text @@ -33,6 +42,8 @@ mod cache; pub mod metrics; +pub mod pricing; +pub mod proof; pub mod quote; pub mod single_node; mod verifier; @@ -40,7 +51,9 @@ pub mod wallet; pub use cache::{CacheStats, VerifiedCache}; pub use metrics::QuotingMetricsTracker; -pub use quote::{verify_quote_content, QuoteGenerator, XorName}; +pub use pricing::calculate_price; +pub use proof::{deserialize_proof, PaymentProof}; +pub use quote::{verify_quote_content, wire_ml_dsa_signer, QuoteGenerator, XorName}; pub use single_node::SingleNodePayment; pub use verifier::{EvmVerifierConfig, PaymentStatus, PaymentVerifier, PaymentVerifierConfig}; pub use wallet::{is_valid_address, parse_rewards_address, WalletConfig}; diff --git a/src/payment/pricing.rs b/src/payment/pricing.rs new file mode 100644 index 00000000..9041ddef --- /dev/null +++ b/src/payment/pricing.rs @@ -0,0 +1,296 @@ +//! Local fullness-based pricing algorithm for saorsa-node. +//! +//! Mirrors the logarithmic pricing curve from autonomi's `MerklePaymentVault` contract: +//! - Empty node → price ≈ `MIN_PRICE` (floor) +//! - Filling up → price increases logarithmically +//! - Nearly full → price spikes (ln(x) as x→0) +//! - At capacity → returns `u64::MAX` (effectively refuses new data) +//! +//! ## Design Rationale: Capacity-Based Pricing +//! +//! Pricing is based on node **fullness** (percentage of storage capacity used), +//! not on a fixed cost-per-byte. This design mirrors the autonomi +//! `MerklePaymentVault` on-chain contract and creates natural load balancing: +//! +//! - **Empty nodes** charge the minimum floor price, attracting new data +//! - **Nearly full nodes** charge exponentially more via the logarithmic curve +//! - **This pushes clients toward emptier nodes**, distributing data across the network +//! +//! A flat cost-per-byte model would not incentivize distribution — all nodes would +//! charge the same regardless of remaining capacity. The logarithmic curve ensures +//! the network self-balances as nodes fill up. + +use ant_evm::{Amount, QuotingMetrics}; + +/// Minimum price floor (matches contract's `minPrice = 3`). +const MIN_PRICE: u64 = 3; + +/// Scaling factor for the logarithmic pricing curve. +/// In the contract this is 1e18; we normalize to 1.0 for f64 arithmetic. +const SCALING_FACTOR: f64 = 1.0; + +/// ANT price constant (normalized to 1.0, matching contract's 1e18/1e18 ratio). +const ANT_PRICE: f64 = 1.0; + +/// Calculate a local price estimate from node quoting metrics. +/// +/// Implements the autonomi pricing formula: +/// ```text +/// price = (-s/ANT) * (ln|rUpper - 1| - ln|rLower - 1|) + pMin*(rUpper - rLower) - (rUpper - rLower)/ANT +/// ``` +/// +/// where: +/// - `rLower = total_cost_units / max_cost_units` (current fullness ratio) +/// - `rUpper = (total_cost_units + cost_unit) / max_cost_units` (fullness after storing) +/// - `s` = scaling factor, `ANT` = ANT price, `pMin` = minimum price +#[allow( + clippy::cast_precision_loss, + clippy::cast_possible_truncation, + clippy::cast_sign_loss +)] +#[must_use] +pub fn calculate_price(metrics: &QuotingMetrics) -> Amount { + let min_price = Amount::from(MIN_PRICE); + + // Edge case: zero or very small capacity + if metrics.max_records == 0 { + return min_price; + } + + // Use close_records_stored as the authoritative record count for pricing. + let total_records = metrics.close_records_stored as u64; + + let max_records = metrics.max_records as f64; + + // Normalize to [0, 1) range (matching contract's _getBound) + let r_lower = total_records as f64 / max_records; + // Adding one record (cost_unit = 1 normalized) + let r_upper = (total_records + 1) as f64 / max_records; + + // At capacity: return maximum price to effectively refuse new data + if r_lower >= 1.0 || r_upper >= 1.0 { + return Amount::from(u64::MAX); + } + if (r_upper - r_lower).abs() < f64::EPSILON { + return min_price; + } + + // Calculate |r - 1| for logarithm inputs + let upper_diff = (r_upper - 1.0).abs(); + let lower_diff = (r_lower - 1.0).abs(); + + // Avoid log(0) + if upper_diff < f64::EPSILON || lower_diff < f64::EPSILON { + return min_price; + } + + let log_upper = upper_diff.ln(); + let log_lower = lower_diff.ln(); + let log_diff = log_upper - log_lower; + + let linear_part = r_upper - r_lower; + + // Formula: price = (-s/ANT) * logDiff + pMin * linearPart - linearPart/ANT + let part_one = (-SCALING_FACTOR / ANT_PRICE) * log_diff; + let part_two = MIN_PRICE as f64 * linear_part; + let part_three = linear_part / ANT_PRICE; + + let price = part_one + part_two - part_three; + + if price <= 0.0 || !price.is_finite() { + return min_price; + } + + // Scale by data_size (larger data costs proportionally more) + let data_size_factor = metrics.data_size.max(1) as f64; + let scaled_price = price * data_size_factor; + + if !scaled_price.is_finite() { + return min_price; + } + + // Convert to Amount (U256), floor at MIN_PRICE + let price_u64 = if scaled_price > u64::MAX as f64 { + u64::MAX + } else { + (scaled_price as u64).max(MIN_PRICE) + }; + + Amount::from(price_u64) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used)] +mod tests { + use super::*; + + fn make_metrics( + records_stored: usize, + max_records: usize, + data_size: usize, + data_type: u32, + ) -> QuotingMetrics { + let records_per_type = if records_stored > 0 { + vec![(data_type, u32::try_from(records_stored).unwrap_or(u32::MAX))] + } else { + vec![] + }; + QuotingMetrics { + data_type, + data_size, + close_records_stored: records_stored, + records_per_type, + max_records, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: Some(500), + } + } + + #[test] + fn test_empty_node_gets_min_price() { + let metrics = make_metrics(0, 1000, 1, 0); + let price = calculate_price(&metrics); + // Empty node should return approximately MIN_PRICE + assert_eq!(price, Amount::from(MIN_PRICE)); + } + + #[test] + fn test_half_full_node_costs_more() { + let empty = make_metrics(0, 1000, 1024, 0); + let half = make_metrics(500, 1000, 1024, 0); + let price_empty = calculate_price(&empty); + let price_half = calculate_price(&half); + assert!( + price_half > price_empty, + "Half-full price ({price_half}) should exceed empty price ({price_empty})" + ); + } + + #[test] + fn test_nearly_full_node_costs_much_more() { + let half = make_metrics(500, 1000, 1024, 0); + let nearly_full = make_metrics(900, 1000, 1024, 0); + let price_half = calculate_price(&half); + let price_nearly_full = calculate_price(&nearly_full); + assert!( + price_nearly_full > price_half, + "Nearly-full price ({price_nearly_full}) should far exceed half-full price ({price_half})" + ); + } + + #[test] + fn test_full_node_returns_max_price() { + // At capacity (r_lower >= 1.0), effectively refuse new data with max price + let metrics = make_metrics(1000, 1000, 1024, 0); + let price = calculate_price(&metrics); + assert_eq!(price, Amount::from(u64::MAX)); + } + + #[test] + fn test_price_increases_monotonically() { + let max_records = 1000; + let data_size = 1024; + let mut prev_price = Amount::ZERO; + + // Check from 0% to 99% full + for pct in 0..100 { + let records = pct * max_records / 100; + let metrics = make_metrics(records, max_records, data_size, 0); + let price = calculate_price(&metrics); + assert!( + price >= prev_price, + "Price at {pct}% ({price}) should be >= price at previous step ({prev_price})" + ); + prev_price = price; + } + } + + #[test] + fn test_zero_max_records_returns_min_price() { + let metrics = make_metrics(0, 0, 1024, 0); + let price = calculate_price(&metrics); + assert_eq!(price, Amount::from(MIN_PRICE)); + } + + #[test] + fn test_different_data_sizes_same_fullness() { + let small = make_metrics(500, 1000, 100, 0); + let large = make_metrics(500, 1000, 10000, 0); + let price_small = calculate_price(&small); + let price_large = calculate_price(&large); + assert!( + price_large > price_small, + "Larger data ({price_large}) should cost more than smaller data ({price_small})" + ); + } + + #[test] + fn test_price_with_multiple_record_types() { + // 300 type-0 records + 200 type-1 records = 500 total out of 1000 + let metrics = QuotingMetrics { + data_type: 0, + data_size: 1024, + close_records_stored: 500, + records_per_type: vec![(0, 300), (1, 200)], + max_records: 1000, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: Some(500), + }; + let price_multi = calculate_price(&metrics); + + // Compare with single-type equivalent (500 of type 0) + let metrics_single = make_metrics(500, 1000, 1024, 0); + let price_single = calculate_price(&metrics_single); + + // Same total records → same price + assert_eq!(price_multi, price_single); + } + + #[test] + fn test_price_at_95_percent() { + let metrics = make_metrics(950, 1000, 1024, 0); + let price = calculate_price(&metrics); + let min = Amount::from(MIN_PRICE); + assert!( + price > min, + "Price at 95% should be above minimum, got {price}" + ); + } + + #[test] + fn test_price_at_99_percent() { + let metrics = make_metrics(990, 1000, 1024, 0); + let price = calculate_price(&metrics); + let price_95 = calculate_price(&make_metrics(950, 1000, 1024, 0)); + assert!( + price > price_95, + "Price at 99% ({price}) should exceed price at 95% ({price_95})" + ); + } + + #[test] + fn test_over_capacity_returns_max_price() { + // 1100 records stored but max is 1000 — over capacity + let metrics = make_metrics(1100, 1000, 1024, 0); + let price = calculate_price(&metrics); + assert_eq!( + price, + Amount::from(u64::MAX), + "Over-capacity should return max price" + ); + } + + #[test] + fn test_price_deterministic() { + let metrics = make_metrics(500, 1000, 1024, 0); + let price1 = calculate_price(&metrics); + let price2 = calculate_price(&metrics); + let price3 = calculate_price(&metrics); + assert_eq!(price1, price2); + assert_eq!(price2, price3); + } +} diff --git a/src/payment/proof.rs b/src/payment/proof.rs new file mode 100644 index 00000000..d0d1adc2 --- /dev/null +++ b/src/payment/proof.rs @@ -0,0 +1,132 @@ +//! Payment proof wrapper that includes transaction hashes. +//! +//! `PaymentProof` bundles a `ProofOfPayment` (quotes + peer IDs) with the +//! on-chain transaction hashes returned by the wallet after payment. + +use ant_evm::ProofOfPayment; +use evmlib::common::TxHash; +use serde::{Deserialize, Serialize}; + +/// A payment proof that includes both the quote-based proof and on-chain tx hashes. +/// +/// This replaces the bare `ProofOfPayment` in serialized proof bytes, adding +/// the transaction hashes that were previously discarded after `payment.pay()`. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PaymentProof { + /// The original quote-based proof (peer IDs + quotes with ML-DSA-65 signatures). + pub proof_of_payment: ProofOfPayment, + /// Transaction hashes from the on-chain payment. + /// Typically contains one hash for the median (non-zero) quote. + pub tx_hashes: Vec, +} + +/// Deserialize proof bytes from the `PaymentProof` format. +/// +/// Returns `(ProofOfPayment, Vec)`. +/// +/// # Errors +/// +/// Returns an error if the bytes cannot be deserialized. +pub fn deserialize_proof( + bytes: &[u8], +) -> std::result::Result<(ProofOfPayment, Vec), rmp_serde::decode::Error> { + let proof = rmp_serde::from_slice::(bytes)?; + Ok((proof.proof_of_payment, proof.tx_hashes)) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used)] +mod tests { + use super::*; + use alloy::primitives::FixedBytes; + use ant_evm::RewardsAddress; + use ant_evm::{EncodedPeerId, PaymentQuote}; + use evmlib::quoting_metrics::QuotingMetrics; + use libp2p::identity::Keypair; + use libp2p::PeerId; + use std::time::SystemTime; + use xor_name::XorName; + + fn make_test_quote() -> PaymentQuote { + PaymentQuote { + content: XorName::random(&mut rand::thread_rng()), + timestamp: SystemTime::now(), + quoting_metrics: QuotingMetrics { + data_size: 1024, + data_type: 0, + close_records_stored: 0, + records_per_type: vec![], + max_records: 1000, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: None, + }, + rewards_address: RewardsAddress::new([1u8; 20]), + pub_key: vec![], + signature: vec![], + } + } + + fn make_proof_of_payment() -> ProofOfPayment { + let keypair = Keypair::generate_ed25519(); + let peer_id = PeerId::from_public_key(&keypair.public()); + ProofOfPayment { + peer_quotes: vec![(EncodedPeerId::from(peer_id), make_test_quote())], + } + } + + #[test] + fn test_payment_proof_serialization_roundtrip() { + let tx_hash = FixedBytes::from([0xABu8; 32]); + let proof = PaymentProof { + proof_of_payment: make_proof_of_payment(), + tx_hashes: vec![tx_hash], + }; + + let bytes = rmp_serde::to_vec(&proof).unwrap(); + let (pop, hashes) = deserialize_proof(&bytes).unwrap(); + + assert_eq!(pop.peer_quotes.len(), 1); + assert_eq!(hashes.len(), 1); + assert_eq!(hashes.first().unwrap(), &tx_hash); + } + + #[test] + fn test_payment_proof_with_empty_tx_hashes() { + let proof = PaymentProof { + proof_of_payment: make_proof_of_payment(), + tx_hashes: vec![], + }; + + let bytes = rmp_serde::to_vec(&proof).unwrap(); + let (pop, hashes) = deserialize_proof(&bytes).unwrap(); + + assert_eq!(pop.peer_quotes.len(), 1); + assert!(hashes.is_empty()); + } + + #[test] + fn test_deserialize_proof_rejects_garbage() { + let garbage = vec![0xFF, 0x00, 0x01, 0x02]; + let result = deserialize_proof(&garbage); + assert!(result.is_err()); + } + + #[test] + fn test_payment_proof_multiple_tx_hashes() { + let tx1 = FixedBytes::from([0x11u8; 32]); + let tx2 = FixedBytes::from([0x22u8; 32]); + let proof = PaymentProof { + proof_of_payment: make_proof_of_payment(), + tx_hashes: vec![tx1, tx2], + }; + + let bytes = rmp_serde::to_vec(&proof).unwrap(); + let (_, hashes) = deserialize_proof(&bytes).unwrap(); + + assert_eq!(hashes.len(), 2); + assert_eq!(hashes.first().unwrap(), &tx1); + assert_eq!(hashes.get(1).unwrap(), &tx2); + } +} diff --git a/src/payment/quote.rs b/src/payment/quote.rs index c21507e1..fe9c61d1 100644 --- a/src/payment/quote.rs +++ b/src/payment/quote.rs @@ -7,9 +7,12 @@ //! capabilities from saorsa-core. This module provides the interface //! and will be fully integrated when the node is initialized. -use crate::error::Result; +use crate::error::{Error, Result}; use crate::payment::metrics::QuotingMetricsTracker; use ant_evm::{PaymentQuote, QuotingMetrics, RewardsAddress}; +use saorsa_core::MlDsa65; +use saorsa_pqc::pqc::types::{MlDsaPublicKey, MlDsaSecretKey, MlDsaSignature}; +use saorsa_pqc::pqc::MlDsaOperations; use std::time::SystemTime; use tracing::debug; @@ -74,6 +77,26 @@ impl QuoteGenerator { self.sign_fn.is_some() } + /// Probe the signer with test data to verify it produces a non-empty signature. + /// + /// # Errors + /// + /// Returns an error if no signer is set or if signing produces an empty signature. + pub fn probe_signer(&self) -> Result<()> { + let sign_fn = self + .sign_fn + .as_ref() + .ok_or_else(|| Error::Payment("Signer not set".to_string()))?; + let test_msg = b"saorsa-signing-probe"; + let test_sig = sign_fn(test_msg); + if test_sig.is_empty() { + return Err(Error::Payment( + "ML-DSA-65 signing probe failed: empty signature produced".to_string(), + )); + } + Ok(()) + } + /// Generate a payment quote for storing data. /// /// # Arguments @@ -95,9 +118,10 @@ impl QuoteGenerator { data_size: usize, data_type: u32, ) -> Result { - let sign_fn = self.sign_fn.as_ref().ok_or_else(|| { - crate::error::Error::Payment("Quote signing not configured".to_string()) - })?; + let sign_fn = self + .sign_fn + .as_ref() + .ok_or_else(|| Error::Payment("Quote signing not configured".to_string()))?; let timestamp = SystemTime::now(); @@ -117,6 +141,11 @@ impl QuoteGenerator { // Sign the bytes let signature = sign_fn(&bytes); + if signature.is_empty() { + return Err(Error::Payment( + "Signing produced empty signature".to_string(), + )); + } let quote = PaymentQuote { content: xor_name, @@ -127,12 +156,10 @@ impl QuoteGenerator { signature, }; - debug!( - "Generated quote for {} (size: {}, type: {})", - hex::encode(content), - data_size, - data_type - ); + if tracing::enabled!(tracing::Level::DEBUG) { + let content_hex = hex::encode(content); + debug!("Generated quote for {content_hex} (size: {data_size}, type: {data_type})"); + } Ok(quote) } @@ -160,7 +187,7 @@ impl QuoteGenerator { } } -/// Verify a payment quote signature. +/// Verify a payment quote's content address and ML-DSA-65 signature. /// /// # Arguments /// @@ -169,26 +196,115 @@ impl QuoteGenerator { /// /// # Returns /// -/// `true` if the content matches (signature verification requires public key). +/// `true` if the content matches and the ML-DSA-65 signature is valid. #[must_use] pub fn verify_quote_content(quote: &PaymentQuote, expected_content: &XorName) -> bool { // Check content matches if quote.content.0 != *expected_content { - debug!( - "Quote content mismatch: expected {}, got {}", - hex::encode(expected_content), - hex::encode(quote.content.0) - ); + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Quote content mismatch: expected {}, got {}", + hex::encode(expected_content), + hex::encode(quote.content.0) + ); + } return false; } true } +/// Verify that a payment quote has a valid ML-DSA-65 signature. +/// +/// This replaces ant-evm's `check_is_signed_by_claimed_peer()` which only +/// handles Ed25519/libp2p signatures. Saorsa uses ML-DSA-65 post-quantum +/// signatures for quote signing. +/// +/// # Arguments +/// +/// * `quote` - The quote to verify +/// +/// # Returns +/// +/// `true` if the ML-DSA-65 signature is valid for the quote's content. +#[must_use] +pub fn verify_quote_signature(quote: &PaymentQuote) -> bool { + // Parse public key from quote + let pub_key = match MlDsaPublicKey::from_bytes("e.pub_key) { + Ok(pk) => pk, + Err(e) => { + debug!("Failed to parse ML-DSA-65 public key from quote: {e}"); + return false; + } + }; + + // Parse signature from quote + let signature = match MlDsaSignature::from_bytes("e.signature) { + Ok(sig) => sig, + Err(e) => { + debug!("Failed to parse ML-DSA-65 signature from quote: {e}"); + return false; + } + }; + + // Get the bytes that were signed + let bytes = quote.bytes_for_sig(); + + // Verify using saorsa's ML-DSA-65 implementation + let ml_dsa = MlDsa65::new(); + match ml_dsa.verify(&pub_key, &bytes, &signature) { + Ok(valid) => { + if !valid { + debug!("ML-DSA-65 quote signature verification failed"); + } + valid + } + Err(e) => { + debug!("ML-DSA-65 verification error: {e}"); + false + } + } +} + +/// Wire ML-DSA-65 signing from a node identity into a `QuoteGenerator`. +/// +/// This is the shared setup used by both production nodes and devnet nodes +/// to configure quote signing from a `NodeIdentity`. +/// +/// # Arguments +/// +/// * `generator` - The quote generator to configure +/// * `identity` - The node identity providing signing keys +/// +/// # Errors +/// +/// Returns an error if the secret key cannot be deserialized or if the +/// signing probe (a test signature at startup) fails. +pub fn wire_ml_dsa_signer( + generator: &mut QuoteGenerator, + identity: &saorsa_core::identity::NodeIdentity, +) -> Result<()> { + let pub_key_bytes = identity.public_key().as_bytes().to_vec(); + let sk_bytes = identity.secret_key_bytes().to_vec(); + let sk = MlDsaSecretKey::from_bytes(&sk_bytes) + .map_err(|e| Error::Crypto(format!("Failed to deserialize ML-DSA-65 secret key: {e}")))?; + let ml_dsa = MlDsa65::new(); + generator.set_signer(pub_key_bytes, move |msg| match ml_dsa.sign(&sk, msg) { + Ok(sig) => sig.as_bytes().to_vec(), + Err(e) => { + tracing::error!("ML-DSA-65 signing failed: {e}"); + vec![] + } + }); + generator.probe_signer()?; + Ok(()) +} + #[cfg(test)] #[allow(clippy::expect_used)] mod tests { use super::*; use crate::payment::metrics::QuotingMetricsTracker; + use saorsa_pqc::pqc::types::MlDsaSecretKey; fn create_test_generator() -> QuoteGenerator { let rewards_address = RewardsAddress::new([1u8; 20]); @@ -248,4 +364,213 @@ mod tests { let result = generator.create_quote(content, 1024, 0); assert!(result.is_err()); } + + #[test] + fn test_quote_signature_round_trip_real_keys() { + let ml_dsa = MlDsa65::new(); + let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keypair generation"); + + let rewards_address = RewardsAddress::new([2u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + let pub_key_bytes = public_key.as_bytes().to_vec(); + let sk_bytes = secret_key.as_bytes().to_vec(); + generator.set_signer(pub_key_bytes, move |msg| { + let sk = MlDsaSecretKey::from_bytes(&sk_bytes).expect("secret key parse"); + let ml_dsa = MlDsa65::new(); + ml_dsa.sign(&sk, msg).expect("signing").as_bytes().to_vec() + }); + + let content = [7u8; 32]; + let quote = generator + .create_quote(content, 2048, 0) + .expect("create quote"); + + // Valid signature should verify + assert!(verify_quote_signature("e)); + + // Tamper with the signature — flip a byte + let mut tampered_quote = quote; + if let Some(byte) = tampered_quote.signature.first_mut() { + *byte ^= 0xFF; + } + assert!(!verify_quote_signature(&tampered_quote)); + } + + #[test] + fn test_empty_signature_fails_verification() { + let generator = create_test_generator(); + let content = [42u8; 32]; + + let quote = generator + .create_quote(content, 1024, 0) + .expect("create quote"); + + // The dummy signer produces a 64-byte fake signature, not a valid + // ML-DSA-65 signature (3309 bytes), so verification must fail. + assert!(!verify_quote_signature("e)); + } + + #[test] + fn test_rewards_address_getter() { + let addr = RewardsAddress::new([42u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let generator = QuoteGenerator::new(addr, metrics_tracker); + + assert_eq!(*generator.rewards_address(), addr); + } + + #[test] + fn test_current_metrics() { + let rewards_address = RewardsAddress::new([1u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(500, 50); + let generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + let metrics = generator.current_metrics(); + assert_eq!(metrics.max_records, 500); + assert_eq!(metrics.close_records_stored, 50); + assert_eq!(metrics.data_size, 0); + assert_eq!(metrics.data_type, 0); + } + + #[test] + fn test_record_payment_delegation() { + let rewards_address = RewardsAddress::new([1u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + generator.record_payment(); + generator.record_payment(); + + let metrics = generator.current_metrics(); + assert_eq!(metrics.received_payment_count, 2); + } + + #[test] + fn test_record_store_delegation() { + let rewards_address = RewardsAddress::new([1u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + generator.record_store(0); + generator.record_store(1); + generator.record_store(0); + + let metrics = generator.current_metrics(); + assert_eq!(metrics.close_records_stored, 3); + } + + #[test] + fn test_create_quote_different_data_types() { + let generator = create_test_generator(); + let content = [10u8; 32]; + + // Data type 0 (chunk) + let q0 = generator.create_quote(content, 1024, 0).expect("type 0"); + assert_eq!(q0.quoting_metrics.data_type, 0); + + // Data type 1 + let q1 = generator.create_quote(content, 512, 1).expect("type 1"); + assert_eq!(q1.quoting_metrics.data_type, 1); + + // Data type 2 + let q2 = generator.create_quote(content, 256, 2).expect("type 2"); + assert_eq!(q2.quoting_metrics.data_type, 2); + } + + #[test] + fn test_create_quote_zero_size() { + let generator = create_test_generator(); + let content = [11u8; 32]; + + let quote = generator.create_quote(content, 0, 0).expect("zero size"); + assert_eq!(quote.quoting_metrics.data_size, 0); + } + + #[test] + fn test_create_quote_large_size() { + let generator = create_test_generator(); + let content = [12u8; 32]; + + let quote = generator + .create_quote(content, 10_000_000, 0) + .expect("large size"); + assert_eq!(quote.quoting_metrics.data_size, 10_000_000); + } + + #[test] + fn test_verify_quote_signature_empty_pub_key() { + let quote = PaymentQuote { + content: xor_name::XorName([0u8; 32]), + timestamp: SystemTime::now(), + quoting_metrics: ant_evm::QuotingMetrics { + data_size: 0, + data_type: 0, + close_records_stored: 0, + records_per_type: vec![], + max_records: 0, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: None, + }, + rewards_address: RewardsAddress::new([0u8; 20]), + pub_key: vec![], + signature: vec![], + }; + + // Empty pub key should fail parsing + assert!(!verify_quote_signature("e)); + } + + #[test] + fn test_can_sign_after_set_signer() { + let rewards_address = RewardsAddress::new([1u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + assert!(!generator.can_sign()); + + generator.set_signer(vec![0u8; 32], |_| vec![0u8; 32]); + + assert!(generator.can_sign()); + } + + #[test] + fn test_wire_ml_dsa_signer_returns_ok_with_valid_identity() { + let identity = saorsa_core::identity::NodeIdentity::generate().expect("keypair generation"); + let rewards_address = RewardsAddress::new([3u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + let result = wire_ml_dsa_signer(&mut generator, &identity); + assert!( + result.is_ok(), + "wire_ml_dsa_signer should succeed: {result:?}" + ); + assert!(generator.can_sign()); + } + + #[test] + fn test_probe_signer_fails_without_signer() { + let rewards_address = RewardsAddress::new([1u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + let result = generator.probe_signer(); + assert!(result.is_err()); + } + + #[test] + fn test_probe_signer_fails_with_empty_signature() { + let rewards_address = RewardsAddress::new([1u8; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + generator.set_signer(vec![0u8; 32], |_| vec![]); + + let result = generator.probe_signer(); + assert!(result.is_err()); + } } diff --git a/src/payment/single_node.rs b/src/payment/single_node.rs index fc4b504b..d336bc07 100644 --- a/src/payment/single_node.rs +++ b/src/payment/single_node.rs @@ -20,6 +20,23 @@ use tracing::info; /// Required number of quotes for `SingleNode` payment (matches `CLOSE_GROUP_SIZE`) pub const REQUIRED_QUOTES: usize = 5; +/// Create zero-valued `QuotingMetrics` for payment verification. +/// +/// The contract doesn't validate metric values, so we use zeroes. +fn zero_quoting_metrics() -> QuotingMetrics { + QuotingMetrics { + data_size: 0, + data_type: 0, + close_records_stored: 0, + records_per_type: vec![], + max_records: 0, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: None, + } +} + /// Index of the median-priced node after sorting const MEDIAN_INDEX: usize = 2; @@ -64,11 +81,10 @@ impl SingleNodePayment { /// /// Returns error if not exactly 5 quotes are provided. pub fn from_quotes(mut quotes_with_prices: Vec<(PaymentQuote, Amount)>) -> Result { - if quotes_with_prices.len() != REQUIRED_QUOTES { + let len = quotes_with_prices.len(); + if len != REQUIRED_QUOTES { return Err(Error::Payment(format!( - "SingleNode payment requires exactly {} quotes, got {}", - REQUIRED_QUOTES, - quotes_with_prices.len() + "SingleNode payment requires exactly {REQUIRED_QUOTES} quotes, got {len}" ))); } @@ -78,7 +94,11 @@ impl SingleNodePayment { // Get median price and calculate 3x let median_price = quotes_with_prices .get(MEDIAN_INDEX) - .ok_or_else(|| Error::Payment("Missing median quote".to_string()))? + .ok_or_else(|| { + Error::Payment(format!( + "Missing median quote at index {MEDIAN_INDEX}: expected {REQUIRED_QUOTES} quotes but get() failed" + )) + })? .1; let enhanced_price = median_price .checked_mul(Amount::from(3u64)) @@ -119,11 +139,11 @@ impl SingleNodePayment { /// Get the median quote that receives payment. /// - /// This always returns a valid reference since the array is fixed-size - /// and `MEDIAN_INDEX` is guaranteed to be in bounds. + /// Returns `None` only if the internal array is somehow shorter than `MEDIAN_INDEX`, + /// which should never happen since the array is fixed-size `[_; REQUIRED_QUOTES]`. #[must_use] - pub fn paid_quote(&self) -> &QuotePaymentInfo { - &self.quotes[MEDIAN_INDEX] + pub fn paid_quote(&self) -> Option<&QuotePaymentInfo> { + self.quotes.get(MEDIAN_INDEX) } /// Pay for all quotes on-chain using the wallet. @@ -154,29 +174,23 @@ impl SingleNodePayment { }, )?; - // Collect transaction hashes for all quotes - // Note: wallet may not return tx_hash for zero-amount payments - let result_hashes: Vec<_> = self - .quotes - .iter() - .filter_map(|quote_info| { - if let Some(&tx_hash) = tx_hashes.get("e_info.quote_hash) { - Some(Ok(tx_hash)) - } else if quote_info.amount != Amount::ZERO { - // Non-zero amount should have a transaction hash - Some(Err(Error::Payment(format!( - "Missing transaction hash for non-zero quote {} (amount: {})", - quote_info.quote_hash, quote_info.amount - )))) - } else { - // Zero-amount payments may not get a transaction - None - } - }) - .collect::>>()?; + // Collect transaction hashes only for non-zero amount quotes + // Zero-amount quotes don't generate on-chain transactions + let mut result_hashes = Vec::new(); + for quote_info in &self.quotes { + if quote_info.amount > Amount::ZERO { + let tx_hash = tx_hashes.get("e_info.quote_hash).ok_or_else(|| { + Error::Payment(format!( + "Missing transaction hash for non-zero quote {}", + quote_info.quote_hash + )) + })?; + result_hashes.push(*tx_hash); + } + } info!( - "Payment successful: {} transactions (expected 1-5)", + "Payment successful: {} on-chain transactions", result_hashes.len() ); @@ -205,24 +219,12 @@ impl SingleNodePayment { network: &EvmNetwork, owned_quote_hash: Option, ) -> Result { - // Use zero metrics for verification (contract doesn't validate them) - let zero_metrics = QuotingMetrics { - data_size: 0, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 0, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - }; - // Build payment digest for all 5 quotes + // Each quote needs an owned QuotingMetrics (tuple requires ownership) let payment_digest: Vec<_> = self .quotes .iter() - .map(|q| (q.quote_hash, zero_metrics.clone(), q.rewards_address)) + .map(|q| (q.quote_hash, zero_quoting_metrics(), q.rewards_address)) .collect(); // Mark owned quotes @@ -269,10 +271,35 @@ mod tests { use alloy::node_bindings::{Anvil, AnvilInstance}; use evmlib::contract::payment_vault::interface; use evmlib::quoting_metrics::QuotingMetrics; - use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract}; + use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, Testnet}; use evmlib::transaction_config::TransactionConfig; use evmlib::utils::{dummy_address, dummy_hash}; + use evmlib::wallet::Wallet; use reqwest::Url; + use serial_test::serial; + use std::time::SystemTime; + use xor_name::XorName; + + fn make_test_quote(rewards_addr_seed: u8) -> PaymentQuote { + PaymentQuote { + content: XorName::random(&mut rand::thread_rng()), + timestamp: SystemTime::now(), + quoting_metrics: QuotingMetrics { + data_size: 1024, + data_type: 0, + close_records_stored: 0, + records_per_type: vec![], + max_records: 1000, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: None, + }, + rewards_address: RewardsAddress::new([rewards_addr_seed; 20]), + pub_key: vec![], + signature: vec![], + } + } /// Start an Anvil node with increased timeout for CI environments. /// @@ -298,10 +325,11 @@ mod tests { (anvil, url) } - /// Step 1: Exact copy of autonomi's `test_verify_payment_on_local` + /// Test: Standard 5-quote payment verification (autonomi baseline) #[tokio::test] + #[serial] #[allow(clippy::expect_used)] - async fn test_exact_copy_of_autonomi_verify_payment() { + async fn test_standard_five_quote_payment() { // Use autonomi's setup pattern with increased timeout for CI let (node, rpc_url) = start_node_with_timeout(); let network_token = deploy_network_token_contract(&rpc_url, &node).await; @@ -346,18 +374,7 @@ mod tests { let payment_verifications: Vec<_> = quote_payments .into_iter() .map(|v| interface::IPaymentVault::PaymentVerification { - metrics: QuotingMetrics { - data_size: 0, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 0, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - } - .into(), + metrics: zero_quoting_metrics().into(), rewardsAddress: v.1, quoteHash: v.0, }) @@ -372,14 +389,15 @@ mod tests { assert!(result.isValid, "Payment verification should be valid"); } - println!("✓ All {} payments verified successfully", 5); - println!("\n✅ Exact autonomi pattern works!"); + println!("✓ All 5 payments verified successfully"); + println!("\n✅ Standard 5-quote payment works!"); } - /// Step 3: Pay 3x for ONE quote and 0 for the other 4 (`SingleNode` mode) + /// Test: `SingleNode` payment strategy (1 real + 4 dummy payments) #[tokio::test] + #[serial] #[allow(clippy::expect_used)] - async fn test_step3_single_node_payment_pattern() { + async fn test_single_node_payment_strategy() { let (node, rpc_url) = start_node_with_timeout(); let network_token = deploy_network_token_contract(&rpc_url, &node).await; let mut payment_vault = @@ -429,18 +447,7 @@ mod tests { let payment_verifications: Vec<_> = quote_payments .into_iter() .map(|v| interface::IPaymentVault::PaymentVerification { - metrics: QuotingMetrics { - data_size: 0, - data_type: 0, - close_records_stored: 0, - records_per_type: vec![], - max_records: 0, - received_payment_count: 0, - live_time: 0, - network_density: None, - network_size: None, - } - .into(), + metrics: zero_quoting_metrics().into(), rewardsAddress: v.1, quoteHash: v.0, }) @@ -463,17 +470,143 @@ mod tests { println!(" Dummy payment {}: valid={}", i + 1, result.isValid); } - println!("\n✅ Step 3: SingleNode pattern (1 real + 4 dummy) works!"); + println!("\n✅ SingleNode payment strategy works!"); } - /// Step 4: Complete `SingleNode` payment flow with real quotes - #[tokio::test] - async fn test_step4_complete_single_node_payment_flow() -> Result<()> { - use evmlib::testnet::Testnet; - use evmlib::wallet::Wallet; - use std::time::SystemTime; - use xor_name::XorName; + #[test] + #[allow(clippy::unwrap_used)] + fn test_from_quotes_median_selection() { + let prices: Vec = vec![50, 30, 10, 40, 20]; + let mut quotes_with_prices = Vec::new(); + + for price in &prices { + let quote = PaymentQuote { + content: XorName::random(&mut rand::thread_rng()), + timestamp: SystemTime::now(), + quoting_metrics: QuotingMetrics { + data_size: 1024, + data_type: 0, + close_records_stored: 0, + records_per_type: vec![(0, 10)], + max_records: 1000, + received_payment_count: 5, + live_time: 3600, + network_density: None, + network_size: Some(100), + }, + rewards_address: RewardsAddress::new([1u8; 20]), + pub_key: vec![], + signature: vec![], + }; + quotes_with_prices.push((quote, Amount::from(*price))); + } + + let payment = SingleNodePayment::from_quotes(quotes_with_prices).unwrap(); + // After sorting by price: 10, 20, 30, 40, 50 + // Median (index 2) = 30, paid amount = 3 * 30 = 90 + let median_quote = payment.quotes.get(MEDIAN_INDEX).unwrap(); + assert_eq!(median_quote.amount, Amount::from(90u64)); + + // Other 4 quotes should have Amount::ZERO + for (i, q) in payment.quotes.iter().enumerate() { + if i != MEDIAN_INDEX { + assert_eq!(q.amount, Amount::ZERO); + } + } + + // Total should be 3 * median price = 90 + assert_eq!(payment.total_amount(), Amount::from(90u64)); + } + + #[test] + fn test_from_quotes_wrong_count() { + let quotes: Vec<_> = (0..3) + .map(|_| (make_test_quote(1), Amount::from(10u64))) + .collect(); + let result = SingleNodePayment::from_quotes(quotes); + assert!(result.is_err()); + } + + #[test] + #[allow(clippy::expect_used)] + fn test_from_quotes_zero_quotes() { + let result = SingleNodePayment::from_quotes(vec![]); + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!(err_msg.contains("exactly 5")); + } + + #[test] + fn test_from_quotes_one_quote() { + let result = + SingleNodePayment::from_quotes(vec![(make_test_quote(1), Amount::from(10u64))]); + assert!(result.is_err()); + } + + #[test] + #[allow(clippy::expect_used)] + fn test_from_quotes_six_quotes() { + let quotes: Vec<_> = (0..6) + .map(|_| (make_test_quote(1), Amount::from(10u64))) + .collect(); + let result = SingleNodePayment::from_quotes(quotes); + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!(err_msg.contains("exactly 5")); + } + + #[test] + #[allow(clippy::unwrap_used)] + fn test_paid_quote_returns_median() { + let quotes: Vec<_> = (0..5u8) + .map(|i| (make_test_quote(i + 1), Amount::from(u64::from(i + 1) * 10))) + .collect(); + + let payment = SingleNodePayment::from_quotes(quotes).unwrap(); + let paid = payment.paid_quote().unwrap(); + + // The paid quote should have a non-zero amount + assert!(paid.amount > Amount::ZERO); + + // Total amount should equal the paid quote's amount + assert_eq!(payment.total_amount(), paid.amount); + } + + #[test] + #[allow(clippy::unwrap_used)] + fn test_all_quotes_have_distinct_addresses() { + let quotes: Vec<_> = (0..5u8) + .map(|i| (make_test_quote(i + 1), Amount::from(u64::from(i + 1) * 10))) + .collect(); + + let payment = SingleNodePayment::from_quotes(quotes).unwrap(); + + // Verify all 5 quotes are present (sorting doesn't lose data) + let mut addresses: Vec<_> = payment.quotes.iter().map(|q| q.rewards_address).collect(); + addresses.sort(); + addresses.dedup(); + assert_eq!(addresses.len(), 5); + } + + #[test] + #[allow(clippy::unwrap_used)] + fn test_total_amount_equals_3x_median() { + let prices = [100u64, 200, 300, 400, 500]; + let quotes: Vec<_> = prices + .iter() + .map(|price| (make_test_quote(1), Amount::from(*price))) + .collect(); + + let payment = SingleNodePayment::from_quotes(quotes).unwrap(); + // Sorted: 100, 200, 300, 400, 500 — median = 300, total = 3 * 300 = 900 + assert_eq!(payment.total_amount(), Amount::from(900u64)); + } + + /// Test: Complete `SingleNode` flow with real contract prices + #[tokio::test] + #[serial] + async fn test_single_node_with_real_prices() -> Result<()> { // Setup testnet let testnet = Testnet::new().await; let network = testnet.to_network(); @@ -514,12 +647,19 @@ mod tests { }; // Get market price for this quote + // PERF-004: Clone required - payment_vault::get_market_price (external API from evmlib) + // takes ownership of Vec. We need quoting_metrics again below for + // PaymentQuote construction, so the clone is unavoidable. let prices = payment_vault::get_market_price(&network, vec![quoting_metrics.clone()]) .await .map_err(|e| Error::Payment(format!("Failed to get market price: {e}")))?; let price = prices.first().ok_or_else(|| { - Error::Payment("Empty price list from get_market_price".to_string()) + Error::Payment(format!( + "Empty price list from get_market_price for quote {}: expected at least 1 price but got {} elements", + i, + prices.len() + )) })?; let quote = PaymentQuote { @@ -541,6 +681,7 @@ mod tests { let median_price = payment .paid_quote() + .ok_or_else(|| Error::Payment("Missing paid quote at median index".to_string()))? .amount .checked_div(Amount::from(3u64)) .ok_or_else(|| Error::Payment("Failed to calculate median price".to_string()))?; @@ -550,7 +691,13 @@ mod tests { let median_amount = payment .quotes .get(MEDIAN_INDEX) - .ok_or_else(|| Error::Payment("Missing median quote".to_string()))? + .ok_or_else(|| { + Error::Payment(format!( + "Index out of bounds: tried to access median index {} but quotes array has {} elements", + MEDIAN_INDEX, + payment.quotes.len() + )) + })? .amount; assert_eq!( payment.total_amount(), @@ -571,7 +718,13 @@ mod tests { let median_quote = payment .quotes .get(MEDIAN_INDEX) - .ok_or_else(|| Error::Payment("Missing median quote".to_string()))?; + .ok_or_else(|| { + Error::Payment(format!( + "Index out of bounds: tried to access median index {} but quotes array has {} elements", + MEDIAN_INDEX, + payment.quotes.len() + )) + })?; let median_quote_hash = median_quote.quote_hash; let verified_amount = payment.verify(&network, Some(median_quote_hash)).await?; @@ -581,7 +734,7 @@ mod tests { ); println!("✓ Payment verified: {verified_amount} atto"); - println!("\n✅ Step 4: Complete SingleNode flow with real quotes works!"); + println!("\n✅ Complete SingleNode flow with real prices works!"); Ok(()) } diff --git a/src/payment/verifier.rs b/src/payment/verifier.rs index 9176a0ae..910599f5 100644 --- a/src/payment/verifier.rs +++ b/src/payment/verifier.rs @@ -4,10 +4,38 @@ //! All new data requires EVM payment on Arbitrum (no free tier). use crate::error::{Error, Result}; -use crate::payment::cache::{VerifiedCache, XorName}; -use ant_evm::ProofOfPayment; +use crate::payment::cache::{CacheStats, VerifiedCache, XorName}; +use crate::payment::proof::deserialize_proof; +use crate::payment::quote::{verify_quote_content, verify_quote_signature}; +use crate::payment::single_node::REQUIRED_QUOTES; +use ant_evm::{ProofOfPayment, RewardsAddress}; +use evmlib::contract::payment_vault::error::Error as PaymentVaultError; +use evmlib::contract::payment_vault::verify_data_payment; use evmlib::Network as EvmNetwork; -use tracing::{debug, info, warn}; +use saorsa_core::identity::node_identity::peer_id_from_public_key_bytes; +use std::time::SystemTime; +use tracing::{debug, info}; + +/// Minimum allowed size for a payment proof in bytes. +/// +/// This minimum ensures the proof contains at least a basic cryptographic hash or identifier. +/// Proofs smaller than this are rejected as they cannot contain sufficient payment information. +const MIN_PAYMENT_PROOF_SIZE_BYTES: usize = 32; + +/// Maximum allowed size for a payment proof in bytes (100 KB). +/// +/// A `ProofOfPayment` with 5 ML-DSA-65 quotes can reach ~30 KB (each quote carries a +/// ~1,952-byte public key and a 3,309-byte signature plus metadata). 100 KB provides +/// headroom for future fields while still capping memory during verification. +const MAX_PAYMENT_PROOF_SIZE_BYTES: usize = 102_400; + +/// Maximum age of a payment quote before it's considered expired (24 hours). +/// Prevents replaying old cheap quotes against nearly-full nodes. +const QUOTE_MAX_AGE_SECS: u64 = 86_400; + +/// Maximum allowed clock skew for quote timestamps (60 seconds). +/// Accounts for NTP synchronization differences between P2P nodes. +const QUOTE_CLOCK_SKEW_TOLERANCE_SECS: u64 = 60; /// Configuration for EVM payment verification. #[derive(Debug, Clone)] @@ -37,6 +65,9 @@ pub struct PaymentVerifierConfig { pub evm: EvmVerifierConfig, /// Cache capacity (number of `XorName` values to cache). pub cache_capacity: usize, + /// Local node's rewards address. + /// When set, the verifier rejects payments that don't include this node as a recipient. + pub local_rewards_address: Option, } impl Default for PaymentVerifierConfig { @@ -44,6 +75,7 @@ impl Default for PaymentVerifierConfig { Self { evm: EvmVerifierConfig::default(), cache_capacity: 100_000, + local_rewards_address: None, } } } @@ -91,10 +123,9 @@ impl PaymentVerifier { pub fn new(config: PaymentVerifierConfig) -> Self { let cache = VerifiedCache::with_capacity(config.cache_capacity); - info!( - "Payment verifier initialized (cache_capacity={}, evm_enabled={})", - config.cache_capacity, config.evm.enabled - ); + let cache_capacity = config.cache_capacity; + let evm_enabled = config.evm.enabled; + info!("Payment verifier initialized (cache_capacity={cache_capacity}, evm_enabled={evm_enabled})"); Self { cache, config } } @@ -116,15 +147,19 @@ impl PaymentVerifier { pub fn check_payment_required(&self, xorname: &XorName) -> PaymentStatus { // Check LRU cache (fast path) if self.cache.contains(xorname) { - debug!("Data {} found in verified cache", hex::encode(xorname)); + if tracing::enabled!(tracing::Level::DEBUG) { + debug!("Data {} found in verified cache", hex::encode(xorname)); + } return PaymentStatus::CachedAsVerified; } // Not in cache - payment required - debug!( - "Data {} not in cache - payment required", - hex::encode(xorname) - ); + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Data {} not in cache - payment required", + hex::encode(xorname) + ); + } PaymentStatus::PaymentRequired } @@ -161,46 +196,65 @@ impl PaymentVerifier { Ok(status) } PaymentStatus::PaymentRequired => { - // Payment is required - verify the proof - match payment_proof { - Some(proof) => { - if proof.is_empty() { - return Err(Error::Payment("Empty payment proof".to_string())); - } - - // Deserialize the ProofOfPayment - let payment: ProofOfPayment = - rmp_serde::from_slice(proof).map_err(|e| { - Error::Payment(format!("Failed to deserialize payment proof: {e}")) - })?; + // Test/devnet mode: EVM disabled - accept with or without proof + if !self.config.evm.enabled { + if tracing::enabled!(tracing::Level::DEBUG) { + debug!( + "Test mode: Allowing storage without EVM verification (EVM disabled): {}", + hex::encode(xorname) + ); + } + self.cache.insert(*xorname); + return Ok(PaymentStatus::PaymentVerified); + } - // Verify the payment using EVM - self.verify_evm_payment(xorname, &payment).await?; + // Production mode: EVM enabled - verify the proof + if let Some(proof) = payment_proof { + let proof_len = proof.len(); + if proof_len < MIN_PAYMENT_PROOF_SIZE_BYTES { + return Err(Error::Payment(format!( + "Payment proof too small: {proof_len} bytes (min {MIN_PAYMENT_PROOF_SIZE_BYTES})" + ))); + } + if proof_len > MAX_PAYMENT_PROOF_SIZE_BYTES { + return Err(Error::Payment(format!( + "Payment proof too large: {proof_len} bytes (max {MAX_PAYMENT_PROOF_SIZE_BYTES} bytes)" + ))); + } - // Cache the verified xorname - self.cache.insert(*xorname); + // Deserialize the proof (supports both new PaymentProof and legacy ProofOfPayment) + let (payment, tx_hashes) = deserialize_proof(proof).map_err(|e| { + Error::Payment(format!("Failed to deserialize payment proof: {e}")) + })?; - Ok(PaymentStatus::PaymentVerified) - } - None => { - // No payment provided - Err(Error::Payment(format!( - "Payment required for new data {}", - hex::encode(xorname) - ))) + if !tx_hashes.is_empty() { + debug!("Proof includes {} transaction hash(es)", tx_hashes.len()); } + + // Verify the payment using EVM + self.verify_evm_payment(xorname, &payment).await?; + + // Cache the verified xorname + self.cache.insert(*xorname); + + Ok(PaymentStatus::PaymentVerified) + } else { + // No payment provided in production mode + Err(Error::Payment(format!( + "Payment required for new data {}", + hex::encode(xorname) + ))) } } - PaymentStatus::PaymentVerified => { - // This shouldn't happen from check_payment_required - Ok(status) - } + PaymentStatus::PaymentVerified => Err(Error::Payment( + "Unexpected PaymentVerified status from check_payment_required".to_string(), + )), } } /// Get cache statistics. #[must_use] - pub fn cache_stats(&self) -> crate::payment::cache::CacheStats { + pub fn cache_stats(&self) -> CacheStats { self.cache.stats() } @@ -218,69 +272,192 @@ impl PaymentVerifier { /// Verify an EVM payment proof. /// - /// This verifies that: - /// 1. All quote signatures are valid - /// 2. The payment was made on-chain + /// This is production-only verification that ALWAYS validates payment proofs. + /// It verifies that: + /// 1. All quotes target the correct content address (xorname binding) + /// 2. All quote ML-DSA-65 signatures are valid (offloaded to a blocking + /// thread via `spawn_blocking` since post-quantum signature verification + /// is CPU-intensive) + /// 3. The payment was made on-chain via the EVM payment vault contract + /// + /// Test environments should disable EVM at the `verify_payment` level, + /// not bypass verification here. async fn verify_evm_payment(&self, xorname: &XorName, payment: &ProofOfPayment) -> Result<()> { - debug!( - "Verifying EVM payment for {} with {} quotes", - hex::encode(xorname), - payment.peer_quotes.len() - ); - - // Skip EVM verification if disabled - if !self.config.evm.enabled { - warn!("EVM verification disabled - accepting payment without on-chain check"); - return Ok(()); + if tracing::enabled!(tracing::Level::DEBUG) { + let xorname_hex = hex::encode(xorname); + let quote_count = payment.peer_quotes.len(); + debug!("Verifying EVM payment for {xorname_hex} with {quote_count} quotes"); } - // Verify quote signatures first (doesn't require network) - for (encoded_peer_id, quote) in &payment.peer_quotes { - let peer_id = encoded_peer_id - .to_peer_id() - .map_err(|e| Error::Payment(format!("Invalid peer ID in payment proof: {e}")))?; + debug_assert!(self.config.evm.enabled); - if !quote.check_is_signed_by_claimed_peer(peer_id) { - return Err(Error::Payment(format!( - "Quote signature invalid for peer {peer_id}" - ))); + Self::validate_quote_structure(payment)?; + Self::validate_quote_content(payment, xorname)?; + Self::validate_quote_timestamps(payment)?; + Self::validate_peer_bindings(payment)?; + self.validate_local_recipient(payment)?; + + // Verify quote signatures (CPU-bound, run off async runtime) + let peer_quotes = payment.peer_quotes.clone(); + tokio::task::spawn_blocking(move || { + for (encoded_peer_id, quote) in &peer_quotes { + if !verify_quote_signature(quote) { + return Err(Error::Payment( + format!("Quote ML-DSA-65 signature verification failed for peer {encoded_peer_id:?}"), + )); + } } - } + Ok(()) + }) + .await + .map_err(|e| Error::Payment(format!("Signature verification task failed: {e}")))??; - // Get the payment digest for on-chain verification + // Verify on-chain payment let payment_digest = payment.digest(); - if payment_digest.is_empty() { return Err(Error::Payment("Payment has no quotes".to_string())); } - // Verify on-chain payment - // Note: We pass empty owned_quote_hashes because we're not a node claiming payment, - // we just want to verify the payment is valid let owned_quote_hashes = vec![]; - match evmlib::contract::payment_vault::verify_data_payment( - &self.config.evm.network, - owned_quote_hashes, - payment_digest, - ) - .await + match verify_data_payment(&self.config.evm.network, owned_quote_hashes, payment_digest) + .await { Ok(_amount) => { - info!("EVM payment verified for {}", hex::encode(xorname)); + if tracing::enabled!(tracing::Level::INFO) { + info!("EVM payment verified for {}", hex::encode(xorname)); + } Ok(()) } - Err(evmlib::contract::payment_vault::error::Error::PaymentInvalid) => { - Err(Error::Payment(format!( - "Payment verification failed on-chain for {}", - hex::encode(xorname) - ))) - } + Err(PaymentVaultError::PaymentInvalid) => Err(Error::Payment(format!( + "Payment verification failed on-chain for {}", + hex::encode(xorname) + ))), Err(e) => Err(Error::Payment(format!( "EVM verification error for {}: {e}", hex::encode(xorname) ))), } } + + /// Validate quote count, uniqueness, and basic structure. + fn validate_quote_structure(payment: &ProofOfPayment) -> Result<()> { + if payment.peer_quotes.is_empty() { + return Err(Error::Payment("Payment has no quotes".to_string())); + } + + let quote_count = payment.peer_quotes.len(); + if quote_count != REQUIRED_QUOTES { + return Err(Error::Payment(format!( + "Payment must have exactly {REQUIRED_QUOTES} quotes, got {quote_count}" + ))); + } + + let mut seen: Vec<&ant_evm::EncodedPeerId> = Vec::with_capacity(quote_count); + for (encoded_peer_id, _) in &payment.peer_quotes { + if seen.contains(&encoded_peer_id) { + return Err(Error::Payment(format!( + "Duplicate peer ID in payment quotes: {encoded_peer_id:?}" + ))); + } + seen.push(encoded_peer_id); + } + + Ok(()) + } + + /// Verify all quotes target the correct content address. + fn validate_quote_content(payment: &ProofOfPayment, xorname: &XorName) -> Result<()> { + for (encoded_peer_id, quote) in &payment.peer_quotes { + if !verify_quote_content(quote, xorname) { + return Err(Error::Payment(format!( + "Quote content address mismatch for peer {encoded_peer_id:?}: expected {}, got {}", + hex::encode(xorname), + hex::encode(quote.content.0) + ))); + } + } + Ok(()) + } + + /// Verify quote freshness — reject stale or excessively future quotes. + fn validate_quote_timestamps(payment: &ProofOfPayment) -> Result<()> { + let now = SystemTime::now(); + for (encoded_peer_id, quote) in &payment.peer_quotes { + match now.duration_since(quote.timestamp) { + Ok(age) => { + if age.as_secs() > QUOTE_MAX_AGE_SECS { + return Err(Error::Payment(format!( + "Quote from peer {encoded_peer_id:?} expired: age {}s exceeds max {QUOTE_MAX_AGE_SECS}s", + age.as_secs() + ))); + } + } + Err(_) => { + if let Ok(skew) = quote.timestamp.duration_since(now) { + if skew.as_secs() > QUOTE_CLOCK_SKEW_TOLERANCE_SECS { + return Err(Error::Payment(format!( + "Quote from peer {encoded_peer_id:?} has timestamp {}s in the future \ + (exceeds {QUOTE_CLOCK_SKEW_TOLERANCE_SECS}s tolerance)", + skew.as_secs() + ))); + } + } else { + return Err(Error::Payment(format!( + "Quote from peer {encoded_peer_id:?} has invalid timestamp" + ))); + } + } + } + } + Ok(()) + } + + /// Verify each quote's `pub_key` matches the claimed peer ID via BLAKE3. + fn validate_peer_bindings(payment: &ProofOfPayment) -> Result<()> { + for (encoded_peer_id, quote) in &payment.peer_quotes { + let expected_peer_id = peer_id_from_public_key_bytes("e.pub_key) + .map_err(|e| Error::Payment(format!("Invalid ML-DSA public key in quote: {e}")))?; + + let libp2p_peer_id = encoded_peer_id + .to_peer_id() + .map_err(|e| Error::Payment(format!("Invalid encoded peer ID: {e}")))?; + let peer_id_bytes = libp2p_peer_id.to_bytes(); + let raw_peer_bytes = if peer_id_bytes.len() > 2 { + &peer_id_bytes[2..] + } else { + return Err(Error::Payment(format!( + "Invalid encoded peer ID: too short ({} bytes)", + peer_id_bytes.len() + ))); + }; + + if expected_peer_id.as_bytes() != raw_peer_bytes { + return Err(Error::Payment(format!( + "Quote pub_key does not belong to claimed peer {encoded_peer_id:?}: \ + BLAKE3(pub_key) = {}, peer_id = {}", + expected_peer_id.to_hex(), + hex::encode(raw_peer_bytes) + ))); + } + } + Ok(()) + } + + /// Verify this node is among the paid recipients. + fn validate_local_recipient(&self, payment: &ProofOfPayment) -> Result<()> { + if let Some(ref local_addr) = self.config.local_rewards_address { + let is_recipient = payment + .peer_quotes + .iter() + .any(|(_, quote)| quote.rewards_address == *local_addr); + if !is_recipient { + return Err(Error::Payment( + "Payment proof does not include this node as a recipient".to_string(), + )); + } + } + Ok(()) + } } #[cfg(test)] @@ -295,6 +472,19 @@ mod tests { ..Default::default() }, cache_capacity: 100, + local_rewards_address: None, + }; + PaymentVerifier::new(config) + } + + fn create_evm_enabled_verifier() -> PaymentVerifier { + let config = PaymentVerifierConfig { + evm: EvmVerifierConfig { + enabled: true, + network: EvmNetwork::ArbitrumOne, + }, + cache_capacity: 100, + local_rewards_address: None, }; PaymentVerifier::new(config) } @@ -327,9 +517,14 @@ mod tests { let verifier = create_test_verifier(); let xorname = [1u8; 32]; - // Should fail without payment proof + // Test mode (EVM disabled): Should SUCCEED without payment proof + // This allows tests to run without needing real EVM payments let result = verifier.verify_payment(&xorname, None).await; - assert!(result.is_err()); + assert!(result.is_ok(), "Expected Ok in test mode, got: {result:?}"); + assert_eq!( + result.expect("should succeed"), + PaymentStatus::PaymentVerified + ); } #[tokio::test] @@ -337,17 +532,22 @@ mod tests { let verifier = create_test_verifier(); let xorname = [1u8; 32]; - // Create a valid (but empty) ProofOfPayment + // Create a properly-sized proof let proof = ProofOfPayment { peer_quotes: vec![], }; - let proof_bytes = rmp_serde::to_vec(&proof).expect("should serialize"); + let mut proof_bytes = rmp_serde::to_vec(&proof).expect("should serialize"); + // Pad to minimum required size to pass validation + proof_bytes.resize(MIN_PAYMENT_PROOF_SIZE_BYTES, 0); - // Should succeed with a valid proof when EVM verification is disabled - // Note: With EVM verification disabled, even empty proofs pass + // EVM disabled (test/devnet mode): should SUCCEED even with a proof present. + // When EVM is disabled, the verifier skips on-chain checks and accepts storage. let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; - assert!(result.is_ok(), "Expected Ok, got: {result:?}"); - assert_eq!(result.expect("verified"), PaymentStatus::PaymentVerified); + assert!(result.is_ok(), "Expected Ok in test mode, got: {result:?}"); + assert_eq!( + result.expect("should succeed"), + PaymentStatus::PaymentVerified + ); } #[tokio::test] @@ -377,4 +577,617 @@ mod tests { assert!(!PaymentStatus::PaymentVerified.is_cached()); assert!(!PaymentStatus::PaymentRequired.is_cached()); } + + #[tokio::test] + async fn test_verifier_caches_after_successful_verification() { + let verifier = create_test_verifier(); + let xorname = [42u8; 32]; + + // Not yet cached — should require payment + assert_eq!( + verifier.check_payment_required(&xorname), + PaymentStatus::PaymentRequired + ); + + // Verify payment (EVM disabled, so it succeeds and caches) + let result = verifier.verify_payment(&xorname, None).await; + assert!(result.is_ok()); + assert_eq!(result.expect("verified"), PaymentStatus::PaymentVerified); + + // Now the xorname should be cached + assert_eq!( + verifier.check_payment_required(&xorname), + PaymentStatus::CachedAsVerified + ); + } + + #[tokio::test] + async fn test_verifier_rejects_without_proof_when_evm_enabled() { + let verifier = create_evm_enabled_verifier(); + let xorname = [99u8; 32]; + + // EVM enabled + no proof provided => should return an error + let result = verifier.verify_payment(&xorname, None).await; + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_proof_too_small() { + let verifier = create_evm_enabled_verifier(); + let xorname = [1u8; 32]; + + // Proof smaller than MIN_PAYMENT_PROOF_SIZE_BYTES + let small_proof = vec![0u8; MIN_PAYMENT_PROOF_SIZE_BYTES - 1]; + let result = verifier.verify_payment(&xorname, Some(&small_proof)).await; + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("too small"), + "Error should mention 'too small': {err_msg}" + ); + } + + #[tokio::test] + async fn test_proof_too_large() { + let verifier = create_evm_enabled_verifier(); + let xorname = [2u8; 32]; + + // Proof larger than MAX_PAYMENT_PROOF_SIZE_BYTES + let large_proof = vec![0u8; MAX_PAYMENT_PROOF_SIZE_BYTES + 1]; + let result = verifier.verify_payment(&xorname, Some(&large_proof)).await; + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("too large"), + "Error should mention 'too large': {err_msg}" + ); + } + + #[tokio::test] + async fn test_proof_at_min_boundary() { + let verifier = create_evm_enabled_verifier(); + let xorname = [3u8; 32]; + + // Exactly MIN_PAYMENT_PROOF_SIZE_BYTES — passes size check, but + // will fail deserialization (not valid msgpack) + let boundary_proof = vec![0xFFu8; MIN_PAYMENT_PROOF_SIZE_BYTES]; + let result = verifier + .verify_payment(&xorname, Some(&boundary_proof)) + .await; + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail deser")); + assert!( + err_msg.contains("deserialize"), + "Error should mention deserialization: {err_msg}" + ); + } + + #[tokio::test] + async fn test_proof_at_max_boundary() { + let verifier = create_evm_enabled_verifier(); + let xorname = [4u8; 32]; + + // Exactly MAX_PAYMENT_PROOF_SIZE_BYTES — passes size check, but + // will fail deserialization + let boundary_proof = vec![0xFFu8; MAX_PAYMENT_PROOF_SIZE_BYTES]; + let result = verifier + .verify_payment(&xorname, Some(&boundary_proof)) + .await; + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail deser")); + assert!( + err_msg.contains("deserialize"), + "Error should mention deserialization: {err_msg}" + ); + } + + #[tokio::test] + async fn test_malformed_msgpack_proof() { + let verifier = create_evm_enabled_verifier(); + let xorname = [5u8; 32]; + + // Valid size but garbage bytes — should fail deserialization + let garbage = vec![0xAB; 64]; + let result = verifier.verify_payment(&xorname, Some(&garbage)).await; + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!(err_msg.contains("deserialize")); + } + + #[test] + fn test_evm_enabled_getter() { + let verifier = create_test_verifier(); + assert!(!verifier.evm_enabled()); + + let verifier = create_evm_enabled_verifier(); + assert!(verifier.evm_enabled()); + } + + #[test] + fn test_cache_len_getter() { + let verifier = create_test_verifier(); + assert_eq!(verifier.cache_len(), 0); + + verifier.cache.insert([10u8; 32]); + assert_eq!(verifier.cache_len(), 1); + + verifier.cache.insert([20u8; 32]); + assert_eq!(verifier.cache_len(), 2); + } + + #[test] + fn test_cache_stats_after_operations() { + let verifier = create_test_verifier(); + let xorname = [7u8; 32]; + + // Miss + verifier.check_payment_required(&xorname); + let stats = verifier.cache_stats(); + assert_eq!(stats.misses, 1); + assert_eq!(stats.hits, 0); + + // Insert and hit + verifier.cache.insert(xorname); + verifier.check_payment_required(&xorname); + let stats = verifier.cache_stats(); + assert_eq!(stats.hits, 1); + assert_eq!(stats.misses, 1); + assert_eq!(stats.additions, 1); + } + + #[tokio::test] + async fn test_concurrent_verify_payment() { + let verifier = std::sync::Arc::new(create_test_verifier()); + let mut handles = Vec::new(); + + for i in 0..10u8 { + let v = verifier.clone(); + handles.push(tokio::spawn(async move { + let xorname = [i; 32]; + v.verify_payment(&xorname, None).await + })); + } + + for handle in handles { + let result = handle.await.expect("task panicked"); + assert!(result.is_ok()); + } + + // All 10 should be cached + assert_eq!(verifier.cache_len(), 10); + } + + #[test] + fn test_default_config() { + let config = PaymentVerifierConfig::default(); + assert!(config.evm.enabled); + assert_eq!(config.cache_capacity, 100_000); + } + + #[test] + fn test_default_evm_config() { + let config = EvmVerifierConfig::default(); + assert!(config.enabled); + } + + #[test] + fn test_real_ml_dsa_proof_size_within_limits() { + use crate::payment::metrics::QuotingMetricsTracker; + use crate::payment::proof::PaymentProof; + use crate::payment::quote::{QuoteGenerator, XorName}; + use alloy::primitives::FixedBytes; + use ant_evm::{EncodedPeerId, RewardsAddress}; + use saorsa_core::MlDsa65; + use saorsa_pqc::pqc::types::MlDsaSecretKey; + use saorsa_pqc::pqc::MlDsaOperations; + + let ml_dsa = MlDsa65::new(); + let mut peer_quotes = Vec::new(); + + for i in 0..5u8 { + let (public_key, secret_key) = ml_dsa.generate_keypair().expect("keygen"); + + let rewards_address = RewardsAddress::new([i; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 0); + let mut generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + let pub_key_bytes = public_key.as_bytes().to_vec(); + let sk_bytes = secret_key.as_bytes().to_vec(); + generator.set_signer(pub_key_bytes, move |msg| { + let sk = MlDsaSecretKey::from_bytes(&sk_bytes).expect("sk parse"); + let ml_dsa = MlDsa65::new(); + ml_dsa.sign(&sk, msg).expect("sign").as_bytes().to_vec() + }); + + let content: XorName = [i; 32]; + let quote = generator.create_quote(content, 4096, 0).expect("quote"); + + let keypair = libp2p::identity::Keypair::generate_ed25519(); + let peer_id = libp2p::PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote)); + } + + let proof = PaymentProof { + proof_of_payment: ProofOfPayment { peer_quotes }, + tx_hashes: vec![FixedBytes::from([0xABu8; 32])], + }; + + let proof_bytes = rmp_serde::to_vec(&proof).expect("serialize"); + + // 5 ML-DSA-65 quotes with ~1952-byte pub keys and ~3309-byte signatures + // should produce a proof in the 20-60 KB range + assert!( + proof_bytes.len() > 20_000, + "Real 5-quote ML-DSA proof should be > 20 KB, got {} bytes", + proof_bytes.len() + ); + assert!( + proof_bytes.len() < MAX_PAYMENT_PROOF_SIZE_BYTES, + "Real 5-quote ML-DSA proof ({} bytes) should fit within {} byte limit", + proof_bytes.len(), + MAX_PAYMENT_PROOF_SIZE_BYTES + ); + } + + #[tokio::test] + async fn test_content_address_mismatch_rejected() { + use crate::payment::proof::PaymentProof; + use ant_evm::{EncodedPeerId, PaymentQuote, QuotingMetrics, RewardsAddress}; + use libp2p::identity::Keypair; + use libp2p::PeerId; + use std::time::SystemTime; + + let verifier = create_evm_enabled_verifier(); + + // The xorname we're trying to store + let target_xorname = [0xAAu8; 32]; + + // Create a quote for a DIFFERENT xorname + let wrong_xorname = [0xBBu8; 32]; + let quote = PaymentQuote { + content: xor_name::XorName(wrong_xorname), + timestamp: SystemTime::now(), + quoting_metrics: QuotingMetrics { + data_size: 1024, + data_type: 0, + close_records_stored: 0, + records_per_type: vec![], + max_records: 1000, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: None, + }, + rewards_address: RewardsAddress::new([1u8; 20]), + pub_key: vec![0u8; 64], + signature: vec![0u8; 64], + }; + + // Build 5 quotes with distinct peer IDs (required by REQUIRED_QUOTES enforcement) + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let keypair = Keypair::generate_ed25519(); + let peer_id = PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote.clone())); + } + let payment = ProofOfPayment { peer_quotes }; + + let proof = PaymentProof { + proof_of_payment: payment, + tx_hashes: vec![], + }; + + let proof_bytes = rmp_serde::to_vec(&proof).expect("serialize proof"); + + let result = verifier + .verify_payment(&target_xorname, Some(&proof_bytes)) + .await; + + assert!(result.is_err(), "Should reject mismatched content address"); + let err_msg = format!("{}", result.expect_err("should be error")); + assert!( + err_msg.contains("content address mismatch"), + "Error should mention 'content address mismatch': {err_msg}" + ); + } + + /// Helper: create a fake quote with the given xorname and timestamp. + fn make_fake_quote( + xorname: [u8; 32], + timestamp: SystemTime, + rewards_address: RewardsAddress, + ) -> ant_evm::PaymentQuote { + use ant_evm::{PaymentQuote, QuotingMetrics}; + + PaymentQuote { + content: xor_name::XorName(xorname), + timestamp, + quoting_metrics: QuotingMetrics { + data_size: 1024, + data_type: 0, + close_records_stored: 0, + records_per_type: vec![], + max_records: 1000, + received_payment_count: 0, + live_time: 0, + network_density: None, + network_size: None, + }, + rewards_address, + pub_key: vec![0u8; 64], + signature: vec![0u8; 64], + } + } + + /// Helper: wrap quotes into a serialized `PaymentProof`. + fn serialize_proof( + peer_quotes: Vec<(ant_evm::EncodedPeerId, ant_evm::PaymentQuote)>, + ) -> Vec { + use crate::payment::proof::PaymentProof; + + let proof = PaymentProof { + proof_of_payment: ProofOfPayment { peer_quotes }, + tx_hashes: vec![], + }; + rmp_serde::to_vec(&proof).expect("serialize proof") + } + + #[tokio::test] + async fn test_expired_quote_rejected() { + use ant_evm::{EncodedPeerId, RewardsAddress}; + use std::time::Duration; + + let verifier = create_evm_enabled_verifier(); + let xorname = [0xCCu8; 32]; + let rewards_addr = RewardsAddress::new([1u8; 20]); + + // Create a quote that's 25 hours old (exceeds 24-hour max) + let old_timestamp = SystemTime::now() - Duration::from_secs(25 * 3600); + let quote = make_fake_quote(xorname, old_timestamp, rewards_addr); + + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let keypair = libp2p::identity::Keypair::generate_ed25519(); + let peer_id = libp2p::PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote.clone())); + } + + let proof_bytes = serialize_proof(peer_quotes); + let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; + + assert!(result.is_err(), "Should reject expired quote"); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("expired"), + "Error should mention 'expired': {err_msg}" + ); + } + + #[tokio::test] + async fn test_future_timestamp_rejected() { + use ant_evm::{EncodedPeerId, RewardsAddress}; + use std::time::Duration; + + let verifier = create_evm_enabled_verifier(); + let xorname = [0xDDu8; 32]; + let rewards_addr = RewardsAddress::new([1u8; 20]); + + // Create a quote with a timestamp 1 hour in the future + let future_timestamp = SystemTime::now() + Duration::from_secs(3600); + let quote = make_fake_quote(xorname, future_timestamp, rewards_addr); + + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let keypair = libp2p::identity::Keypair::generate_ed25519(); + let peer_id = libp2p::PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote.clone())); + } + + let proof_bytes = serialize_proof(peer_quotes); + let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; + + assert!(result.is_err(), "Should reject future-timestamped quote"); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("future"), + "Error should mention 'future': {err_msg}" + ); + } + + #[tokio::test] + async fn test_quote_within_clock_skew_tolerance_accepted() { + use ant_evm::{EncodedPeerId, RewardsAddress}; + use std::time::Duration; + + let verifier = create_evm_enabled_verifier(); + let xorname = [0xD1u8; 32]; + let rewards_addr = RewardsAddress::new([1u8; 20]); + + // Quote 30 seconds in the future — within 60s tolerance + let future_timestamp = SystemTime::now() + Duration::from_secs(30); + let quote = make_fake_quote(xorname, future_timestamp, rewards_addr); + + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let keypair = libp2p::identity::Keypair::generate_ed25519(); + let peer_id = libp2p::PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote.clone())); + } + + let proof_bytes = serialize_proof(peer_quotes); + let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; + + // Should NOT fail at timestamp check (will fail later at pub_key binding) + let err_msg = format!("{}", result.expect_err("should fail at later check")); + assert!( + !err_msg.contains("future"), + "Should pass timestamp check (within tolerance), but got: {err_msg}" + ); + } + + #[tokio::test] + async fn test_quote_just_beyond_clock_skew_tolerance_rejected() { + use ant_evm::{EncodedPeerId, RewardsAddress}; + use std::time::Duration; + + let verifier = create_evm_enabled_verifier(); + let xorname = [0xD2u8; 32]; + let rewards_addr = RewardsAddress::new([1u8; 20]); + + // Quote 120 seconds in the future — exceeds 60s tolerance + let future_timestamp = SystemTime::now() + Duration::from_secs(120); + let quote = make_fake_quote(xorname, future_timestamp, rewards_addr); + + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let keypair = libp2p::identity::Keypair::generate_ed25519(); + let peer_id = libp2p::PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote.clone())); + } + + let proof_bytes = serialize_proof(peer_quotes); + let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; + + assert!( + result.is_err(), + "Should reject quote beyond clock skew tolerance" + ); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("future"), + "Error should mention 'future': {err_msg}" + ); + } + + #[tokio::test] + async fn test_quote_23h_old_still_accepted() { + use ant_evm::{EncodedPeerId, RewardsAddress}; + use std::time::Duration; + + let verifier = create_evm_enabled_verifier(); + let xorname = [0xD3u8; 32]; + let rewards_addr = RewardsAddress::new([1u8; 20]); + + // Quote 23 hours old — within 24h max age + let old_timestamp = SystemTime::now() - Duration::from_secs(23 * 3600); + let quote = make_fake_quote(xorname, old_timestamp, rewards_addr); + + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let keypair = libp2p::identity::Keypair::generate_ed25519(); + let peer_id = libp2p::PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote.clone())); + } + + let proof_bytes = serialize_proof(peer_quotes); + let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; + + // Should NOT fail at timestamp check (will fail later at pub_key binding) + let err_msg = format!("{}", result.expect_err("should fail at later check")); + assert!( + !err_msg.contains("expired"), + "Should pass expiry check (23h < 24h), but got: {err_msg}" + ); + } + + /// Helper: build an `EncodedPeerId` that matches the BLAKE3 hash of an ML-DSA public key. + fn encoded_peer_id_for_pub_key(pub_key: &[u8]) -> ant_evm::EncodedPeerId { + let saorsa_peer_id = peer_id_from_public_key_bytes(pub_key).expect("valid ML-DSA pub key"); + // Wrap raw 32-byte peer ID in identity multihash format: [0x00, length, ...bytes] + let raw = saorsa_peer_id.as_bytes(); + let mut multihash_bytes = Vec::with_capacity(2 + raw.len()); + multihash_bytes.push(0x00); // identity multihash code + // PeerId is always 32 bytes, safely fits in u8 + multihash_bytes.push(u8::try_from(raw.len()).unwrap_or(32)); + multihash_bytes.extend_from_slice(raw); + let libp2p_peer_id = + libp2p::PeerId::from_bytes(&multihash_bytes).expect("valid multihash peer ID"); + ant_evm::EncodedPeerId::from(libp2p_peer_id) + } + + #[tokio::test] + async fn test_local_not_in_paid_set_rejected() { + use ant_evm::RewardsAddress; + use saorsa_core::MlDsa65; + use saorsa_pqc::pqc::MlDsaOperations; + + // Verifier with a local rewards address set + let local_addr = RewardsAddress::new([0xAAu8; 20]); + let config = PaymentVerifierConfig { + evm: EvmVerifierConfig { + enabled: true, + network: EvmNetwork::ArbitrumOne, + }, + cache_capacity: 100, + local_rewards_address: Some(local_addr), + }; + let verifier = PaymentVerifier::new(config); + + let xorname = [0xEEu8; 32]; + // Quotes pay a DIFFERENT rewards address + let other_addr = RewardsAddress::new([0xBBu8; 20]); + + // Use real ML-DSA keys so the pub_key→peer_id binding check passes + let ml_dsa = MlDsa65::new(); + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let (public_key, _secret_key) = ml_dsa.generate_keypair().expect("keygen"); + let pub_key_bytes = public_key.as_bytes().to_vec(); + let encoded = encoded_peer_id_for_pub_key(&pub_key_bytes); + + let mut quote = make_fake_quote(xorname, SystemTime::now(), other_addr); + quote.pub_key = pub_key_bytes; + + peer_quotes.push((encoded, quote)); + } + + let proof_bytes = serialize_proof(peer_quotes); + let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; + + assert!(result.is_err(), "Should reject payment not addressed to us"); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("does not include this node as a recipient"), + "Error should mention recipient rejection: {err_msg}" + ); + } + + #[tokio::test] + async fn test_wrong_peer_binding_rejected() { + use ant_evm::{EncodedPeerId, RewardsAddress}; + use saorsa_core::MlDsa65; + use saorsa_pqc::pqc::MlDsaOperations; + + let verifier = create_evm_enabled_verifier(); + let xorname = [0xFFu8; 32]; + let rewards_addr = RewardsAddress::new([1u8; 20]); + + // Generate a real ML-DSA keypair so pub_key is valid + let ml_dsa = MlDsa65::new(); + let (public_key, _secret_key) = ml_dsa.generate_keypair().expect("keygen"); + let pub_key_bytes = public_key.as_bytes().to_vec(); + + // Create a quote with a real pub_key but attach it to a random peer ID + // whose identity multihash does NOT match BLAKE3(pub_key) + let mut quote = make_fake_quote(xorname, SystemTime::now(), rewards_addr); + quote.pub_key = pub_key_bytes; + + // Use random ed25519 peer IDs — they won't match BLAKE3(pub_key) + let mut peer_quotes = Vec::new(); + for _ in 0..5 { + let keypair = libp2p::identity::Keypair::generate_ed25519(); + let peer_id = libp2p::PeerId::from_public_key(&keypair.public()); + peer_quotes.push((EncodedPeerId::from(peer_id), quote.clone())); + } + + let proof_bytes = serialize_proof(peer_quotes); + let result = verifier.verify_payment(&xorname, Some(&proof_bytes)).await; + + assert!(result.is_err(), "Should reject wrong peer binding"); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!( + err_msg.contains("pub_key does not belong to claimed peer"), + "Error should mention binding mismatch: {err_msg}" + ); + } } diff --git a/src/payment/wallet.rs b/src/payment/wallet.rs index 93a92d4d..c0554e63 100644 --- a/src/payment/wallet.rs +++ b/src/payment/wallet.rs @@ -78,10 +78,10 @@ pub fn parse_rewards_address(address: &str) -> Result { ))); } - if address.len() != 42 { + let len = address.len(); + if len != 42 { return Err(Error::Payment(format!( - "Invalid rewards address length: expected 42 characters, got {}", - address.len() + "Invalid rewards address length: expected 42 characters, got {len}", ))); } @@ -186,4 +186,76 @@ mod tests { assert!(!config.has_rewards_address()); assert!(config.is_mainnet()); } + + #[test] + fn test_uppercase_0x_prefix() { + let address = "0X742d35Cc6634C0532925a3b844Bc9e7595916Da2"; + let result = parse_rewards_address(address); + assert!(result.is_ok()); + } + + #[test] + fn test_empty_string() { + let result = parse_rewards_address(""); + assert!(result.is_err()); + } + + #[test] + fn test_just_0x_prefix() { + let result = parse_rewards_address("0x"); + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!(err_msg.contains("length")); + } + + #[test] + fn test_address_with_spaces() { + let result = parse_rewards_address("0x 742d35Cc6634C0532925a3b844Bc9e7595916Da"); + assert!(result.is_err()); + } + + #[test] + fn test_get_rewards_address_none() { + let config = WalletConfig::new(None, EvmNetworkConfig::ArbitrumOne).expect("valid config"); + assert!(config.get_rewards_address().is_none()); + } + + #[test] + fn test_get_rewards_address_some() { + let config = WalletConfig::new( + Some("0x742d35Cc6634C0532925a3b844Bc9e7595916Da2"), + EvmNetworkConfig::ArbitrumOne, + ) + .expect("valid config"); + assert!(config.get_rewards_address().is_some()); + } + + #[test] + fn test_all_zeros_address() { + let address = "0x0000000000000000000000000000000000000000"; + let result = parse_rewards_address(address); + assert!(result.is_ok()); + } + + #[test] + fn test_all_ff_address() { + let address = "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"; + let result = parse_rewards_address(address); + assert!(result.is_ok()); + } + + #[test] + fn test_too_long_address() { + let address = "0x742d35Cc6634C0532925a3b844Bc9e7595916Da2a"; + let result = parse_rewards_address(address); + assert!(result.is_err()); + let err_msg = format!("{}", result.expect_err("should fail")); + assert!(err_msg.contains("length")); + } + + #[test] + fn test_wallet_config_invalid_address() { + let result = WalletConfig::new(Some("invalid"), EvmNetworkConfig::ArbitrumOne); + assert!(result.is_err()); + } } diff --git a/src/probe.rs b/src/probe.rs deleted file mode 100644 index 173555b7..00000000 --- a/src/probe.rs +++ /dev/null @@ -1,33 +0,0 @@ -#[cfg(test)] -#[allow(clippy::unwrap_used, clippy::expect_used)] -mod probe_tests { - use saorsa_core::{IPDiversityConfig, NodeConfig as CoreNodeConfig, P2PNode, ProductionConfig}; - - #[tokio::test] - #[ignore = "Exploration test - requires network binding"] - async fn probe_apis() { - // Probe CoreNodeConfig fields - let core_config = CoreNodeConfig::new().unwrap(); - println!("CoreConfig: {core_config:?}"); - - // Probe DiversityConfig - let diversity = IPDiversityConfig::default(); - println!("Diversity: {diversity:?}"); - - // Probe ProductionConfig - let prod = ProductionConfig::default(); - println!("Production: {prod:?}"); - - // Probe P2PNode for verifier setter - // We'll try to call a method that looks like what we want, and see suggestions - let node = P2PNode::new(core_config).await.unwrap(); - - // API exploration - these methods don't exist, commented out - // node.set_verifier(()); - // node.register_verifier(()); - // node.set_payment_verifier(()); - - // Verify node created successfully - drop(node); - } -} diff --git a/src/storage/handler.rs b/src/storage/handler.rs index 3389a882..2c8e58ff 100644 --- a/src/storage/handler.rs +++ b/src/storage/handler.rs @@ -130,7 +130,8 @@ impl AntProtocol { /// Handle a PUT request. async fn handle_put(&self, request: ChunkPutRequest) -> ChunkPutResponse { let address = request.address; - debug!("Handling PUT request for {}", hex::encode(address)); + let addr_hex = hex::encode(address); + debug!("Handling PUT request for {addr_hex}"); // 1. Validate chunk size if request.content.len() > MAX_CHUNK_SIZE { @@ -152,7 +153,7 @@ impl AntProtocol { // 3. Check if already exists (idempotent success) match self.storage.exists(&address) { Ok(true) => { - debug!("Chunk {} already exists", hex::encode(address)); + debug!("Chunk {addr_hex} already exists"); return ChunkPutResponse::AlreadyExists { address }; } Err(e) => { @@ -186,17 +187,15 @@ impl AntProtocol { // 5. Store chunk match self.storage.put(&address, &request.content).await { Ok(_) => { - info!( - "Stored chunk {} ({} bytes)", - hex::encode(address), - request.content.len() - ); - // Record the store in metrics + let content_len = request.content.len(); + info!("Stored chunk {addr_hex} ({content_len} bytes)"); + // Record the store and payment in metrics self.quote_generator.record_store(DATA_TYPE_CHUNK); + self.quote_generator.record_payment(); ChunkPutResponse::Success { address } } Err(e) => { - warn!("Failed to store chunk {}: {}", hex::encode(address), e); + warn!("Failed to store chunk {addr_hex}: {e}"); ChunkPutResponse::Error(ProtocolError::StorageFailed(e.to_string())) } } @@ -205,23 +204,21 @@ impl AntProtocol { /// Handle a GET request. async fn handle_get(&self, request: ChunkGetRequest) -> ChunkGetResponse { let address = request.address; - debug!("Handling GET request for {}", hex::encode(address)); + let addr_hex = hex::encode(address); + debug!("Handling GET request for {addr_hex}"); match self.storage.get(&address).await { Ok(Some(content)) => { - debug!( - "Retrieved chunk {} ({} bytes)", - hex::encode(address), - content.len() - ); + let content_len = content.len(); + debug!("Retrieved chunk {addr_hex} ({content_len} bytes)"); ChunkGetResponse::Success { address, content } } Ok(None) => { - debug!("Chunk {} not found", hex::encode(address)); + debug!("Chunk {addr_hex} not found"); ChunkGetResponse::NotFound { address } } Err(e) => { - warn!("Failed to retrieve chunk {}: {}", hex::encode(address), e); + warn!("Failed to retrieve chunk {addr_hex}: {e}"); ChunkGetResponse::Error(ProtocolError::StorageFailed(e.to_string())) } } @@ -229,11 +226,9 @@ impl AntProtocol { /// Handle a quote request. fn handle_quote(&self, request: &ChunkQuoteRequest) -> ChunkQuoteResponse { - debug!( - "Handling quote request for {} (size: {})", - hex::encode(request.address), - request.data_size - ); + let addr_hex = hex::encode(request.address); + let data_size = request.data_size; + debug!("Handling quote request for {addr_hex} (size: {data_size})"); // Validate data size - data_size is u64, cast carefully and reject overflow let Ok(data_size_usize) = usize::try_from(request.data_size) else { @@ -298,11 +293,12 @@ impl AntProtocol { /// Store a chunk directly to local storage (bypasses payment verification). /// - /// This is useful for testing or when payment has been verified elsewhere. + /// TEST ONLY - This method bypasses payment verification and should only be used in tests. /// /// # Errors /// /// Returns an error if storage fails or content doesn't match address. + #[cfg(test)] pub async fn put_local(&self, address: &[u8; 32], content: &[u8]) -> Result { self.storage.put(address, content).await } @@ -339,6 +335,7 @@ mod tests { ..Default::default() }, cache_capacity: 100, + local_rewards_address: None, }; let payment_verifier = Arc::new(PaymentVerifier::new(payment_config)); @@ -357,15 +354,8 @@ mod tests { let content = b"hello world"; let address = LmdbStorage::compute_address(content); - // Create PUT request - with empty payment proof (EVM disabled) - let put_request = ChunkPutRequest::with_payment( - address, - content.to_vec(), - rmp_serde::to_vec(&ant_evm::ProofOfPayment { - peer_quotes: vec![], - }) - .unwrap(), - ); + // Create PUT request - no payment proof needed (EVM disabled in test) + let put_request = ChunkPutRequest::new(address, content.to_vec()); let put_msg = ChunkMessage { request_id: 1, body: ChunkMessageBody::PutRequest(put_request), @@ -451,14 +441,8 @@ mod tests { let content = b"test content"; let wrong_address = [0xFF; 32]; // Wrong address - let put_request = ChunkPutRequest::with_payment( - wrong_address, - content.to_vec(), - rmp_serde::to_vec(&ant_evm::ProofOfPayment { - peer_quotes: vec![], - }) - .unwrap(), - ); + // No payment proof needed (EVM disabled in test) + let put_request = ChunkPutRequest::new(wrong_address, content.to_vec()); let put_msg = ChunkMessage { request_id: 20, body: ChunkMessageBody::PutRequest(put_request), @@ -521,15 +505,8 @@ mod tests { let content = b"duplicate content"; let address = LmdbStorage::compute_address(content); - // Store first time - let put_request = ChunkPutRequest::with_payment( - address, - content.to_vec(), - rmp_serde::to_vec(&ant_evm::ProofOfPayment { - peer_quotes: vec![], - }) - .unwrap(), - ); + // Store first time - no payment proof needed (EVM disabled in test) + let put_request = ChunkPutRequest::new(address, content.to_vec()); let put_msg = ChunkMessage { request_id: 40, body: ChunkMessageBody::PutRequest(put_request), @@ -583,4 +560,136 @@ mod tests { let retrieved = protocol.get_local(&address).await.expect("get local"); assert_eq!(retrieved, Some(content.to_vec())); } + + #[tokio::test] + async fn test_put_populates_payment_cache() { + let (protocol, _temp) = create_test_protocol().await; + + let content = b"cache test content"; + let address = LmdbStorage::compute_address(content); + + // Before PUT: cache should be empty + let stats_before = protocol.payment_cache_stats(); + assert_eq!(stats_before.additions, 0); + + // PUT (EVM disabled — verifier will auto-accept and cache) + let put_request = ChunkPutRequest::new(address, content.to_vec()); + let put_msg = ChunkMessage { + request_id: 100, + body: ChunkMessageBody::PutRequest(put_request), + }; + let put_bytes = put_msg.encode().expect("encode put"); + let response_bytes = protocol + .handle_message(&put_bytes) + .await + .expect("handle put"); + let response = ChunkMessage::decode(&response_bytes).expect("decode"); + + if let ChunkMessageBody::PutResponse(ChunkPutResponse::Success { .. }) = response.body { + // expected + } else { + panic!("expected success, got: {response:?}"); + } + + // After PUT: cache should have the xorname + let stats_after = protocol.payment_cache_stats(); + assert_eq!(stats_after.additions, 1); + } + + #[tokio::test] + async fn test_put_same_chunk_twice_hits_cache() { + let (protocol, _temp) = create_test_protocol().await; + + let content = b"duplicate cache test"; + let address = LmdbStorage::compute_address(content); + + // First PUT + let put_request = ChunkPutRequest::new(address, content.to_vec()); + let put_msg = ChunkMessage { + request_id: 110, + body: ChunkMessageBody::PutRequest(put_request), + }; + let put_bytes = put_msg.encode().expect("encode put"); + let _ = protocol + .handle_message(&put_bytes) + .await + .expect("handle put 1"); + + // Second PUT — should return AlreadyExists (checked in storage before payment) + let response_bytes = protocol + .handle_message(&put_bytes) + .await + .expect("handle put 2"); + let response = ChunkMessage::decode(&response_bytes).expect("decode"); + + if let ChunkMessageBody::PutResponse(ChunkPutResponse::AlreadyExists { .. }) = response.body + { + // expected — storage check comes before payment check + } else { + panic!("expected AlreadyExists, got: {response:?}"); + } + } + + #[tokio::test] + async fn test_payment_cache_stats_returns_correct_values() { + let (protocol, _temp) = create_test_protocol().await; + + let stats = protocol.payment_cache_stats(); + assert_eq!(stats.hits, 0); + assert_eq!(stats.misses, 0); + assert_eq!(stats.additions, 0); + + // Store a chunk to trigger payment verification + let content = b"stats test"; + let address = LmdbStorage::compute_address(content); + let put_request = ChunkPutRequest::new(address, content.to_vec()); + let put_msg = ChunkMessage { + request_id: 120, + body: ChunkMessageBody::PutRequest(put_request), + }; + let put_bytes = put_msg.encode().expect("encode put"); + let _ = protocol + .handle_message(&put_bytes) + .await + .expect("handle put"); + + let stats = protocol.payment_cache_stats(); + // Should have 1 miss (first lookup) + 1 addition (after verify) + assert_eq!(stats.misses, 1); + assert_eq!(stats.additions, 1); + } + + #[tokio::test] + async fn test_storage_stats() { + let (protocol, _temp) = create_test_protocol().await; + let stats = protocol.storage_stats(); + assert_eq!(stats.chunks_stored, 0); + } + + #[tokio::test] + async fn test_handle_unexpected_response_message() { + let (protocol, _temp) = create_test_protocol().await; + + // Send a PutResponse as if it were a request + let msg = ChunkMessage { + request_id: 200, + body: ChunkMessageBody::PutResponse(ChunkPutResponse::Success { address: [0u8; 32] }), + }; + let msg_bytes = msg.encode().expect("encode"); + + let response_bytes = protocol + .handle_message(&msg_bytes) + .await + .expect("handle msg"); + let response = ChunkMessage::decode(&response_bytes).expect("decode"); + + if let ChunkMessageBody::PutResponse(ChunkPutResponse::Error(ProtocolError::Internal( + msg, + ))) = response.body + { + assert!(msg.contains("Unexpected")); + } else { + panic!("expected Internal error, got: {response:?}"); + } + } } diff --git a/tests/e2e/anvil.rs b/tests/e2e/anvil.rs index 380d730d..b64349aa 100644 --- a/tests/e2e/anvil.rs +++ b/tests/e2e/anvil.rs @@ -1,9 +1,11 @@ //! Anvil EVM testnet wrapper for payment verification tests. //! -//! This module wraps the `evmlib::testnet::Testnet` to provide a local +//! This module wraps `evmlib::testnet::Testnet` to provide a local //! Anvil blockchain for testing payment verification. -use std::time::Duration; +use evmlib::testnet::Testnet; +use evmlib::wallet::Wallet; +use evmlib::Network as EvmNetwork; use tracing::{debug, info}; /// Error type for Anvil operations. @@ -25,57 +27,29 @@ pub enum AnvilError { /// Result type for Anvil operations. pub type Result = std::result::Result; -/// Wrapper around Anvil EVM testnet. +/// Wrapper around a real `evmlib::testnet::Testnet`. /// -/// This provides a local Ethereum-compatible blockchain for testing -/// payment verification without connecting to a real network. -/// -/// ## Features -/// -/// - Pre-funded test accounts (10,000 ETH each) -/// - Deployed payment contracts -/// - Fast block times for testing +/// Spawns a local Anvil instance with deployed contracts. The Anvil +/// process is kept alive for the lifetime of this struct. /// /// ## Usage /// /// ```rust,ignore /// let anvil = TestAnvil::new().await?; -/// -/// // Get the network configuration for PaymentVerifier -/// let network = anvil.network(); -/// -/// // Get a funded wallet for testing -/// let wallet_key = anvil.default_wallet_key(); -/// +/// let network = anvil.to_network(); +/// let wallet = anvil.create_funded_wallet()?; /// anvil.shutdown().await; /// ``` pub struct TestAnvil { - /// The underlying evmlib testnet. - // Note: When evmlib is available, this would be: - // testnet: evmlib::testnet::Testnet, - // network: evmlib::Network, - - /// RPC URL for the testnet. - rpc_url: String, - - /// Default wallet private key. - default_wallet_key: String, - - /// Payment token contract address. - payment_token_address: Option, - - /// Data payments contract address. - data_payments_address: Option, - - /// Whether Anvil is running. - running: bool, + /// The underlying evmlib testnet (owns the Anvil process). + testnet: Testnet, } impl TestAnvil { /// Start a new Anvil EVM testnet. /// - /// This spawns an Anvil process and deploys the necessary contracts - /// for payment verification testing. + /// Spawns an Anvil process, deploys payment contracts, and returns + /// a fully-configured testnet ready for payment verification tests. /// /// # Errors /// @@ -83,145 +57,96 @@ impl TestAnvil { pub async fn new() -> Result { info!("Starting Anvil EVM testnet"); - // In a full implementation, this would use evmlib::testnet::Testnet - // For now, we provide a placeholder that can be connected to actual Anvil - - // Default Anvil configuration - let rpc_url = "http://127.0.0.1:8545".to_string(); - let default_wallet_key = - "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80".to_string(); + let testnet = Testnet::new().await; - // In production, this would: - // 1. Spawn Anvil process - // 2. Wait for it to be ready - // 3. Deploy contracts - // 4. Return the configured testnet + info!("Anvil testnet started"); - // Placeholder: Simulate startup delay - tokio::time::sleep(Duration::from_millis(100)).await; - - info!("Anvil testnet started on {}", rpc_url); - - Ok(Self { - rpc_url, - default_wallet_key, - payment_token_address: None, - data_payments_address: None, - running: true, - }) + Ok(Self { testnet }) } - /// Start Anvil with evmlib integration (when available). - /// - /// This is the preferred method when evmlib is properly integrated. + /// Get the EVM network configuration for this testnet. /// - /// # Errors - /// - /// Returns an error if Anvil fails to start. - #[allow(dead_code)] - pub async fn with_evmlib() -> Result { - // When evmlib is available: - // let testnet = evmlib::testnet::Testnet::new().await; - // let network = testnet.to_network(); - // ... - - Self::new().await - } - - /// Get the RPC URL for the testnet. + /// Use this to configure `PaymentVerifier` or `Wallet` instances. #[must_use] - pub fn rpc_url(&self) -> &str { - &self.rpc_url + pub fn to_network(&self) -> EvmNetwork { + self.testnet.to_network() } - /// Get the default wallet private key. - /// - /// This is a pre-funded test account with 10,000 ETH. + /// Get a reference to the underlying `Testnet`. #[must_use] - pub fn default_wallet_key(&self) -> &str { - &self.default_wallet_key + pub fn testnet(&self) -> &Testnet { + &self.testnet } - /// Get the payment token contract address. + /// Get the default wallet private key (pre-funded Anvil account). #[must_use] - pub fn payment_token_address(&self) -> Option<&str> { - self.payment_token_address.as_deref() + pub fn default_wallet_key(&self) -> String { + self.testnet.default_wallet_private_key() } - /// Get the data payments contract address. - #[must_use] - pub fn data_payments_address(&self) -> Option<&str> { - self.data_payments_address.as_deref() - } - - /// Check if Anvil is running and healthy. - pub async fn is_healthy(&self) -> bool { - if !self.running { - return false; - } - - // In production, this would make an eth_blockNumber RPC call - // to verify Anvil is responding - true - } - - /// Get the current block number. + /// Create a wallet funded with test tokens. + /// + /// Uses the default Anvil account (pre-funded). /// /// # Errors /// - /// Returns an error if the RPC call fails. - pub async fn block_number(&self) -> Result { - // In production, this would make an eth_blockNumber RPC call - Ok(0) + /// Returns an error if wallet creation fails. + pub fn create_funded_wallet(&self) -> Result { + let network = self.testnet.to_network(); + let private_key = self.testnet.default_wallet_private_key(); + + let wallet = Wallet::new_from_private_key(network, &private_key) + .map_err(|e| AnvilError::Startup(format!("Failed to create funded wallet: {e}")))?; + + debug!("Created funded wallet with address: {}", wallet.address()); + Ok(wallet) } - /// Mine a specified number of blocks. - /// - /// Useful for advancing block time in tests. - /// - /// # Arguments - /// - /// * `count` - Number of blocks to mine + /// Create an empty wallet (for testing insufficient funds). /// /// # Errors /// - /// Returns an error if the RPC call fails. - pub async fn mine_blocks(&self, count: u64) -> Result<()> { - debug!("Mining {} blocks", count); - // In production, this would call evm_mine RPC method - Ok(()) + /// Returns an error if wallet creation fails. + pub fn create_empty_wallet(&self) -> Result { + let network = self.testnet.to_network(); + let random_key = format!("0x{}", hex::encode(rand::random::<[u8; 32]>())); + + let wallet = Wallet::new_from_private_key(network, &random_key) + .map_err(|e| AnvilError::Startup(format!("Failed to create empty wallet: {e}")))?; + + debug!( + "Created empty wallet (no funds) with address: {}", + wallet.address() + ); + Ok(wallet) } - /// Set the block timestamp to a specific value. - /// - /// # Arguments - /// - /// * `timestamp` - Unix timestamp to set - /// - /// # Errors - /// - /// Returns an error if the RPC call fails. - pub async fn set_timestamp(&self, timestamp: u64) -> Result<()> { - debug!("Setting block timestamp to {}", timestamp); - // In production, this would call evm_setNextBlockTimestamp - Ok(()) + /// Consume `TestAnvil` and return the inner `Testnet`. + #[must_use] + pub fn into_testnet(self) -> Testnet { + self.testnet } /// Shutdown the Anvil testnet. pub async fn shutdown(&mut self) { - if self.running { - info!("Shutting down Anvil testnet"); - // In production, this would kill the Anvil process - self.running = false; - } + info!("Shutting down Anvil testnet"); + // Testnet is dropped when self is dropped, which kills the Anvil process. } } -impl Drop for TestAnvil { - fn drop(&mut self) { - // Best-effort cleanup - self.running = false; - } +/// Create a funded wallet using an explicit EVM network and private key. +/// +/// Use this when multiple test components share a single Anvil testnet +/// to ensure all wallets point at the same deployed contracts. +#[allow(dead_code)] +pub fn create_funded_wallet_for_network(network: &EvmNetwork, private_key: &str) -> Result { + let wallet = Wallet::new_from_private_key(network.clone(), private_key) + .map_err(|e| AnvilError::Startup(format!("Failed to create funded wallet: {e}")))?; + debug!( + "Created funded wallet for explicit network: {}", + wallet.address() + ); + Ok(wallet) } /// Pre-funded test accounts from Anvil. @@ -256,12 +181,13 @@ pub mod test_accounts { #[allow(clippy::unwrap_used, clippy::expect_used)] mod tests { use super::*; + use serial_test::serial; #[tokio::test] + #[serial] async fn test_anvil_creation() { let anvil = TestAnvil::new().await.unwrap(); - assert!(anvil.is_healthy().await); - assert!(!anvil.rpc_url().is_empty()); + let _network = anvil.to_network(); assert!(!anvil.default_wallet_key().is_empty()); } diff --git a/tests/e2e/complete_payment_e2e.rs b/tests/e2e/complete_payment_e2e.rs new file mode 100644 index 00000000..b65db98c --- /dev/null +++ b/tests/e2e/complete_payment_e2e.rs @@ -0,0 +1,543 @@ +//! Complete E2E test proving the payment protocol works on live nodes. +//! +//! **All payment tests in this file use `payment_enforcement: true`.** +//! Nodes verify payments on-chain via Anvil before storing chunks. +//! +//! ## Test Flow +//! +//! 1. **Network Setup**: Spawn 10 live saorsa nodes + Anvil EVM testnet +//! 2. **Quote Collection**: Client requests quotes from 5 closest DHT peers +//! 3. **Price Calculation**: Sort quotes by price, select median +//! 4. **Payment**: Make on-chain payment (median node 3x, others 0 atto) +//! 5. **Chunk Storage**: Send chunk + `ProofOfPayment` to network +//! 6. **Verification**: Nodes verify payment on-chain before storing +//! 7. **Retrieval**: Retrieve chunk from storing node to prove storage succeeded +//! 8. **Cross-Node**: Retrieve chunk from a DIFFERENT node (tests replication) + +use super::harness::TestHarness; +use super::testnet::TestNetworkConfig; +use ant_evm::ProofOfPayment; +use bytes::Bytes; +use evmlib::testnet::Testnet; +use evmlib::wallet::Wallet; +use saorsa_node::client::{hex_node_id_to_encoded_peer_id, QuantumClient}; +use saorsa_node::payment::{PaymentProof, SingleNodePayment}; +use serial_test::serial; +use std::time::Duration; +use tokio::time::sleep; +use tracing::{info, warn}; + +/// Test environment for complete E2E payment flow. +/// +/// All nodes have `payment_enforcement: true` and use the same Anvil +/// instance as the client wallet, so on-chain verification is real. +struct CompletePaymentTestEnv { + harness: TestHarness, + /// Kept alive to prevent Anvil process from being dropped + _testnet: Testnet, + wallet: Wallet, +} + +impl CompletePaymentTestEnv { + /// Initialize complete payment test environment with enforcement enabled. + /// + /// Nodes and client share the SAME Anvil instance so on-chain + /// verification is real, not bypassed. + async fn setup() -> Result> { + info!("Setting up complete payment E2E test environment"); + + // Start Anvil EVM testnet FIRST so we can wire it to nodes + let testnet = Testnet::new().await; + let network = testnet.to_network(); + info!("Anvil testnet started"); + + // Setup 10-node network with payment enforcement ON and the + // SAME Anvil network so nodes verify on the same chain the client pays on. + // Use setup_with_config (NOT setup_with_evm_and_config) because we already + // created our own Testnet above — creating another would double-bind the port. + let config = TestNetworkConfig::small() + .with_payment_enforcement() + .with_evm_network(network.clone()); + + let harness = TestHarness::setup_with_config(config).await?; + + info!("10-node test network started with payment enforcement ENABLED"); + + // Wait for network to stabilize + sleep(Duration::from_secs(10)).await; + + let total_connections = harness.total_connections().await; + info!("Network stabilized with {total_connections} total connections"); + + // Warm up DHT routing tables (essential for quote collection) + harness.warmup_dht().await?; + sleep(Duration::from_secs(5)).await; + + // Create funded wallet from the SAME Anvil instance + let private_key = testnet.default_wallet_private_key(); + let wallet = Wallet::new_from_private_key(network, &private_key)?; + info!("Created funded wallet: {}", wallet.address()); + + Ok(Self { + harness, + _testnet: testnet, + wallet, + }) + } + + async fn teardown(self) -> Result<(), Box> { + self.harness.teardown().await?; + Ok(()) + } +} + +/// Complete chunk upload + payment + on-chain verification + retrieval flow. +/// +/// Nodes have `payment_enforcement: true`. The payment is verified on-chain. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_complete_payment_flow_live_nodes() -> Result<(), Box> { + info!("COMPLETE E2E PAYMENT TEST - LIVE NODES (enforcement ON)"); + + let mut env = CompletePaymentTestEnv::setup().await?; + + // Configure client node (node 0) with wallet + env.harness + .test_node_mut(0) + .ok_or("Node 0 not found")? + .set_wallet(env.wallet.clone()); + + let test_data = b"Complete E2E payment test data - proving the protocol works!"; + let expected_address = saorsa_node::compute_address(test_data); + + // Request quotes from DHT peers with retries + let client = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .client + .as_ref() + .ok_or("Client not configured")?; + + let mut quotes_with_prices = None; + for attempt in 1..=10 { + info!("Quote collection attempt {attempt}/10..."); + match client.get_quotes_from_dht(test_data).await { + Ok(quotes) => { + info!("Got {} quotes on attempt {attempt}", quotes.len()); + quotes_with_prices = Some(quotes); + break; + } + Err(e) => { + warn!("Attempt {attempt} failed: {e}"); + if attempt < 10 { + let _ = env.harness.warmup_dht().await; + sleep(Duration::from_secs(5)).await; + } + } + } + } + + let quotes_with_prices = quotes_with_prices.ok_or("Failed to get quotes after 10 attempts")?; + + assert_eq!( + quotes_with_prices.len(), + 5, + "Should receive exactly 5 quotes (REQUIRED_QUOTES)" + ); + + // Calculate payment (sort by price, select median) + let mut peer_quotes: Vec<_> = Vec::with_capacity(quotes_with_prices.len()); + let mut quotes_for_payment: Vec<_> = Vec::with_capacity(quotes_with_prices.len()); + for (peer_id_str, quote, price) in quotes_with_prices { + let encoded_peer_id = hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| format!("Failed to convert peer ID '{peer_id_str}': {e}"))?; + peer_quotes.push((encoded_peer_id, quote.clone())); + quotes_for_payment.push((quote, price)); + } + let payment = SingleNodePayment::from_quotes(quotes_for_payment) + .map_err(|e| format!("Failed to create payment: {e}"))?; + + info!("Payment total: {} atto", payment.total_amount()); + + // Verify only median quote has non-zero amount + let non_zero_quotes = payment + .quotes + .iter() + .filter(|q| q.amount > ant_evm::Amount::ZERO) + .count(); + assert_eq!( + non_zero_quotes, 1, + "Only median quote should have non-zero amount" + ); + + // Make on-chain payment + let tx_hashes = payment + .pay(&env.wallet) + .await + .map_err(|e| format!("Payment failed: {e}"))?; + + assert!( + !tx_hashes.is_empty(), + "Expected at least one transaction hash from payment" + ); + info!( + "On-chain payment succeeded: {} transactions", + tx_hashes.len() + ); + + // Build proof AFTER payment with tx hashes included + let proof = PaymentProof { + proof_of_payment: ProofOfPayment { peer_quotes }, + tx_hashes, + }; + let proof_bytes = + rmp_serde::to_vec(&proof).map_err(|e| format!("Failed to serialize proof: {e}"))?; + + // Store chunk with payment proof — nodes WILL verify on-chain + // Retry with backoff: DHT routing tables may not be fully stabilized yet + let mut stored_address = None; + for attempt in 1..=10 { + match client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), proof_bytes.clone()) + .await + { + Ok(addr) => { + info!("Chunk stored on attempt {attempt}"); + stored_address = Some(addr); + break; + } + Err(e) => { + warn!("Storage attempt {attempt}/10 failed: {e}"); + if attempt < 10 { + let _ = env.harness.warmup_dht().await; + sleep(Duration::from_secs(5)).await; + } + } + } + } + let stored_address = + stored_address.ok_or("Storage MUST succeed with valid payment proof after 10 attempts")?; + + assert_eq!( + stored_address, expected_address, + "Stored address should match computed address" + ); + info!("Chunk stored at {}", hex::encode(stored_address)); + + // Verify chunk is retrievable + sleep(Duration::from_millis(500)).await; + + let retrieved = client + .get_chunk(&stored_address) + .await + .map_err(|e| format!("Failed to retrieve chunk: {e}"))?; + + let chunk = retrieved.ok_or("Chunk should be retrievable from storing node")?; + assert_eq!( + chunk.content.as_ref(), + test_data, + "Retrieved data should match original" + ); + + info!("Chunk retrieved and verified"); + + // Try cross-node retrieval (may not work without replication) + let node1_chunk = env + .harness + .test_node(1) + .ok_or("Node 1 not found")? + .get_chunk(&stored_address) + .await?; + + if let Some(chunk) = node1_chunk { + assert_eq!( + chunk.content.as_ref(), + test_data, + "Cross-node data should match original" + ); + info!("Cross-node retrieval succeeded"); + } else { + info!("Cross-node retrieval: not replicated yet (expected in test mode)"); + } + + info!("COMPLETE E2E PAYMENT TEST PASSED (enforcement ON)"); + + env.teardown().await?; + Ok(()) +} + +/// Test: Nodes reject unpaid chunks when `payment_enforcement: true`. +/// +/// Validates server-side enforcement: the NODE rejects, not the client. +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_payment_verification_enforcement() -> Result<(), Box> { + info!("PAYMENT ENFORCEMENT TEST (enforcement ON)"); + + // Start Anvil and wire it to nodes + let testnet = Testnet::new().await; + let network = testnet.to_network(); + + let config = TestNetworkConfig::small() + .with_payment_enforcement() + .with_evm_network(network.clone()); + + // Use setup_with_config (NOT setup_with_evm_and_config) because we already + // created our own Testnet above — creating another would double-bind the port. + let harness = TestHarness::setup_with_config(config).await?; + + sleep(Duration::from_secs(10)).await; + harness.warmup_dht().await?; + sleep(Duration::from_secs(5)).await; + + // Try to store WITHOUT a wallet (sends no payment proof to server) + let client = + QuantumClient::with_defaults().with_node(harness.node(0).ok_or("Node 0 not found")?); + + let test_data = b"This should be rejected without payment"; + let result = client.put_chunk(Bytes::from(test_data.to_vec())).await; + + // MUST be rejected — assert exactly one outcome + assert!( + result.is_err(), + "Storage MUST fail without payment when enforcement is enabled" + ); + let error_msg = format!("{}", result.as_ref().err().ok_or("Expected error")?); + info!("Rejected as expected: {error_msg}"); + assert!( + error_msg.to_lowercase().contains("payment"), + "Error must be payment-related, got: {error_msg}" + ); + + // Now try WITH wallet and full payment flow — MUST succeed + let private_key = testnet.default_wallet_private_key(); + let wallet = Wallet::new_from_private_key(network, &private_key)?; + + let client_with_wallet = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet); + + let mut stored_address = None; + for attempt in 1..=10 { + match client_with_wallet + .put_chunk(Bytes::from(test_data.to_vec())) + .await + { + Ok(addr) => { + stored_address = Some(addr); + break; + } + Err(e) => { + warn!("Storage with payment attempt {attempt}/10 failed: {e}"); + if attempt < 10 { + let _ = harness.warmup_dht().await; + sleep(Duration::from_secs(5)).await; + } + } + } + } + + // MUST succeed — assert exactly one outcome + let address = + stored_address.ok_or("Storage MUST succeed with valid payment after 10 attempts")?; + info!("Stored with payment at {}", hex::encode(address)); + + info!("PAYMENT ENFORCEMENT TEST PASSED"); + + harness.teardown().await?; + Ok(()) +} + +/// Test: Forged ML-DSA-65 signature rejection. +/// +/// Gets valid quotes, makes real payment, builds proof, CORRUPTS the +/// signature bytes, sends to EVM-enabled node, asserts rejection. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_forged_signature_rejection() -> Result<(), Box> { + info!("FORGED SIGNATURE REJECTION TEST (enforcement ON)"); + + let testnet = Testnet::new().await; + let network = testnet.to_network(); + + let config = TestNetworkConfig::small() + .with_payment_enforcement() + .with_evm_network(network.clone()); + + // Use setup_with_config (NOT setup_with_evm_and_config) because we already + // created our own Testnet above — creating another would double-bind the port. + let harness = TestHarness::setup_with_config(config).await?; + + sleep(Duration::from_secs(10)).await; + harness.warmup_dht().await?; + + // Create client with wallet + let private_key = testnet.default_wallet_private_key(); + let wallet = Wallet::new_from_private_key(network, &private_key)?; + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + let test_data = b"Forged signature test data"; + + // Get quotes from DHT + let mut quotes_with_prices = None; + for attempt in 1..=5 { + match client.get_quotes_from_dht(test_data).await { + Ok(quotes) => { + quotes_with_prices = Some(quotes); + break; + } + Err(e) => { + warn!("Quote attempt {attempt} failed: {e}"); + if attempt < 5 { + sleep(Duration::from_secs(2u64.pow(attempt))).await; + } + } + } + } + + let quotes_with_prices = quotes_with_prices.ok_or("Failed to get quotes after 5 attempts")?; + + // Build peer_quotes and payment + let mut peer_quotes: Vec<_> = Vec::with_capacity(quotes_with_prices.len()); + let mut quotes_for_payment: Vec<_> = Vec::with_capacity(quotes_with_prices.len()); + for (peer_id_str, quote, price) in quotes_with_prices { + let encoded_peer_id = hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| format!("Failed to convert peer ID '{peer_id_str}': {e}"))?; + peer_quotes.push((encoded_peer_id, quote.clone())); + quotes_for_payment.push((quote, price)); + } + + let payment = SingleNodePayment::from_quotes(quotes_for_payment) + .map_err(|e| format!("Failed to create payment: {e}"))?; + + // Pay on-chain (real payment) + let tx_hashes = payment + .pay(&wallet) + .await + .map_err(|e| format!("Payment failed: {e}"))?; + + // CORRUPT the signature on the first quote + let mut forged_quotes = peer_quotes.clone(); + if let Some((_peer_id, ref mut quote)) = forged_quotes.first_mut() { + // Flip all signature bytes to corrupt it + for byte in &mut quote.signature { + *byte = byte.wrapping_add(1); + } + } + + // Build proof with forged signature + let forged_proof = PaymentProof { + proof_of_payment: ProofOfPayment { + peer_quotes: forged_quotes, + }, + tx_hashes, + }; + let forged_proof_bytes = rmp_serde::to_vec(&forged_proof) + .map_err(|e| format!("Failed to serialize forged proof: {e}"))?; + + // Try to store with forged proof — MUST be rejected + let result = client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), forged_proof_bytes) + .await; + + assert!(result.is_err(), "Storage MUST fail with forged signature"); + let error_msg = format!("{}", result.as_ref().err().ok_or("Expected error")?); + info!("Forged signature rejected: {error_msg}"); + + info!("FORGED SIGNATURE REJECTION TEST PASSED"); + + harness.teardown().await?; + Ok(()) +} + +/// Test: Payment flow survives node failures. +/// +/// Validates that payment collection and storage continue to work +/// even when some nodes in the network fail. +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_payment_flow_with_failures() -> Result<(), Box> { + info!("PAYMENT FLOW RESILIENCE TEST (enforcement ON)"); + + let mut env = CompletePaymentTestEnv::setup().await?; + + // Configure client + env.harness + .test_node_mut(0) + .ok_or("Node 0 not found")? + .set_wallet(env.wallet.clone()); + + // Verify initial network + let initial_count = env.harness.running_node_count().await; + assert_eq!(initial_count, 10); + + // Simulate failures - shutdown 3 nodes + info!("Simulating node failures (shutting down nodes 5, 6, 7)"); + env.harness.shutdown_nodes(&[5, 6, 7]).await?; + + sleep(Duration::from_secs(15)).await; + + let remaining_count = env.harness.running_node_count().await; + assert_eq!(remaining_count, 7); + + // Re-warm DHT after node failures so routing tables adapt + env.harness.warmup_dht().await?; + sleep(Duration::from_secs(25)).await; + + // Payment flow with reduced network — MUST succeed (7 nodes > 5 required) + let test_data = b"Resilience test data"; + let client = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .client + .as_ref() + .ok_or("Client not configured")?; + + // Retry quote collection and storage up to 3 times to allow DHT to stabilize + let mut last_err = String::new(); + let mut succeeded = false; + for attempt in 1..=10 { + info!("Storage attempt {attempt}/10 after node failures..."); + match client.get_quotes_from_dht(test_data).await { + Ok(quotes) => { + info!("Collected {} quotes despite failures", quotes.len()); + match client.put_chunk(Bytes::from(test_data.to_vec())).await { + Ok(_address) => { + info!("Storage succeeded with reduced network"); + succeeded = true; + break; + } + Err(e) => { + last_err = format!("Storage failed: {e}"); + warn!("Attempt {attempt} storage failed: {e}"); + } + } + } + Err(e) => { + last_err = format!("Quote collection failed: {e}"); + warn!("Attempt {attempt} quote collection failed: {e}"); + } + } + if attempt < 10 { + if attempt == 4 || attempt == 7 { + let _ = env.harness.warmup_dht().await; + } + sleep(Duration::from_secs(10)).await; + } + } + assert!( + succeeded, + "Storage MUST succeed with reduced network after retries: {last_err}" + ); + + info!("RESILIENCE TEST PASSED"); + + env.teardown().await?; + Ok(()) +} diff --git a/tests/e2e/data_types/chunk.rs b/tests/e2e/data_types/chunk.rs index b3acbe91..b495ac05 100644 --- a/tests/e2e/data_types/chunk.rs +++ b/tests/e2e/data_types/chunk.rs @@ -63,7 +63,15 @@ mod tests { use std::sync::Arc; use crate::{TestHarness, TestNetwork}; + use ant_evm::RewardsAddress; + use evmlib::testnet::Testnet; use rand::seq::SliceRandom; + use saorsa_node::payment::{ + EvmVerifierConfig, PaymentVerifier, PaymentVerifierConfig, QuoteGenerator, + QuotingMetricsTracker, + }; + use saorsa_node::storage::{AntProtocol, LmdbStorage, LmdbStorageConfig}; + use serial_test::serial; /// Test 1: Content address computation is deterministic #[test] @@ -282,32 +290,6 @@ mod tests { .expect("Failed to teardown harness"); } - // ========================================================================= - // Tests requiring additional infrastructure (not yet implemented) - // ========================================================================= - - /// Test 9: Chunk replication across nodes. - /// - /// Store on one node, retrieve from a different node. - #[test] - #[ignore = "TODO: Cross-node DHT replication not yet working in saorsa-core"] - fn test_chunk_replication() { - // TODO: Implement when saorsa-core DHT replication is fixed - // - Store chunk on node 0 - // - Retrieve from nodes 1-4 - // - Verify data matches - } - - /// Test: Payment verification for chunk storage. - #[test] - #[ignore = "Requires Anvil EVM testnet integration"] - fn test_chunk_payment_verification() { - // TODO: Implement with TestHarness and TestAnvil - // - Create payment proof via Anvil - // - Store chunk with payment proof - // - Verify payment was validated - } - /// Test 8: Reject oversized chunk (> 4MB). /// /// Chunks have a maximum size of 4MB. Attempting to store a larger @@ -390,9 +372,13 @@ mod tests { } // Recreate AntProtocol from the same data directory (simulates restart) - let new_protocol = TestNetwork::create_ant_protocol(&data_dir) - .await - .expect("Failed to recreate AntProtocol"); + // Pass false for payment_enforcement (disabled for this test) + let restart_identity = saorsa_core::identity::NodeIdentity::generate() + .expect("Failed to generate identity for restart"); + let new_protocol = + TestNetwork::create_ant_protocol(&data_dir, false, None, &restart_identity) + .await + .expect("Failed to recreate AntProtocol"); { let node = harness .network_mut() @@ -427,10 +413,348 @@ mod tests { .expect("Failed to teardown harness"); } - /// Test: ML-DSA-65 signature on chunk. - #[test] - #[ignore = "Requires signature verification infrastructure"] - fn test_chunk_signature_verification() { - // TODO: Verify chunk is signed with ML-DSA-65 when stored + // ========================================================================= + // Payment E2E Tests + // ========================================================================= + + /// Test: Store chunk with payment (full E2E flow). + /// + /// This test validates the complete pay-to-store workflow: + /// 1. Starts a test network with Anvil EVM testnet + /// 2. Creates a funded wallet from Anvil + /// 3. Configures a client node with the wallet + /// 4. Stores a chunk (triggers quote request, payment, and storage) + /// 5. Retrieves and verifies the chunk + #[tokio::test(flavor = "multi_thread")] + #[serial] + async fn test_chunk_store_with_payment() { + let mut harness = TestHarness::setup_with_payments() + .await + .expect("Failed to setup harness with payments"); + + // Get wallet from Anvil + let anvil = harness.anvil().expect("Anvil should be running"); + let wallet = anvil + .create_funded_wallet() + .expect("Failed to create funded wallet"); + + // Setup client with wallet + let client_node = harness.test_node_mut(0).expect("Node 0 should exist"); + client_node.set_wallet(wallet); + + let fixture = ChunkTestFixture::new(); + + // Store chunk - should request quotes, pay, and store + let address = harness + .test_node(0) + .expect("Node 0 should exist") + .store_chunk_with_payment(&fixture.small) + .await + .expect("Failed to store chunk with payment"); + + // Verify the address matches the content hash + let expected_address = ChunkTestFixture::compute_address(&fixture.small); + assert_eq!( + address, expected_address, + "Returned address should match computed content address" + ); + + // Verify chunk was stored by retrieving it + let retrieved = harness + .test_node(0) + .expect("Node 0 should exist") + .get_chunk_with_client(&address) + .await + .expect("Failed to retrieve chunk"); + + let chunk = retrieved.expect("Chunk should exist after payment"); + assert_eq!( + chunk.content.as_ref(), + fixture.small.as_slice(), + "Retrieved data should match original" + ); + + harness + .teardown() + .await + .expect("Failed to teardown harness"); + } + + /// Test: Payment cache works (second PUT is free). + /// + /// This test verifies that storing the same chunk twice doesn't require + /// a second payment (the first payment is cached). + #[tokio::test(flavor = "multi_thread")] + #[serial] + async fn test_chunk_payment_cache() { + let mut harness = TestHarness::setup_with_payments() + .await + .expect("Failed to setup harness"); + + let anvil = harness.anvil().expect("Anvil should be running"); + let wallet = anvil + .create_funded_wallet() + .expect("Failed to create wallet"); + + harness + .test_node_mut(0) + .expect("Node 0 should exist") + .set_wallet(wallet); + + let fixture = ChunkTestFixture::new(); + + // First store - pays + let address1 = harness + .test_node(0) + .expect("Node 0 should exist") + .store_chunk_with_payment(&fixture.small) + .await + .expect("Failed to store chunk first time"); + + // Second store of same data - should return same address + // Note: The chunk already exists, so the node will return AlreadyExists + let address2 = harness + .test_node(0) + .expect("Node 0 should exist") + .store_chunk_with_payment(&fixture.small) + .await + .expect("Failed to store chunk second time"); + + assert_eq!( + address1, address2, + "Same data should produce same address both times" + ); + + harness + .teardown() + .await + .expect("Failed to teardown harness"); + } + + /// Test: Store fails without wallet. + /// + /// This test verifies that attempting to store a chunk without configuring + /// a wallet results in an appropriate error. + #[tokio::test(flavor = "multi_thread")] + async fn test_chunk_store_fails_without_wallet() { + let harness = TestHarness::setup_minimal() + .await + .expect("Failed to setup harness"); + + // Client without wallet - use the test node without calling with_wallet() + let client_node = harness.test_node(0).expect("Node 0 should exist"); + let fixture = ChunkTestFixture::new(); + + // This should fail because no client is configured (no wallet means no client) + let result = client_node.store_chunk_with_payment(&fixture.small).await; + + assert!( + result.is_err(), + "Store should fail without client/wallet configured" + ); + + harness + .teardown() + .await + .expect("Failed to teardown harness"); + } + + /// Test: Store fails with insufficient funds. + /// + /// This test verifies that attempting to store a chunk with an empty wallet + /// (no balance) results in a payment failure. + #[tokio::test(flavor = "multi_thread")] + #[serial] + async fn test_chunk_store_fails_with_insufficient_funds() { + let mut harness = TestHarness::setup_with_payments() + .await + .expect("Failed to setup harness"); + + // Create wallet with 0 balance + let anvil = harness.anvil().expect("Anvil should be running"); + let wallet = anvil + .create_empty_wallet() + .expect("Failed to create empty wallet"); + + harness + .test_node_mut(0) + .expect("Node 0 should exist") + .set_wallet(wallet); + + let fixture = ChunkTestFixture::new(); + + // Should fail with insufficient funds error + let result = harness + .test_node(0) + .expect("Node 0 should exist") + .store_chunk_with_payment(&fixture.small) + .await; + + assert!(result.is_err(), "Store should fail with insufficient funds"); + + // Verify the error is related to payment/funds + if let Err(e) = result { + let error_msg = format!("{e}"); + assert!( + { + let lower = error_msg.to_lowercase(); + lower.contains("payment") + || lower.contains("pay") + || lower.contains("funds") + || lower.contains("balance") + || lower.contains("insufficient") + }, + "Error should mention payment or funds, got: {error_msg}" + ); + } + + harness + .teardown() + .await + .expect("Failed to teardown harness"); + } + + /// Create an `AntProtocol` with EVM verification enabled, backed by an Anvil testnet. + /// + /// Returns (protocol, `temp_dir`, testnet). The testnet must be kept alive for the + /// duration of the test so Anvil doesn't shut down. + async fn create_evm_enabled_protocol( + test_name: &str, + ) -> color_eyre::Result<(AntProtocol, std::path::PathBuf, Testnet)> { + let testnet = Testnet::new().await; + let network = testnet.to_network(); + + let temp_dir = std::env::temp_dir().join(format!("{test_name}_{}", rand::random::())); + tokio::fs::create_dir_all(&temp_dir).await?; + + let storage = LmdbStorage::new(LmdbStorageConfig { + root_dir: temp_dir.clone(), + verify_on_read: true, + max_chunks: 0, + max_map_size: 0, + }) + .await?; + + let payment_verifier = PaymentVerifier::new(PaymentVerifierConfig { + evm: EvmVerifierConfig { + enabled: true, + network, + }, + cache_capacity: 100, + local_rewards_address: None, + }); + + let rewards_address = RewardsAddress::new([0x01; 20]); + let metrics_tracker = QuotingMetricsTracker::new(1000, 100); + let quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + let protocol = AntProtocol::new( + Arc::new(storage), + Arc::new(payment_verifier), + Arc::new(quote_generator), + ); + + Ok((protocol, temp_dir, testnet)) + } + + /// Test: Chunk is rejected without payment when EVM verification is enabled. + /// + /// This test verifies that payment enforcement actually works by: + /// 1. Creating a protocol handler with EVM verification enabled + /// 2. Attempting to store a chunk with an empty payment proof + /// 3. Verifying the request is rejected with `PaymentRequired` + /// 4. Confirming the chunk was NOT stored + #[tokio::test(flavor = "multi_thread")] + #[serial] + async fn test_chunk_rejected_without_payment() -> color_eyre::Result<()> { + use saorsa_node::ant_protocol::{ + ChunkGetRequest, ChunkGetResponse, ChunkMessage, ChunkMessageBody, ChunkPutRequest, + ChunkPutResponse, + }; + + let (protocol, temp_dir, _testnet) = + create_evm_enabled_protocol("test_payment_rejection").await?; + + // Create test data + let data = b"test data that should be rejected without payment"; + let address = ChunkTestFixture::compute_address(data); + + // Create empty payment proof + let empty_payment = rmp_serde::to_vec(&ant_evm::ProofOfPayment { + peer_quotes: vec![], + })?; + + // Create PUT request with empty payment + let request_id: u64 = rand::random(); + let request = ChunkPutRequest::with_payment(address, data.to_vec(), empty_payment); + let message = ChunkMessage { + request_id, + body: ChunkMessageBody::PutRequest(request), + }; + let message_bytes = message.encode()?; + + // Send PUT request to protocol handler + let response_bytes = protocol.handle_message(&message_bytes).await?; + let response = ChunkMessage::decode(&response_bytes)?; + + // Verify the response indicates payment is required or an error occurred + match response.body { + ChunkMessageBody::PutResponse(ChunkPutResponse::PaymentRequired { message }) => { + // Success - payment was required as expected + assert!( + !message.is_empty(), + "PaymentRequired should include a message" + ); + eprintln!("✓ Chunk rejected with PaymentRequired: {message}"); + } + ChunkMessageBody::PutResponse(ChunkPutResponse::Error(err)) => { + // Also acceptable - payment verification failure can be reported as error + let err_str = format!("{err:?}"); + assert!( + err_str.contains("Payment") || err_str.contains("payment"), + "Error should mention payment: {err_str}" + ); + eprintln!("✓ Chunk rejected with Error: {err:?}"); + } + other => { + return Err(color_eyre::eyre::eyre!( + "Expected PaymentRequired or Error response, got: {other:?}" + )); + } + } + + // Verify the chunk was NOT stored by attempting to retrieve it + let get_request_id: u64 = rand::random(); + let get_request = ChunkGetRequest::new(address); + let get_message = ChunkMessage { + request_id: get_request_id, + body: ChunkMessageBody::GetRequest(get_request), + }; + let get_message_bytes = get_message.encode()?; + + let get_response_bytes = protocol.handle_message(&get_message_bytes).await?; + let get_response = ChunkMessage::decode(&get_response_bytes)?; + + match get_response.body { + ChunkMessageBody::GetResponse(ChunkGetResponse::NotFound { .. }) => { + // Success - chunk was not stored + eprintln!("✓ Confirmed chunk was NOT stored (GET returned NotFound)"); + } + other => { + return Err(color_eyre::eyre::eyre!( + "Expected NotFound response (chunk should not be stored), got: {other:?}" + )); + } + } + + eprintln!("\n✅ Payment enforcement verified: chunks are rejected without valid payment when EVM is enabled"); + + // Cleanup + drop(protocol); + if let Err(e) = tokio::fs::remove_dir_all(&temp_dir).await { + eprintln!("Failed to cleanup temp directory: {e}"); + } + + Ok(()) } } diff --git a/tests/e2e/data_types/graph_entry.rs b/tests/e2e/data_types/graph_entry.rs index e19361f5..810e7d49 100644 --- a/tests/e2e/data_types/graph_entry.rs +++ b/tests/e2e/data_types/graph_entry.rs @@ -178,116 +178,4 @@ mod tests { assert_eq!(fixture.parents.len(), 1); assert_eq!(fixture.parents[0], parent); } - - // ========================================================================= - // Integration Tests (require testnet) - // ========================================================================= - - /// Test 8: Store and retrieve root entry (no parents) - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_store_retrieve_root() { - // TODO: Implement with TestHarness - // let harness = TestHarness::setup().await.unwrap(); - // let fixture = GraphEntryTestFixture::new(); - // - // // Store via node 5 - // let entry = harness.node(5).put_graph_entry( - // fixture.owner, - // fixture.parents.clone(), - // &fixture.small_content, - // ).await.unwrap(); - // - // // Retrieve via node 20 - // let retrieved = harness.node(20).get_graph_entry(&entry.address()).await.unwrap(); - // assert_eq!(retrieved.content(), fixture.small_content); - // - // harness.teardown().await.unwrap(); - } - - /// Test 9: Store and retrieve entry with single parent - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_single_parent() { - // TODO: Create root entry, then child entry pointing to root - } - - /// Test 10: Store and retrieve entry with multiple parents (merge) - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_multiple_parents() { - // TODO: Create two branches, then merge entry with both as parents - } - - /// Test 11: DAG traversal from leaf to root - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_dag_traversal() { - // TODO: Create chain: root -> child1 -> child2 -> leaf - // Traverse from leaf back to root via parent links - } - - /// Test 12: Cross-node replication - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_replication() { - // TODO: Store on node A, verify retrieval from nodes B, C, D - } - - /// Test 13: Payment verification for graph entry storage - #[test] - #[ignore = "Requires real P2P testnet and Anvil - run with --ignored"] - fn test_graph_entry_payment_verification() { - // TODO: Implement with TestHarness and TestAnvil - } - - /// Test 14: Large graph entry (100KB max) - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_large_content() { - // TODO: Store and retrieve 100KB graph entry - } - - /// Test 15: Reject oversized graph entry - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_reject_oversized() { - // TODO: Attempt to store > 100KB entry, verify rejection - } - - /// Test 16: Owner signature verification - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_owner_signature() { - // TODO: Verify entry is signed with ML-DSA-65 - } - - /// Test 17: Retrieve non-existent entry returns None - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_retrieve_nonexistent() { - // TODO: Query random address, verify None returned - } - - /// Test 18: Parent validation - reject invalid parent reference - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_invalid_parent_rejected() { - // TODO: Attempt to create entry with non-existent parent, verify rejection - } - - /// Test 19: Multi-owner entry (collaborative DAG) - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_multi_owner() { - // TODO: Two owners create entries, one creates child referencing both - } - - /// Test 20: Graph entry immutability - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_graph_entry_immutable() { - // TODO: Verify that once stored, entry cannot be modified - // (new entry with same address should be rejected) - } } diff --git a/tests/e2e/data_types/pointer.rs b/tests/e2e/data_types/pointer.rs index 26e3a649..a46e1742 100644 --- a/tests/e2e/data_types/pointer.rs +++ b/tests/e2e/data_types/pointer.rs @@ -142,94 +142,4 @@ mod tests { "Pointer and scratchpad addresses should be in different namespaces" ); } - - // ========================================================================= - // Integration Tests (require testnet) - // ========================================================================= - - /// Test 5: Store and retrieve pointer - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_store_retrieve() { - // TODO: Implement with TestHarness - // let harness = TestHarness::setup().await.unwrap(); - // let fixture = PointerTestFixture::new(); - // - // // Store via node 5 - // let record = harness.node(5).put_pointer( - // fixture.owner, - // fixture.target, - // 0, // Initial counter - // ).await.unwrap(); - // - // // Retrieve via node 20 - // let retrieved = harness.node(20).get_pointer(&fixture.owner).await.unwrap(); - // assert_eq!(retrieved.target(), fixture.target); - // - // harness.teardown().await.unwrap(); - } - - /// Test 6: Update pointer target - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_update_target() { - // TODO: Store with target A, update to target B, verify B is returned - } - - /// Test 7: Counter versioning - higher counter wins - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_counter_versioning() { - // TODO: Similar to scratchpad counter test - } - - /// Test 8: Cross-node replication - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_replication() { - // TODO: Store on node A, verify retrieval from nodes B, C, D - } - - /// Test 9: Payment verification for pointer storage - #[test] - #[ignore = "Requires real P2P testnet and Anvil - run with --ignored"] - fn test_pointer_payment_verification() { - // TODO: Implement with TestHarness and TestAnvil - } - - /// Test 10: Owner signature verification - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_owner_signature() { - // TODO: Verify only owner can update pointer (ML-DSA-65 signature) - } - - /// Test 11: Reject updates from non-owner - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_reject_non_owner_update() { - // TODO: Attempt update with wrong key, verify rejection - } - - /// Test 12: Retrieve non-existent pointer returns None - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_retrieve_nonexistent() { - // TODO: Query random owner, verify None returned - } - - /// Test 13: Pointer chain resolution - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_chain_resolution() { - // TODO: Create pointer A -> chunk B, verify resolution - } - - /// Test 14: Update doesn't affect target data - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_pointer_update_preserves_target_data() { - // TODO: Store chunk, create pointer to chunk, update pointer, - // verify chunk data is unchanged - } } diff --git a/tests/e2e/data_types/scratchpad.rs b/tests/e2e/data_types/scratchpad.rs index 061bde97..0949908a 100644 --- a/tests/e2e/data_types/scratchpad.rs +++ b/tests/e2e/data_types/scratchpad.rs @@ -123,113 +123,4 @@ mod tests { let fixture = ScratchpadTestFixture::with_owner(custom_owner); assert_eq!(fixture.owner, custom_owner); } - - // ========================================================================= - // Integration Tests (require testnet) - // ========================================================================= - - /// Test 6: Store and retrieve scratchpad - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_store_retrieve() { - // TODO: Implement with TestHarness - // let harness = TestHarness::setup().await.unwrap(); - // let fixture = ScratchpadTestFixture::new(); - // - // // Store via node 5 - // let entry = harness.node(5).put_scratchpad( - // fixture.owner, - // fixture.content_type, - // &fixture.small_data, - // 0, // Initial counter - // ).await.unwrap(); - // - // // Retrieve via node 20 - // let retrieved = harness.node(20).get_scratchpad(&fixture.owner).await.unwrap(); - // assert_eq!(retrieved.data(), fixture.small_data); - // - // harness.teardown().await.unwrap(); - } - - /// Test 7: Counter versioning - higher counter wins - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_counter_versioning() { - // TODO: Implement CRDT counter test - // - Store with counter 0 - // - Store with counter 1 (should win) - // - Store with counter 0 again (should be rejected) - // - Verify counter 1 version is returned - } - - /// Test 8: Counter must be strictly increasing - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_counter_must_increase() { - // TODO: Verify that same or lower counter updates are rejected - } - - /// Test 9: Cross-node replication with version sync - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_replication_version_sync() { - // TODO: Store on node A, update on node B, verify sync - } - - /// Test 10: Payment verification for scratchpad storage - #[test] - #[ignore = "Requires real P2P testnet and Anvil - run with --ignored"] - fn test_scratchpad_payment_verification() { - // TODO: Implement with TestHarness and TestAnvil - } - - /// Test 11: Large scratchpad (4MB max) - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_large_data() { - // TODO: Store and retrieve 4MB scratchpad - } - - /// Test 12: Reject oversized scratchpad - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_reject_oversized() { - // TODO: Attempt to store > 4MB scratchpad, verify rejection - } - - /// Test 13: Owner signature verification - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_owner_signature() { - // TODO: Verify only owner can update scratchpad (ML-DSA-65 signature) - } - - /// Test 14: Reject updates from non-owner - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_reject_non_owner_update() { - // TODO: Attempt update with wrong key, verify rejection - } - - /// Test 15: Content type is preserved - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_content_type_preserved() { - // TODO: Store with content_type=42, verify it's preserved on retrieval - } - - /// Test 16: Retrieve non-existent scratchpad returns None - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_retrieve_nonexistent() { - // TODO: Query random owner, verify None returned - } - - /// Test 17: Concurrent updates resolve to highest counter - #[test] - #[ignore = "Requires real P2P testnet - run with --ignored"] - fn test_scratchpad_concurrent_updates() { - // TODO: Simulate concurrent updates with different counters, - // verify CRDT semantics (highest counter wins) - } } diff --git a/tests/e2e/harness.rs b/tests/e2e/harness.rs index 5ee9103d..d75ce09f 100644 --- a/tests/e2e/harness.rs +++ b/tests/e2e/harness.rs @@ -5,8 +5,11 @@ use super::anvil::TestAnvil; use super::testnet::{TestNetwork, TestNetworkConfig, TestNode}; +use evmlib::common::TxHash; use saorsa_core::P2PNode; -use std::sync::Arc; +use saorsa_node::client::XorName; +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; use tracing::info; /// Error type for test harness operations. @@ -28,11 +31,131 @@ pub enum HarnessError { /// Result type for harness operations. pub type Result = std::result::Result; +/// Payment tracking record for a chunk. +#[derive(Debug, Clone)] +pub struct PaymentRecord { + /// The chunk address that was paid for. + pub chunk_address: XorName, + /// Transaction hashes for this payment (typically 1 for `SingleNode` strategy). + pub tx_hashes: Vec, + /// Timestamp when the payment was recorded. + pub timestamp: std::time::SystemTime, +} + +/// Tracks on-chain payments made during tests. +/// +/// This allows tests to verify that payment caching works correctly +/// and that duplicate payments are not made for the same chunk. +#[derive(Debug, Clone, Default)] +pub struct PaymentTracker { + /// Map from chunk address to payment records. + /// Multiple payments for the same chunk indicate a bug. + payments: Arc>>>, +} + +impl PaymentTracker { + /// Create a new payment tracker. + #[must_use] + pub fn new() -> Self { + Self { + payments: Arc::new(Mutex::new(HashMap::new())), + } + } + + /// Record a payment for a chunk. + /// + /// This should be called after a successful `wallet.pay_for_quotes()` call. + pub fn record_payment(&self, chunk_address: XorName, tx_hashes: Vec) { + let record = PaymentRecord { + chunk_address, + tx_hashes, + timestamp: std::time::SystemTime::now(), + }; + + let mut payments = self + .payments + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + payments.entry(chunk_address).or_default().push(record); + } + + /// Get the number of payments made for a specific chunk. + /// + /// # Returns + /// + /// - `0` if no payments were made + /// - `1` if one payment was made (expected) + /// - `>1` if duplicate payments were made (bug - cache failed) + #[must_use] + pub fn payment_count(&self, chunk_address: &XorName) -> usize { + let payments = self + .payments + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + payments.get(chunk_address).map_or(0, Vec::len) + } + + /// Get all payment records for a specific chunk. + #[must_use] + pub fn get_payments(&self, chunk_address: &XorName) -> Vec { + let payments = self + .payments + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + payments.get(chunk_address).cloned().unwrap_or_default() + } + + /// Get the total number of unique chunks that have been paid for. + #[must_use] + pub fn unique_chunk_count(&self) -> usize { + let payments = self + .payments + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + payments.len() + } + + /// Get the total number of payment transactions (across all chunks). + #[must_use] + pub fn total_payment_count(&self) -> usize { + let payments = self + .payments + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + payments.values().map(Vec::len).sum() + } + + /// Check if any chunk has duplicate payments (indicates cache failure). + #[must_use] + pub fn has_duplicate_payments(&self) -> bool { + let payments = self + .payments + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + payments.values().any(|records| records.len() > 1) + } + + /// Get all chunks with duplicate payments. + #[must_use] + pub fn chunks_with_duplicates(&self) -> Vec { + let payments = self + .payments + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + payments + .iter() + .filter(|(_, records)| records.len() > 1) + .map(|(addr, _)| *addr) + .collect() + } +} + /// Test harness that manages the complete test environment. /// /// The harness coordinates: /// - A network of 25 saorsa nodes /// - Optional Anvil EVM testnet for payment verification +/// - Payment tracking for verifying cache behavior /// - Helper methods for common test operations pub struct TestHarness { /// The test network. @@ -40,6 +163,9 @@ pub struct TestHarness { /// Optional Anvil EVM testnet. anvil: Option, + + /// Payment tracker for monitoring on-chain payments. + payment_tracker: PaymentTracker, } impl TestHarness { @@ -72,6 +198,7 @@ impl TestHarness { Ok(Self { network, anvil: None, + payment_tracker: PaymentTracker::new(), }) } @@ -104,6 +231,17 @@ impl TestHarness { Self::setup_with_evm_and_config(TestNetworkConfig::default()).await } + /// Create and start a test network with Anvil EVM testnet (alias for `setup_with_evm`). + /// + /// Use this for tests that require payment verification. + /// + /// # Errors + /// + /// Returns an error if the network or Anvil fails to start. + pub async fn setup_with_payments() -> Result { + Self::setup_with_evm().await + } + /// Create and start a test network with Anvil EVM testnet and custom config. /// /// # Arguments @@ -122,6 +260,10 @@ impl TestHarness { let mut network = TestNetwork::new(config).await?; network.start().await?; + // Warm up DHT routing tables (essential for quote collection) + info!("Warming up DHT routing tables..."); + network.warmup_dht().await?; + let anvil = TestAnvil::new() .await .map_err(|e| HarnessError::Anvil(format!("Failed to start Anvil: {e}")))?; @@ -129,9 +271,21 @@ impl TestHarness { Ok(Self { network, anvil: Some(anvil), + payment_tracker: PaymentTracker::new(), }) } + /// Access the payment tracker for verifying on-chain payment behavior. + /// + /// This allows tests to verify that: + /// - Payments are actually made + /// - Payment caching prevents duplicate payments + /// - Multiple stores of the same chunk only pay once + #[must_use] + pub fn payment_tracker(&self) -> &PaymentTracker { + &self.payment_tracker + } + /// Access the test network. #[must_use] pub fn network(&self) -> &TestNetwork { @@ -180,6 +334,16 @@ impl TestHarness { self.network.node(index) } + /// Access a specific test node mutably. + /// + /// # Arguments + /// + /// * `index` - The node index (0-based) + #[must_use] + pub fn test_node_mut(&mut self, index: usize) -> Option<&mut TestNode> { + self.network.node_mut(index) + } + /// Get a random non-bootstrap node. /// /// Useful for tests that need to pick an arbitrary regular node. @@ -242,6 +406,57 @@ impl TestHarness { self.network.total_connections().await } + /// Shutdown a specific node by index. + /// + /// This simulates a node failure during testing. The node is gracefully shut down + /// and removed from the network. The remaining nodes continue to operate. + /// + /// # Arguments + /// + /// * `index` - The index of the node to shutdown (0-based) + /// + /// # Errors + /// + /// Returns an error if the node index is invalid or shutdown fails. + pub async fn shutdown_node(&mut self, index: usize) -> Result<()> { + self.network.shutdown_node(index).await?; + Ok(()) + } + + /// Shutdown multiple nodes by their indices. + /// + /// This is a convenience method for simulating multiple node failures at once. + /// + /// # Arguments + /// + /// * `indices` - Slice of node indices to shutdown + /// + /// # Errors + /// + /// Returns an error if any node index is invalid or shutdown fails. + pub async fn shutdown_nodes(&mut self, indices: &[usize]) -> Result<()> { + self.network.shutdown_nodes(indices).await?; + Ok(()) + } + + /// Get the number of currently running nodes. + pub async fn running_node_count(&self) -> usize { + self.network.running_node_count().await + } + + /// Warm up DHT routing tables for quote collection. + /// + /// This method populates DHT routing tables by performing random lookups, + /// which is necessary before using `get_quotes_from_dht()`. + /// + /// # Errors + /// + /// Returns an error if DHT warmup fails. + pub async fn warmup_dht(&self) -> Result<()> { + self.network.warmup_dht().await?; + Ok(()) + } + /// Teardown the test harness. /// /// This shuts down all nodes and the Anvil testnet if running. diff --git a/tests/e2e/integration_tests.rs b/tests/e2e/integration_tests.rs index 62c1132e..ae52a8c0 100644 --- a/tests/e2e/integration_tests.rs +++ b/tests/e2e/integration_tests.rs @@ -12,12 +12,12 @@ use super::{NetworkState, TestHarness, TestNetwork, TestNetworkConfig}; use bytes::Bytes; use saorsa_core::P2PEvent; use saorsa_node::client::{QuantumClient, QuantumConfig}; +use serial_test::serial; use std::sync::Arc; use std::time::Duration; /// Test that a minimal network (5 nodes) can form and stabilize. #[tokio::test] -#[ignore = "Requires real P2P node spawning - run with --ignored"] async fn test_minimal_network_formation() { // TestNetworkConfig automatically generates unique ports and data dirs let harness = TestHarness::setup_minimal() @@ -41,7 +41,6 @@ async fn test_minimal_network_formation() { /// Test that a small network (10 nodes) can form and stabilize. #[tokio::test] -#[ignore = "Requires real P2P node spawning - run with --ignored"] async fn test_small_network_formation() { // TestNetworkConfig automatically generates unique ports and data dirs let harness = TestHarness::setup_small() @@ -63,7 +62,6 @@ async fn test_small_network_formation() { /// Test that the full 25-node network can form. #[tokio::test] -#[ignore = "Requires real P2P node spawning - run with --ignored"] async fn test_full_network_formation() { let harness = TestHarness::setup().await.expect("Failed to setup harness"); @@ -89,7 +87,6 @@ async fn test_full_network_formation() { /// Test custom network configuration. #[tokio::test] -#[ignore = "Requires real P2P node spawning - run with --ignored"] async fn test_custom_network_config() { // Override only the settings we care about; ports and data dir are auto-generated let config = TestNetworkConfig { @@ -113,7 +110,7 @@ async fn test_custom_network_config() { /// Test network with EVM testnet. #[tokio::test] -#[ignore = "Requires real P2P node spawning and Anvil - run with --ignored"] +#[serial] async fn test_network_with_evm() { // TestNetworkConfig automatically generates unique ports and data dirs let harness = TestHarness::setup_with_evm() @@ -124,8 +121,9 @@ async fn test_network_with_evm() { assert!(harness.has_evm()); let anvil = harness.anvil().expect("Anvil should be present"); - assert!(anvil.is_healthy().await); - assert!(!anvil.rpc_url().is_empty()); + // Verify the Anvil testnet is usable by checking we can get a network config + let _network = anvil.to_network(); + assert!(!anvil.default_wallet_key().is_empty()); harness.teardown().await.expect("Failed to teardown"); } @@ -218,7 +216,7 @@ async fn test_node_to_node_messaging() { !peers.is_empty(), "Node 3 should have at least one connected peer" ); - let target_peer_id = peers[0].clone(); + let target_peer_id = *peers.first().expect("Should have at least one peer"); let sender_p2p = sender.p2p_node.as_ref().expect("Node 3 should be running"); @@ -309,11 +307,15 @@ async fn test_quantum_client_chunk_round_trip() { let client = QuantumClient::new(config).with_node(Arc::clone(&node)); // ── PUT ────────────────────────────────────────────────────────────── + // Nodes use payment_enforcement: false, so we send a dummy proof via + // put_chunk_with_proof() (put_chunk() requires a wallet since the + // client-side early-rejection fix). let content = Bytes::from("quantum client e2e test payload"); + let dummy_proof = vec![0u8; 64]; let address = client - .put_chunk(content.clone()) + .put_chunk_with_proof(content.clone(), dummy_proof) .await - .expect("QuantumClient::put_chunk should succeed"); + .expect("QuantumClient::put_chunk_with_proof should succeed"); // Address must equal SHA256(content) let expected_address = saorsa_node::compute_address(&content); diff --git a/tests/e2e/live_testnet.rs b/tests/e2e/live_testnet.rs index 78c2fd4f..35010e0e 100644 --- a/tests/e2e/live_testnet.rs +++ b/tests/e2e/live_testnet.rs @@ -1,7 +1,8 @@ //! Live testnet tests for load testing and data verification. //! -//! These tests connect to the live 200-node testnet for comprehensive testing. +//! These tests connect to the live saorsa testnet for comprehensive testing. //! They are designed to be run via shell scripts that set environment variables. +//! When environment variables are not set, the tests skip gracefully. #![allow( clippy::unwrap_used, @@ -92,13 +93,18 @@ fn generate_chunk(index: usize, size_kb: usize) -> Vec { /// Load test: store thousands of chunks on the testnet. /// /// Environment variables: +/// - `SAORSA_TEST_LIVE`: Must be set to "true" to run this test /// - `SAORSA_TEST_CHUNK_COUNT`: Number of chunks to store (default: 1000) /// - `SAORSA_TEST_CHUNK_SIZE_KB`: Size of each chunk in KB (default: 1) /// - `SAORSA_TEST_CONCURRENCY`: Concurrent operations (default: 10) /// - `SAORSA_TEST_ADDRESSES_FILE`: File to write chunk addresses to #[tokio::test] -#[ignore = "Live testnet test - run via load-test.sh"] async fn run_load_test() { + if env::var("SAORSA_TEST_LIVE").as_deref() != Ok("true") { + println!("Skipping: SAORSA_TEST_LIVE not set to 'true'"); + return; + } + let chunk_count: usize = env::var("SAORSA_TEST_CHUNK_COUNT") .unwrap_or_else(|_| "1000".to_string()) .parse() @@ -211,11 +217,16 @@ async fn run_load_test() { /// Verify chunks: check that all stored chunks are retrievable. /// /// Environment variables: +/// - `SAORSA_TEST_LIVE`: Must be set to "true" to run this test /// - `SAORSA_TEST_ADDRESSES_FILE`: File containing chunk addresses to verify /// - `SAORSA_TEST_SAMPLE_SIZE`: Number of chunks to sample (default: all) #[tokio::test] -#[ignore = "Live testnet test - run via churn-verify.sh"] async fn run_verify_chunks() { + if env::var("SAORSA_TEST_LIVE").as_deref() != Ok("true") { + println!("Skipping: SAORSA_TEST_LIVE not set to 'true'"); + return; + } + let addresses_file = env::var("SAORSA_TEST_ADDRESSES_FILE").expect("SAORSA_TEST_ADDRESSES_FILE not set"); @@ -346,8 +357,9 @@ async fn run_verify_chunks() { /// /// This test stores a moderate number of chunks and immediately verifies /// they can be retrieved from different parts of the network. +/// +/// Set `SAORSA_TEST_EXTERNAL=true` to run this test. #[tokio::test] -#[ignore = "Live testnet test - requires SAORSA_TEST_EXTERNAL=true"] async fn run_comprehensive_data_tests() { if env::var("SAORSA_TEST_EXTERNAL").is_err() { println!("Skipping: SAORSA_TEST_EXTERNAL not set"); diff --git a/tests/e2e/mod.rs b/tests/e2e/mod.rs index c0cc2567..b81e89c6 100644 --- a/tests/e2e/mod.rs +++ b/tests/e2e/mod.rs @@ -47,6 +47,15 @@ mod integration_tests; #[cfg(test)] mod live_testnet; +#[cfg(test)] +mod payment_flow; + +#[cfg(test)] +mod complete_payment_e2e; + +#[cfg(test)] +mod security_attacks; + pub use anvil::TestAnvil; pub use harness::TestHarness; pub use testnet::{NetworkState, NodeState, TestNetwork, TestNetworkConfig, TestNode}; diff --git a/tests/e2e/payment_flow.rs b/tests/e2e/payment_flow.rs new file mode 100644 index 00000000..1aa11eb5 --- /dev/null +++ b/tests/e2e/payment_flow.rs @@ -0,0 +1,652 @@ +//! E2E tests for payment-enabled chunk storage across multiple nodes. +//! +//! These tests validate the full payment workflow for chunk storage: +//! +//! **Payment Workflow**: +//! 1. Client requests quotes from 5 network nodes via DHT +//! 2. Client sorts quotes by price and selects median +//! 3. Client pays median node 3x on Arbitrum (`SingleNode` payment strategy) +//! 4. Client sends 0 atto to the other 4 nodes for verification +//! 5. Client sends chunk with `ProofOfPayment` to storage nodes +//! 6. Nodes verify payment on-chain before storing (when EVM verification enabled) +//! 7. Chunk is retrievable from the network +//! +//! **Test Coverage**: +//! - Network setup with 10-node test network and Anvil EVM testnet +//! - Wallet creation and funding +//! - Quote collection from DHT peers +//! - Median price calculation and `SingleNode` payment +//! - On-chain payment verification +//! - Payment cache preventing duplicate payments +//! - Network resilience with node failures +//! +//! **Network Setup**: Uses a 10-node test network (need 8+ for `CLOSE_GROUP_SIZE`). + +use super::harness::TestHarness; +use bytes::Bytes; +use evmlib::testnet::Testnet; +use evmlib::wallet::Wallet; +use saorsa_node::client::QuantumClient; +use saorsa_node::payment::SingleNodePayment; +use serial_test::serial; +use std::time::Duration; +use tokio::time::sleep; +use tracing::{info, warn}; + +/// Test environment containing both the test network and EVM testnet. +struct PaymentTestEnv { + /// Test harness managing the saorsa node network + harness: TestHarness, + /// Anvil EVM testnet for payment testing + testnet: Testnet, +} + +impl PaymentTestEnv { + /// Teardown the test environment. + async fn teardown(self) -> Result<(), Box> { + self.harness.teardown().await?; + Ok(()) + } + + /// Create a funded wallet from the Anvil testnet. + fn create_funded_wallet(&self) -> Result> { + let network = self.testnet.to_network(); + let private_key = self.testnet.default_wallet_private_key(); + + let wallet = Wallet::new_from_private_key(network, &private_key)?; + info!("Created funded wallet: {}", wallet.address()); + + Ok(wallet) + } +} + +/// Initialize test network and EVM testnet for payment E2E tests. +/// +/// This sets up: +/// - Anvil EVM testnet FIRST (so nodes can verify on the same chain) +/// - 10-node saorsa test network with `payment_enforcement: true` +/// - Network stabilization wait (5 seconds for 10 nodes) +/// +/// All nodes share the SAME Anvil instance as the client wallet, +/// so on-chain verification is real, not bypassed. +/// +/// # Returns +/// +/// A `PaymentTestEnv` containing both the network harness and EVM testnet. +async fn init_testnet_and_evm() -> Result> { + info!("Initializing payment test environment"); + + // Start Anvil EVM testnet FIRST so we can wire it to nodes + let testnet = Testnet::new().await; + let network = testnet.to_network(); + info!("Anvil testnet started"); + + // Setup 10-node network with payment enforcement ON and the + // SAME Anvil network so nodes verify on the same chain the client pays on. + let config = super::testnet::TestNetworkConfig::small() + .with_payment_enforcement() + .with_evm_network(network); + + // Use setup_with_config (NOT setup_with_evm_and_config) because we already + // created our own Testnet above — creating another would double-bind the port. + let harness = TestHarness::setup_with_config(config).await?; + + info!("10-node test network started with payment enforcement ENABLED"); + + // Wait for network to stabilize (10 nodes need more time) + sleep(Duration::from_secs(10)).await; + + let total_connections = harness.total_connections().await; + info!("Network stabilized with {total_connections} total connections"); + + // Warm up DHT routing tables (essential for quote collection and chunk routing) + harness.warmup_dht().await?; + sleep(Duration::from_secs(5)).await; + info!("Payment test environment ready"); + + Ok(PaymentTestEnv { harness, testnet }) +} + +/// Test: Client pays and stores chunk on 5-node network. +/// +/// This validates the full end-to-end payment flow: +/// - Network discovery via DHT +/// - Quote collection from multiple nodes +/// - Median price calculation +/// - On-chain payment on Arbitrum +/// - Chunk storage after payment verification +/// - Cross-node retrieval +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_client_pays_and_stores_on_network() -> Result<(), Box> { + info!("Starting E2E payment test: client pays and stores on network"); + + // Initialize test environment (network + EVM) + let mut env = init_testnet_and_evm().await?; + + // Create funded wallet for client + let wallet = env.create_funded_wallet()?; + + // Configure node 0 as the client with wallet + let client_node = env.harness.test_node_mut(0).ok_or("Node 0 not found")?; + client_node.set_wallet(wallet); + + info!("Client configured with funded wallet"); + + // Store a chunk using the payment-enabled client + let test_data = b"Test data for payment E2E flow"; + info!("Storing {} bytes", test_data.len()); + + let address = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .store_chunk_with_payment(test_data) + .await?; + info!("Chunk stored successfully at: {}", hex::encode(address)); + + // Verify chunk is retrievable via DHT-routed client (same routing as PUT) + sleep(Duration::from_millis(500)).await; + + let retrieved = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .get_chunk_with_client(&address) + .await?; + + assert!( + retrieved.is_some(), + "Chunk should be retrievable via DHT routing" + ); + + let chunk = retrieved.ok_or("Chunk not found")?; + assert_eq!( + chunk.content.as_ref(), + test_data, + "Retrieved data should match original" + ); + + info!("✅ Chunk successfully retrieved via DHT routing"); + + env.teardown().await?; + Ok(()) +} + +/// Test: Multiple clients store chunks with independent payments. +/// +/// Validates that: +/// - Multiple clients can operate concurrently +/// - Each payment is independent +/// - All chunks are stored correctly +/// - Payment cache doesn't interfere between clients +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_multiple_clients_concurrent_payments() -> Result<(), Box> { + info!("Starting E2E payment test: multiple clients with concurrent payments"); + + // Initialize test environment (network + EVM) + let mut env = init_testnet_and_evm().await?; + + // Create 3 clients with separate wallets + for i in 0..3 { + let wallet = env.create_funded_wallet()?; + let node = env + .harness + .test_node_mut(i) + .ok_or_else(|| format!("Node {i} not found"))?; + node.set_wallet(wallet); + } + + info!("Created 3 clients with independent funded wallets"); + + // Extra stabilization after wallet setup + sleep(Duration::from_secs(3)).await; + + // Store chunks concurrently using payment-enabled client + let mut addresses = Vec::new(); + for i in 0..3 { + let data = format!("Data from client {i}"); + let address = env + .harness + .test_node(i) + .ok_or_else(|| format!("Node {i} not found"))? + .store_chunk_with_payment(data.as_bytes()) + .await?; + info!("Client {} stored chunk at: {}", i, hex::encode(address)); + addresses.push(address); + } + + assert_eq!(addresses.len(), 3, "All clients should store successfully"); + + // Verify all chunks are retrievable via DHT routing + for (i, address) in addresses.iter().enumerate() { + let retrieved = env + .harness + .test_node(i) + .ok_or_else(|| format!("Node {i} not found"))? + .get_chunk_with_client(address) + .await?; + + assert!(retrieved.is_some(), "Chunk {i} should be retrievable"); + + let expected = format!("Data from client {i}"); + assert_eq!( + retrieved.ok_or("Chunk not found")?.content.as_ref(), + expected.as_bytes(), + "Retrieved data should match for client {i}" + ); + } + + info!("✅ All chunks from multiple clients verified"); + + env.teardown().await?; + Ok(()) +} + +/// Test: Payment verification prevents storage without valid payment. +/// +/// Validates that: +/// - Nodes reject chunks without payment when EVM verification is enabled +/// - Payment verification is enforced on the server side +/// - Clients without wallets get appropriate errors +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_payment_required_enforcement() -> Result<(), Box> { + info!("Starting E2E payment test: payment enforcement validation"); + + // Start Anvil EVM testnet FIRST so we can wire it to nodes + let testnet = Testnet::new().await; + let network = testnet.to_network(); + info!("Anvil testnet started"); + + // Setup 10-node network with payment enforcement ON and the + // SAME Anvil network so nodes verify on the same chain. + let config = super::testnet::TestNetworkConfig::small() + .with_payment_enforcement() + .with_evm_network(network); + + // Use setup_with_config (NOT setup_with_evm_and_config) because we already + // created our own Testnet above — creating another would double-bind the port. + let harness = TestHarness::setup_with_config(config).await?; + + info!("10-node test network started with payment enforcement ENABLED"); + + // Wait for network to stabilize (10 nodes need more time) + sleep(Duration::from_secs(5)).await; + + let total_connections = harness.total_connections().await; + info!("Payment test environment ready: {total_connections} total connections"); + + let env = PaymentTestEnv { harness, testnet }; + + // Try to store without wallet (should fail) + let client_without_wallet = + QuantumClient::with_defaults().with_node(env.harness.node(0).ok_or("Node 0 not found")?); + + let test_data = b"This should be rejected"; + let result = client_without_wallet + .put_chunk(Bytes::from(test_data.to_vec())) + .await; + + assert!(result.is_err(), "Store should fail without wallet/payment"); + + info!("✅ Payment enforcement validated - storage rejected without payment"); + + env.teardown().await?; + Ok(()) +} + +/// Test: Large chunk storage with payment. +/// +/// Validates that: +/// - Large chunks (near max size) work with payment flow +/// - Quote prices scale appropriately with chunk size +/// - Payment and storage succeed for realistic data sizes +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_large_chunk_payment_flow() -> Result<(), Box> { + info!("Starting E2E payment test: large chunk storage"); + + // Initialize test environment (network + EVM) + let mut env = init_testnet_and_evm().await?; + + // Configure client with wallet + let wallet = env.create_funded_wallet()?; + env.harness + .test_node_mut(0) + .ok_or("Node 0 not found")? + .set_wallet(wallet); + + // Create a large chunk (512 KB) + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + let large_data: Vec = (0..524_288).map(|i| (i % 256) as u8).collect(); + info!("Storing large chunk: {} bytes", large_data.len()); + + let address = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .store_chunk_with_payment(&large_data) + .await?; + info!("Large chunk stored at: {}", hex::encode(address)); + + // Verify retrieval via DHT routing + sleep(Duration::from_millis(500)).await; + + let retrieved = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .get_chunk_with_client(&address) + .await?; + + assert!(retrieved.is_some(), "Large chunk should be retrievable"); + + let chunk = retrieved.ok_or("Chunk not found")?; + assert_eq!( + chunk.content.len(), + large_data.len(), + "Retrieved size should match" + ); + assert_eq!( + chunk.content.as_ref(), + large_data.as_slice(), + "Retrieved data should match original" + ); + + info!("✅ Large chunk payment flow validated"); + + env.teardown().await?; + Ok(()) +} + +/// Test: Idempotent chunk storage — storing the same chunk twice succeeds. +/// +/// Validates that: +/// - First store with payment succeeds +/// - Second store of same data returns same address (`AlreadyExists` on node) +/// - Both stores produce valid addresses +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_idempotent_chunk_storage() -> Result<(), Box> { + info!("Starting E2E payment test: idempotent chunk storage"); + + // Initialize test environment (network + EVM) + let mut env = init_testnet_and_evm().await?; + + // Configure client + let wallet = env.create_funded_wallet()?; + env.harness + .test_node_mut(0) + .ok_or("Node 0 not found")? + .set_wallet(wallet); + + let test_data = b"Test data for idempotent storage"; + + // First store + let address1 = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .store_chunk_with_payment(test_data) + .await?; + info!("First store: {}", hex::encode(address1)); + + // Second store of same data — node should respond with AlreadyExists + let address2 = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .store_chunk_with_payment(test_data) + .await?; + info!("Second store: {}", hex::encode(address2)); + + assert_eq!( + address1, address2, + "Same data should produce same address on both stores" + ); + + // Verify chunk is retrievable + let retrieved = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .get_chunk_with_client(&address1) + .await?; + + assert!( + retrieved.is_some(), + "Chunk should be retrievable after idempotent store" + ); + + let chunk = retrieved.ok_or("Chunk not found")?; + assert_eq!( + chunk.content.as_ref(), + test_data, + "Retrieved data should match original" + ); + + info!("✅ Idempotent chunk storage validated"); + + env.teardown().await?; + Ok(()) +} + +/// Test: Quote collection from DHT peers. +/// +/// Validates that: +/// - Client can discover and contact peers via DHT +/// - Multiple quotes are received +/// - Median price calculation works correctly +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_quote_collection_via_dht() -> Result<(), Box> { + info!("Starting E2E payment test: quote collection via DHT"); + + // Initialize test environment (network + EVM) + let env = init_testnet_and_evm().await?; + + // Create a client connected to node 0 + let client = + QuantumClient::with_defaults().with_node(env.harness.node(0).ok_or("Node 0 not found")?); + + // Prepare test data + let test_data = b"Test data for quote collection"; + info!("Requesting quotes for {} bytes", test_data.len()); + + // Request quotes from DHT peers + let quotes_with_prices = client.get_quotes_from_dht(test_data).await?; + + // Validate we got exactly 5 quotes (REQUIRED_QUOTES) + assert_eq!( + quotes_with_prices.len(), + 5, + "Should collect exactly 5 quotes" + ); + + info!( + "✅ Successfully collected {} quotes from DHT", + quotes_with_prices.len() + ); + + // Validate each quote has a price and peer ID + for (i, (peer_id, quote, price)) in quotes_with_prices.iter().enumerate() { + info!( + "Quote {}: peer = {peer_id}, price = {} atto, address = {}", + i + 1, + price, + quote.rewards_address + ); + + // Verify quote content matches our data + let address = saorsa_node::compute_address(test_data); + assert_eq!( + quote.content.0, address, + "Quote content address should match computed address" + ); + } + + // Create SingleNodePayment to test median selection (strip peer IDs) + let quotes_for_payment: Vec<_> = quotes_with_prices + .into_iter() + .map(|(_peer_id, quote, price)| (quote, price)) + .collect(); + let payment = SingleNodePayment::from_quotes(quotes_for_payment)?; + + info!("✅ Successfully created SingleNodePayment from quotes"); + info!(" Total payment amount: {} atto", payment.total_amount()); + info!( + " Paid quote (median): {} atto", + payment + .paid_quote() + .ok_or("Missing paid quote at median index")? + .amount + ); + + // Verify only the median quote has a non-zero amount + let non_zero_quotes = payment + .quotes + .iter() + .filter(|q| q.amount > ant_evm::Amount::ZERO) + .count(); + assert_eq!( + non_zero_quotes, 1, + "Only median quote should have non-zero amount" + ); + + info!("✅ Quote collection and median selection validated"); + + env.teardown().await?; + Ok(()) +} + +/// Test: Network resilience - storage succeeds even if some nodes fail. +/// +/// Validates that: +/// - Payment flow works when some nodes are unavailable +/// - Chunk is still stored on available nodes +/// - System gracefully handles partial failures +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_payment_with_node_failures() -> Result<(), Box> { + info!("Starting E2E payment test: resilience with node failures"); + + // Initialize test environment (network + EVM) + let mut env = init_testnet_and_evm().await?; + + // Configure client + let wallet = env.create_funded_wallet()?; + env.harness + .test_node_mut(0) + .ok_or("Node 0 not found")? + .set_wallet(wallet); + + // Verify initial network has all nodes running + let initial_count = env.harness.running_node_count().await; + info!("Initial network has {} running nodes", initial_count); + assert_eq!(initial_count, 10, "Should start with 10 nodes"); + + // Simulate node failures by shutting down nodes 5, 6, and 7 + info!("Simulating node failures: shutting down nodes 5, 6, 7"); + env.harness.shutdown_nodes(&[5, 6, 7]).await?; + + // Wait for network to adapt to failures + sleep(Duration::from_secs(15)).await; + + // Verify nodes are shut down + let remaining_count = env.harness.running_node_count().await; + info!("After failures: {remaining_count} running nodes remain"); + assert_eq!( + remaining_count, 7, + "Should have 7 nodes after shutting down 3" + ); + + // Re-warm DHT after node failures so routing tables adapt + env.harness.warmup_dht().await?; + sleep(Duration::from_secs(15)).await; + + // Store a chunk with the remaining nodes (7 nodes still > 5 needed for quotes) + let test_data = b"Resilience test data"; + let mut address = None; + for attempt in 1..=10 { + info!("Storage attempt {attempt}/10 after node failures..."); + match env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .store_chunk_with_payment(test_data) + .await + { + Ok(addr) => { + address = Some(addr); + break; + } + Err(e) => { + warn!("Storage attempt {attempt}/10 failed: {e}"); + if attempt < 10 { + let _ = env.harness.warmup_dht().await; + sleep(Duration::from_secs(10)).await; + } + } + } + } + let address = address.ok_or("Storage MUST succeed after node failures with 10 attempts")?; + + info!( + "Successfully stored chunk despite simulated failures: {}", + hex::encode(address) + ); + + // Verify chunk is retrievable via DHT routing + sleep(Duration::from_millis(500)).await; + + let retrieved = env + .harness + .test_node(0) + .ok_or("Node 0 not found")? + .get_chunk_with_client(&address) + .await?; + + assert!( + retrieved.is_some(), + "Chunk should be retrievable despite node failures" + ); + + let chunk = retrieved.ok_or("Chunk not found")?; + assert_eq!( + chunk.content.as_ref(), + test_data, + "Retrieved data should match original" + ); + + info!( + "✅ Network resilience validated: storage succeeds with {} nodes after 3 failures", + remaining_count + ); + + env.teardown().await?; + Ok(()) +} + +#[cfg(test)] +mod helper_tests { + use super::*; + + /// Test initialization helper. + #[tokio::test] + #[serial] + async fn test_init_testnet_and_evm() -> Result<(), Box> { + let env = init_testnet_and_evm().await?; + + // Verify we can create wallets + let wallet = env.create_funded_wallet()?; + assert!(!wallet.address().to_string().is_empty()); + + // Verify harness is accessible + assert!(env.harness.node(0).is_some(), "Node 0 should exist"); + + env.teardown().await?; + Ok(()) + } +} diff --git a/tests/e2e/security_attacks.rs b/tests/e2e/security_attacks.rs new file mode 100644 index 00000000..2d762c66 --- /dev/null +++ b/tests/e2e/security_attacks.rs @@ -0,0 +1,755 @@ +//! Security attack tests: adversarial payment bypass attempts. +//! +//! These tests simulate a malicious attacker trying to store data on the +//! saorsa network WITHOUT paying. Every test uses `payment_enforcement: true` +//! on all nodes. Every test MUST verify the attack is REJECTED. +//! +//! The attacker cannot modify source code -- only craft malicious messages. + +#![allow(clippy::unwrap_used, clippy::expect_used)] + +use super::harness::TestHarness; +use super::testnet::TestNetworkConfig; +use ant_evm::ProofOfPayment; +use bytes::Bytes; +use evmlib::testnet::Testnet; +use evmlib::wallet::Wallet; +use rand::Rng; +use saorsa_node::ant_protocol::{ + ChunkMessage, ChunkMessageBody, ChunkPutRequest, ChunkPutResponse, ProtocolError, +}; +use saorsa_node::client::{hex_node_id_to_encoded_peer_id, QuantumClient}; +use saorsa_node::compute_address; +use saorsa_node::payment::{PaymentProof, SingleNodePayment}; +use serial_test::serial; +use std::time::Duration; +use tokio::time::sleep; +use tracing::{info, warn}; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/// Check if a `ChunkMessageBody` indicates payment rejection. +fn is_payment_rejection(body: &ChunkMessageBody) -> bool { + matches!( + body, + ChunkMessageBody::PutResponse( + ChunkPutResponse::PaymentRequired { .. } + | ChunkPutResponse::Error(ProtocolError::PaymentFailed(_)) + ) + ) +} + +/// Send a PUT request directly to a node's `AntProtocol` handler. +async fn send_put_to_node( + harness: &TestHarness, + node_index: usize, + request: ChunkPutRequest, +) -> Result { + let node = harness + .test_node(node_index) + .ok_or_else(|| format!("Node {node_index} not found"))?; + let protocol = node + .ant_protocol + .as_ref() + .ok_or("No ant_protocol on node")?; + + let request_id: u64 = rand::thread_rng().gen(); + let message = ChunkMessage { + request_id, + body: ChunkMessageBody::PutRequest(request), + }; + let message_bytes = message + .encode() + .map_err(|e| format!("Encode failed: {e}"))?; + let response_bytes = protocol + .handle_message(&message_bytes) + .await + .map_err(|e| format!("Handle failed: {e}"))?; + ChunkMessage::decode(&response_bytes).map_err(|e| format!("Decode failed: {e}")) +} + +/// Create a lightweight test harness with payment enforcement and Anvil wiring. +/// Returns (harness, testnet) -- keep testnet alive to avoid Anvil teardown. +async fn setup_enforcement_env() -> Result<(TestHarness, Testnet), Box> { + let testnet = Testnet::new().await; + let network = testnet.to_network(); + let config = TestNetworkConfig::minimal() + .with_payment_enforcement() + .with_evm_network(network); + let harness = TestHarness::setup_with_config(config).await?; + sleep(Duration::from_secs(5)).await; + Ok((harness, testnet)) +} + +/// Create a full test harness (10 nodes) with DHT warmup for quote collection. +/// Returns (harness, testnet, wallet). +async fn setup_full_payment_env( +) -> Result<(TestHarness, Testnet, Wallet), Box> { + let testnet = Testnet::new().await; + let network = testnet.to_network(); + let config = TestNetworkConfig::small() + .with_payment_enforcement() + .with_evm_network(network.clone()); + let harness = TestHarness::setup_with_config(config).await?; + sleep(Duration::from_secs(10)).await; + harness.warmup_dht().await?; + let private_key = testnet.default_wallet_private_key(); + let wallet = Wallet::new_from_private_key(network, &private_key)?; + Ok((harness, testnet, wallet)) +} + +// =========================================================================== +// Category 1: No/Invalid Proof Bytes (Direct Protocol Handler) +// =========================================================================== + +/// Attack: Send a valid chunk with NO payment proof at all. +/// Node MUST reject with `PaymentRequired`. +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_attack_no_payment_proof() -> Result<(), Box> { + info!("ATTACK TEST: no payment proof"); + + let (harness, _testnet) = setup_enforcement_env().await?; + + let test_data = b"Attack: no payment proof whatsoever"; + let address = compute_address(test_data); + let request = ChunkPutRequest::new(address, test_data.to_vec()); + + let response = send_put_to_node(&harness, 0, request) + .await + .map_err(|e| format!("Send failed: {e}"))?; + + assert!( + is_payment_rejection(&response.body), + "Attack MUST be rejected with payment error, got: {response:?}" + ); + info!("Correctly rejected: no payment proof"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Send a chunk with an empty byte array as payment proof (0 bytes). +/// Node MUST reject (proof too small, minimum 32 bytes). +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_attack_empty_proof_bytes() -> Result<(), Box> { + info!("ATTACK TEST: empty proof bytes"); + + let (harness, _testnet) = setup_enforcement_env().await?; + + let test_data = b"Attack: empty proof bytes"; + let address = compute_address(test_data); + let request = ChunkPutRequest::with_payment(address, test_data.to_vec(), vec![]); + + let response = send_put_to_node(&harness, 0, request) + .await + .map_err(|e| format!("Send failed: {e}"))?; + + assert!( + is_payment_rejection(&response.body), + "Attack MUST be rejected with payment error, got: {response:?}" + ); + info!("Correctly rejected: empty proof bytes"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Send 64 bytes of random garbage as payment proof. +/// Node MUST reject (deserialization failure). +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_attack_garbage_bytes_as_proof() -> Result<(), Box> { + info!("ATTACK TEST: garbage bytes as proof"); + + let (harness, _testnet) = setup_enforcement_env().await?; + + let test_data = b"Attack: garbage bytes as proof"; + let address = compute_address(test_data); + let garbage: Vec = (0..64).map(|_| rand::thread_rng().gen()).collect(); + let request = ChunkPutRequest::with_payment(address, test_data.to_vec(), garbage); + + let response = send_put_to_node(&harness, 0, request) + .await + .map_err(|e| format!("Send failed: {e}"))?; + + assert!( + is_payment_rejection(&response.body), + "Attack MUST be rejected with payment error, got: {response:?}" + ); + info!("Correctly rejected: garbage bytes"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Send a valid MessagePack-serialized `PaymentProof` but with empty quotes. +/// Node MUST reject ("Payment has no quotes"). +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_attack_valid_msgpack_empty_quotes() -> Result<(), Box> { + info!("ATTACK TEST: valid msgpack, empty quotes"); + + let (harness, _testnet) = setup_enforcement_env().await?; + + let test_data = b"Attack: valid msgpack, empty quotes"; + let address = compute_address(test_data); + + // Build a structurally valid but semantically empty proof + let empty_proof = PaymentProof { + proof_of_payment: ProofOfPayment { + peer_quotes: vec![], + }, + tx_hashes: vec![], + }; + let proof_bytes = + rmp_serde::to_vec(&empty_proof).map_err(|e| format!("Serialize failed: {e}"))?; + + // Pad to >= 32 bytes if needed (msgpack of empty proof is likely > 32 already) + let mut padded = proof_bytes; + while padded.len() < 32 { + padded.push(0); + } + + let request = ChunkPutRequest::with_payment(address, test_data.to_vec(), padded); + + let response = send_put_to_node(&harness, 0, request) + .await + .map_err(|e| format!("Send failed: {e}"))?; + + assert!( + is_payment_rejection(&response.body), + "Attack MUST be rejected with payment error, got: {response:?}" + ); + info!("Correctly rejected: empty quotes"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Send 200KB of garbage as payment proof (exceeds 100KB max). +/// Node MUST reject (proof too large). +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_attack_proof_too_large() -> Result<(), Box> { + info!("ATTACK TEST: proof too large (200KB)"); + + let (harness, _testnet) = setup_enforcement_env().await?; + + let test_data = b"Attack: oversized proof bytes"; + let address = compute_address(test_data); + let oversized: Vec = vec![0xAA; 200 * 1024]; // 200KB of junk + let request = ChunkPutRequest::with_payment(address, test_data.to_vec(), oversized); + + let response = send_put_to_node(&harness, 0, request) + .await + .map_err(|e| format!("Send failed: {e}"))?; + + assert!( + is_payment_rejection(&response.body), + "Attack MUST be rejected with payment error, got: {response:?}" + ); + info!("Correctly rejected: proof too large"); + + harness.teardown().await?; + Ok(()) +} + +// =========================================================================== +// Category 2: Cryptographic Attacks (Real Quotes + Anvil) +// =========================================================================== + +/// Helper: get quotes from DHT with retries (up to 5 attempts, exponential backoff). +async fn get_quotes_with_retries( + client: &QuantumClient, + test_data: &[u8], +) -> Result< + Vec<( + saorsa_core::identity::PeerId, + ant_evm::PaymentQuote, + ant_evm::Amount, + )>, + String, +> { + let mut last_err = String::new(); + for attempt in 1..=5u32 { + match client.get_quotes_from_dht(test_data).await { + Ok(quotes) => { + info!("Got {} quotes on attempt {attempt}", quotes.len()); + return Ok(quotes); + } + Err(e) => { + last_err = format!("{e}"); + warn!("Quote attempt {attempt} failed: {e}"); + if attempt < 5 { + sleep(Duration::from_secs(2u64.pow(attempt))).await; + } + } + } + } + Err(format!("Failed to get quotes after 5 attempts: {last_err}")) +} + +/// Helper: build a valid proof from quotes + wallet payment. +/// Returns (`proof_bytes`, `tx_hashes`). +async fn build_valid_proof( + quotes_with_prices: Vec<( + saorsa_core::identity::PeerId, + ant_evm::PaymentQuote, + ant_evm::Amount, + )>, + wallet: &Wallet, +) -> Result<(Vec, Vec), Box> { + let mut peer_quotes = Vec::with_capacity(quotes_with_prices.len()); + let mut quotes_for_payment = Vec::with_capacity(quotes_with_prices.len()); + for (peer_id_str, quote, price) in quotes_with_prices { + let encoded = hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| format!("Peer ID conversion failed: {e}"))?; + peer_quotes.push((encoded, quote.clone())); + quotes_for_payment.push((quote, price)); + } + let payment = SingleNodePayment::from_quotes(quotes_for_payment) + .map_err(|e| format!("Payment creation failed: {e}"))?; + let tx_hashes = payment + .pay(wallet) + .await + .map_err(|e| format!("Payment failed: {e}"))?; + let proof = PaymentProof { + proof_of_payment: ProofOfPayment { peer_quotes }, + tx_hashes: tx_hashes.clone(), + }; + let proof_bytes = rmp_serde::to_vec(&proof).map_err(|e| format!("Serialize failed: {e}"))?; + Ok((proof_bytes, tx_hashes)) +} + +/// Attack: Forge ALL ML-DSA-65 signatures on valid quotes + real payment. +/// Node MUST reject because signature verification fails. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_attack_forged_ml_dsa_signature() -> Result<(), Box> { + info!("ATTACK TEST: forged ML-DSA-65 signatures (ALL quotes)"); + + let (harness, _testnet, wallet) = setup_full_payment_env().await?; + + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + let test_data = b"Attack: forge all ML-DSA signatures"; + let quotes = get_quotes_with_retries(&client, test_data).await?; + + // Build peer_quotes and payment + let mut peer_quotes = Vec::with_capacity(quotes.len()); + let mut quotes_for_payment = Vec::with_capacity(quotes.len()); + for (peer_id_str, quote, price) in quotes { + let encoded = hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| format!("Peer ID conversion failed: {e}"))?; + peer_quotes.push((encoded, quote.clone())); + quotes_for_payment.push((quote, price)); + } + let payment = SingleNodePayment::from_quotes(quotes_for_payment) + .map_err(|e| format!("Payment creation failed: {e}"))?; + let tx_hashes = payment + .pay(&wallet) + .await + .map_err(|e| format!("Payment failed: {e}"))?; + + // CORRUPT ALL signatures (flip every byte) + let mut forged_quotes = peer_quotes; + for (_peer_id, ref mut quote) in &mut forged_quotes { + for byte in &mut quote.signature { + *byte = byte.wrapping_add(1); + } + } + + let forged_proof = PaymentProof { + proof_of_payment: ProofOfPayment { + peer_quotes: forged_quotes, + }, + tx_hashes, + }; + let forged_bytes = + rmp_serde::to_vec(&forged_proof).map_err(|e| format!("Serialize failed: {e}"))?; + + // Try to store with forged proof + let result = client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), forged_bytes) + .await; + + assert!( + result.is_err(), + "Attack MUST be rejected with forged signatures" + ); + let err_msg = format!("{}", result.expect_err("just asserted is_err")); + info!("Correctly rejected forged signatures: {err_msg}"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Pay for chunk A, try to store chunk B using chunk A's proof. +/// The proof was generated for A's xorname; on-chain verification should fail for B. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_attack_wrong_chunk_address() -> Result<(), Box> { + info!("ATTACK TEST: wrong chunk address (use A's proof for B)"); + + let (harness, _testnet, wallet) = setup_full_payment_env().await?; + + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + // Get quotes and pay for chunk A + let chunk_a_data = b"Attack: this is chunk A with valid payment"; + let quotes = get_quotes_with_retries(&client, chunk_a_data).await?; + let (proof_bytes_a, _tx_hashes) = build_valid_proof(quotes, &wallet).await?; + + // Try to store chunk B using chunk A's proof + let chunk_b_data = b"Attack: this is chunk B, using A's proof"; + let result = client + .put_chunk_with_proof(Bytes::from(chunk_b_data.to_vec()), proof_bytes_a) + .await; + + assert!( + result.is_err(), + "Attack MUST be rejected: proof was for a different chunk" + ); + let err_msg = format!("{}", result.expect_err("just asserted is_err")); + info!("Correctly rejected wrong chunk address: {err_msg}"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Replay chunk A's proof to store chunk B. +/// First legitimately store chunk A, then try to reuse its proof for chunk B. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_attack_replay_different_chunk() -> Result<(), Box> { + info!("ATTACK TEST: replay proof from chunk A to store chunk B"); + + let (harness, _testnet, wallet) = setup_full_payment_env().await?; + + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + // Legitimately upload chunk A + let chunk_a_data = b"Attack: legitimate chunk A for replay test"; + let quotes = get_quotes_with_retries(&client, chunk_a_data).await?; + let (proof_bytes_a, _tx_hashes) = build_valid_proof(quotes, &wallet).await?; + + // Store chunk A (should succeed) — retry for slow DHT on CI + let mut chunk_a_stored = false; + for attempt in 1..=5u32 { + match client + .put_chunk_with_proof(Bytes::from(chunk_a_data.to_vec()), proof_bytes_a.clone()) + .await + { + Ok(_addr) => { + chunk_a_stored = true; + break; + } + Err(e) => { + warn!("Legitimate store of chunk A attempt {attempt}/5 failed: {e}"); + if attempt < 5 { + let _ = harness.warmup_dht().await; + sleep(Duration::from_secs(3)).await; + } + } + } + } + assert!( + chunk_a_stored, + "Legitimate store of chunk A should succeed after retries" + ); + info!("Chunk A stored successfully (legitimate)"); + + // Now replay A's proof for chunk B + let chunk_b_data = b"Attack: trying to replay A's proof for chunk B"; + let result_b = client + .put_chunk_with_proof(Bytes::from(chunk_b_data.to_vec()), proof_bytes_a) + .await; + + assert!( + result_b.is_err(), + "Replay attack MUST be rejected: proof is for chunk A, not B" + ); + let err_msg = format!("{}", result_b.expect_err("just asserted is_err")); + info!("Correctly rejected replay attack: {err_msg}"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Build proof with real quotes but NO on-chain payment (empty `tx_hashes`). +/// Node MUST reject because on-chain verification finds no payment. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_attack_zero_amount_payment() -> Result<(), Box> { + info!("ATTACK TEST: real quotes but no on-chain payment (empty tx_hashes)"); + + let (harness, _testnet, wallet) = setup_full_payment_env().await?; + + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + let test_data = b"Attack: quotes but no payment"; + let quotes = get_quotes_with_retries(&client, test_data).await?; + + // Build peer_quotes from real quotes but skip on-chain payment + let mut peer_quotes = Vec::with_capacity(quotes.len()); + for (peer_id_str, quote, _price) in quotes { + let encoded = hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| format!("Peer ID conversion failed: {e}"))?; + peer_quotes.push((encoded, quote)); + } + + // Build proof with valid structure but NO payment + let unpaid_proof = PaymentProof { + proof_of_payment: ProofOfPayment { peer_quotes }, + tx_hashes: vec![], // No on-chain payment! + }; + let proof_bytes = + rmp_serde::to_vec(&unpaid_proof).map_err(|e| format!("Serialize failed: {e}"))?; + + let result = client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), proof_bytes) + .await; + + assert!( + result.is_err(), + "Attack MUST be rejected: no on-chain payment exists" + ); + let err_msg = format!("{}", result.expect_err("just asserted is_err")); + info!("Correctly rejected zero-amount payment: {err_msg}"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Use real quotes but fabricate a random tx hash (no corresponding on-chain tx). +/// Node MUST reject because on-chain verification fails. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_attack_fabricated_tx_hash() -> Result<(), Box> { + info!("ATTACK TEST: fabricated transaction hash"); + + let (harness, _testnet, wallet) = setup_full_payment_env().await?; + + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + let test_data = b"Attack: fabricated tx hash"; + let quotes = get_quotes_with_retries(&client, test_data).await?; + + // Build peer_quotes from real quotes + let mut peer_quotes = Vec::with_capacity(quotes.len()); + for (peer_id_str, quote, _price) in quotes { + let encoded = hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| format!("Peer ID conversion failed: {e}"))?; + peer_quotes.push((encoded, quote)); + } + + // Fabricate a fake tx hash + let fake_tx = alloy::primitives::FixedBytes::from([0xDE; 32]); + + let fake_proof = PaymentProof { + proof_of_payment: ProofOfPayment { peer_quotes }, + tx_hashes: vec![fake_tx], + }; + let proof_bytes = + rmp_serde::to_vec(&fake_proof).map_err(|e| format!("Serialize failed: {e}"))?; + + let result = client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), proof_bytes) + .await; + + assert!( + result.is_err(), + "Attack MUST be rejected: fabricated tx hash has no on-chain payment" + ); + let err_msg = format!("{}", result.expect_err("just asserted is_err")); + info!("Correctly rejected fabricated tx hash: {err_msg}"); + + harness.teardown().await?; + Ok(()) +} + +// =========================================================================== +// Category 3: Advanced Protocol Attacks +// =========================================================================== + +/// Attack: Double-spend the same proof for the same chunk (idempotent check). +/// The first store succeeds; the second returns `AlreadyExists` (not an error). +/// This proves double-spend is prevented by idempotent storage. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_attack_double_spend_same_proof() -> Result<(), Box> { + info!("ATTACK TEST: double-spend same proof for same chunk"); + + let (harness, _testnet, wallet) = setup_full_payment_env().await?; + + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + let test_data = b"Attack: double-spend same proof"; + let quotes = get_quotes_with_retries(&client, test_data).await?; + let (proof_bytes, _tx_hashes) = build_valid_proof(quotes, &wallet).await?; + + // First store: should succeed — retry for slow DHT on CI + let mut first_stored = false; + for attempt in 1..=5u32 { + match client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), proof_bytes.clone()) + .await + { + Ok(_addr) => { + first_stored = true; + break; + } + Err(e) => { + warn!("First store attempt {attempt}/5 failed: {e}"); + if attempt < 5 { + let _ = harness.warmup_dht().await; + sleep(Duration::from_secs(3)).await; + } + } + } + } + assert!( + first_stored, + "First store MUST succeed with valid payment after retries" + ); + info!("First store succeeded (legitimate)"); + + // Second store with same proof: should return AlreadyExists (idempotent) + let result2 = client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), proof_bytes) + .await; + + // AlreadyExists is returned as Ok (it's idempotent success), proving the chunk + // was cached and the proof cannot be used to double-store different data. + match result2 { + Ok(addr) => { + let expected = compute_address(test_data); + assert_eq!(addr, expected, "AlreadyExists should return same address"); + info!("Double-spend correctly returned existing address (idempotent)"); + } + Err(e) => { + // Some implementations may also reject duplicates -- both behaviors are safe + info!("Double-spend rejected outright: {e}"); + } + } + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Corrupt the ML-DSA-65 public key in quotes (replace with random bytes). +/// Node MUST reject because public key parsing or signature verification fails. +#[tokio::test(flavor = "multi_thread")] +#[serial] +#[allow(clippy::too_many_lines)] +async fn test_attack_corrupted_public_key() -> Result<(), Box> { + info!("ATTACK TEST: corrupted ML-DSA-65 public key"); + + let (harness, _testnet, wallet) = setup_full_payment_env().await?; + + let client = QuantumClient::with_defaults() + .with_node(harness.node(0).ok_or("Node 0 not found")?) + .with_wallet(wallet.clone()); + + let test_data = b"Attack: corrupted public key"; + let quotes = get_quotes_with_retries(&client, test_data).await?; + + // Build peer_quotes and payment + let mut peer_quotes = Vec::with_capacity(quotes.len()); + let mut quotes_for_payment = Vec::with_capacity(quotes.len()); + for (peer_id_str, quote, price) in quotes { + let encoded = hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| format!("Peer ID conversion failed: {e}"))?; + peer_quotes.push((encoded, quote.clone())); + quotes_for_payment.push((quote, price)); + } + let payment = SingleNodePayment::from_quotes(quotes_for_payment) + .map_err(|e| format!("Payment creation failed: {e}"))?; + let tx_hashes = payment + .pay(&wallet) + .await + .map_err(|e| format!("Payment failed: {e}"))?; + + // CORRUPT ALL public keys (replace with random bytes of same length) + let mut corrupted_quotes = peer_quotes; + for (_peer_id, ref mut quote) in &mut corrupted_quotes { + let key_len = quote.pub_key.len(); + quote.pub_key = (0..key_len).map(|_| rand::thread_rng().gen()).collect(); + } + + let corrupted_proof = PaymentProof { + proof_of_payment: ProofOfPayment { + peer_quotes: corrupted_quotes, + }, + tx_hashes, + }; + let proof_bytes = + rmp_serde::to_vec(&corrupted_proof).map_err(|e| format!("Serialize failed: {e}"))?; + + let result = client + .put_chunk_with_proof(Bytes::from(test_data.to_vec()), proof_bytes) + .await; + + assert!( + result.is_err(), + "Attack MUST be rejected: corrupted public keys" + ); + let err_msg = format!("{}", result.expect_err("just asserted is_err")); + info!("Correctly rejected corrupted public key: {err_msg}"); + + harness.teardown().await?; + Ok(()) +} + +/// Attack: Use `QuantumClient` without wallet (no proof sent to server). +/// Server-side enforcement MUST reject the storage attempt. +#[tokio::test(flavor = "multi_thread")] +#[serial] +async fn test_attack_client_without_wallet() -> Result<(), Box> { + info!("ATTACK TEST: QuantumClient without wallet"); + + let (harness, _testnet) = setup_enforcement_env().await?; + + // Create client WITHOUT wallet -- sends no payment proof + let client = + QuantumClient::with_defaults().with_node(harness.node(0).ok_or("Node 0 not found")?); + + let test_data = b"Attack: client with no wallet configured"; + let result = client.put_chunk(Bytes::from(test_data.to_vec())).await; + + assert!( + result.is_err(), + "Storage MUST fail without wallet when enforcement is enabled" + ); + let err_msg = format!("{}", result.expect_err("just asserted is_err")); + assert!( + err_msg.to_lowercase().contains("payment"), + "Error must be payment-related, got: {err_msg}" + ); + info!("Correctly rejected client without wallet: {err_msg}"); + + harness.teardown().await?; + Ok(()) +} diff --git a/tests/e2e/testnet.rs b/tests/e2e/testnet.rs index 769e6eef..c153b653 100644 --- a/tests/e2e/testnet.rs +++ b/tests/e2e/testnet.rs @@ -15,16 +15,21 @@ use ant_evm::RewardsAddress; use bytes::Bytes; +use evmlib::wallet::Wallet; +use evmlib::Network as EvmNetwork; use futures::future::join_all; use rand::Rng; -use saorsa_core::{NodeConfig as CoreNodeConfig, P2PEvent, P2PNode}; +use saorsa_core::{ + identity::NodeIdentity, IPDiversityConfig as CoreDiversityConfig, NodeConfig as CoreNodeConfig, + P2PEvent, P2PNode, +}; use saorsa_node::ant_protocol::{ ChunkGetRequest, ChunkGetResponse, ChunkMessage, ChunkMessageBody, ChunkPutRequest, ChunkPutResponse, CHUNK_PROTOCOL_ID, }; -use saorsa_node::client::{send_and_await_chunk_response, DataChunk, XorName}; +use saorsa_node::client::{send_and_await_chunk_response, DataChunk, QuantumClient, XorName}; use saorsa_node::payment::{ - EvmVerifierConfig, PaymentVerifier, PaymentVerifierConfig, QuoteGenerator, + EvmVerifierConfig, PaymentProof, PaymentVerifier, PaymentVerifierConfig, QuoteGenerator, QuotingMetricsTracker, }; use saorsa_node::storage::{AntProtocol, LmdbStorage, LmdbStorageConfig}; @@ -34,7 +39,7 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::{broadcast, RwLock}; use tokio::task::JoinHandle; -use tokio::time::Instant; +use tokio::time::{sleep, Instant}; use tracing::{debug, info, warn}; // ============================================================================= @@ -100,8 +105,9 @@ const TEST_PAYMENT_CACHE_CAPACITY: usize = 1000; /// Test rewards address (20 bytes, all 0x01). const TEST_REWARDS_ADDRESS: [u8; 20] = [0x01; 20]; -/// Max records for quoting metrics (test value). -const TEST_MAX_RECORDS: usize = 100_000; +/// Max records for quoting metrics (derived from node storage limit / max chunk size). +/// 5 GB / 4 MB = 1280 records. +const TEST_MAX_RECORDS: usize = 1280; /// Initial records for quoting metrics (test value). const TEST_INITIAL_RECORDS: usize = 1000; @@ -197,6 +203,16 @@ pub struct TestNetworkConfig { /// Enable verbose logging for test nodes. pub enable_node_logging: bool, + + /// Enable payment enforcement (EVM verification) for test nodes. + /// Default: false (EVM disabled for speed). + pub payment_enforcement: bool, + + /// Optional EVM network for payment verification. + /// When `payment_enforcement` is true and this is `Some`, nodes will use + /// this network (e.g. Anvil testnet) for on-chain verification. + /// When `None`, defaults to `ArbitrumOne`. + pub evm_network: Option, } impl Default for TestNetworkConfig { @@ -205,10 +221,15 @@ impl Default for TestNetworkConfig { // Random port in isolated range to avoid collisions in parallel tests. // Ensure we have room for DEFAULT_NODE_COUNT consecutive ports. + // Calculation: base_port + (DEFAULT_NODE_COUNT - 1) must be < TEST_PORT_RANGE_MAX // Safety: DEFAULT_NODE_COUNT (25) fits in u16. #[allow(clippy::cast_possible_truncation)] let max_base_port = TEST_PORT_RANGE_MAX.saturating_sub(DEFAULT_NODE_COUNT as u16); - let base_port = rng.gen_range(TEST_PORT_RANGE_MIN..max_base_port); + let base_port = if max_base_port > TEST_PORT_RANGE_MIN { + rng.gen_range(TEST_PORT_RANGE_MIN..max_base_port) + } else { + TEST_PORT_RANGE_MIN + }; // Random suffix for unique temp directory let suffix: u64 = rng.gen(); @@ -223,6 +244,8 @@ impl Default for TestNetworkConfig { stabilization_timeout: Duration::from_secs(DEFAULT_STABILIZATION_TIMEOUT_SECS), node_startup_timeout: Duration::from_secs(DEFAULT_NODE_STARTUP_TIMEOUT_SECS), enable_node_logging: false, + payment_enforcement: false, + evm_network: None, } } } @@ -249,6 +272,27 @@ impl TestNetworkConfig { ..Default::default() } } + + /// Enable payment enforcement for this configuration. + /// + /// When enabled, nodes will require valid EVM payment proofs + /// for all chunk storage operations. This allows testing the + /// full payment enforcement flow. + #[must_use] + pub fn with_payment_enforcement(mut self) -> Self { + self.payment_enforcement = true; + self + } + + /// Set the EVM network for payment verification. + /// + /// Use this with `with_payment_enforcement()` to wire nodes to + /// a local Anvil testnet for on-chain payment verification. + #[must_use] + pub fn with_evm_network(mut self, network: EvmNetwork) -> Self { + self.evm_network = Some(network); + self + } } /// State of the test network. @@ -297,6 +341,8 @@ pub enum NodeState { Stopping, /// Node has stopped. Stopped, + /// Node has been intentionally shut down (simulated failure). + ShutDown, /// Node encountered an error. Failed(String), } @@ -324,6 +370,12 @@ pub struct TestNode { /// ANT protocol handler (`AntProtocol`) for processing chunk PUT/GET requests. pub ant_protocol: Option>, + /// `QuantumClient` for payment-enabled operations. + pub client: Option>, + + /// EVM wallet for payment operations. + pub wallet: Option, + /// Is this a bootstrap node? pub is_bootstrap: bool, @@ -333,6 +385,13 @@ pub struct TestNode { /// Bootstrap addresses this node connects to. pub bootstrap_addrs: Vec, + /// ML-DSA-65 identity used for quote signing. + /// + /// Stored so that `start_node` can inject the same identity into the + /// `P2PNode`, ensuring the transport-level peer ID matches the public + /// key embedded in payment quotes (`BLAKE3(pub_key)` == `peer_id`). + pub node_identity: Option>, + /// Protocol handler background task handle. /// /// Populated once the node starts and the protocol router is spawned. @@ -341,6 +400,150 @@ pub struct TestNode { } impl TestNode { + /// Set wallet for payment tests. + /// + /// This updates the node's wallet and creates a new `QuantumClient` configured + /// with both the P2P node and wallet for payment-enabled operations. + pub fn set_wallet(&mut self, wallet: Wallet) { + // Create a new QuantumClient with the P2P node and wallet if available + if let Some(ref p2p_node) = self.p2p_node { + let client = QuantumClient::with_defaults() + .with_node(Arc::clone(p2p_node)) + .with_wallet(wallet.clone()); + self.client = Some(Arc::new(client)); + } + + self.wallet = Some(wallet); + } + + /// Store a chunk using the `QuantumClient` (with payment). + /// + /// This is the payment-enabled variant that uses the `QuantumClient` to handle + /// quote requests, payments, and chunk storage. + /// + /// # Errors + /// + /// Returns an error if the client is not configured or the store operation fails. + pub async fn store_chunk_with_payment(&self, data: &[u8]) -> Result { + let client = self.client.as_ref().ok_or(TestnetError::NodeNotRunning)?; + let data_bytes = Bytes::from(data.to_vec()); + + let mut last_err = String::new(); + for attempt in 1..=5 { + match client.put_chunk(data_bytes.clone()).await { + Ok(addr) => return Ok(addr), + Err(e) => { + last_err = format!("Client PUT error: {e}"); + if attempt < 5 { + warn!("store_chunk_with_payment attempt {attempt}/5 failed: {e}"); + sleep(Duration::from_secs(3)).await; + } + } + } + } + + Err(TestnetError::Storage(last_err)) + } + + /// Store a chunk with payment tracking. + /// + /// This method stores a chunk using the payment-enabled client and records + /// the payment transaction to the provided tracker. This allows tests to + /// verify payment behavior (e.g., that caching prevents duplicate payments). + /// + /// # Arguments + /// + /// * `data` - The chunk data to store + /// * `tracker` - Payment tracker to record transactions + /// + /// # Errors + /// + /// Returns an error if the client/wallet is not configured or the store operation fails. + pub async fn store_chunk_with_tracked_payment( + &self, + data: &[u8], + tracker: &super::harness::PaymentTracker, + ) -> Result { + use saorsa_node::payment::SingleNodePayment; + + // Reuse the client created by set_wallet() + let client = self.client.as_ref().ok_or_else(|| { + TestnetError::Storage( + "Client not configured - use set_wallet() to create a payment-enabled client" + .to_string(), + ) + })?; + let wallet = self.wallet.as_ref().ok_or_else(|| { + TestnetError::Storage("Wallet not configured - use set_wallet()".to_string()) + })?; + + // Compute the chunk address + let address = Self::compute_chunk_address(data); + + // Get quotes from the network (includes peer IDs for proof of payment) + let quotes_with_peers = client + .get_quotes_from_dht(data) + .await + .map_err(|e| TestnetError::Storage(format!("Failed to get quotes: {e}")))?; + + // Collect peer_quotes and strip peer IDs for SingleNodePayment + let mut peer_quotes: Vec<_> = Vec::with_capacity(quotes_with_peers.len()); + let mut quotes_with_prices: Vec<_> = Vec::with_capacity(quotes_with_peers.len()); + for (peer_id_str, quote, price) in quotes_with_peers { + let encoded_peer_id = + saorsa_node::client::hex_node_id_to_encoded_peer_id(&peer_id_str.to_hex()) + .map_err(|e| { + TestnetError::Storage(format!( + "Failed to convert peer ID '{peer_id_str}': {e}" + )) + })?; + peer_quotes.push((encoded_peer_id, quote.clone())); + quotes_with_prices.push((quote, price)); + } + + // Create payment structure (sorts by price, selects median) + let payment = SingleNodePayment::from_quotes(quotes_with_prices) + .map_err(|e| TestnetError::Storage(format!("Failed to create payment: {e}")))?; + + // Make the payment and get transaction hashes + let tx_hashes = payment + .pay(wallet) + .await + .map_err(|e| TestnetError::Storage(format!("Payment failed: {e}")))?; + + // Record the payment in the tracker + tracker.record_payment(address, tx_hashes.clone()); + + // Build proof AFTER payment with tx hashes included + let proof = PaymentProof { + proof_of_payment: ant_evm::ProofOfPayment { peer_quotes }, + tx_hashes, + }; + let proof_bytes = rmp_serde::to_vec(&proof) + .map_err(|e| TestnetError::Storage(format!("Failed to serialize proof: {e}")))?; + + // Use put_chunk_with_proof to send the pre-built proof, avoiding a + // redundant quote+pay cycle that put_chunk_with_payment would perform. + client + .put_chunk_with_proof(Bytes::from(data.to_vec()), proof_bytes) + .await + .map_err(|e| TestnetError::Storage(format!("Client PUT error: {e}"))) + } + + /// Retrieve a chunk using the `QuantumClient`. + /// + /// # Errors + /// + /// Returns an error if the client is not configured or the retrieval fails. + pub async fn get_chunk_with_client(&self, address: &XorName) -> Result> { + let client = self.client.as_ref().ok_or(TestnetError::NodeNotRunning)?; + + client + .get_chunk(address) + .await + .map_err(|e| TestnetError::Retrieval(format!("Client GET error: {e}"))) + } + /// Check if this node is running. pub async fn is_running(&self) -> bool { matches!( @@ -349,6 +552,39 @@ impl TestNode { ) } + /// Shutdown this test node gracefully. + /// + /// This simulates a node failure by shutting down the P2P node and + /// stopping the protocol handler. The node's state is set to `ShutDown`. + /// + /// # Errors + /// + /// Returns an error if the node is not running or shutdown fails. + pub async fn shutdown(&mut self) -> Result<()> { + info!("Shutting down test node {}", self.index); + + // Stop protocol handler first + if let Some(handle) = self.protocol_task.take() { + handle.abort(); + } + + // Drop client to release its Arc reference + self.client = None; + + *self.state.write().await = NodeState::Stopping; + + // Shutdown P2P node if running + if let Some(p2p) = self.p2p_node.take() { + p2p.shutdown() + .await + .map_err(|e| TestnetError::Core(format!("Failed to shutdown node: {e}")))?; + } + + *self.state.write().await = NodeState::ShutDown; + info!("Test node {} shut down successfully", self.index); + Ok(()) + } + /// Get the number of connected peers. pub async fn peer_count(&self) -> usize { if let Some(ref node) = self.p2p_node { @@ -359,7 +595,7 @@ impl TestNode { } /// Get the list of connected peer IDs. - pub async fn connected_peers(&self) -> Vec { + pub async fn connected_peers(&self) -> Vec { if let Some(ref node) = self.p2p_node { node.connected_peers().await } else { @@ -391,16 +627,11 @@ impl TestNode { // Compute content address let address = Self::compute_chunk_address(data); - // Create PUT request with empty payment proof (EVM disabled in tests) - let empty_payment = rmp_serde::to_vec(&ant_evm::ProofOfPayment { - peer_quotes: vec![], - }) - .map_err(|e| { - TestnetError::Serialization(format!("Failed to serialize payment proof: {e}")) - })?; - + // Create PUT request WITHOUT payment proof (EVM disabled in tests) + // When EVM verification is disabled, we send None instead of an empty proof + // to avoid triggering the fail-secure rejection in PaymentVerifier let request_id: u64 = rand::thread_rng().gen(); - let request = ChunkPutRequest::with_payment(address, data.to_vec(), empty_payment); + let request = ChunkPutRequest::new(address, data.to_vec()); let message = ChunkMessage { request_id, body: ChunkMessageBody::PutRequest(request), @@ -535,10 +766,8 @@ impl TestNode { .p2p_node .as_ref() .ok_or(TestnetError::NodeNotRunning)?; - let target_peer_id = target_p2p - .transport_peer_id() - .ok_or_else(|| TestnetError::Core("No transport peer ID available".to_string()))?; - self.store_chunk_on_peer(&target_peer_id, data).await + let target_peer_id = target_p2p.peer_id(); + self.store_chunk_on_peer(target_peer_id, data).await } /// Store a chunk on a remote peer via P2P using the peer's ID directly. @@ -547,21 +776,19 @@ impl TestNode { /// /// Returns an error if this node is not running, the message cannot be /// sent, the response times out, or the remote peer reports an error. - pub async fn store_chunk_on_peer(&self, target_peer_id: &str, data: &[u8]) -> Result { + pub async fn store_chunk_on_peer( + &self, + target_peer_id: &saorsa_core::identity::PeerId, + data: &[u8], + ) -> Result { let p2p = self.p2p_node.as_ref().ok_or(TestnetError::NodeNotRunning)?; - let target_peer_id = target_peer_id.to_string(); + let target_peer_id = *target_peer_id; - // Create PUT request + // Create PUT request WITHOUT payment proof (EVM disabled in tests) let address = Self::compute_chunk_address(data); - let empty_payment = rmp_serde::to_vec(&ant_evm::ProofOfPayment { - peer_quotes: vec![], - }) - .map_err(|e| { - TestnetError::Serialization(format!("Failed to serialize payment proof: {e}")) - })?; let request_id: u64 = rand::thread_rng().gen(); - let request = ChunkPutRequest::with_payment(address, data.to_vec(), empty_payment); + let request = ChunkPutRequest::new(address, data.to_vec()); let message = ChunkMessage { request_id, body: ChunkMessageBody::PutRequest(request), @@ -638,10 +865,8 @@ impl TestNode { .p2p_node .as_ref() .ok_or(TestnetError::NodeNotRunning)?; - let target_peer_id = target_p2p - .transport_peer_id() - .ok_or_else(|| TestnetError::Core("No transport peer ID available".to_string()))?; - self.get_chunk_from_peer(&target_peer_id, address).await + let target_peer_id = target_p2p.peer_id(); + self.get_chunk_from_peer(target_peer_id, address).await } /// Retrieve a chunk from a remote peer via P2P using the peer's ID directly. @@ -652,11 +877,11 @@ impl TestNode { /// sent, the response times out, or the remote peer reports an error. pub async fn get_chunk_from_peer( &self, - target_peer_id: &str, + target_peer_id: &saorsa_core::identity::PeerId, address: &XorName, ) -> Result> { let p2p = self.p2p_node.as_ref().ok_or(TestnetError::NodeNotRunning)?; - let target_peer_id = target_peer_id.to_string(); + let target_peer_id = *target_peer_id; // Create GET request let request_id: u64 = rand::thread_rng().gen(); @@ -886,7 +1111,10 @@ impl TestNetwork { info!("Starting {} regular nodes", regular_count); // Get bootstrap addresses - let bootstrap_addrs: Vec = self.nodes[0..self.config.bootstrap_count] + let bootstrap_addrs: Vec = self + .nodes + .get(0..self.config.bootstrap_count) + .unwrap_or_default() .iter() .map(|n| n.address) .collect(); @@ -907,7 +1135,7 @@ impl TestNetwork { /// /// Initializes the `AntProtocol` handler with: /// - LMDB storage in the node's data directory - /// - Payment verification disabled (for testing) + /// - Payment verification configured per `TestNetworkConfig` /// - Quote generation with a test rewards address async fn create_node( &self, @@ -925,8 +1153,20 @@ impl TestNetwork { tokio::fs::create_dir_all(&data_dir).await?; - // Initialize AntProtocol for this node - let ant_protocol = Self::create_ant_protocol(&data_dir).await?; + // Generate an ML-DSA-65 identity for this test node's quote signing + // AND for the P2PNode so BLAKE3(pub_key) == transport peer_id. + let identity = Arc::new(NodeIdentity::generate().map_err(|e| { + TestnetError::Core(format!("Failed to generate test node identity: {e}")) + })?); + + // Initialize AntProtocol for this node with payment enforcement setting + let ant_protocol = Self::create_ant_protocol( + &data_dir, + self.config.payment_enforcement, + self.config.evm_network.clone(), + &identity, + ) + .await?; Ok(TestNode { index, @@ -936,9 +1176,12 @@ impl TestNetwork { data_dir, p2p_node: None, ant_protocol: Some(Arc::new(ant_protocol)), + client: None, + wallet: None, is_bootstrap, state: Arc::new(RwLock::new(NodeState::Pending)), bootstrap_addrs, + node_identity: Some(identity), protocol_task: None, }) } @@ -947,13 +1190,23 @@ impl TestNetwork { /// /// Configures: /// - LMDB storage with verification enabled - /// - Payment verification disabled (for testing without Anvil) + /// - Payment verification (enabled/disabled based on `payment_enforcement`) /// - Quote generator with a test rewards address /// + /// # Arguments + /// + /// * `data_dir` - Directory for LMDB storage + /// * `payment_enforcement` - Whether to enable EVM payment verification + /// /// # Errors /// /// Returns an error if LMDB storage initialisation fails. - pub async fn create_ant_protocol(data_dir: &std::path::Path) -> Result { + pub async fn create_ant_protocol( + data_dir: &std::path::Path, + payment_enforcement: bool, + evm_network: Option, + identity: &saorsa_core::identity::NodeIdentity, + ) -> Result { // Create LMDB storage let storage_config = LmdbStorageConfig { root_dir: data_dir.to_path_buf(), @@ -965,20 +1218,46 @@ impl TestNetwork { .await .map_err(|e| TestnetError::Core(format!("Failed to create LMDB storage: {e}")))?; - // Create payment verifier with EVM disabled + // Create payment verifier with EVM enabled/disabled based on test config. + // When payment_enforcement is true and an EVM network is provided, + // use that network (e.g. Anvil) for on-chain verification. let payment_config = PaymentVerifierConfig { evm: EvmVerifierConfig { - enabled: false, // Disable EVM verification for tests - ..Default::default() + enabled: payment_enforcement, + network: evm_network.unwrap_or(EvmNetwork::ArbitrumSepoliaTest), }, cache_capacity: TEST_PAYMENT_CACHE_CAPACITY, + local_rewards_address: None, }; let payment_verifier = PaymentVerifier::new(payment_config); - // Create quote generator with test rewards address + // Create quote generator with ML-DSA-65 signing from the test node's identity let rewards_address = RewardsAddress::new(TEST_REWARDS_ADDRESS); let metrics_tracker = QuotingMetricsTracker::new(TEST_MAX_RECORDS, TEST_INITIAL_RECORDS); - let quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); + let mut quote_generator = QuoteGenerator::new(rewards_address, metrics_tracker); + + // Wire ML-DSA-65 signing so quotes are properly signed and verifiable + let pub_key_bytes = identity.public_key().as_bytes().to_vec(); + let sk_bytes = identity.secret_key_bytes().to_vec(); + let sk = { + use saorsa_pqc::pqc::types::MlDsaSecretKey; + match MlDsaSecretKey::from_bytes(&sk_bytes) { + Ok(sk) => sk, + Err(e) => { + return Err(TestnetError::Core(format!( + "Failed to deserialize ML-DSA-65 secret key: {e}" + ))); + } + } + }; + quote_generator.set_signer(pub_key_bytes, move |msg| { + use saorsa_pqc::pqc::MlDsaOperations; + + let ml_dsa = saorsa_core::MlDsa65::new(); + ml_dsa + .sign(&sk, msg) + .map_or_else(|_| vec![], |sig| sig.as_bytes().to_vec()) + }); Ok(AntProtocol::new( Arc::new(storage), @@ -1007,6 +1286,14 @@ impl TestNetwork { // chunks (4 MiB payload + serialization overhead = 5 MiB wire). core_config.max_message_size = Some(saorsa_node::ant_protocol::MAX_WIRE_MESSAGE_SIZE); + // Allow localhost peers in DHT routing for test environments + // This prevents diversity filters from excluding peers on 127.0.0.1 + core_config.diversity_config = Some(CoreDiversityConfig::permissive()); + + // Inject the ML-DSA identity so the P2PNode's transport peer ID + // matches the pub_key embedded in payment quotes. + core_config.node_identity.clone_from(&node.node_identity); + // Create and start the P2P node let p2p_node = P2PNode::new(core_config).await.map_err(|e| { TestnetError::Startup(format!("Failed to create node {}: {e}", node.index)) @@ -1029,14 +1316,13 @@ impl TestNetwork { while let Ok(event) = events.recv().await { if let P2PEvent::Message { topic, - source, + source: Some(source), data, } = event { if topic == CHUNK_PROTOCOL_ID { debug!( - "Node {} received chunk protocol message from {}", - node_index, source + "Node {node_index} received chunk protocol message from {source}" ); let protocol = Arc::clone(&protocol_clone); let p2p = Arc::clone(&p2p_clone); @@ -1052,13 +1338,12 @@ impl TestNetwork { .await { warn!( - "Node {} failed to send response to {}: {}", - node_index, source, e + "Node {node_index} failed to send response to {source}: {e}" ); } } Err(e) => { - warn!("Node {} protocol handler error: {}", node_index, e); + warn!("Node {node_index} protocol handler error: {e}"); } } }); @@ -1079,7 +1364,11 @@ impl TestNetwork { for i in range { while Instant::now() < deadline { - let state = self.nodes[i].state.read().await.clone(); + let node = self + .nodes + .get(i) + .ok_or_else(|| TestnetError::Config(format!("Node index {i} out of range")))?; + let state = node.state.read().await.clone(); match state { NodeState::Running | NodeState::Connected => break, NodeState::Failed(ref e) => { @@ -1135,6 +1424,72 @@ impl TestNetwork { )) } + /// Warm up DHT routing tables by performing random lookups. + /// + /// After network stabilization, nodes are P2P connected but their DHT + /// routing tables may be sparse. Performing random lookups forces DHT + /// query traffic that populates and propagates routing information + /// across the network. + /// + /// This is essential for tests that use `get_quotes_from_dht()` which relies + /// on `find_closest_nodes()` to discover peers. + /// + /// # Errors + /// + /// Returns an error if DHT lookup fails. + pub async fn warmup_dht(&self) -> Result<()> { + info!("Warming up DHT routing tables ({} nodes)", self.nodes.len()); + + // Perform DHT queries to populate and propagate routing tables. + // The permissive diversity config (set in start_node) allows the DHT + // to accept localhost peers during these find_closest_nodes() calls. + let num_warmup_queries = 5; // More queries for better DHT coverage + let mut random_addresses = Vec::new(); + for _ in 0..num_warmup_queries { + let mut addr = [0u8; 32]; + rand::Rng::fill(&mut rand::thread_rng(), &mut addr); + random_addresses.push(addr); + } + + for node in &self.nodes { + if let Some(ref p2p) = node.p2p_node { + for addr in &random_addresses { + // Perform DHT lookup to populate routing tables + let result = p2p.dht().find_closest_nodes(addr, 8).await; + if let Ok(peers) = result { + if peers.is_empty() { + warn!( + "Node {} DHT warmup found 0 peers for {} - DHT may not be seeded yet", + node.index, + hex::encode(addr) + ); + } else { + debug!( + "Node {} DHT warmup found {} peers for target {}", + node.index, + peers.len(), + hex::encode(addr) + ); + } + } else if tracing::enabled!(tracing::Level::WARN) { + warn!( + "Node {} DHT warmup failed for {}: {:?}", + node.index, + hex::encode(addr), + result + ); + } + } + } + } + + // Give DHT time to propagate discoveries + tokio::time::sleep(Duration::from_secs(3)).await; + + info!("✅ DHT routing tables warmed up"); + Ok(()) + } + /// Start background health monitoring. fn start_health_monitor(&mut self) { let nodes: Vec> = self @@ -1184,8 +1539,17 @@ impl TestNetwork { // Stop all nodes in reverse order. // We shutdown nodes concurrently to avoid serially accumulating DHT // graceful-leave waits across every node. + // Skip nodes that are already shut down (e.g., via shutdown_node()). let mut shutdown_futures = Vec::with_capacity(self.nodes.len()); for node in self.nodes.iter_mut().rev() { + let state = node.state.read().await.clone(); + + // Skip nodes that are already shut down or stopped + if matches!(state, NodeState::ShutDown | NodeState::Stopped) { + debug!("Skipping node {} (already shut down)", node.index); + continue; + } + debug!("Stopping node {}", node.index); if let Some(handle) = node.protocol_task.take() { handle.abort(); @@ -1205,7 +1569,10 @@ impl TestNetwork { } for node in &self.nodes { - *node.state.write().await = NodeState::Stopped; + let state = node.state.read().await.clone(); + if !matches!(state, NodeState::ShutDown) { + *node.state.write().await = NodeState::Stopped; + } } // Cleanup test data directory @@ -1282,6 +1649,60 @@ impl TestNetwork { pub fn config(&self) -> &TestNetworkConfig { &self.config } + + /// Shutdown a specific node by index. + /// + /// This simulates a node failure during testing. The node is gracefully shut down + /// and its state is set to `ShutDown`. The network continues to operate with the + /// remaining nodes. + /// + /// # Arguments + /// + /// * `index` - The index of the node to shutdown (0-based) + /// + /// # Errors + /// + /// Returns an error if the node index is invalid or shutdown fails. + pub async fn shutdown_node(&mut self, index: usize) -> Result<()> { + let node = self + .nodes + .get_mut(index) + .ok_or_else(|| TestnetError::Config(format!("Node index {index} out of bounds")))?; + + node.shutdown().await?; + + info!("Node {} has been shut down", index); + Ok(()) + } + + /// Shutdown multiple nodes by their indices. + /// + /// This is a convenience method for simulating multiple node failures at once. + /// + /// # Arguments + /// + /// * `indices` - Slice of node indices to shutdown + /// + /// # Errors + /// + /// Returns an error if any node index is invalid or shutdown fails. + pub async fn shutdown_nodes(&mut self, indices: &[usize]) -> Result<()> { + for &index in indices { + self.shutdown_node(index).await?; + } + Ok(()) + } + + /// Get the number of currently running nodes. + pub async fn running_node_count(&self) -> usize { + let mut count = 0; + for node in &self.nodes { + if node.is_running().await { + count += 1; + } + } + count + } } impl Drop for TestNetwork {