diff --git a/.claude-flow/CAPABILITIES.md b/.claude-flow/CAPABILITIES.md index 3d74c7af2..d219f9ec5 100644 --- a/.claude-flow/CAPABILITIES.md +++ b/.claude-flow/CAPABILITIES.md @@ -350,11 +350,11 @@ npx @claude-flow/cli@latest hive-mind consensus --propose "task" ### MCP Server Setup ```bash # Add Claude Flow MCP -claude mcp add claude-flow -- npx -y @claude-flow/cli@latest +claude mcp add claude-flow -- npx -y @claude-flow/cli@latest # Optional servers -claude mcp add ruv-swarm -- npx -y ruv-swarm mcp start -claude mcp add flow-nexus -- npx -y flow-nexus@latest mcp start +claude mcp add ruv-swarm -- npx -y ruv-swarm mcp start +claude mcp add flow-nexus -- npx -y flow-nexus@latest mcp start ``` --- diff --git a/.claude/agents/core/tester.md b/.claude/agents/core/tester.md index 9b2707e45..9984000fd 100644 --- a/.claude/agents/core/tester.md +++ b/.claude/agents/core/tester.md @@ -47,7 +47,7 @@ hooks: post: | echo "📋 Test results summary:" - TEST_OUTPUT=$(npm test -- --reporter=json 2>/dev/null | jq '.numPassedTests, .numFailedTests' 2>/dev/null || echo "Tests completed") + TEST_OUTPUT=$(npm test -- --reporter=json 2>/dev/null | jq '.numPassedTests, .numFailedTests' 2>/dev/null || echo "Tests completed") echo "$TEST_OUTPUT" # 1. Calculate test quality metrics @@ -509,4 +509,4 @@ console.log(`Common missed scenarios: ${stats.commonCritiques}`); 9. **Use GNN Search**: Find similar test scenarios (+12.4% coverage) 10. **Flash Attention**: Generate tests faster (2.49x-7.47x speedup) -Remember: Tests are a safety net that enables confident refactoring and prevents regressions. Invest in good tests—they pay dividends in maintainability. **Learn from every test failure to continuously improve test coverage and quality.** \ No newline at end of file +Remember: Tests are a safety net that enables confident refactoring and prevents regressions. Invest in good tests - they pay dividends in maintainability. 
**Learn from every test failure to continuously improve test coverage and quality.** \ No newline at end of file diff --git a/.claude/agents/github/multi-repo-swarm.md b/.claude/agents/github/multi-repo-swarm.md index 90a527827..1c6ef8e0e 100644 --- a/.claude/agents/github/multi-repo-swarm.md +++ b/.claude/agents/github/multi-repo-swarm.md @@ -95,7 +95,7 @@ MATCHING_REPOS=$(gh repo list org --limit 100 --json name \ # Execute task and create PRs echo "$MATCHING_REPOS" | while read -r repo; do # Clone repo - gh repo clone org/$repo /tmp/$repo -- --depth=1 + gh repo clone org/$repo /tmp/$repo -- --depth=1 # Execute task cd /tmp/$repo @@ -206,7 +206,7 @@ TS_REPOS=$(gh repo list org --limit 100 --json name | jq -r '.[].name' | \ # Update each repository echo "$TS_REPOS" | while read -r repo; do # Clone and update - gh repo clone org/$repo /tmp/$repo -- --depth=1 + gh repo clone org/$repo /tmp/$repo -- --depth=1 cd /tmp/$repo # Update dependency diff --git a/.claude/agents/github/release-manager.md b/.claude/agents/github/release-manager.md index 57be9ea93..27158fb07 100644 --- a/.claude/agents/github/release-manager.md +++ b/.claude/agents/github/release-manager.md @@ -456,7 +456,7 @@ This release is production-ready with comprehensive validation and testing. 
Bash("gh api repos/:owner/:repo/git/refs --method POST -f ref='refs/heads/release/v1.0.72' -f sha=$(gh api repos/:owner/:repo/git/refs/heads/main --jq '.object.sha')") // Clone and update release files - Bash("gh repo clone :owner/:repo /tmp/release-v1.0.72 -- --branch release/v1.0.72 --depth=1") + Bash("gh repo clone :owner/:repo /tmp/release-v1.0.72 -- --branch release/v1.0.72 --depth=1") // Update all release-related files Write("/tmp/release-v1.0.72/claude-code-flow/claude-code-flow/package.json", "[updated package.json]") diff --git a/.claude/commands/github/multi-repo-swarm.md b/.claude/commands/github/multi-repo-swarm.md index b907872e2..8edb2cf88 100644 --- a/.claude/commands/github/multi-repo-swarm.md +++ b/.claude/commands/github/multi-repo-swarm.md @@ -61,7 +61,7 @@ MATCHING_REPOS=$(gh repo list org --limit 100 --json name \ # Execute task and create PRs echo "$MATCHING_REPOS" | while read -r repo; do # Clone repo - gh repo clone org/$repo /tmp/$repo -- --depth=1 + gh repo clone org/$repo /tmp/$repo -- --depth=1 # Execute task cd /tmp/$repo @@ -172,7 +172,7 @@ TS_REPOS=$(gh repo list org --limit 100 --json name | jq -r '.[].name' | \ # Update each repository echo "$TS_REPOS" | while read -r repo; do # Clone and update - gh repo clone org/$repo /tmp/$repo -- --depth=1 + gh repo clone org/$repo /tmp/$repo -- --depth=1 cd /tmp/$repo # Update dependency diff --git a/.claude/commands/github/release-manager.md b/.claude/commands/github/release-manager.md index 7cf2948e1..81006fe27 100644 --- a/.claude/commands/github/release-manager.md +++ b/.claude/commands/github/release-manager.md @@ -189,7 +189,7 @@ This release is production-ready with comprehensive validation and testing. 
Bash("gh api repos/:owner/:repo/git/refs --method POST -f ref='refs/heads/release/v1.0.72' -f sha=$(gh api repos/:owner/:repo/git/refs/heads/main --jq '.object.sha')") // Clone and update release files - Bash("gh repo clone :owner/:repo /tmp/release-v1.0.72 -- --branch release/v1.0.72 --depth=1") + Bash("gh repo clone :owner/:repo /tmp/release-v1.0.72 -- --branch release/v1.0.72 --depth=1") // Update all release-related files Write("/tmp/release-v1.0.72/claude-code-flow/claude-code-flow/package.json", "[updated package.json]") diff --git a/.claude/commands/sparc/spec-pseudocode.md b/.claude/commands/sparc/spec-pseudocode.md index cb253275f..6742c2db2 100644 --- a/.claude/commands/sparc/spec-pseudocode.md +++ b/.claude/commands/sparc/spec-pseudocode.md @@ -1,12 +1,12 @@ --- name: sparc-spec-pseudocode -description: 📋 Specification Writer - You capture full project context—functional requirements, edge cases, constraints—and translate t... +description: 📋 Specification Writer - You capture full project context - functional requirements, edge cases, constraints - and translate t... --- # 📋 Specification Writer ## Role Definition -You capture full project context—functional requirements, edge cases, constraints—and translate that into modular pseudocode with TDD anchors. +You capture full project context - functional requirements, edge cases, constraints - and translate that into modular pseudocode with TDD anchors. ## Custom Instructions Write pseudocode as a series of md files with phase_number_name.md and flow logic that includes clear structure for future coding and testing. Split complex logic across modules. Never include hard-coded secrets or config values. Ensure each spec module remains < 500 lines. 
diff --git a/.claude/helpers/auto-commit.sh b/.claude/helpers/auto-commit.sh index cdecccff8..3b520f8d0 100755 --- a/.claude/helpers/auto-commit.sh +++ b/.claude/helpers/auto-commit.sh @@ -88,9 +88,7 @@ Automatic checkpoint created by Claude Code - Timestamp: $timestamp - Changes: $change_count file(s) -🤖 Generated with [Claude Code](https://claude.com/claude-code) - -Co-Authored-By: Claude Opus 4.5 " --quiet 2>/dev/null; then +🤖 Generated with [Claude Code](https://claude.com/claude-code)" --quiet 2>/dev/null; then log "Created commit: $message" # Push if enabled diff --git a/.claude/helpers/auto-memory-hook.mjs b/.claude/helpers/auto-memory-hook.mjs index 94205288b..327aab68b 100755 --- a/.claude/helpers/auto-memory-hook.mjs +++ b/.claude/helpers/auto-memory-hook.mjs @@ -201,7 +201,7 @@ async function doImport() { const memPkg = await loadMemoryPackage(); if (!memPkg || !memPkg.AutoMemoryBridge) { - dim('Memory package not available — skipping auto memory import'); + dim('Memory package not available - skipping auto memory import'); return; } @@ -254,7 +254,7 @@ async function doSync() { const memPkg = await loadMemoryPackage(); if (!memPkg || !memPkg.AutoMemoryBridge) { - dim('Memory package not available — skipping sync'); + dim('Memory package not available - skipping sync'); return; } diff --git a/.claude/helpers/intelligence.cjs b/.claude/helpers/intelligence.cjs index a182aced4..87ed5c219 100644 --- a/.claude/helpers/intelligence.cjs +++ b/.claude/helpers/intelligence.cjs @@ -3,13 +3,13 @@ * Intelligence Layer (ADR-050) * * Closes the intelligence loop by wiring PageRank-ranked memory into - * the hook system. Pure CJS — no ESM imports of @claude-flow/memory. + * the hook system. Pure CJS - no ESM imports of @claude-flow/memory. 
* * Data files (all under .claude-flow/data/): - * auto-memory-store.json — written by auto-memory-hook.mjs - * graph-state.json — serialized graph (nodes + edges + pageRanks) - * ranked-context.json — pre-computed ranked entries for fast lookup - * pending-insights.jsonl — append-only edit/task log + * auto-memory-store.json - written by auto-memory-hook.mjs + * graph-state.json - serialized graph (nodes + edges + pageRanks) + * ranked-context.json - pre-computed ranked entries for fast lookup + * pending-insights.jsonl - append-only edit/task log */ 'use strict'; @@ -48,7 +48,7 @@ function ensureDataDir() { function readJSON(filePath) { try { if (fs.existsSync(filePath)) return JSON.parse(fs.readFileSync(filePath, 'utf-8')); - } catch { /* corrupt file — start fresh */ } + } catch { /* corrupt file - start fresh */ } return null; } @@ -302,7 +302,7 @@ function parseMemoryDir(dir, entries) { // ── Exported functions ─────────────────────────────────────────────────────── /** - * init() — Called from session-restore. Budget: <200ms. + * init() - Called from session-restore. Budget: <200ms. * Reads auto-memory-store.json, builds graph, computes PageRank, writes caches. * If store is empty, bootstraps from MEMORY.md files directly. */ @@ -405,7 +405,7 @@ function init() { } /** - * getContext(prompt) — Called from route. Budget: <15ms. + * getContext(prompt) - Called from route. Budget: <15ms. * Matches prompt to ranked entries, returns top-5 formatted context. */ function getContext(prompt) { @@ -466,7 +466,7 @@ function getContext(prompt) { } /** - * recordEdit(file) — Called from post-edit. Budget: <2ms. + * recordEdit(file) - Called from post-edit. Budget: <2ms. * Appends to pending-insights.jsonl. */ function recordEdit(file) { @@ -481,7 +481,7 @@ function recordEdit(file) { } /** - * feedback(success) — Called from post-task. Budget: <10ms. + * feedback(success) - Called from post-task. Budget: <10ms. * Boosts or decays confidence for last-matched patterns. 
*/ function feedback(success) { @@ -521,7 +521,7 @@ function boostConfidence(ids, amount) { } /** - * consolidate() — Called from session-end. Budget: <500ms. + * consolidate() - Called from session-end. Budget: <500ms. * Processes pending insights, rebuilds edges, recomputes PageRank. */ function consolidate() { @@ -556,7 +556,7 @@ function consolidate() { store.push({ id: `insight-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`, key: `frequent-edit-${path.basename(file)}`, - content: `File ${file} was edited ${count} times this session — likely a hot path worth monitoring.`, + content: `File ${file} was edited ${count} times this session - likely a hot path worth monitoring.`, summary: `Frequently edited: ${path.basename(file)} (${count}x)`, namespace: 'insights', type: 'procedural', @@ -707,7 +707,7 @@ function saveSnapshot(graph, ranked) { } /** - * stats() — Diagnostic report showing intelligence health and improvement. + * stats() - Diagnostic report showing intelligence health and improvement. 
 * Can be called as: node intelligence.cjs stats [--json] */ function stats(outputJson) { @@ -894,7 +894,7 @@ function stats(outputJson) { } if (!delta && !trend) { - console.log(' No history yet — run more sessions to see deltas and trends.'); + console.log(' No history yet - run more sessions to see deltas and trends.'); console.log(''); } diff --git a/.claude/helpers/learning-service.mjs b/.claude/helpers/learning-service.mjs index 4b46c3194..6ca2af52c 100755 --- a/.claude/helpers/learning-service.mjs +++ b/.claude/helpers/learning-service.mjs @@ -80,7 +80,7 @@ const CONFIG = { function initializeDatabase(db) { db.exec(` - -- Short-term patterns (session-level) + -- Short-term patterns (session-level) CREATE TABLE IF NOT EXISTS short_term_patterns ( id TEXT PRIMARY KEY, strategy TEXT NOT NULL, @@ -96,7 +96,7 @@ function initializeDatabase(db) { metadata TEXT ); - -- Long-term patterns (promoted from short-term) + -- Long-term patterns (promoted from short-term) CREATE TABLE IF NOT EXISTS long_term_patterns ( id TEXT PRIMARY KEY, strategy TEXT NOT NULL, @@ -113,17 +113,17 @@ function initializeDatabase(db) { metadata TEXT ); - -- HNSW index metadata + -- HNSW index metadata CREATE TABLE IF NOT EXISTS hnsw_index ( id INTEGER PRIMARY KEY, - pattern_type TEXT NOT NULL, -- 'short_term' or 'long_term' + pattern_type TEXT NOT NULL, -- 'short_term' or 'long_term' pattern_id TEXT NOT NULL, vector_id INTEGER NOT NULL, created_at INTEGER NOT NULL, UNIQUE(pattern_type, pattern_id) ); - -- Learning trajectories + -- Learning trajectories CREATE TABLE IF NOT EXISTS trajectories ( id TEXT PRIMARY KEY, session_id TEXT NOT NULL, @@ -136,7 +136,7 @@ function initializeDatabase(db) { distilled_pattern_id TEXT ); - -- Learning metrics + -- Learning metrics CREATE TABLE IF NOT EXISTS learning_metrics ( id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp INTEGER NOT NULL, @@ -146,14 +146,14 @@ function initializeDatabase(db) { metadata TEXT ); - -- Session state + -- Session state CREATE TABLE 
IF NOT EXISTS session_state ( key TEXT PRIMARY KEY, value TEXT NOT NULL, updated_at INTEGER NOT NULL ); - -- Create indexes + -- Create indexes CREATE INDEX IF NOT EXISTS idx_short_term_domain ON short_term_patterns(domain); CREATE INDEX IF NOT EXISTS idx_short_term_quality ON short_term_patterns(quality DESC); CREATE INDEX IF NOT EXISTS idx_short_term_usage ON short_term_patterns(usage_count DESC); diff --git a/.claude/helpers/statusline.cjs b/.claude/helpers/statusline.cjs index 0f4b1e5c4..5858c4168 100644 --- a/.claude/helpers/statusline.cjs +++ b/.claude/helpers/statusline.cjs @@ -96,7 +96,7 @@ function safeStat(filePath) { return null; } -// Shared settings cache — read once, used by multiple functions +// Shared settings cache - read once, used by multiple functions let _settingsCache = undefined; function getSettings() { if (_settingsCache !== undefined) return _settingsCache; @@ -303,7 +303,7 @@ function getSwarmStatus() { return { activeAgents: 0, maxAgents: CONFIG.maxAgents, coordinationActive: false }; } -// System metrics (uses process.memoryUsage() — no shell spawn) +// System metrics (uses process.memoryUsage() - no shell spawn) function getSystemMetrics() { const memoryMB = Math.floor(process.memoryUsage().heapUsed / 1024 / 1024); const learning = getLearningStats(); @@ -351,7 +351,7 @@ function getSystemMetrics() { return { memoryMB, contextPct, intelligencePct, subAgents }; } -// ADR status (count files only — don't read contents) +// ADR status (count files only - don't read contents) function getADRStatus() { const complianceData = readJSON(path.join(CWD, '.claude-flow', 'metrics', 'adr-compliance.json')); if (complianceData) { @@ -470,7 +470,7 @@ function getAgentDBStats() { return { vectorCount, dbSizeKB: Math.floor(dbSizeKB), namespaces, hasHnsw }; } -// Test stats (count files only — NO reading file contents) +// Test stats (count files only - NO reading file contents) function getTestStats() { let testFiles = 0; @@ -708,7 +708,7 @@ let 
_stdinData = null; function getStdinData() { if (_stdinData !== undefined && _stdinData !== null) return _stdinData; try { - // Check if stdin is a TTY (manual run) — skip reading + // Check if stdin is a TTY (manual run) - skip reading if (process.stdin.isTTY) { _stdinData = null; return null; } // Read stdin synchronously via fd 0 const chunks = []; diff --git a/.claude/settings.json b/.claude/settings.json index f7606aef7..74313f896 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -132,7 +132,7 @@ ] }, "attribution": { - "commit": "Co-Authored-By: claude-flow ", + "commit": "", "pr": "🤖 Generated with [claude-flow](https://github.com/ruvnet/claude-flow)" }, "env": { diff --git a/.claude/skills/github-multi-repo/SKILL.md b/.claude/skills/github-multi-repo/SKILL.md index 73ff842fe..873a4d116 100644 --- a/.claude/skills/github-multi-repo/SKILL.md +++ b/.claude/skills/github-multi-repo/SKILL.md @@ -112,7 +112,7 @@ mcp__claude-flow__swarm_init({ // Execute task across repositories Bash(`cat /tmp/repos.txt | while read -r repo; do - gh repo clone org/$repo /tmp/$repo -- --depth=1 + gh repo clone org/$repo /tmp/$repo -- --depth=1 cd /tmp/$repo # Apply changes @@ -377,7 +377,7 @@ jobs: // Update each repository Bash(`echo "$TS_REPOS" | while read -r repo; do - gh repo clone org/$repo /tmp/$repo -- --depth=1 + gh repo clone org/$repo /tmp/$repo -- --depth=1 cd /tmp/$repo npm install --save-dev typescript@5.0.0 @@ -430,7 +430,7 @@ Part of #$TRACKING_ISSUE" // Scan all repositories Bash(`gh repo list org --limit 100 --json name | jq -r '.[].name' | \ while read -r repo; do - gh repo clone org/$repo /tmp/$repo -- --depth=1 + gh repo clone org/$repo /tmp/$repo -- --depth=1 cd /tmp/$repo npm audit --json > /tmp/audit-$repo.json done`) diff --git a/.claude/skills/github-release-management/SKILL.md b/.claude/skills/github-release-management/SKILL.md index 5ddeb335a..c1f00b4de 100644 --- a/.claude/skills/github-release-management/SKILL.md +++ 
b/.claude/skills/github-release-management/SKILL.md @@ -953,7 +953,7 @@ docker run --rm -v $(pwd):/app node:20 \ ### Issue: Test Failures in CI ```bash # Run tests with detailed output -npm run test -- --verbose --coverage +npm run test -- --verbose --coverage # Check for environment-specific issues npm run test:ci diff --git a/.claude/skills/v3-ddd-architecture/SKILL.md b/.claude/skills/v3-ddd-architecture/SKILL.md index 227b37867..df157ac1b 100644 --- a/.claude/skills/v3-ddd-architecture/SKILL.md +++ b/.claude/skills/v3-ddd-architecture/SKILL.md @@ -438,5 +438,5 @@ Task("DDD architecture implementation", ### Plugin Development ```bash # Create domain plugin -npm run create:plugin -- --name swarm-coordination --template domain +npm run create:plugin -- --name swarm-coordination --template domain ``` \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index e37cd28cd..aa2f03b5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,33 +5,33 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [v0.5.0-esp32] — 2026-03-15 ### Added -- **60 GHz mmWave sensor fusion (ADR-063)** — Auto-detects Seeed MR60BHA2 (60 GHz, HR/BR/presence) and HLK-LD2410 (24 GHz, presence/distance) on UART at boot. Probes 115200 then 256000 baud, registers device capabilities, starts background parser. -- **48-byte fused vitals packet** (magic `0xC5110004`) — Kalman-style fusion: mmWave 80% + CSI 20% when both available. Automatic fallback to standard 32-byte CSI-only packet. -- **Server-side fusion bridge** (`scripts/mmwave_fusion_bridge.py`) — Reads two serial ports simultaneously for dual-sensor setups where mmWave runs on a separate ESP32. 
-- **Multimodal ambient intelligence roadmap (ADR-064)** — 25+ applications from fall detection to sleep monitoring to RF tomography. +- **60 GHz mmWave sensor fusion (ADR-063)** - Auto-detects Seeed MR60BHA2 (60 GHz, HR/BR/presence) and HLK-LD2410 (24 GHz, presence/distance) on UART at boot. Probes 115200 then 256000 baud, registers device capabilities, starts background parser. +- **48-byte fused vitals packet** (magic `0xC5110004`) - Kalman-style fusion: mmWave 80% + CSI 20% when both available. Automatic fallback to standard 32-byte CSI-only packet. +- **Server-side fusion bridge** (`scripts/mmwave_fusion_bridge.py`) - Reads two serial ports simultaneously for dual-sensor setups where mmWave runs on a separate ESP32. +- **Multimodal ambient intelligence roadmap (ADR-064)** - 25+ applications from fall detection to sleep monitoring to RF tomography. ### Verified - Real hardware: ESP32-S3 (COM7) WiFi CSI + ESP32-C6/MR60BHA2 (COM4) 60 GHz mmWave running concurrently. HR=75 bpm, BR=25/min at 52 cm range. All 11 QEMU CI jobs green. -## [v0.4.3-esp32] — 2026-03-15 +## [v0.4.3-esp32] - 2026-03-15 ### Fixed -- **Fall detection false positives (#263)** — Default threshold raised from 2.0 to 15.0 rad/s²; normal walking (2-5 rad/s²) no longer triggers alerts. Added 3-consecutive-frame debounce and 5-second cooldown between alerts. Verified on real ESP32-S3 hardware: 0 false alerts in 60s / 1,300+ live WiFi CSI frames. -- **Kconfig default mismatch** — `CONFIG_EDGE_FALL_THRESH` Kconfig default was still 2000 (=2.0) while `nvs_config.c` fallback was updated to 15.0. Fixed Kconfig to 15000. Caught by real hardware testing — mock data did not reproduce. -- **provision.py NVS generator API change** — `esp_idf_nvs_partition_gen` package changed its `generate()` signature; switched to subprocess-first invocation for cross-version compatibility. 
-- **QEMU CI pipeline (11 jobs)** — Fixed all failures: fuzz test `esp_timer` stubs, QEMU `libgcrypt` dependency, NVS matrix generator, IDF container `pip` path, flash image padding, validation WARN handling, swarm `ip`/`cargo` missing. +- **Fall detection false positives (#263)** - Default threshold raised from 2.0 to 15.0 rad/s²; normal walking (2-5 rad/s²) no longer triggers alerts. Added 3-consecutive-frame debounce and 5-second cooldown between alerts. Verified on real ESP32-S3 hardware: 0 false alerts in 60s / 1,300+ live WiFi CSI frames. +- **Kconfig default mismatch** - `CONFIG_EDGE_FALL_THRESH` Kconfig default was still 2000 (=2.0) while `nvs_config.c` fallback was updated to 15.0. Fixed Kconfig to 15000. Caught by real hardware testing - mock data did not reproduce. +- **provision.py NVS generator API change** - `esp_idf_nvs_partition_gen` package changed its `generate()` signature; switched to subprocess-first invocation for cross-version compatibility. +- **QEMU CI pipeline (11 jobs)** - Fixed all failures: fuzz test `esp_timer` stubs, QEMU `libgcrypt` dependency, NVS matrix generator, IDF container `pip` path, flash image padding, validation WARN handling, swarm `ip`/`cargo` missing. ### Added -- **4MB flash support (#265)** — `partitions_4mb.csv` and `sdkconfig.defaults.4mb` for ESP32-S3 boards with 4MB flash (e.g. SuperMini). Dual OTA slots, 1.856 MB each. Thanks to @sebbu for the community workaround that confirmed feasibility. -- **`--strict` flag** for `validate_qemu_output.py` — WARNs now pass by default in CI (no real WiFi in QEMU); use `--strict` to fail on warnings. +- **4MB flash support (#265)** - `partitions_4mb.csv` and `sdkconfig.defaults.4mb` for ESP32-S3 boards with 4MB flash (e.g. SuperMini). Dual OTA slots, 1.856 MB each. Thanks to @sebbu for the community workaround that confirmed feasibility. 
+- **`--strict` flag** for `validate_qemu_output.py` - WARNs now pass by default in CI (no real WiFi in QEMU); use `--strict` to fail on warnings. ## [Unreleased] ### Added -- **QEMU ESP32-S3 testing platform (ADR-061)** — 9-layer firmware testing without hardware +- **QEMU ESP32-S3 testing platform (ADR-061)** - 9-layer firmware testing without hardware - Mock CSI generator with 10 physics-based scenarios (empty room, walking, fall, multi-person, etc.) - Single-node QEMU runner with 16-check UART validation - Multi-node TDM mesh simulation (TAP networking, 2-6 nodes) @@ -41,14 +41,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - NVS provisioning matrix (14 configs) - Snapshot-based regression testing (sub-second VM restore) - Chaos testing with fault injection + health monitoring -- **QEMU Swarm Configurator (ADR-062)** — YAML-driven multi-ESP32 test orchestration +- **QEMU Swarm Configurator (ADR-062)** - YAML-driven multi-ESP32 test orchestration - 4 topologies: star, mesh, line, ring - 3 node roles: sensor, coordinator, gateway - 9 swarm-level assertions (boot, crashes, TDM, frame rate, fall detection, etc.) 
- 7 presets: smoke (2n/15s), standard (3n/60s), ci-matrix, large-mesh, line-relay, ring-fault, heterogeneous - Health oracle with cross-node validation -- **QEMU installer** (`install-qemu.sh`) — auto-detects OS, installs deps, builds Espressif QEMU fork -- **Unified QEMU CLI** (`qemu-cli.sh`) — single entry point for all 11 QEMU test commands +- **QEMU installer** (`install-qemu.sh`) - auto-detects OS, installs deps, builds Espressif QEMU fork +- **Unified QEMU CLI** (`qemu-cli.sh`) - single entry point for all 11 QEMU test commands - CI: `firmware-qemu.yml` workflow with QEMU test matrix, fuzz testing, NVS validation, and swarm test jobs - User guide: QEMU testing and swarm configurator section with plain-language walkthrough @@ -59,35 +59,35 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 16 bugs from ADR-062 deep review (log filename mismatch, SLIRP port collision, heap false positives, etc.) - All scripts: `--help` flags, prerequisite checks with install hints, standardized exit codes -- **Sensing server UI API completion (ADR-043)** — 14 fully-functional REST endpoints for model management, CSI recording, and training control +- **Sensing server UI API completion (ADR-043)** - 14 fully-functional REST endpoints for model management, CSI recording, and training control - Model CRUD: `GET /api/v1/models`, `GET /api/v1/models/active`, `POST /api/v1/models/load`, `POST /api/v1/models/unload`, `DELETE /api/v1/models/:id`, `GET /api/v1/models/lora/profiles`, `POST /api/v1/models/lora/activate` - CSI recording: `GET /api/v1/recording/list`, `POST /api/v1/recording/start`, `POST /api/v1/recording/stop`, `DELETE /api/v1/recording/:id` - Training control: `GET /api/v1/train/status`, `POST /api/v1/train/start`, `POST /api/v1/train/stop` - Recording writes CSI frames to `.jsonl` files via tokio background task - Model/recording directories scanned at startup, state managed via `Arc>` -- **ADR-044: Provisioning tool enhancements** — 
5-phase plan for complete NVS coverage (7 missing keys), JSON config files, mesh presets, read-back/verify, and auto-detect -- **25 real mobile tests** replacing `it.todo()` placeholders — 205 assertions covering components, services, stores, hooks, screens, and utils -- **Project MERIDIAN (ADR-027)** — Cross-environment domain generalization for WiFi pose estimation (1,858 lines, 72 tests) - - `HardwareNormalizer` — Catmull-Rom cubic interpolation resamples any hardware CSI to canonical 56 subcarriers; z-score + phase sanitization - - `DomainFactorizer` + `GradientReversalLayer` — adversarial disentanglement of pose-relevant vs environment-specific features - - `GeometryEncoder` + `FilmLayer` — Fourier positional encoding + DeepSets + FiLM for zero-shot deployment given AP positions - - `VirtualDomainAugmentor` — synthetic environment diversity (room scale, wall material, scatterers, noise) for 4x training augmentation - - `RapidAdaptation` — 10-second unsupervised calibration via contrastive test-time training + LoRA adapters - - `CrossDomainEvaluator` — 6-metric evaluation protocol (MPJPE in-domain/cross-domain/few-shot/cross-hardware, domain gap ratio, adaptation speedup) -- ADR-027: Cross-Environment Domain Generalization — 10 SOTA citations (PerceptAlign, X-Fi ICLR 2025, AM-FM, DGSense, CVPR 2024) -- **Cross-platform RSSI adapters** — macOS CoreWLAN (`MacosCoreWlanScanner`) and Linux `iw` (`LinuxIwScanner`) Rust adapters with `#[cfg(target_os)]` gating +- **ADR-044: Provisioning tool enhancements** - 5-phase plan for complete NVS coverage (7 missing keys), JSON config files, mesh presets, read-back/verify, and auto-detect +- **25 real mobile tests** replacing `it.todo()` placeholders - 205 assertions covering components, services, stores, hooks, screens, and utils +- **Project MERIDIAN (ADR-027)** - Cross-environment domain generalization for WiFi pose estimation (1,858 lines, 72 tests) + - `HardwareNormalizer` - Catmull-Rom cubic interpolation resamples any 
hardware CSI to canonical 56 subcarriers; z-score + phase sanitization + - `DomainFactorizer` + `GradientReversalLayer` - adversarial disentanglement of pose-relevant vs environment-specific features + - `GeometryEncoder` + `FilmLayer` - Fourier positional encoding + DeepSets + FiLM for zero-shot deployment given AP positions + - `VirtualDomainAugmentor` - synthetic environment diversity (room scale, wall material, scatterers, noise) for 4x training augmentation + - `RapidAdaptation` - 10-second unsupervised calibration via contrastive test-time training + LoRA adapters + - `CrossDomainEvaluator` - 6-metric evaluation protocol (MPJPE in-domain/cross-domain/few-shot/cross-hardware, domain gap ratio, adaptation speedup) +- ADR-027: Cross-Environment Domain Generalization - 10 SOTA citations (PerceptAlign, X-Fi ICLR 2025, AM-FM, DGSense, CVPR 2024) +- **Cross-platform RSSI adapters** - macOS CoreWLAN (`MacosCoreWlanScanner`) and Linux `iw` (`LinuxIwScanner`) Rust adapters with `#[cfg(target_os)]` gating - macOS CoreWLAN Python sensing adapter with Swift helper (`mac_wifi.swift`) - macOS synthetic BSSID generation (FNV-1a hash) for Sonoma 14.4+ BSSID redaction - Linux `iw dev scan` parser with freq-to-channel conversion and `scan dump` (no-root) mode - ADR-025: macOS CoreWLAN WiFi Sensing (ORCA) ### Fixed -- **sendto ENOMEM crash (Issue #127)** — CSI callbacks in promiscuous mode exhaust lwIP pbuf pool causing guru meditation crash. Fixed with 50 Hz rate limiter in `csi_collector.c` and 100 ms ENOMEM backoff in `stream_sender.c`. 
Hardware-verified on ESP32-S3 (200+ callbacks, zero crashes) -- **Provisioning script missing TDM/edge flags (Issue #130)** — Added `--tdm-slot`, `--tdm-total`, `--edge-tier`, `--pres-thresh`, `--fall-thresh`, `--vital-win`, `--vital-int`, `--subk-count` to `provision.py` -- **WebSocket "RECONNECTING" on Dashboard/Live Demo** — `sensingService.start()` now called on app init in `app.js` so WebSocket connects immediately instead of waiting for Sensing tab visit -- **Mobile WebSocket port** — `ws.service.ts` `buildWsUrl()` uses same-origin port instead of hardcoded port 3001 -- **Mobile Jest config** — `testPathIgnorePatterns` no longer silently ignores the entire test directory -- Removed synthetic byte counters from Python `MacosWifiCollector` — now reports `tx_bytes=0, rx_bytes=0` instead of fake incrementing values +- **sendto ENOMEM crash (Issue #127)** - CSI callbacks in promiscuous mode exhaust lwIP pbuf pool causing guru meditation crash. Fixed with 50 Hz rate limiter in `csi_collector.c` and 100 ms ENOMEM backoff in `stream_sender.c`. 
Hardware-verified on ESP32-S3 (200+ callbacks, zero crashes) +- **Provisioning script missing TDM/edge flags (Issue #130)** - Added `--tdm-slot`, `--tdm-total`, `--edge-tier`, `--pres-thresh`, `--fall-thresh`, `--vital-win`, `--vital-int`, `--subk-count` to `provision.py` +- **WebSocket "RECONNECTING" on Dashboard/Live Demo** - `sensingService.start()` now called on app init in `app.js` so WebSocket connects immediately instead of waiting for Sensing tab visit +- **Mobile WebSocket port** - `ws.service.ts` `buildWsUrl()` uses same-origin port instead of hardcoded port 3001 +- **Mobile Jest config** - `testPathIgnorePatterns` no longer silently ignores the entire test directory +- Removed synthetic byte counters from Python `MacosWifiCollector` - now reports `tx_bytes=0, rx_bytes=0` instead of fake incrementing values --- @@ -95,23 +95,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 Major release: AETHER contrastive embedding model, Docker Hub images, and comprehensive UI overhaul. 
-### Added — AETHER Contrastive Embedding Model (ADR-024) -- **Project AETHER** — self-supervised contrastive learning for WiFi CSI fingerprinting, similarity search, and anomaly detection (`9bbe956`) +### Added - AETHER Contrastive Embedding Model (ADR-024) +- **Project AETHER** - self-supervised contrastive learning for WiFi CSI fingerprinting, similarity search, and anomaly detection (`9bbe956`) - `embedding.rs` module: `ProjectionHead`, `InfoNceLoss`, `CsiAugmenter`, `FingerprintIndex`, `PoseEncoder`, `EmbeddingExtractor` (909 lines, zero external ML dependencies) - SimCLR-style pretraining with 5 physically-motivated augmentations (temporal jitter, subcarrier masking, Gaussian noise, phase rotation, amplitude scaling) - CLI flags: `--pretrain`, `--pretrain-epochs`, `--embed`, `--build-index ` - Four HNSW-compatible fingerprint index types: `env_fingerprint`, `activity_pattern`, `temporal_baseline`, `person_track` - Cross-modal `PoseEncoder` for WiFi-to-camera embedding alignment - VICReg regularization for embedding collapse prevention -- 53K total parameters (55 KB at INT8) — fits on ESP32 +- 53K total parameters (55 KB at INT8) - fits on ESP32 -### Added — Docker & Deployment +### Added - Docker & Deployment - Published Docker Hub images: `ruvnet/wifi-densepose:latest` (132 MB Rust) and `ruvnet/wifi-densepose:python` (569 MB) (`add9f19`) - Multi-stage Dockerfile for Rust sensing server with RuVector crates - `docker-compose.yml` orchestrating both Rust and Python services - RVF model export via `--export-rvf` and load via `--load-rvf` CLI flags -### Added — Documentation +### Added - Documentation - 33 use cases across 4 vertical tiers: Everyday, Specialized, Robotics & Industrial, Extreme (`0afd9c5`) - "Why WiFi Wins" comparison table (WiFi vs camera vs LIDAR vs wearable vs PIR) - Mermaid architecture diagrams: end-to-end pipeline, signal processing detail, deployment topology (`50f0fc9`) @@ -122,8 +122,8 @@ Major release: AETHER contrastive embedding 
model, Docker Hub images, and compre - CSI hardware requirement notice (`528b394`) ### Fixed -- **UI auto-detects server port from page origin** — no more hardcoded `localhost:8080`; works on any port (Docker :3000, native :8080, custom) (`3b72f35`, closes #55) -- **Docker port mismatch** — server now binds 3000/3001 inside container as documented (`44b9c30`) +- **UI auto-detects server port from page origin** - no more hardcoded `localhost:8080`; works on any port (Docker :3000, native :8080, custom) (`3b72f35`, closes #55) +- **Docker port mismatch** - server now binds 3000/3001 inside container as documented (`44b9c30`) - Added `/ws/sensing` WebSocket route to the HTTP server so UI only needs one port - Fixed README API endpoint references: `/api/v1/health` → `/health`, `/api/v1/sensing` → `/api/v1/sensing/latest` - Multi-person tracking limit corrected: configurable default 10, no hard software cap (`e2ce250`) @@ -134,41 +134,41 @@ Major release: AETHER contrastive embedding model, Docker Hub images, and compre Major release: complete Rust sensing server, full DensePose training pipeline, RuVector v2.0.4 integration, ESP32-S3 firmware, and 6 security hardening patches. 
-### Added — Rust Sensing Server +### Added - Rust Sensing Server - **Full DensePose-compatible REST API** served by Axum (`d956c30`) - - `GET /health` — server health - - `GET /api/v1/sensing/latest` — live CSI sensing data - - `GET /api/v1/vital-signs` — breathing rate (6-30 BPM) and heartbeat (40-120 BPM) - - `GET /api/v1/pose/current` — 17 COCO keypoints derived from WiFi signal field - - `GET /api/v1/info` — server build and feature info - - `GET /api/v1/model/info` — RVF model container metadata - - `ws://host/ws/sensing` — real-time WebSocket stream + - `GET /health` - server health + - `GET /api/v1/sensing/latest` - live CSI sensing data + - `GET /api/v1/vital-signs` - breathing rate (6-30 BPM) and heartbeat (40-120 BPM) + - `GET /api/v1/pose/current` - 17 COCO keypoints derived from WiFi signal field + - `GET /api/v1/info` - server build and feature info + - `GET /api/v1/model/info` - RVF model container metadata + - `ws://host/ws/sensing` - real-time WebSocket stream - Three data sources: `--source esp32` (UDP CSI), `--source windows` (netsh RSSI), `--source simulated` (deterministic reference) - Auto-detection: server probes ESP32 UDP and Windows WiFi, falls back to simulated - Three.js visualization UI with 3D body skeleton, signal heatmap, phase plot, Doppler bars, vital signs panel - Static UI serving via `--ui-path` flag - Throughput: 9,520–11,665 frames/sec (release build) -### Added — ADR-021: Vital Sign Detection +### Added - ADR-021: Vital Sign Detection - `VitalSignDetector` with breathing (6-30 BPM) and heartbeat (40-120 BPM) extraction from CSI fluctuations (`1192de9`) - FFT-based spectral analysis with configurable band-pass filters - Confidence scoring based on spectral peak prominence - REST endpoint `/api/v1/vital-signs` with real-time JSON output -### Added — ADR-023: DensePose Training Pipeline (Phases 1-8) +### Added - ADR-023: DensePose Training Pipeline (Phases 1-8) - `wifi-densepose-train` crate with complete 8-phase pipeline 
(`fc409df`, `ec98e40`, `fce1271`) - Phase 1: `DataPipeline` with MM-Fi and Wi-Pose dataset loaders - - Phase 2: `CsiToPoseTransformer` — 4-head cross-attention + 2-layer GCN on COCO skeleton + - Phase 2: `CsiToPoseTransformer` - 4-head cross-attention + 2-layer GCN on COCO skeleton - Phase 3: 6-term composite loss (MSE, bone length, symmetry, joint angle, temporal, confidence) - Phase 4: `DynamicPersonMatcher` via ruvector-mincut (O(n^1.5 log n) Hungarian assignment) - - Phase 5: `SonaAdapter` — MicroLoRA rank-4 with EWC++ memory preservation - - Phase 6: `SparseInference` — progressive 3-layer model loading (A: essential, B: refinement, C: full) - - Phase 7: `RvfContainer` — single-file model packaging with segment-based binary format + - Phase 5: `SonaAdapter` - MicroLoRA rank-4 with EWC++ memory preservation + - Phase 6: `SparseInference` - progressive 3-layer model loading (A: essential, B: refinement, C: full) + - Phase 7: `RvfContainer` - single-file model packaging with segment-based binary format - Phase 8: End-to-end training with cosine-annealing LR, early stopping, checkpoint saving - CLI: `--train`, `--dataset`, `--epochs`, `--save-rvf`, `--load-rvf`, `--export-rvf` - Benchmark: ~11,665 fps inference, 229 tests passing -### Added — ADR-016: RuVector Training Integration (all 5 crates) +### Added - ADR-016: RuVector Training Integration (all 5 crates) - `ruvector-mincut` → `DynamicPersonMatcher` in `metrics.rs` + subcarrier selection (`81ad09d`, `a7dd31c`) - `ruvector-attn-mincut` → antenna attention in `model.rs` + noise-gated spectrogram - `ruvector-temporal-tensor` → `CompressedCsiBuffer` in `dataset.rs` + compressed breathing/heartbeat @@ -176,16 +176,16 @@ Major release: complete Rust sensing server, full DensePose training pipeline, R - `ruvector-attention` → spatial attention in `model.rs` + attention-weighted BVP - Vendored all 11 RuVector crates under `vendor/ruvector/` (`d803bfe`) -### Added — ADR-017: RuVector Signal & MAT Integration (7 
integration points) -- `gate_spectrogram()` — attention-gated noise suppression (`18170d7`) -- `attention_weighted_bvp()` — sensitivity-weighted velocity profiles -- `mincut_subcarrier_partition()` — dynamic sensitive/insensitive subcarrier split -- `solve_fresnel_geometry()` — TX-body-RX distance estimation +### Added - ADR-017: RuVector Signal & MAT Integration (7 integration points) +- `gate_spectrogram()` - attention-gated noise suppression (`18170d7`) +- `attention_weighted_bvp()` - sensitivity-weighted velocity profiles +- `mincut_subcarrier_partition()` - dynamic sensitive/insensitive subcarrier split +- `solve_fresnel_geometry()` - TX-body-RX distance estimation - `CompressedBreathingBuffer` + `CompressedHeartbeatSpectrogram` - `BreathingDetector` + `HeartbeatDetector` (MAT crate, real FFT + micro-Doppler) - Feature-gated behind `cfg(feature = "ruvector")` (`ab2453e`) -### Added — ADR-018: ESP32-S3 Firmware & Live CSI Pipeline +### Added - ADR-018: ESP32-S3 Firmware & Live CSI Pipeline - ESP32-S3 firmware with FreeRTOS CSI extraction (`92a5182`) - ADR-018 binary frame format: `[0xAD, 0x18, len_hi, len_lo, payload]` - Rust `Esp32Aggregator` receiving UDP frames on port 5005 @@ -193,7 +193,7 @@ Major release: complete Rust sensing server, full DensePose training pipeline, R - NVS provisioning for WiFi credentials - Pre-built binary quick start documentation (`696a726`) -### Added — ADR-014: SOTA Signal Processing +### Added - ADR-014: SOTA Signal Processing - 6 algorithms, 83 tests (`fcb93cc`) - Hampel filter (median + MAD, resistant to 50% contamination) - Conjugate multiplication (reference-antenna ratio, cancels common-mode noise) @@ -202,41 +202,41 @@ Major release: complete Rust sensing server, full DensePose training pipeline, R - Body Velocity Profile (micro-Doppler extraction, 5.7x speedup) - Attention-gated spectrogram (learned noise suppression) -### Added — ADR-015: Public Dataset Training Strategy +### Added - ADR-015: Public Dataset Training 
Strategy - MM-Fi and Wi-Pose dataset specifications with download links (`4babb32`, `5dc2f66`) - Verified dataset dimensions, sampling rates, and annotation formats - Cross-dataset evaluation protocol -### Added — WiFi-Mat Disaster Detection Module +### Added - WiFi-Mat Disaster Detection Module - Multi-AP triangulation for through-wall survivor detection (`a17b630`, `6b20ff0`) - Triage classification (breathing, heartbeat, motion) - Domain events: `survivor_detected`, `survivor_updated`, `alert_created` - WebSocket broadcast at `/ws/mat/stream` -### Added — Infrastructure +### Added - Infrastructure - Guided 7-step interactive installer with 8 hardware profiles (`8583f3e`) - Comprehensive build guide for Linux, macOS, Windows, Docker, ESP32 (`45f8a0d`) - 12 Architecture Decision Records (ADR-001 through ADR-012) (`337dd96`) -### Added — UI & Visualization +### Added - UI & Visualization - Sensing-only UI mode with Gaussian splat visualization (`b7e0f07`) - Three.js 3D body model (17 joints, 16 limbs) with signal-viz components - Tabs: Dashboard, Hardware, Live Demo, Sensing, Architecture, Performance, Applications - WebSocket client with automatic reconnection and exponential backoff -### Added — Rust Signal Processing Crate +### Added - Rust Signal Processing Crate - Complete Rust port of WiFi-DensePose with modular workspace (`6ed69a3`) - - `wifi-densepose-signal` — CSI processing, phase sanitization, feature extraction - - `wifi-densepose-core` — shared types and configuration - - `wifi-densepose-nn` — neural network inference (DensePose head, RCNN) - - `wifi-densepose-hardware` — ESP32 aggregator, hardware interfaces - - `wifi-densepose-config` — configuration management + - `wifi-densepose-signal` - CSI processing, phase sanitization, feature extraction + - `wifi-densepose-core` - shared types and configuration + - `wifi-densepose-nn` - neural network inference (DensePose head, RCNN) + - `wifi-densepose-hardware` - ESP32 aggregator, hardware interfaces + - 
`wifi-densepose-config` - configuration management - Comprehensive benchmarks and validation tests (`3ccb301`) -### Added — Python Sensing Pipeline -- `WindowsWifiCollector` — RSSI collection via `netsh wlan show networks` -- `RssiFeatureExtractor` — variance, spectral bands (motion 0.5-4 Hz, breathing 0.1-0.5 Hz), change points -- `PresenceClassifier` — rule-based 3-state classification (ABSENT / PRESENT_STILL / ACTIVE) +### Added - Python Sensing Pipeline +- `WindowsWifiCollector` - RSSI collection via `netsh wlan show networks` +- `RssiFeatureExtractor` - variance, spectral bands (motion 0.5-4 Hz, breathing 0.1-0.5 Hz), change points +- `PresenceClassifier` - rule-based 3-state classification (ABSENT / PRESENT_STILL / ACTIVE) - Cross-receiver agreement scoring for multi-AP confidence boosting - WebSocket sensing server (`ws_server.py`) broadcasting JSON at 2 Hz - Deterministic CSI proof bundles for reproducible verification (`v1/data/proof/`) @@ -260,7 +260,7 @@ Major release: complete Rust sensing server, full DensePose training pipeline, R - Fix XSS vulnerabilities in UI components (`5db55fd`) - Fix command injection in statusline.cjs (`4cb01fd`) - Fix path traversal vulnerabilities (`896c4fc`) -- Fix insecure WebSocket connections — enforce wss:// on non-localhost (`ac094d4`) +- Fix insecure WebSocket connections - enforce wss:// on non-localhost (`ac094d4`) - Fix GitHub Actions shell injection (`ab2e7b4`) - Fix 10 additional vulnerabilities, remove 12 dead code instances (`7afdad0`) diff --git a/CLAUDE.md b/CLAUDE.md index 4c11fd733..a82a9dfa1 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,4 +1,4 @@ -# Claude Code Configuration — WiFi-DensePose + Claude Flow V3 +# Claude Code Configuration - WiFi-DensePose + Claude Flow V3 ## Project: wifi-densepose @@ -11,7 +11,7 @@ Dual codebase: Python v1 (`v1/`) and Rust port (`rust-port/wifi-densepose-rs/`). 
| `wifi-densepose-signal` | SOTA signal processing + RuvSense multistatic sensing (14 modules) | | `wifi-densepose-nn` | Neural network inference (ONNX, PyTorch, Candle backends) | | `wifi-densepose-train` | Training pipeline with ruvector integration + ruview_metrics | -| `wifi-densepose-mat` | Mass Casualty Assessment Tool — disaster survivor detection | +| `wifi-densepose-mat` | Mass Casualty Assessment Tool - disaster survivor detection | | `wifi-densepose-hardware` | ESP32 aggregator, TDM protocol, channel hopping firmware | | `wifi-densepose-ruvector` | RuVector v2.0.4 integration + cross-viewpoint fusion (5 modules) | | `wifi-densepose-api` | REST API (Axum) | @@ -60,8 +60,8 @@ All 5 ruvector crates integrated in workspace: 43 ADRs in `docs/adr/` (ADR-001 through ADR-043). Key ones: - ADR-014: SOTA signal processing (Accepted) - ADR-015: MM-Fi + Wi-Pose training datasets (Accepted) -- ADR-016: RuVector training pipeline integration (Accepted — complete) -- ADR-017: RuVector signal + MAT integration (Proposed — next target) +- ADR-016: RuVector training pipeline integration (Accepted - complete) +- ADR-017: RuVector signal + MAT integration (Proposed - next target) - ADR-024: Contrastive CSI embedding / AETHER (Accepted) - ADR-027: Cross-environment domain generalization / MERIDIAN (Accepted) - ADR-028: ESP32 capability audit + witness verification (Accepted) @@ -75,29 +75,29 @@ All 5 ruvector crates integrated in workspace: | Device | Port | Chip | Role | Cost | |--------|------|------|------|------| | ESP32-S3 (8MB flash) | COM7 | Xtensa dual-core | WiFi CSI sensing node | ~$9 | -| ESP32-S3 SuperMini (4MB) | — | Xtensa dual-core | WiFi CSI (compact) | ~$6 | +| ESP32-S3 SuperMini (4MB) | - | Xtensa dual-core | WiFi CSI (compact) | ~$6 | | ESP32-C6 + Seeed MR60BHA2 | COM4 | RISC-V + 60 GHz FMCW | mmWave HR/BR/presence | ~$15 | -| HLK-LD2410 | — | 24 GHz FMCW | Presence + distance | ~$3 | +| HLK-LD2410 | - | 24 GHz FMCW | Presence + distance | ~$3 | -**Not 
supported:** ESP32 (original), ESP32-C3 — single-core, can't run CSI DSP pipeline. +**Not supported:** ESP32 (original), ESP32-C3 - single-core, can't run CSI DSP pipeline. ### Build & Test Commands (this repo) ```bash -# Rust — full workspace tests (1,031+ tests, ~2 min) +# Rust - full workspace tests (1,031+ tests, ~2 min) cd rust-port/wifi-densepose-rs cargo test --workspace --no-default-features -# Rust — single crate check (no GPU needed) +# Rust - single crate check (no GPU needed) cargo check -p wifi-densepose-train --no-default-features -# Python — deterministic proof verification (SHA-256) +# Python - deterministic proof verification (SHA-256) python v1/data/proof/verify.py -# Python — test suite +# Python - test suite cd v1 && python -m pytest tests/ -x -q ``` -### ESP32 Firmware Build (Windows — Python subprocess required) +### ESP32 Firmware Build (Windows - Python subprocess required) ```bash # Build 8MB firmware (real WiFi CSI mode, no mocks) # See CLAUDE.local.md for the full Python subprocess command @@ -125,7 +125,7 @@ python -m serial.tools.miniterm COM7 115200 4. Tag: `git tag v0.X.Y-esp32 && git push origin v0.X.Y-esp32` 5. Release: `gh release create v0.X.Y-esp32 --title "..." --notes-file ...` 6. Verify on real hardware (COM7) before publishing -7. **CRITICAL:** Always test with real WiFi CSI, not mock mode — mock missed the Kconfig threshold bug +7. **CRITICAL:** Always test with real WiFi CSI, not mock mode - mock missed the Kconfig threshold bug ### Crate Publishing Order Crates must be published in dependency order: @@ -150,18 +150,18 @@ Crates must be published in dependency order: **After any significant code change, run the full validation:** ```bash -# 1. Rust tests — must be 1,031+ passed, 0 failed +# 1. Rust tests - must be 1,031+ passed, 0 failed cd rust-port/wifi-densepose-rs cargo test --workspace --no-default-features -# 2. Python proof — must print VERDICT: PASS +# 2. Python proof - must print VERDICT: PASS cd ../.. 
python v1/data/proof/verify.py # 3. Generate witness bundle (includes both above + firmware hashes) bash scripts/generate-witness-bundle.sh -# 4. Self-verify the bundle — must be 7/7 PASS +# 4. Self-verify the bundle - must be 7/7 PASS cd dist/witness-bundle-ADR028-*/ bash VERIFY.sh ``` @@ -174,20 +174,20 @@ python v1/data/proof/verify.py ``` **Witness bundle contents** (`dist/witness-bundle-ADR028-.tar.gz`): -- `WITNESS-LOG-028.md` — 33-row attestation matrix with evidence per capability -- `ADR-028-esp32-capability-audit.md` — Full audit findings -- `proof/verify.py` + `expected_features.sha256` — Deterministic pipeline proof -- `test-results/rust-workspace-tests.log` — Full cargo test output -- `firmware-manifest/source-hashes.txt` — SHA-256 of all 7 ESP32 firmware files -- `crate-manifest/versions.txt` — All 15 crates with versions -- `VERIFY.sh` — One-command self-verification for recipients +- `WITNESS-LOG-028.md` - 33-row attestation matrix with evidence per capability +- `ADR-028-esp32-capability-audit.md` - Full audit findings +- `proof/verify.py` + `expected_features.sha256` - Deterministic pipeline proof +- `test-results/rust-workspace-tests.log` - Full cargo test output +- `firmware-manifest/source-hashes.txt` - SHA-256 of all 7 ESP32 firmware files +- `crate-manifest/versions.txt` - All 15 crates with versions +- `VERIFY.sh` - One-command self-verification for recipients **Key proof artifacts:** -- `v1/data/proof/verify.py` — Trust Kill Switch: feeds reference signal through production pipeline, hashes output -- `v1/data/proof/expected_features.sha256` — Published expected hash -- `v1/data/proof/sample_csi_data.json` — 1,000 synthetic CSI frames (seed=42) -- `docs/WITNESS-LOG-028.md` — 11-step reproducible verification procedure -- `docs/adr/ADR-028-esp32-capability-audit.md` — Complete audit record +- `v1/data/proof/verify.py` - Trust Kill Switch: feeds reference signal through production pipeline, hashes output +- 
`v1/data/proof/expected_features.sha256` - Published expected hash +- `v1/data/proof/sample_csi_data.json` - 1,000 synthetic CSI frames (seed=42) +- `docs/WITNESS-LOG-028.md` - 11-step reproducible verification procedure +- `docs/adr/ADR-028-esp32-capability-audit.md` - Complete audit record ### Branch Default branch: `main` @@ -202,24 +202,24 @@ Active feature branch: `ruvsense-full-implementation` (PR #77) - ALWAYS prefer editing an existing file to creating a new one - NEVER proactively create documentation files (*.md) or README files unless explicitly requested - NEVER save working files, text/mds, or tests to the root folder -- Never continuously check status after spawning a swarm — wait for results +- Never continuously check status after spawning a swarm - wait for results - ALWAYS read a file before editing it - NEVER commit secrets, credentials, or .env files ## File Organization -- NEVER save to root folder — use the directories below -- `docs/adr/` — Architecture Decision Records (43 ADRs) -- `docs/ddd/` — Domain-Driven Design models -- `rust-port/wifi-densepose-rs/crates/` — Rust workspace crates (15 crates) -- `rust-port/wifi-densepose-rs/crates/wifi-densepose-signal/src/ruvsense/` — RuvSense multistatic modules (14 files) -- `rust-port/wifi-densepose-rs/crates/wifi-densepose-ruvector/src/viewpoint/` — Cross-viewpoint fusion (5 files) -- `rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/src/esp32/` — ESP32 TDM protocol -- `firmware/esp32-csi-node/main/` — ESP32 C firmware (channel hopping, NVS config, TDM) -- `v1/src/` — Python source (core, hardware, services, api) -- `v1/data/proof/` — Deterministic CSI proof bundles -- `.claude-flow/` — Claude Flow coordination state (committed for team sharing) -- `.claude/` — Claude Code settings, agents, memory (committed for team sharing) +- NEVER save to root folder - use the directories below +- `docs/adr/` - Architecture Decision Records (43 ADRs) +- `docs/ddd/` - Domain-Driven Design models +- 
`rust-port/wifi-densepose-rs/crates/` - Rust workspace crates (15 crates) +- `rust-port/wifi-densepose-rs/crates/wifi-densepose-signal/src/ruvsense/` - RuvSense multistatic modules (14 files) +- `rust-port/wifi-densepose-rs/crates/wifi-densepose-ruvector/src/viewpoint/` - Cross-viewpoint fusion (5 files) +- `rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/src/esp32/` - ESP32 TDM protocol +- `firmware/esp32-csi-node/main/` - ESP32 C firmware (channel hopping, NVS config, TDM) +- `v1/src/` - Python source (core, hardware, services, api) +- `v1/data/proof/` - Deterministic CSI proof bundles +- `.claude-flow/` - Claude Flow coordination state (committed for team sharing) +- `.claude/` - Claude Code settings, agents, memory (committed for team sharing) ## Project Architecture @@ -242,18 +242,18 @@ Active feature branch: `ruvsense-full-implementation` (PR #77) Before merging any PR, verify each item applies and is addressed: -1. **Rust tests pass** — `cargo test --workspace --no-default-features` (1,031+ passed, 0 failed) -2. **Python proof passes** — `python v1/data/proof/verify.py` (VERDICT: PASS) -3. **README.md** — Update platform tables, crate descriptions, hardware tables, feature summaries if scope changed -4. **CLAUDE.md** — Update crate table, ADR list, module tables, version if scope changed -5. **CHANGELOG.md** — Add entry under `[Unreleased]` with what was added/fixed/changed -6. **User guide** (`docs/user-guide.md`) — Update if new data sources, CLI flags, or setup steps were added -7. **ADR index** — Update ADR count in README docs table if a new ADR was created -8. **Witness bundle** — Regenerate if tests or proof hash changed: `bash scripts/generate-witness-bundle.sh` -9. **Docker Hub image** — Only rebuild if Dockerfile, dependencies, or runtime behavior changed -10. **Crate publishing** — Only needed if a crate is published to crates.io and its public API changed -11. **`.gitignore`** — Add any new build artifacts or binaries -12. 
**Security audit** — Run security review for new modules touching hardware/network boundaries +1. **Rust tests pass** - `cargo test --workspace --no-default-features` (1,031+ passed, 0 failed) +2. **Python proof passes** - `python v1/data/proof/verify.py` (VERDICT: PASS) +3. **README.md** - Update platform tables, crate descriptions, hardware tables, feature summaries if scope changed +4. **CLAUDE.md** - Update crate table, ADR list, module tables, version if scope changed +5. **CHANGELOG.md** - Add entry under `[Unreleased]` with what was added/fixed/changed +6. **User guide** (`docs/user-guide.md`) - Update if new data sources, CLI flags, or setup steps were added +7. **ADR index** - Update ADR count in README docs table if a new ADR was created +8. **Witness bundle** - Regenerate if tests or proof hash changed: `bash scripts/generate-witness-bundle.sh` +9. **Docker Hub image** - Only rebuild if Dockerfile, dependencies, or runtime behavior changed +10. **Crate publishing** - Only needed if a crate is published to crates.io and its public API changed +11. **`.gitignore`** - Add any new build artifacts or binaries +12. 
**Security audit** - Run security review for new modules touching hardware/network boundaries

## Build & Test

@@ -292,14 +292,14 @@ npm run lint

- MUST initialize the swarm using CLI tools when starting complex tasks
- MUST spawn concurrent agents using Claude Code's Task tool
-- Never use CLI tools alone for execution — Task tool agents do the actual work
+- Never use CLI tools alone for execution - Task tool agents do the actual work
- MUST call CLI tools AND Task tool in ONE message for complex work

### 3-Tier Model Routing (ADR-026)

| Tier | Handler | Latency | Cost | Use Cases |
|------|---------|---------|------|-----------|
-| **1** | Agent Booster (WASM) | <1ms | $0 | Simple transforms (var→const, add types) — Skip LLM |
+| **1** | Agent Booster (WASM) | <1ms | $0 | Simple transforms (var→const, add types) - Skip LLM |
| **2** | Haiku | ~500ms | $0.0002 | Simple tasks, low complexity (<30%) |
| **3** | Sonnet/Opus | 2-5s | $0.003-0.015 | Complex reasoning, architecture, security (>30%) |

@@ -323,8 +323,8 @@ npx @claude-flow/cli@latest swarm init --topology hierarchical --max-agents 8 --

- ALWAYS use `run_in_background: true` for all agent Task calls
- ALWAYS put ALL agent Task calls in ONE message for parallel execution
-- After spawning, STOP — do NOT add more tool calls or check status
-- Never poll TaskOutput or check swarm status — trust agents to return
+- After spawning, STOP - do NOT add more tool calls or check status
+- Never poll TaskOutput or check swarm status - trust agents to return
- When agent results arrive, review ALL results before proceeding

## V3 CLI Commands

@@ -388,7 +388,7 @@ npx @claude-flow/cli@latest memory retrieve --key "pattern-auth" --namespace pat

## Quick Setup

```bash
-claude mcp add claude-flow -- npx -y @claude-flow/cli@latest
+claude mcp add claude-flow -- npx -y @claude-flow/cli@latest
npx @claude-flow/cli@latest daemon start
npx @claude-flow/cli@latest doctor --fix
```
diff --git a/Makefile
index 
f58f01fa9..f9b20dbd4 100644
--- a/Makefile
+++ b/Makefile
@@ -37,15 +37,15 @@ check:
	@./install.sh --check-only

# ─── Verification ────────────────────────────────────────────
-# Trust Kill Switch -- one-command proof replay
+# Trust Kill Switch - one-command proof replay
verify:
	@./verify

-# Verbose mode -- show detailed feature statistics and Doppler spectrum
+# Verbose mode - show detailed feature statistics and Doppler spectrum
verify-verbose:
	@./verify --verbose

-# Full audit -- verify pipeline + scan codebase for mock/random patterns
+# Full audit - verify pipeline + scan codebase for mock/random patterns
verify-audit:
	@./verify --verbose --audit

@@ -57,7 +57,7 @@ build-wasm:
	cd rust-port/wifi-densepose-rs && wasm-pack build crates/wifi-densepose-wasm --target web --release

build-wasm-mat:
-	cd rust-port/wifi-densepose-rs && wasm-pack build crates/wifi-densepose-wasm --target web --release -- --features mat
+	cd rust-port/wifi-densepose-rs && wasm-pack build crates/wifi-densepose-wasm --target web --release -- --features mat

test-rust:
	cd rust-port/wifi-densepose-rs && cargo test --workspace
diff --git a/README.md
index 10860dd0f..d3061f7ee 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@

Instead of relying on cameras or cloud models, it observes whatever signals
exist in a space such as WiFi, radio waves across the spectrum, motion patterns,
vibration, sound, or other sensory inputs and builds an understanding of what is happening locally.

-Built on top of [RuVector](https://github.com/ruvnet/ruvector/), the project became widely known for its implementation of WiFi DensePose — a sensing technique first explored in academic research such as Carnegie Mellon University's *DensePose From WiFi* work. That research demonstrated that WiFi signals can be used to reconstruct human pose. 
+Built on top of [RuVector](https://github.com/ruvnet/ruvector/), the project became widely known for its implementation of WiFi DensePose - a sensing technique first explored in academic research such as Carnegie Mellon University's *DensePose From WiFi* work. That research demonstrated that WiFi signals can be used to reconstruct human pose. RuView extends that concept into a practical edge system. By analyzing Channel State Information (CSI) disturbances caused by human movement, RuView reconstructs body position, breathing rate, heart rate, and presence in real time using physics-based signal processing and machine learning. @@ -28,7 +28,7 @@ In practice this means ordinary environments gain a new kind of spatial awarenes ### Built for low-power edge applications -[Edge modules](#edge-intelligence-adr-041) are small programs that run directly on the ESP32 sensor — no internet needed, no cloud fees, instant response. +[Edge modules](#edge-intelligence-adr-041) are small programs that run directly on the ESP32 sensor - no internet needed, no cloud fees, instant response. [![Rust 1.85+](https://img.shields.io/badge/rust-1.85+-orange.svg)](https://www.rust-lang.org/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) @@ -48,14 +48,14 @@ In practice this means ordinary environments gain a new kind of spatial awarenes > | **Through-wall** | Fresnel zone geometry + multipath modeling | Up to 5m depth | ```bash -# 30 seconds to live sensing — no toolchain required +# 30 seconds to live sensing - no toolchain required docker pull ruvnet/wifi-densepose:latest docker run -p 3000:3000 ruvnet/wifi-densepose:latest # Open http://localhost:3000 ``` > [!NOTE] -> **CSI-capable hardware required.** Pose estimation, vital signs, and through-wall sensing rely on Channel State Information (CSI) — per-subcarrier amplitude and phase data that standard consumer WiFi does not expose. 
You need CSI-capable hardware (ESP32-S3 or a research NIC) for full functionality. Consumer WiFi laptops can only provide RSSI-based presence detection, which is significantly less capable. +> **CSI-capable hardware required.** Pose estimation, vital signs, and through-wall sensing rely on Channel State Information (CSI) - per-subcarrier amplitude and phase data that standard consumer WiFi does not expose. You need CSI-capable hardware (ESP32-S3 or a research NIC) for full functionality. Consumer WiFi laptops can only provide RSSI-based presence detection, which is significantly less capable. > **Hardware options** for live CSI capture: > @@ -75,25 +75,25 @@ docker run -p 3000:3000 ruvnet/wifi-densepose:latest |----------|-------------| | [User Guide](docs/user-guide.md) | Step-by-step guide: installation, first run, API usage, hardware setup, training | | [Build Guide](docs/build-guide.md) | Building from source (Rust and Python) | -| [Architecture Decisions](docs/adr/README.md) | 62 ADRs — why each technical choice was made, organized by domain (hardware, signal processing, ML, platform, infrastructure) | -| [Domain Models](docs/ddd/README.md) | 7 DDD models (RuvSense, Signal Processing, Training Pipeline, Hardware Platform, Sensing Server, WiFi-Mat, CHCI) — bounded contexts, aggregates, domain events, and ubiquitous language | -| [Desktop App](rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/README.md) | **WIP** — Tauri v2 desktop app for node management, OTA updates, WASM deployment, and mesh visualization | -| [Medical Examples](examples/medical/README.md) | Contactless blood pressure, heart rate, breathing rate via 60 GHz mmWave radar — $15 hardware, no wearable | +| [Architecture Decisions](docs/adr/README.md) | 62 ADRs - why each technical choice was made, organized by domain (hardware, signal processing, ML, platform, infrastructure) | +| [Domain Models](docs/ddd/README.md) | 7 DDD models (RuvSense, Signal Processing, Training Pipeline, Hardware 
Platform, Sensing Server, WiFi-Mat, CHCI) - bounded contexts, aggregates, domain events, and ubiquitous language | +| [Desktop App](rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/README.md) | **WIP** - Tauri v2 desktop app for node management, OTA updates, WASM deployment, and mesh visualization | +| [Medical Examples](examples/medical/README.md) | Contactless blood pressure, heart rate, breathing rate via 60 GHz mmWave radar - $15 hardware, no wearable | --- - WiFi DensePose — Live pose detection with setup guide + WiFi DensePose - Live pose detection with setup guide
- Real-time pose skeleton from WiFi CSI signals — no cameras, no wearables + Real-time pose skeleton from WiFi CSI signals - no cameras, no wearables

▶ Live Observatory Demo  |  ▶ Dual-Modal Pose Fusion Demo -> The [server](#-quick-start) is optional for visualization and aggregation — the ESP32 [runs independently](#esp32-s3-hardware-pipeline) for presence detection, vital signs, and fall alerts. +> The [server](#-quick-start) is optional for visualization and aggregation - the ESP32 [runs independently](#esp32-s3-hardware-pipeline) for presence detection, vital signs, and fall alerts. > > **Live ESP32 pipeline**: Connect an ESP32-S3 node → run the [sensing server](#sensing-server) → open the [pose fusion demo](https://ruvnet.github.io/RuView/pose-fusion.html) for real-time dual-modal pose estimation (webcam + WiFi CSI). See [ADR-059](docs/adr/ADR-059-live-esp32-csi-pipeline.md). @@ -102,31 +102,31 @@ docker run -p 3000:3000 ruvnet/wifi-densepose:latest ### Sensing -See people, breathing, and heartbeats through walls — using only WiFi signals already in the room. +See people, breathing, and heartbeats through walls - using only WiFi signals already in the room. 
| | Feature | What It Means | |---|---------|---------------| -| 🔒 | **Privacy-First** | Tracks human pose using only WiFi signals — no cameras, no video, no images stored | +| 🔒 | **Privacy-First** | Tracks human pose using only WiFi signals - no cameras, no video, no images stored | | 💓 | **Vital Signs** | Detects breathing rate (6-30 breaths/min) and heart rate (40-120 bpm) without any wearable | -| 👥 | **Multi-Person** | Tracks multiple people simultaneously, each with independent pose and vitals — no hard software limit (physics: ~3-5 per AP with 56 subcarriers, more with multi-AP) | -| 🧱 | **Through-Wall** | WiFi passes through walls, furniture, and debris — works where cameras cannot | +| 👥 | **Multi-Person** | Tracks multiple people simultaneously, each with independent pose and vitals - no hard software limit (physics: ~3-5 per AP with 56 subcarriers, more with multi-AP) | +| 🧱 | **Through-Wall** | WiFi passes through walls, furniture, and debris - works where cameras cannot | | 🚑 | **Disaster Response** | Detects trapped survivors through rubble and classifies injury severity (START triage) | | 📡 | **Multistatic Mesh** | 4-6 low-cost sensor nodes work together, combining 12+ overlapping signal paths for full 360-degree room coverage with sub-inch accuracy and no person mix-ups ([ADR-029](docs/adr/ADR-029-ruvsense-multistatic-sensing-mode.md)) | -| 🌐 | **Persistent Field Model** | The system learns the RF signature of each room — then subtracts the room to isolate human motion, detect drift over days, predict intent before movement starts, and flag spoofing attempts ([ADR-030](docs/adr/ADR-030-ruvsense-persistent-field-model.md)) | +| 🌐 | **Persistent Field Model** | The system learns the RF signature of each room - then subtracts the room to isolate human motion, detect drift over days, predict intent before movement starts, and flag spoofing attempts ([ADR-030](docs/adr/ADR-030-ruvsense-persistent-field-model.md)) | ### Intelligence -The system learns on 
its own and gets smarter over time — no hand-tuning, no labeled data required. +The system learns on its own and gets smarter over time - no hand-tuning, no labeled data required. | | Feature | What It Means | |---|---------|---------------| -| 🧠 | **Self-Learning** | Teaches itself from raw WiFi data — no labeled training sets, no cameras needed to bootstrap ([ADR-024](docs/adr/ADR-024-contrastive-csi-embedding-model.md)) | -| 🎯 | **AI Signal Processing** | Attention networks, graph algorithms, and smart compression replace hand-tuned thresholds — adapts to each room automatically ([RuVector](https://github.com/ruvnet/ruvector)) | -| 🌍 | **Works Everywhere** | Train once, deploy in any room — adversarial domain generalization strips environment bias so models transfer across rooms, buildings, and hardware ([ADR-027](docs/adr/ADR-027-cross-environment-domain-generalization.md)) | -| 👁️ | **Cross-Viewpoint Fusion** | AI combines what each sensor sees from its own angle — fills in blind spots and depth ambiguity that no single viewpoint can resolve on its own ([ADR-031](docs/adr/ADR-031-ruview-sensing-first-rf-mode.md)) | -| 🔮 | **Signal-Line Protocol** | A 6-stage processing pipeline transforms raw WiFi signals into structured body representations — from signal cleanup through graph-based spatial reasoning to final pose output ([ADR-033](docs/adr/ADR-033-crv-signal-line-sensing-integration.md)) | +| 🧠 | **Self-Learning** | Teaches itself from raw WiFi data - no labeled training sets, no cameras needed to bootstrap ([ADR-024](docs/adr/ADR-024-contrastive-csi-embedding-model.md)) | +| 🎯 | **AI Signal Processing** | Attention networks, graph algorithms, and smart compression replace hand-tuned thresholds - adapts to each room automatically ([RuVector](https://github.com/ruvnet/ruvector)) | +| 🌍 | **Works Everywhere** | Train once, deploy in any room - adversarial domain generalization strips environment bias so models transfer across rooms, buildings, and hardware 
([ADR-027](docs/adr/ADR-027-cross-environment-domain-generalization.md)) | +| 👁️ | **Cross-Viewpoint Fusion** | AI combines what each sensor sees from its own angle - fills in blind spots and depth ambiguity that no single viewpoint can resolve on its own ([ADR-031](docs/adr/ADR-031-ruview-sensing-first-rf-mode.md)) | +| 🔮 | **Signal-Line Protocol** | A 6-stage processing pipeline transforms raw WiFi signals into structured body representations - from signal cleanup through graph-based spatial reasoning to final pose output ([ADR-033](docs/adr/ADR-033-crv-signal-line-sensing-integration.md)) | | 🔒 | **QUIC Mesh Security** | All sensor-to-sensor communication is encrypted end-to-end with tamper detection, replay protection, and seamless reconnection if a node moves or drops offline ([ADR-032](docs/adr/ADR-032-multistatic-mesh-security-hardening.md)) | -| 🎯 | **Adaptive Classifier** | Records labeled CSI sessions, trains a 15-feature logistic regression model in pure Rust, and learns your room's unique signal characteristics — replaces hand-tuned thresholds with data-driven classification ([ADR-048](docs/adr/ADR-048-adaptive-csi-classifier.md)) | +| 🎯 | **Adaptive Classifier** | Records labeled CSI sessions, trains a 15-feature logistic regression model in pure Rust, and learns your room's unique signal characteristics - replaces hand-tuned thresholds with data-driven classification ([ADR-048](docs/adr/ADR-048-adaptive-csi-classifier.md)) | ### Performance & Deployment @@ -134,19 +134,19 @@ Fast enough for real-time use, small enough for edge devices, simple enough for | | Feature | What It Means | |---|---------|---------------| -| ⚡ | **Real-Time** | Analyzes WiFi signals in under 100 microseconds per frame — fast enough for live monitoring | +| ⚡ | **Real-Time** | Analyzes WiFi signals in under 100 microseconds per frame - fast enough for live monitoring | | 🦀 | **810x Faster** | Complete Rust rewrite: 54,000 frames/sec pipeline, multi-arch Docker image, 1,031+ 
tests | -| 🐳 | **One-Command Setup** | `docker pull ruvnet/wifi-densepose:latest` — live sensing in 30 seconds, no toolchain needed (amd64 + arm64 / Apple Silicon) | -| 📡 | **Fully Local** | Runs completely on a $9 ESP32 — no internet connection, no cloud account, no recurring fees. Detects presence, vital signs, and falls on-device with instant response | -| 📦 | **Portable Models** | Trained models package into a single `.rvf` file — runs on edge, cloud, or browser (WASM) | -| 🔭 | **Observatory Visualization** | Cinematic Three.js dashboard with 5 holographic panels — subcarrier manifold, vital signs oracle, presence heatmap, phase constellation, convergence engine — all driven by live or demo CSI data ([ADR-047](docs/adr/ADR-047-psychohistory-observatory-visualization.md)) | -| 📟 | **AMOLED Display** | ESP32-S3 boards with built-in AMOLED screens show real-time presence, vital signs, and room status directly on the sensor — no phone or PC needed ([ADR-045](docs/adr/ADR-045-amoled-display-support.md)) | +| 🐳 | **One-Command Setup** | `docker pull ruvnet/wifi-densepose:latest` - live sensing in 30 seconds, no toolchain needed (amd64 + arm64 / Apple Silicon) | +| 📡 | **Fully Local** | Runs completely on a $9 ESP32 - no internet connection, no cloud account, no recurring fees. 
Detects presence, vital signs, and falls on-device with instant response | +| 📦 | **Portable Models** | Trained models package into a single `.rvf` file - runs on edge, cloud, or browser (WASM) | +| 🔭 | **Observatory Visualization** | Cinematic Three.js dashboard with 5 holographic panels - subcarrier manifold, vital signs oracle, presence heatmap, phase constellation, convergence engine - all driven by live or demo CSI data ([ADR-047](docs/adr/ADR-047-psychohistory-observatory-visualization.md)) | +| 📟 | **AMOLED Display** | ESP32-S3 boards with built-in AMOLED screens show real-time presence, vital signs, and room status directly on the sensor - no phone or PC needed ([ADR-045](docs/adr/ADR-045-amoled-display-support.md)) | --- ## 🔬 How It Works -WiFi routers flood every room with radio waves. When a person moves — or even breathes — those waves scatter differently. WiFi DensePose reads that scattering pattern and reconstructs what happened: +WiFi routers flood every room with radio waves. When a person moves - or even breathes - those waves scatter differently. WiFi DensePose reads that scattering pattern and reconstructs what happened: ``` WiFi Router → radio waves pass through room → hit human body → scatter @@ -170,15 +170,15 @@ Neural Network: processed signals → 17 body keypoints + vital signs + room mod Output: real-time pose, breathing, heart rate, room fingerprint, drift alerts ``` -No training cameras required — the [Self-Learning system (ADR-024)](docs/adr/ADR-024-contrastive-csi-embedding-model.md) bootstraps from raw WiFi data alone. [MERIDIAN (ADR-027)](docs/adr/ADR-027-cross-environment-domain-generalization.md) ensures the model works in any room, not just the one it trained in. +No training cameras required - the [Self-Learning system (ADR-024)](docs/adr/ADR-024-contrastive-csi-embedding-model.md) bootstraps from raw WiFi data alone. 
[MERIDIAN (ADR-027)](docs/adr/ADR-027-cross-environment-domain-generalization.md) ensures the model works in any room, not just the one it trained in. --- ## 🏢 Use Cases & Applications -WiFi sensing works anywhere WiFi exists. No new hardware in most cases — just software on existing access points or a $8 ESP32 add-on. Because there are no cameras, deployments avoid privacy regulations (GDPR video, HIPAA imaging) by design. +WiFi sensing works anywhere WiFi exists. No new hardware in most cases - just software on existing access points or a $8 ESP32 add-on. Because there are no cameras, deployments avoid privacy regulations (GDPR video, HIPAA imaging) by design. -**Scaling:** Each AP distinguishes ~3-5 people (56 subcarriers). Multi-AP multiplies linearly — a 4-AP retail mesh covers ~15-20 occupants. No hard software limit; the practical ceiling is signal physics. +**Scaling:** Each AP distinguishes ~3-5 people (56 subcarriers). Multi-AP multiplies linearly - a 4-AP retail mesh covers ~15-20 occupants. No hard software limit; the practical ceiling is signal physics. | | Why WiFi sensing wins | Traditional alternative | |---|----------------------|----------------------| @@ -189,77 +189,77 @@ WiFi sensing works anywhere WiFi exists. No new hardware in most cases — just | 🔌 | **WiFi already deployed everywhere** | PIR/radar sensors require new wiring per room |
-🏥 Everyday — Healthcare, retail, office, hospitality (commodity WiFi) +🏥 Everyday - Healthcare, retail, office, hospitality (commodity WiFi) | Use Case | What It Does | Hardware | Key Metric | Edge Module | |----------|-------------|----------|------------|-------------| -| **Elderly care / assisted living** | Fall detection, nighttime activity monitoring, breathing rate during sleep — no wearable compliance needed | 1 ESP32-S3 per room ($8) | Fall alert <2s | [Sleep Apnea](docs/edge-modules/medical.md), [Gait Analysis](docs/edge-modules/medical.md) | +| **Elderly care / assisted living** | Fall detection, nighttime activity monitoring, breathing rate during sleep - no wearable compliance needed | 1 ESP32-S3 per room ($8) | Fall alert <2s | [Sleep Apnea](docs/edge-modules/medical.md), [Gait Analysis](docs/edge-modules/medical.md) | | **Hospital patient monitoring** | Continuous breathing + heart rate for non-critical beds without wired sensors; nurse alert on anomaly | 1-2 APs per ward | Breathing: 6-30 BPM | [Respiratory Distress](docs/edge-modules/medical.md), [Cardiac Arrhythmia](docs/edge-modules/medical.md) | | **Emergency room triage** | Automated occupancy count + wait-time estimation; detect patient distress (abnormal breathing) in waiting areas | Existing hospital WiFi | Occupancy accuracy >95% | [Queue Length](docs/edge-modules/retail.md), [Panic Motion](docs/edge-modules/security.md) | -| **Retail occupancy & flow** | Real-time foot traffic, dwell time by zone, queue length — no cameras, no opt-in, GDPR-friendly | Existing store WiFi + 1 ESP32 | Dwell resolution ~1m | [Customer Flow](docs/edge-modules/retail.md), [Dwell Heatmap](docs/edge-modules/retail.md) | +| **Retail occupancy & flow** | Real-time foot traffic, dwell time by zone, queue length - no cameras, no opt-in, GDPR-friendly | Existing store WiFi + 1 ESP32 | Dwell resolution ~1m | [Customer Flow](docs/edge-modules/retail.md), [Dwell Heatmap](docs/edge-modules/retail.md) | | **Office space 
utilization** | Which desks/rooms are actually occupied, meeting room no-shows, HVAC optimization based on real presence | Existing enterprise WiFi | Presence latency <1s | [Meeting Room](docs/edge-modules/building.md), [HVAC Presence](docs/edge-modules/building.md) | | **Hotel & hospitality** | Room occupancy without door sensors, minibar/bathroom usage patterns, energy savings on empty rooms | Existing hotel WiFi | 15-30% HVAC savings | [Energy Audit](docs/edge-modules/building.md), [Lighting Zones](docs/edge-modules/building.md) | -| **Restaurants & food service** | Table turnover tracking, kitchen staff presence, restroom occupancy displays — no cameras in dining areas | Existing WiFi | Queue wait ±30s | [Table Turnover](docs/edge-modules/retail.md), [Queue Length](docs/edge-modules/retail.md) | +| **Restaurants & food service** | Table turnover tracking, kitchen staff presence, restroom occupancy displays - no cameras in dining areas | Existing WiFi | Queue wait ±30s | [Table Turnover](docs/edge-modules/retail.md), [Queue Length](docs/edge-modules/retail.md) | | **Parking garages** | Pedestrian presence in stairwells and elevators where cameras have blind spots; security alert if someone lingers | Existing WiFi | Through-concrete walls | [Loitering](docs/edge-modules/security.md), [Elevator Count](docs/edge-modules/building.md) |
-🏟️ Specialized — Events, fitness, education, civic (CSI-capable hardware) +🏟️ Specialized - Events, fitness, education, civic (CSI-capable hardware) | Use Case | What It Does | Hardware | Key Metric | Edge Module | |----------|-------------|----------|------------|-------------| -| **Smart home automation** | Room-level presence triggers (lights, HVAC, music) that work through walls — no dead zones, no motion-sensor timeouts | 2-3 ESP32-S3 nodes ($24) | Through-wall range ~5m | [HVAC Presence](docs/edge-modules/building.md), [Lighting Zones](docs/edge-modules/building.md) | -| **Fitness & sports** | Rep counting, posture correction, breathing cadence during exercise — no wearable, no camera in locker rooms | 3+ ESP32-S3 mesh | Pose: 17 keypoints | [Breathing Sync](docs/edge-modules/exotic.md), [Gait Analysis](docs/edge-modules/medical.md) | -| **Childcare & schools** | Naptime breathing monitoring, playground headcount, restricted-area alerts — privacy-safe for minors | 2-4 ESP32-S3 per zone | Breathing: ±1 BPM | [Sleep Apnea](docs/edge-modules/medical.md), [Perimeter Breach](docs/edge-modules/security.md) | +| **Smart home automation** | Room-level presence triggers (lights, HVAC, music) that work through walls - no dead zones, no motion-sensor timeouts | 2-3 ESP32-S3 nodes ($24) | Through-wall range ~5m | [HVAC Presence](docs/edge-modules/building.md), [Lighting Zones](docs/edge-modules/building.md) | +| **Fitness & sports** | Rep counting, posture correction, breathing cadence during exercise - no wearable, no camera in locker rooms | 3+ ESP32-S3 mesh | Pose: 17 keypoints | [Breathing Sync](docs/edge-modules/exotic.md), [Gait Analysis](docs/edge-modules/medical.md) | +| **Childcare & schools** | Naptime breathing monitoring, playground headcount, restricted-area alerts - privacy-safe for minors | 2-4 ESP32-S3 per zone | Breathing: ±1 BPM | [Sleep Apnea](docs/edge-modules/medical.md), [Perimeter Breach](docs/edge-modules/security.md) | | **Event venues & 
concerts** | Crowd density mapping, crush-risk detection via breathing compression, emergency evacuation flow tracking | Multi-AP mesh (4-8 APs) | Density per m² | [Customer Flow](docs/edge-modules/retail.md), [Panic Motion](docs/edge-modules/security.md) | | **Stadiums & arenas** | Section-level occupancy for dynamic pricing, concession staffing, emergency egress flow modeling | Enterprise AP grid | 15-20 per AP mesh | [Dwell Heatmap](docs/edge-modules/retail.md), [Queue Length](docs/edge-modules/retail.md) | -| **Houses of worship** | Attendance counting without facial recognition — privacy-sensitive congregations, multi-room campus tracking | Existing WiFi | Zone-level accuracy | [Elevator Count](docs/edge-modules/building.md), [Energy Audit](docs/edge-modules/building.md) | -| **Warehouse & logistics** | Worker safety zones, forklift proximity alerts, occupancy in hazardous areas — works through shelving and pallets | Industrial AP mesh | Alert latency <500ms | [Forklift Proximity](docs/edge-modules/industrial.md), [Confined Space](docs/edge-modules/industrial.md) | +| **Houses of worship** | Attendance counting without facial recognition - privacy-sensitive congregations, multi-room campus tracking | Existing WiFi | Zone-level accuracy | [Elevator Count](docs/edge-modules/building.md), [Energy Audit](docs/edge-modules/building.md) | +| **Warehouse & logistics** | Worker safety zones, forklift proximity alerts, occupancy in hazardous areas - works through shelving and pallets | Industrial AP mesh | Alert latency <500ms | [Forklift Proximity](docs/edge-modules/industrial.md), [Confined Space](docs/edge-modules/industrial.md) | | **Civic infrastructure** | Public restroom occupancy (no cameras possible), subway platform crowding, shelter headcount during emergencies | Municipal WiFi + ESP32 | Real-time headcount | [Customer Flow](docs/edge-modules/retail.md), [Loitering](docs/edge-modules/security.md) | -| **Museums & galleries** | Visitor flow heatmaps, exhibit 
dwell time, crowd bottleneck alerts — no cameras near artwork (flash/theft risk) | Existing WiFi | Zone dwell ±5s | [Dwell Heatmap](docs/edge-modules/retail.md), [Shelf Engagement](docs/edge-modules/retail.md) | +| **Museums & galleries** | Visitor flow heatmaps, exhibit dwell time, crowd bottleneck alerts - no cameras near artwork (flash/theft risk) | Existing WiFi | Zone dwell ±5s | [Dwell Heatmap](docs/edge-modules/retail.md), [Shelf Engagement](docs/edge-modules/retail.md) |
-🤖 Robotics & Industrial — Autonomous systems, manufacturing, android spatial awareness +🤖 Robotics & Industrial - Autonomous systems, manufacturing, android spatial awareness -WiFi sensing gives robots and autonomous systems a spatial awareness layer that works where LIDAR and cameras fail — through dust, smoke, fog, and around corners. The CSI signal field acts as a "sixth sense" for detecting humans in the environment without requiring line-of-sight. +WiFi sensing gives robots and autonomous systems a spatial awareness layer that works where LIDAR and cameras fail - through dust, smoke, fog, and around corners. The CSI signal field acts as a "sixth sense" for detecting humans in the environment without requiring line-of-sight. | Use Case | What It Does | Hardware | Key Metric | Edge Module | |----------|-------------|----------|------------|-------------| -| **Cobot safety zones** | Detect human presence near collaborative robots — auto-slow or stop before contact, even behind obstructions | 2-3 ESP32-S3 per cell | Presence latency <100ms | [Forklift Proximity](docs/edge-modules/industrial.md), [Perimeter Breach](docs/edge-modules/security.md) | -| **Warehouse AMR navigation** | Autonomous mobile robots sense humans around blind corners, through shelving racks — no LIDAR occlusion | ESP32 mesh along aisles | Through-shelf detection | [Forklift Proximity](docs/edge-modules/industrial.md), [Loitering](docs/edge-modules/security.md) | -| **Android / humanoid spatial awareness** | Ambient human pose sensing for social robots — detect gestures, approach direction, and personal space without cameras always on | Onboard ESP32-S3 module | 17-keypoint pose | [Gesture Language](docs/edge-modules/exotic.md), [Emotion Detection](docs/edge-modules/exotic.md) | -| **Manufacturing line monitoring** | Worker presence at each station, ergonomic posture alerts, headcount for shift compliance — works through equipment | Industrial AP per zone | Pose + breathing | [Confined 
Space](docs/edge-modules/industrial.md), [Gait Analysis](docs/edge-modules/medical.md) | +| **Cobot safety zones** | Detect human presence near collaborative robots - auto-slow or stop before contact, even behind obstructions | 2-3 ESP32-S3 per cell | Presence latency <100ms | [Forklift Proximity](docs/edge-modules/industrial.md), [Perimeter Breach](docs/edge-modules/security.md) | +| **Warehouse AMR navigation** | Autonomous mobile robots sense humans around blind corners, through shelving racks - no LIDAR occlusion | ESP32 mesh along aisles | Through-shelf detection | [Forklift Proximity](docs/edge-modules/industrial.md), [Loitering](docs/edge-modules/security.md) | +| **Android / humanoid spatial awareness** | Ambient human pose sensing for social robots - detect gestures, approach direction, and personal space without cameras always on | Onboard ESP32-S3 module | 17-keypoint pose | [Gesture Language](docs/edge-modules/exotic.md), [Emotion Detection](docs/edge-modules/exotic.md) | +| **Manufacturing line monitoring** | Worker presence at each station, ergonomic posture alerts, headcount for shift compliance - works through equipment | Industrial AP per zone | Pose + breathing | [Confined Space](docs/edge-modules/industrial.md), [Gait Analysis](docs/edge-modules/medical.md) | | **Construction site safety** | Exclusion zone enforcement around heavy machinery, fall detection from scaffolding, personnel headcount | Ruggedized ESP32 mesh | Alert <2s, through-dust | [Panic Motion](docs/edge-modules/security.md), [Structural Vibration](docs/edge-modules/industrial.md) | | **Agricultural robotics** | Detect farm workers near autonomous harvesters in dusty/foggy field conditions where cameras are unreliable | Weatherproof ESP32 nodes | Range ~10m open field | [Forklift Proximity](docs/edge-modules/industrial.md), [Rain Detection](docs/edge-modules/exotic.md) | -| **Drone landing zones** | Verify landing area is clear of humans — WiFi sensing works in rain, dust, and low 
light where downward cameras fail | Ground ESP32 nodes | Presence: >95% accuracy | [Perimeter Breach](docs/edge-modules/security.md), [Tailgating](docs/edge-modules/security.md) | -| **Clean room monitoring** | Personnel tracking without cameras (particle contamination risk from camera fans) — gown compliance via pose | Existing cleanroom WiFi | No particulate emission | [Clean Room](docs/edge-modules/industrial.md), [Livestock Monitor](docs/edge-modules/industrial.md) | +| **Drone landing zones** | Verify landing area is clear of humans - WiFi sensing works in rain, dust, and low light where downward cameras fail | Ground ESP32 nodes | Presence: >95% accuracy | [Perimeter Breach](docs/edge-modules/security.md), [Tailgating](docs/edge-modules/security.md) | +| **Clean room monitoring** | Personnel tracking without cameras (particle contamination risk from camera fans) - gown compliance via pose | Existing cleanroom WiFi | No particulate emission | [Clean Room](docs/edge-modules/industrial.md), [Livestock Monitor](docs/edge-modules/industrial.md) |
-🔥 Extreme — Through-wall, disaster, defense, underground +🔥 Extreme - Through-wall, disaster, defense, underground -These scenarios exploit WiFi's ability to penetrate solid materials — concrete, rubble, earth — where no optical or infrared sensor can reach. The WiFi-Mat disaster module (ADR-001) is specifically designed for this tier. +These scenarios exploit WiFi's ability to penetrate solid materials - concrete, rubble, earth - where no optical or infrared sensor can reach. The WiFi-Mat disaster module (ADR-001) is specifically designed for this tier. | Use Case | What It Does | Hardware | Key Metric | Edge Module | |----------|-------------|----------|------------|-------------| | **Search & rescue (WiFi-Mat)** | Detect survivors through rubble/debris via breathing signature, START triage color classification, 3D localization | Portable ESP32 mesh + laptop | Through 30cm concrete | [Respiratory Distress](docs/edge-modules/medical.md), [Seizure Detection](docs/edge-modules/medical.md) | | **Firefighting** | Locate occupants through smoke and walls before entry; breathing detection confirms life signs remotely | Portable mesh on truck | Works in zero visibility | [Sleep Apnea](docs/edge-modules/medical.md), [Panic Motion](docs/edge-modules/security.md) | -| **Prison & secure facilities** | Cell occupancy verification, distress detection (abnormal vitals), perimeter sensing — no camera blind spots | Dedicated AP infrastructure | 24/7 vital signs | [Cardiac Arrhythmia](docs/edge-modules/medical.md), [Loitering](docs/edge-modules/security.md) | +| **Prison & secure facilities** | Cell occupancy verification, distress detection (abnormal vitals), perimeter sensing - no camera blind spots | Dedicated AP infrastructure | 24/7 vital signs | [Cardiac Arrhythmia](docs/edge-modules/medical.md), [Loitering](docs/edge-modules/security.md) | | **Military / tactical** | Through-wall personnel detection, room clearing confirmation, hostage vital signs at standoff distance | 
Directional WiFi + custom FW | Range: 5m through wall | [Perimeter Breach](docs/edge-modules/security.md), [Weapon Detection](docs/edge-modules/security.md) | -| **Border & perimeter security** | Detect human presence in tunnels, behind fences, in vehicles — passive sensing, no active illumination to reveal position | Concealed ESP32 mesh | Passive / covert | [Perimeter Breach](docs/edge-modules/security.md), [Tailgating](docs/edge-modules/security.md) | +| **Border & perimeter security** | Detect human presence in tunnels, behind fences, in vehicles - passive sensing, no active illumination to reveal position | Concealed ESP32 mesh | Passive / covert | [Perimeter Breach](docs/edge-modules/security.md), [Tailgating](docs/edge-modules/security.md) | | **Mining & underground** | Worker presence in tunnels where GPS/cameras fail, breathing detection after collapse, headcount at safety points | Ruggedized ESP32 mesh | Through rock/earth | [Confined Space](docs/edge-modules/industrial.md), [Respiratory Distress](docs/edge-modules/medical.md) | | **Maritime & naval** | Below-deck personnel tracking through steel bulkheads (limited range, requires tuning), man-overboard detection | Ship WiFi + ESP32 | Through 1-2 bulkheads | [Structural Vibration](docs/edge-modules/industrial.md), [Panic Motion](docs/edge-modules/security.md) | -| **Wildlife research** | Non-invasive animal activity monitoring in enclosures or dens — no light pollution, no visual disturbance | Weatherproof ESP32 nodes | Zero light emission | [Livestock Monitor](docs/edge-modules/industrial.md), [Dream Stage](docs/edge-modules/exotic.md) | +| **Wildlife research** | Non-invasive animal activity monitoring in enclosures or dens - no light pollution, no visual disturbance | Weatherproof ESP32 nodes | Zero light emission | [Livestock Monitor](docs/edge-modules/industrial.md), [Dream Stage](docs/edge-modules/exotic.md) |
### Edge Intelligence ([ADR-041](docs/adr/ADR-041-wasm-module-collection.md)) -Small programs that run directly on the ESP32 sensor — no internet needed, no cloud fees, instant response. Each module is a tiny WASM file (5-30 KB) that you upload to the device over-the-air. It reads WiFi signal data and makes decisions locally in under 10 ms. [ADR-041](docs/adr/ADR-041-wasm-module-collection.md) defines 60 modules across 13 categories — all 60 are implemented with 609 tests passing. +Small programs that run directly on the ESP32 sensor - no internet needed, no cloud fees, instant response. Each module is a tiny WASM file (5-30 KB) that you upload to the device over-the-air. It reads WiFi signal data and makes decisions locally in under 10 ms. [ADR-041](docs/adr/ADR-041-wasm-module-collection.md) defines 60 modules across 13 categories - all 60 are implemented with 609 tests passing. | | Category | Examples | |---|----------|---------| @@ -269,18 +269,18 @@ Small programs that run directly on the ESP32 sensor — no internet needed, no | 🛒 | [**Retail & Hospitality**](docs/edge-modules/retail.md) | Queue length, dwell heatmaps, customer flow, table turnover | | 🏭 | [**Industrial**](docs/edge-modules/industrial.md) | Forklift proximity, confined space monitoring, structural vibration | | 🔮 | [**Exotic & Research**](docs/edge-modules/exotic.md) | Sleep staging, emotion detection, sign language, breathing sync | -| 📡 | [**Signal Intelligence**](docs/edge-modules/signal-intelligence.md) | Cleans and sharpens raw WiFi signals — focuses on important regions, filters noise, fills in missing data, and tracks which person is which | -| 🧠 | [**Adaptive Learning**](docs/edge-modules/adaptive-learning.md) | The sensor learns new gestures and patterns on its own over time — no cloud needed, remembers what it learned even after updates | +| 📡 | [**Signal Intelligence**](docs/edge-modules/signal-intelligence.md) | Cleans and sharpens raw WiFi signals - focuses on important regions, 
filters noise, fills in missing data, and tracks which person is which | +| 🧠 | [**Adaptive Learning**](docs/edge-modules/adaptive-learning.md) | The sensor learns new gestures and patterns on its own over time - no cloud needed, remembers what it learned even after updates | | 🗺️ | [**Spatial Reasoning**](docs/edge-modules/spatial-temporal.md) | Figures out where people are in a room, which zones matter most, and tracks movement across areas using graph-based spatial logic | | ⏱️ | [**Temporal Analysis**](docs/edge-modules/spatial-temporal.md) | Learns daily routines, detects when patterns break (someone didn't get up), and verifies safety rules are being followed over time | | 🛡️ | [**AI Security**](docs/edge-modules/ai-security.md) | Detects signal replay attacks, WiFi jamming, injection attempts, and flags abnormal behavior that could indicate tampering | | ⚛️ | [**Quantum-Inspired**](docs/edge-modules/autonomous.md) | Uses quantum-inspired math to map room-wide signal coherence and search for optimal sensor configurations | -| 🤖 | [**Autonomous & Exotic**](docs/edge-modules/autonomous.md) | Self-managing sensor mesh — auto-heals dropped nodes, plans its own actions, and explores experimental signal representations | +| 🤖 | [**Autonomous & Exotic**](docs/edge-modules/autonomous.md) | Self-managing sensor mesh - auto-heals dropped nodes, plans its own actions, and explores experimental signal representations | All implemented modules are `no_std` Rust, share a [common utility library](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/vendor_common.rs), and talk to the host through a 12-function API. Full documentation: [**Edge Modules Guide**](docs/edge-modules/README.md). See the [complete implemented module list](#edge-module-list) below.
-🧩 Edge Intelligence — All 65 Modules Implemented (ADR-041 complete) +🧩 Edge Intelligence - All 60 Modules Implemented (ADR-041 complete) All 60 modules are implemented, tested (609 tests passing), and ready to deploy. They compile to `wasm32-unknown-unknown`, run on ESP32-S3 via WASM3, and share a [common utility library](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/vendor_common.rs). Source: [`crates/wifi-densepose-wasm-edge/src/`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/) @@ -298,27 +298,27 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. **Vendor-integrated modules** (24 modules, ADR-041 Category 7): -**📡 Signal Intelligence** — Real-time CSI analysis and feature extraction +**📡 Signal Intelligence** - Real-time CSI analysis and feature extraction | Module | File | What It Does | Budget | |--------|------|-------------|--------| -| Flash Attention | [`sig_flash_attention.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sig_flash_attention.rs) | Tiled attention over 8 subcarrier groups — finds spatial focus regions and entropy | S (<5ms) | +| Flash Attention | [`sig_flash_attention.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sig_flash_attention.rs) | Tiled attention over 8 subcarrier groups - finds spatial focus regions and entropy | S (<5ms) | | Coherence Gate | [`sig_coherence_gate.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sig_coherence_gate.rs) | Z-score phasor gating with hysteresis: Accept / PredictOnly / Reject / Recalibrate | L (<2ms) | | Temporal Compress | [`sig_temporal_compress.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sig_temporal_compress.rs) | 3-tier adaptive quantization (8-bit hot / 5-bit warm / 3-bit cold) | L (<2ms) | | Sparse Recovery | [`sig_sparse_recovery.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sig_sparse_recovery.rs) | ISTA L1 
reconstruction for dropped subcarriers | H (<10ms) | | Person Match | [`sig_mincut_person_match.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sig_mincut_person_match.rs) | Hungarian-lite bipartite assignment for multi-person tracking | S (<5ms) | | Optimal Transport | [`sig_optimal_transport.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sig_optimal_transport.rs) | Sliced Wasserstein-1 distance with 4 projections | L (<2ms) | -**🧠 Adaptive Learning** — On-device learning without cloud connectivity +**🧠 Adaptive Learning** - On-device learning without cloud connectivity | Module | File | What It Does | Budget | |--------|------|-------------|--------| -| DTW Gesture Learn | [`lrn_dtw_gesture_learn.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/lrn_dtw_gesture_learn.rs) | User-teachable gesture recognition — 3-rehearsal protocol, 16 templates | S (<5ms) | +| DTW Gesture Learn | [`lrn_dtw_gesture_learn.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/lrn_dtw_gesture_learn.rs) | User-teachable gesture recognition - 3-rehearsal protocol, 16 templates | S (<5ms) | | Anomaly Attractor | [`lrn_anomaly_attractor.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/lrn_anomaly_attractor.rs) | 4D dynamical system attractor classification with Lyapunov exponents | H (<10ms) | | Meta Adapt | [`lrn_meta_adapt.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/lrn_meta_adapt.rs) | Hill-climbing self-optimization with safety rollback | L (<2ms) | -| EWC Lifelong | [`lrn_ewc_lifelong.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/lrn_ewc_lifelong.rs) | Elastic Weight Consolidation — remembers past tasks while learning new ones | S (<5ms) | +| EWC Lifelong | [`lrn_ewc_lifelong.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/lrn_ewc_lifelong.rs) | Elastic Weight Consolidation - remembers past tasks while learning new ones 
| S (<5ms) | -**🗺️ Spatial Reasoning** — Location, proximity, and influence mapping +**🗺️ Spatial Reasoning** - Location, proximity, and influence mapping | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -326,7 +326,7 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. | Micro HNSW | [`spt_micro_hnsw.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/spt_micro_hnsw.rs) | 64-vector navigable small-world graph for nearest-neighbor search | S (<5ms) | | Spiking Tracker | [`spt_spiking_tracker.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/spt_spiking_tracker.rs) | 32 LIF neurons + 4 output zone neurons with STDP learning | S (<5ms) | -**⏱️ Temporal Analysis** — Activity patterns, logic verification, autonomous planning +**⏱️ Temporal Analysis** - Activity patterns, logic verification, autonomous planning | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -334,35 +334,35 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. 
| Temporal Logic Guard | [`tmp_temporal_logic_guard.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/tmp_temporal_logic_guard.rs) | LTL formula verification on CSI event streams | S (<5ms) | | GOAP Autonomy | [`tmp_goap_autonomy.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/tmp_goap_autonomy.rs) | Goal-Oriented Action Planning for autonomous module management | S (<5ms) | -**🛡️ AI Security** — Tamper detection and behavioral anomaly profiling +**🛡️ AI Security** - Tamper detection and behavioral anomaly profiling | Module | File | What It Does | Budget | |--------|------|-------------|--------| | Prompt Shield | [`ais_prompt_shield.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/ais_prompt_shield.rs) | FNV-1a replay detection, injection detection (10x amplitude), jamming (SNR) | L (<2ms) | | Behavioral Profiler | [`ais_behavioral_profiler.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/ais_behavioral_profiler.rs) | 6D behavioral profile with Mahalanobis anomaly scoring | S (<5ms) | -**⚛️ Quantum-Inspired** — Quantum computing metaphors applied to CSI analysis +**⚛️ Quantum-Inspired** - Quantum computing metaphors applied to CSI analysis | Module | File | What It Does | Budget | |--------|------|-------------|--------| | Quantum Coherence | [`qnt_quantum_coherence.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/qnt_quantum_coherence.rs) | Bloch sphere mapping, Von Neumann entropy, decoherence detection | S (<5ms) | | Interference Search | [`qnt_interference_search.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/qnt_interference_search.rs) | 16 room-state hypotheses with Grover-inspired oracle + diffusion | S (<5ms) | -**🤖 Autonomous Systems** — Self-governing and self-healing behaviors +**🤖 Autonomous Systems** - Self-governing and self-healing behaviors | Module | File | What It Does | Budget | |--------|------|-------------|--------| | 
Psycho-Symbolic | [`aut_psycho_symbolic.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/aut_psycho_symbolic.rs) | 16-rule forward-chaining knowledge base with contradiction detection | S (<5ms) | | Self-Healing Mesh | [`aut_self_healing_mesh.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/aut_self_healing_mesh.rs) | 8-node mesh with health tracking, degradation/recovery, coverage healing | S (<5ms) | -**🔮 Exotic (Vendor)** — Novel mathematical models for CSI interpretation +**🔮 Exotic (Vendor)** - Novel mathematical models for CSI interpretation | Module | File | What It Does | Budget | |--------|------|-------------|--------| | Time Crystal | [`exo_time_crystal.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/exo_time_crystal.rs) | Autocorrelation subharmonic detection in 256-frame history | S (<5ms) | | Hyperbolic Space | [`exo_hyperbolic_space.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/exo_hyperbolic_space.rs) | Poincare ball embedding with 32 reference locations, hyperbolic distance | S (<5ms) | -**🏥 Medical & Health** (Category 1) — Contactless health monitoring +**🏥 Medical & Health** (Category 1) - Contactless health monitoring | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -372,7 +372,7 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. 
| Gait Analysis | [`med_gait_analysis.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/med_gait_analysis.rs) | Tracks walking patterns and detects changes | S (<5ms) | | Seizure Detection | [`med_seizure_detect.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/med_seizure_detect.rs) | 6-state machine for tonic-clonic seizure recognition | S (<5ms) | -**🔐 Security & Safety** (Category 2) — Perimeter and threat detection +**🔐 Security & Safety** (Category 2) - Perimeter and threat detection | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -382,7 +382,7 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. | Loitering | [`sec_loitering.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sec_loitering.rs) | Alerts when someone lingers too long in a zone | S (<5ms) | | Panic Motion | [`sec_panic_motion.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/sec_panic_motion.rs) | Detects fleeing, struggling, or panic movement | S (<5ms) | -**🏢 Smart Building** (Category 3) — Automation and energy efficiency +**🏢 Smart Building** (Category 3) - Automation and energy efficiency | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -392,7 +392,7 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. 
| Meeting Room | [`bld_meeting_room.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/bld_meeting_room.rs) | Tracks meeting lifecycle: start, headcount, end, availability | S (<5ms) | | Energy Audit | [`bld_energy_audit.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/bld_energy_audit.rs) | Tracks after-hours usage and room utilization rates | S (<5ms) | -**🛒 Retail & Hospitality** (Category 4) — Customer insights without cameras +**🛒 Retail & Hospitality** (Category 4) - Customer insights without cameras | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -402,7 +402,7 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. | Table Turnover | [`ret_table_turnover.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/ret_table_turnover.rs) | Restaurant table lifecycle: seated, dining, vacated | S (<5ms) | | Shelf Engagement | [`ret_shelf_engagement.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/ret_shelf_engagement.rs) | Detects browsing, considering, and reaching for products | S (<5ms) | -**🏭 Industrial & Specialized** (Category 5) — Safety and compliance +**🏭 Industrial & Specialized** (Category 5) - Safety and compliance | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -412,7 +412,7 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. 
| Livestock Monitor | [`ind_livestock_monitor.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/ind_livestock_monitor.rs) | Animal presence, stillness, and escape alerts | S (<5ms) | | Structural Vibration | [`ind_structural_vibration.rs`](rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/ind_structural_vibration.rs) | Seismic events, mechanical resonance, structural drift | S (<5ms) | -**🔮 Exotic & Research** (Category 6) — Experimental sensing applications +**🔮 Exotic & Research** (Category 6) - Experimental sensing applications | Module | File | What It Does | Budget | |--------|------|-------------|--------| @@ -430,13 +430,13 @@ All 60 modules are implemented, tested (609 tests passing), and ready to deploy. ---
-🧠 Self-Learning WiFi AI (ADR-024) — Adaptive recognition, self-optimization, and intelligent anomaly detection +🧠 Self-Learning WiFi AI (ADR-024) - Adaptive recognition, self-optimization, and intelligent anomaly detection -Every WiFi signal that passes through a room creates a unique fingerprint of that space. WiFi-DensePose already reads these fingerprints to track people, but until now it threw away the internal "understanding" after each reading. The Self-Learning WiFi AI captures and preserves that understanding as compact, reusable vectors — and continuously optimizes itself for each new environment. +Every WiFi signal that passes through a room creates a unique fingerprint of that space. WiFi-DensePose already reads these fingerprints to track people, but until now it threw away the internal "understanding" after each reading. The Self-Learning WiFi AI captures and preserves that understanding as compact, reusable vectors - and continuously optimizes itself for each new environment. 
**What it does in plain terms:** - Turns any WiFi signal into a 128-number "fingerprint" that uniquely describes what's happening in a room -- Learns entirely on its own from raw WiFi data — no cameras, no labeling, no human supervision needed +- Learns entirely on its own from raw WiFi data - no cameras, no labeling, no human supervision needed - Recognizes rooms, detects intruders, identifies people, and classifies activities using only WiFi - Runs on an $8 ESP32 chip (the entire model fits in 55 KB of memory) - Produces both body pose tracking AND environment fingerprints in a single computation @@ -445,11 +445,11 @@ Every WiFi signal that passes through a room creates a unique fingerprint of tha | What | How it works | Why it matters | |------|-------------|----------------| -| **Self-supervised learning** | The model watches WiFi signals and teaches itself what "similar" and "different" look like, without any human-labeled data | Deploy anywhere — just plug in a WiFi sensor and wait 10 minutes | +| **Self-supervised learning** | The model watches WiFi signals and teaches itself what "similar" and "different" look like, without any human-labeled data | Deploy anywhere - just plug in a WiFi sensor and wait 10 minutes | | **Room identification** | Each room produces a distinct WiFi fingerprint pattern | Know which room someone is in without GPS or beacons | | **Anomaly detection** | An unexpected person or event creates a fingerprint that doesn't match anything seen before | Automatic intrusion and fall detection as a free byproduct | | **Person re-identification** | Each person disturbs WiFi in a slightly different way, creating a personal signature | Track individuals across sessions without cameras | -| **Environment adaptation** | MicroLoRA adapters (1,792 parameters per room) fine-tune the model for each new space | Adapts to a new room with minimal data — 93% less than retraining from scratch | +| **Environment adaptation** | MicroLoRA adapters (1,792 
parameters per room) fine-tune the model for each new space | Adapts to a new room with minimal data - 93% less than retraining from scratch |
| **Memory preservation** | EWC++ regularization remembers what was learned during pretraining | Switching to a new task doesn't erase prior knowledge |
| **Hard-negative mining** | Training focuses on the most confusing examples to learn faster | Better accuracy with the same amount of training data |

@@ -465,16 +465,16 @@ WiFi Signal [56 channels] → Transformer + Graph Neural Network

```bash
# Step 1: Learn from raw WiFi data (no labels needed)
-cargo run -p wifi-densepose-sensing-server -- --pretrain --dataset data/csi/ --pretrain-epochs 50
+cargo run -p wifi-densepose-sensing-server -- --pretrain --dataset data/csi/ --pretrain-epochs 50

# Step 2: Fine-tune with pose labels for full capability
-cargo run -p wifi-densepose-sensing-server -- --train --dataset data/mmfi/ --epochs 100 --save-rvf model.rvf
+cargo run -p wifi-densepose-sensing-server -- --train --dataset data/mmfi/ --epochs 100 --save-rvf model.rvf

-# Step 3: Use the model — extract fingerprints from live WiFi
-cargo run -p wifi-densepose-sensing-server -- --model model.rvf --embed
+# Step 3: Use the model - extract fingerprints from live WiFi
+cargo run -p wifi-densepose-sensing-server -- --model model.rvf --embed

-# Step 4: Search — find similar environments or detect anomalies
-cargo run -p wifi-densepose-sensing-server -- --model model.rvf --build-index env
+# Step 4: Search - find similar environments or detect anomalies
+cargo run -p wifi-densepose-sensing-server -- --model model.rvf --build-index env
```

**Training Modes**

@@ -503,7 +503,7 @@ cargo run -p wifi-densepose-sensing-server -- --model model.rvf --build-index en

| Per-room MicroLoRA adapter | ~1,800 | 2 KB |
| **Total** | **~55,000** | **55 KB** (of 520 KB available) |

-The self-learning system builds on the [AI Backbone (RuVector)](#ai-backbone-ruvector) signal-processing layer — attention, graph
algorithms, and compression — adding contrastive learning on top. +The self-learning system builds on the [AI Backbone (RuVector)](#ai-backbone-ruvector) signal-processing layer - attention, graph algorithms, and compression - adding contrastive learning on top. See [`docs/adr/ADR-024-contrastive-csi-embedding-model.md`](docs/adr/ADR-024-contrastive-csi-embedding-model.md) for full architectural details. @@ -514,7 +514,7 @@ See [`docs/adr/ADR-024-contrastive-csi-embedding-model.md`](docs/adr/ADR-024-con ## 📦 Installation
-Guided Installer — Interactive hardware detection and profile selection +Guided Installer - Interactive hardware detection and profile selection ```bash ./install.sh @@ -544,13 +544,13 @@ The installer walks through 7 steps: system detection, toolchain check, WiFi har
-From Source — Rust (primary) or Python +From Source - Rust (primary) or Python ```bash git clone https://github.com/ruvnet/RuView.git cd RuView -# Rust (primary — 810x faster) +# Rust (primary - 810x faster) cd rust-port/wifi-densepose-rs cargo build --release cargo test --workspace @@ -568,10 +568,10 @@ pip install wifi-densepose[all] # All optional deps
-Docker — Pre-built images, no toolchain needed +Docker - Pre-built images, no toolchain needed ```bash -# Rust sensing server (132 MB — recommended) +# Rust sensing server (132 MB - recommended) docker pull ruvnet/wifi-densepose:latest docker run -p 3000:3000 -p 3001:3001 -p 5005:5005/udp ruvnet/wifi-densepose:latest @@ -596,18 +596,18 @@ docker run --rm -v $(pwd):/out ruvnet/wifi-densepose:latest --export-rvf /out/mo
System Requirements -- **Rust**: 1.70+ (primary runtime — install via [rustup](https://rustup.rs/)) +- **Rust**: 1.70+ (primary runtime - install via [rustup](https://rustup.rs/)) - **Python**: 3.8+ (for verification and legacy v1 API) - **OS**: Linux (Ubuntu 18.04+), macOS (10.15+), Windows 10+ - **Memory**: Minimum 4GB RAM, Recommended 8GB+ - **Storage**: 2GB free space for models and data -- **Network**: WiFi interface with CSI capability (optional — installer detects what you have) +- **Network**: WiFi interface with CSI capability (optional - installer detects what you have) - **GPU**: Optional (NVIDIA CUDA or Apple Metal)
-Rust Crates — Individual crates on crates.io +Rust Crates - Individual crates on crates.io The Rust workspace consists of 15 crates, all published to [crates.io](https://crates.io/): @@ -626,25 +626,25 @@ cargo add wifi-densepose-ruvector # RuVector v2.0.4 integration layer (ADR-017 | Crate | Description | RuVector | crates.io | |-------|-------------|----------|-----------| -| [`wifi-densepose-core`](https://crates.io/crates/wifi-densepose-core) | Foundation types, traits, and utilities | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-core.svg)](https://crates.io/crates/wifi-densepose-core) | +| [`wifi-densepose-core`](https://crates.io/crates/wifi-densepose-core) | Foundation types, traits, and utilities | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-core.svg)](https://crates.io/crates/wifi-densepose-core) | | [`wifi-densepose-signal`](https://crates.io/crates/wifi-densepose-signal) | SOTA CSI signal processing (SpotFi, FarSense, Widar 3.0) | `mincut`, `attn-mincut`, `attention`, `solver` | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-signal.svg)](https://crates.io/crates/wifi-densepose-signal) | -| [`wifi-densepose-nn`](https://crates.io/crates/wifi-densepose-nn) | Multi-backend inference (ONNX, PyTorch, Candle) | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-nn.svg)](https://crates.io/crates/wifi-densepose-nn) | +| [`wifi-densepose-nn`](https://crates.io/crates/wifi-densepose-nn) | Multi-backend inference (ONNX, PyTorch, Candle) | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-nn.svg)](https://crates.io/crates/wifi-densepose-nn) | | [`wifi-densepose-train`](https://crates.io/crates/wifi-densepose-train) | Training pipeline with MM-Fi dataset (NeurIPS 2023) | **All 5** | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-train.svg)](https://crates.io/crates/wifi-densepose-train) | | [`wifi-densepose-mat`](https://crates.io/crates/wifi-densepose-mat) | Mass 
Casualty Assessment Tool (disaster survivor detection) | `solver`, `temporal-tensor` | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-mat.svg)](https://crates.io/crates/wifi-densepose-mat) | -| [`wifi-densepose-ruvector`](https://crates.io/crates/wifi-densepose-ruvector) | RuVector v2.0.4 integration layer — 7 signal+MAT integration points (ADR-017) | **All 5** | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-ruvector.svg)](https://crates.io/crates/wifi-densepose-ruvector) | -| [`wifi-densepose-vitals`](https://crates.io/crates/wifi-densepose-vitals) | Vital signs: breathing (6-30 BPM), heart rate (40-120 BPM) | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-vitals.svg)](https://crates.io/crates/wifi-densepose-vitals) | -| [`wifi-densepose-hardware`](https://crates.io/crates/wifi-densepose-hardware) | ESP32, Intel 5300, Atheros CSI sensor interfaces | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-hardware.svg)](https://crates.io/crates/wifi-densepose-hardware) | -| [`wifi-densepose-wifiscan`](https://crates.io/crates/wifi-densepose-wifiscan) | Multi-BSSID WiFi scanning (Windows, macOS, Linux) | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-wifiscan.svg)](https://crates.io/crates/wifi-densepose-wifiscan) | -| [`wifi-densepose-wasm`](https://crates.io/crates/wifi-densepose-wasm) | WebAssembly bindings for browser deployment | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-wasm.svg)](https://crates.io/crates/wifi-densepose-wasm) | -| [`wifi-densepose-sensing-server`](https://crates.io/crates/wifi-densepose-sensing-server) | Axum server: UDP ingestion, WebSocket broadcast | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-sensing-server.svg)](https://crates.io/crates/wifi-densepose-sensing-server) | -| [`wifi-densepose-cli`](https://crates.io/crates/wifi-densepose-cli) | Command-line tool for MAT disaster scanning | -- | 
[![crates.io](https://img.shields.io/crates/v/wifi-densepose-cli.svg)](https://crates.io/crates/wifi-densepose-cli) | -| [`wifi-densepose-api`](https://crates.io/crates/wifi-densepose-api) | REST + WebSocket API layer | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-api.svg)](https://crates.io/crates/wifi-densepose-api) | -| [`wifi-densepose-config`](https://crates.io/crates/wifi-densepose-config) | Configuration management | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-config.svg)](https://crates.io/crates/wifi-densepose-config) | -| [`wifi-densepose-db`](https://crates.io/crates/wifi-densepose-db) | Database persistence (PostgreSQL, SQLite, Redis) | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-db.svg)](https://crates.io/crates/wifi-densepose-db) | +| [`wifi-densepose-ruvector`](https://crates.io/crates/wifi-densepose-ruvector) | RuVector v2.0.4 integration layer - 7 signal+MAT integration points (ADR-017) | **All 5** | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-ruvector.svg)](https://crates.io/crates/wifi-densepose-ruvector) | +| [`wifi-densepose-vitals`](https://crates.io/crates/wifi-densepose-vitals) | Vital signs: breathing (6-30 BPM), heart rate (40-120 BPM) | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-vitals.svg)](https://crates.io/crates/wifi-densepose-vitals) | +| [`wifi-densepose-hardware`](https://crates.io/crates/wifi-densepose-hardware) | ESP32, Intel 5300, Atheros CSI sensor interfaces | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-hardware.svg)](https://crates.io/crates/wifi-densepose-hardware) | +| [`wifi-densepose-wifiscan`](https://crates.io/crates/wifi-densepose-wifiscan) | Multi-BSSID WiFi scanning (Windows, macOS, Linux) | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-wifiscan.svg)](https://crates.io/crates/wifi-densepose-wifiscan) | +| [`wifi-densepose-wasm`](https://crates.io/crates/wifi-densepose-wasm) | 
WebAssembly bindings for browser deployment | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-wasm.svg)](https://crates.io/crates/wifi-densepose-wasm) | +| [`wifi-densepose-sensing-server`](https://crates.io/crates/wifi-densepose-sensing-server) | Axum server: UDP ingestion, WebSocket broadcast | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-sensing-server.svg)](https://crates.io/crates/wifi-densepose-sensing-server) | +| [`wifi-densepose-cli`](https://crates.io/crates/wifi-densepose-cli) | Command-line tool for MAT disaster scanning | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-cli.svg)](https://crates.io/crates/wifi-densepose-cli) | +| [`wifi-densepose-api`](https://crates.io/crates/wifi-densepose-api) | REST + WebSocket API layer | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-api.svg)](https://crates.io/crates/wifi-densepose-api) | +| [`wifi-densepose-config`](https://crates.io/crates/wifi-densepose-config) | Configuration management | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-config.svg)](https://crates.io/crates/wifi-densepose-config) | +| [`wifi-densepose-db`](https://crates.io/crates/wifi-densepose-db) | Database persistence (PostgreSQL, SQLite, Redis) | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-db.svg)](https://crates.io/crates/wifi-densepose-db) | -All crates integrate with [RuVector v2.0.4](https://github.com/ruvnet/ruvector) — see [AI Backbone](#ai-backbone-ruvector) below. +All crates integrate with [RuVector v2.0.4](https://github.com/ruvnet/ruvector) - see [AI Backbone](#ai-backbone-ruvector) below. -**[rUv Neural](rust-port/wifi-densepose-rs/crates/ruv-neural/)** — A separate 12-crate workspace for brain network topology analysis, neural decoding, and medical sensing. See [rUv Neural](#ruv-neural) in Models & Training. 
+**[rUv Neural](rust-port/wifi-densepose-rs/crates/ruv-neural/)** - A separate 12-crate workspace for brain network topology analysis, neural decoding, and medical sensing. See [rUv Neural](#ruv-neural) in Models & Training.
@@ -658,7 +658,7 @@ All crates integrate with [RuVector v2.0.4](https://github.com/ruvnet/ruvector) ### 1. Install ```bash -# Fastest path — Docker +# Fastest path - Docker docker pull ruvnet/wifi-densepose:latest docker run -p 3000:3000 ruvnet/wifi-densepose:latest @@ -718,14 +718,14 @@ asyncio.run(stream()) ## 📋 Table of Contents
-📡 Signal Processing & Sensing — From raw WiFi frames to vital signs +📡 Signal Processing & Sensing - From raw WiFi frames to vital signs -The signal processing stack transforms raw WiFi Channel State Information into actionable human sensing data. Starting from 56-192 subcarrier complex values captured at 20 Hz, the pipeline applies research-grade algorithms (SpotFi phase correction, Hampel outlier rejection, Fresnel zone modeling) to extract breathing rate, heart rate, motion level, and multi-person body pose — all in pure Rust with zero external ML dependencies. +The signal processing stack transforms raw WiFi Channel State Information into actionable human sensing data. Starting from 56-192 subcarrier complex values captured at 20 Hz, the pipeline applies research-grade algorithms (SpotFi phase correction, Hampel outlier rejection, Fresnel zone modeling) to extract breathing rate, heart rate, motion level, and multi-person body pose - all in pure Rust with zero external ML dependencies. 
| Section | Description | Docs | |---------|-------------|------| -| [Key Features](#key-features) | Sensing, Intelligence, and Performance & Deployment capabilities | — | -| [How It Works](#how-it-works) | End-to-end pipeline: radio waves → CSI capture → signal processing → AI → pose + vitals | — | +| [Key Features](#key-features) | Sensing, Intelligence, and Performance & Deployment capabilities | - | +| [How It Works](#how-it-works) | End-to-end pipeline: radio waves → CSI capture → signal processing → AI → pose + vitals | - | | [ESP32-S3 Hardware Pipeline](#esp32-s3-hardware-pipeline) | 20 Hz CSI streaming, binary frame parsing, flash & provision | [ADR-018](docs/adr/ADR-018-esp32-dev-implementation.md) · [Tutorial #34](https://github.com/ruvnet/RuView/issues/34) | | [Vital Sign Detection](#vital-sign-detection) | Breathing 6-30 BPM, heartbeat 40-120 BPM, FFT peak detection | [ADR-021](docs/adr/ADR-021-vital-sign-detection-rvdna-pipeline.md) | | [WiFi Scan Domain Layer](#wifi-scan-domain-layer) | 8-stage RSSI pipeline, multi-BSSID fingerprinting, Windows WiFi | [ADR-022](docs/adr/ADR-022-windows-wifi-enhanced-fidelity-ruvector.md) · [Tutorial #36](https://github.com/ruvnet/RuView/issues/36) | @@ -735,7 +735,7 @@ The signal processing stack transforms raw WiFi Channel State Information into a
-🧠 Models & Training — DensePose pipeline, RVF containers, SONA adaptation, RuVector integration +🧠 Models & Training - DensePose pipeline, RVF containers, SONA adaptation, RuVector integration The neural pipeline uses a graph transformer with cross-attention to map CSI feature matrices to 17 COCO body keypoints and DensePose UV coordinates. Models are packaged as single-file `.rvf` containers with progressive loading (Layer A instant, Layer B warm, Layer C full). SONA (Self-Optimizing Neural Architecture) enables continuous on-device adaptation via micro-LoRA + EWC++ without catastrophic forgetting. Signal processing is powered by 5 [RuVector](https://github.com/ruvnet/ruvector) crates (v2.0.4) with 7 integration points across the Rust workspace, plus 6 additional vendored crates for inference and graph intelligence. @@ -752,33 +752,33 @@ The neural pipeline uses a graph transformer with cross-attention to map CSI fea
-🖥️ Usage & Configuration — CLI flags, API endpoints, hardware setup +🖥️ Usage & Configuration - CLI flags, API endpoints, hardware setup The Rust sensing server is the primary interface, offering a comprehensive CLI with flags for data source selection, model loading, training, benchmarking, and RVF export. A REST API (Axum) and WebSocket server provide real-time data access. The Python v1 CLI remains available for legacy workflows. | Section | Description | Docs | |---------|-------------|------| -| [CLI Usage](#cli-usage) | `--source`, `--train`, `--benchmark`, `--export-rvf`, `--model`, `--progressive` | — | -| [REST API & WebSocket](#rest-api--websocket) | 6 REST endpoints (sensing, vitals, BSSID, SONA), WebSocket real-time stream | — | +| [CLI Usage](#cli-usage) | `--source`, `--train`, `--benchmark`, `--export-rvf`, `--model`, `--progressive` | - | +| [REST API & WebSocket](#rest-api--websocket) | 6 REST endpoints (sensing, vitals, BSSID, SONA), WebSocket real-time stream | - | | [Hardware Support](#hardware-support-1) | ESP32-S3 ($8), Intel 5300 ($15), Atheros AR9580 ($20), Windows RSSI ($0) | [ADR-012](docs/adr/ADR-012-esp32-csi-sensor-mesh.md) · [ADR-013](docs/adr/ADR-013-feature-level-sensing-commodity-gear.md) |
-⚙️ Development & Testing — 542+ tests, CI, deployment +⚙️ Development & Testing - 542+ tests, CI, deployment -The project maintains 542+ pure-Rust tests across 7 crate suites with zero mocks — every test runs against real algorithm implementations. Hardware-free simulation mode (`--source simulate`) enables full-stack testing without physical devices. Docker images are published on Docker Hub for zero-setup deployment. +The project maintains 542+ pure-Rust tests across 7 crate suites with zero mocks - every test runs against real algorithm implementations. Hardware-free simulation mode (`--source simulate`) enables full-stack testing without physical devices. Docker images are published on Docker Hub for zero-setup deployment. | Section | Description | Docs | |---------|-------------|------| -| [Testing](#testing) | 7 test suites: sensing-server (229), signal (83), mat (139), wifiscan (91), RVF (16), vitals (18) | — | -| [Deployment](#deployment) | Docker images (132 MB Rust / 569 MB Python), docker-compose, env vars | — | -| [Contributing](#contributing) | Fork → branch → test → PR workflow, Rust and Python dev setup | — | +| [Testing](#testing) | 7 test suites: sensing-server (229), signal (83), mat (139), wifiscan (91), RVF (16), vitals (18) | - | +| [Deployment](#deployment) | Docker images (132 MB Rust / 569 MB Python), docker-compose, env vars | - | +| [Contributing](#contributing) | Fork → branch → test → PR workflow, Rust and Python dev setup | - |
-📊 Performance & Benchmarks — Measured throughput, latency, resource usage +📊 Performance & Benchmarks - Measured throughput, latency, resource usage All benchmarks are measured on the Rust sensing server using `cargo bench` and the built-in `--benchmark` CLI flag. The Rust v2 implementation delivers 810x end-to-end speedup over the Python v1 baseline, with motion detection reaching 5,400x improvement. The vital sign detector processes 11,665 frames/second in a single-threaded benchmark. @@ -790,7 +790,7 @@ All benchmarks are measured on the Rust sensing server using `cargo bench` and t
-📄 Meta — License, changelog, support +📄 Meta - License, changelog, support WiFi DensePose is MIT-licensed open source, developed by [ruvnet](https://github.com/ruvnet). The project has been in active development since March 2025, with 3 major releases delivering the Rust port, SOTA signal processing, disaster response module, and end-to-end training pipeline. @@ -805,7 +805,7 @@ WiFi DensePose is MIT-licensed open source, developed by [ruvnet](https://github ---
-🌍 Cross-Environment Generalization (ADR-027 — Project MERIDIAN) — Train once, deploy in any room without retraining +🌍 Cross-Environment Generalization (ADR-027 - Project MERIDIAN) - Train once, deploy in any room without retraining | What | How it works | Why it matters | |------|-------------|----------------| @@ -813,7 +813,7 @@ WiFi DensePose is MIT-licensed open source, developed by [ruvnet](https://github | **Geometry Encoder (FiLM)** | Transmitter/receiver positions are Fourier-encoded and injected as scale+shift conditioning on every layer | The model knows *where* the hardware is, so it doesn't need to memorize layout | | **Hardware Normalizer** | Resamples any chipset's CSI to a canonical 56-subcarrier format with standardized amplitude | Intel 5300 and ESP32 data look identical to the model | | **Virtual Domain Augmentation** | Generates synthetic environments with random room scale, wall reflections, scatterers, and noise profiles | Training sees 1000s of rooms even with data from just 2-3 | -| **Rapid Adaptation (TTT)** | Contrastive test-time training with LoRA weight generation from a few unlabeled frames | Zero-shot deployment — the model self-tunes on arrival | +| **Rapid Adaptation (TTT)** | Contrastive test-time training with LoRA weight generation from a few unlabeled frames | Zero-shot deployment - the model self-tunes on arrival | | **Cross-Domain Evaluator** | Leave-one-out evaluation across all training environments with per-environment PCK/OKS metrics | Proves generalization, not just memorization | **Architecture** @@ -838,7 +838,7 @@ CSI Encoder (existing) ──→ latent features **Security hardening:** - Bounded calibration buffer (max 10,000 frames) prevents memory exhaustion -- `adapt()` returns `Result<_, AdaptError>` — no panics on bad input +- `adapt()` returns `Result<_, AdaptError>` - no panics on bad input - Atomic instance counter ensures unique weight initialization across threads - Division-by-zero guards on all augmentation 
parameters @@ -847,9 +847,9 @@ See [`docs/adr/ADR-027-cross-environment-domain-generalization.md`](docs/adr/ADR
-🔍 Independent Capability Audit (ADR-028) — 1,031 tests, SHA-256 proof, self-verifying witness bundle +🔍 Independent Capability Audit (ADR-028) - 1,031 tests, SHA-256 proof, self-verifying witness bundle -A [3-agent parallel audit](docs/adr/ADR-028-esp32-capability-audit.md) independently verified every claim in this repository — ESP32 hardware, signal processing, neural networks, training pipeline, deployment, and security. Results: +A [3-agent parallel audit](docs/adr/ADR-028-esp32-capability-audit.md) independently verified every claim in this repository - ESP32 hardware, signal processing, neural networks, training pipeline, deployment, and security. Results: ``` Rust tests: 1,031 passed, 0 failed @@ -881,16 +881,16 @@ cd dist/witness-bundle-ADR028-*/ && bash VERIFY.sh
-📡 Multistatic Sensing (ADR-029/030/031 — Project RuvSense + RuView) — Multiple ESP32 nodes fuse viewpoints for production-grade pose, tracking, and exotic sensing +📡 Multistatic Sensing (ADR-029/030/031 - Project RuvSense + RuView) - Multiple ESP32 nodes fuse viewpoints for production-grade pose, tracking, and exotic sensing -A single WiFi receiver can track people, but has blind spots — limbs behind the torso are invisible, depth is ambiguous, and two people at similar range create overlapping signals. RuvSense solves this by coordinating multiple ESP32 nodes into a **multistatic mesh** where every node acts as both transmitter and receiver, creating N×(N-1) measurement links from N devices. +A single WiFi receiver can track people, but has blind spots - limbs behind the torso are invisible, depth is ambiguous, and two people at similar range create overlapping signals. RuvSense solves this by coordinating multiple ESP32 nodes into a **multistatic mesh** where every node acts as both transmitter and receiver, creating N×(N-1) measurement links from N devices. 
**What it does in plain terms:** - 4 ESP32-S3 nodes ($48 total) provide 12 TX-RX measurement links covering 360 degrees - Each node hops across WiFi channels 1/6/11, tripling effective bandwidth from 20→60 MHz -- Coherence gating rejects noisy frames automatically — no manual tuning, stable for days +- Coherence gating rejects noisy frames automatically - no manual tuning, stable for days - Two-person tracking at 20 Hz with zero identity swaps over 10 minutes -- The room itself becomes a persistent model — the system remembers, predicts, and explains +- The room itself becomes a persistent model - the system remembers, predicts, and explains **Three ADRs, one pipeline:** @@ -961,7 +961,7 @@ Pose Tracker + DensePose 17-keypoint Kalman, re-ID via AETHER embeddings | `csi_collector.c` | Channel hop table, timer-driven hop, NDP injection stub | | `nvs_config.c` | 5 new NVS keys: hop_count, channel_list, dwell_ms, tdm_slot, tdm_node_count | -**DDD Domain Model** — 6 bounded contexts: Multistatic Sensing, Coherence, Pose Tracking, Field Model, Cross-Room Identity, Adversarial Detection. Full specification: [`docs/ddd/ruvsense-domain-model.md`](docs/ddd/ruvsense-domain-model.md). +**DDD Domain Model** - 6 bounded contexts: Multistatic Sensing, Coherence, Pose Tracking, Field Model, Cross-Room Identity, Adversarial Detection. Full specification: [`docs/ddd/ruvsense-domain-model.md`](docs/ddd/ruvsense-domain-model.md). See the ADR documents for full architectural details, GOAP integration plans, and research references. 
@@ -979,11 +979,11 @@ Maps the CRV (Coordinate Remote Viewing) signal-line methodology to WiFi CSI pro | I | Ideograms | Raw CSI gestalt (manmade/natural/movement/energy) | Poincare ball hyperbolic embeddings | | II | Sensory | Amplitude textures, phase patterns, frequency colors | Multi-head attention vectors | | III | Dimensional | AP mesh spatial topology, node geometry | GNN graph topology | -| IV | Emotional/AOL | Coherence gating — signal vs noise separation | SNN temporal encoding | -| V | Interrogation | Cross-stage probing — query pose against CSI history | Differentiable search | +| IV | Emotional/AOL | Coherence gating - signal vs noise separation | SNN temporal encoding | +| V | Interrogation | Cross-stage probing - query pose against CSI history | Differentiable search | | VI | 3D Model | Composite person estimation, MinCut partitioning | Graph partitioning | -**Cross-Session Convergence**: When multiple AP clusters observe the same person, CRV convergence analysis finds agreement in their signal embeddings — directly mapping to cross-room identity continuity. +**Cross-Session Convergence**: When multiple AP clusters observe the same person, CRV convergence analysis finds agreement in their signal embeddings - directly mapping to cross-room identity continuity. 
```rust use wifi_densepose_ruvector::crv::WifiCrvPipeline; @@ -1001,11 +1001,11 @@ let convergence = pipeline.find_cross_room_convergence("person-001", 0.75)?; ``` **Architecture**: -- `CsiGestaltClassifier` — Maps CSI amplitude/phase patterns to 6 gestalt types -- `CsiSensoryEncoder` — Extracts texture/color/temperature/luminosity features from subcarriers -- `MeshTopologyEncoder` — Encodes AP mesh as GNN graph (Stage III) -- `CoherenceAolDetector` — Maps coherence gate states to AOL noise detection (Stage IV) -- `WifiCrvPipeline` — Orchestrates all 6 stages into unified sensing session +- `CsiGestaltClassifier` - Maps CSI amplitude/phase patterns to 6 gestalt types +- `CsiSensoryEncoder` - Extracts texture/color/temperature/luminosity features from subcarriers +- `MeshTopologyEncoder` - Encodes AP mesh as GNN graph (Stage III) +- `CoherenceAolDetector` - Maps coherence gate states to AOL noise detection (Stage IV) +- `WifiCrvPipeline` - Orchestrates all 6 stages into unified sensing session
@@ -1014,9 +1014,9 @@ let convergence = pipeline.find_cross_room_convergence("person-001", 0.75)?; ## 📡 Signal Processing & Sensing
-📡 ESP32-S3 Hardware Pipeline (ADR-018) — 28 Hz CSI streaming, flash & provision +📡 ESP32-S3 Hardware Pipeline (ADR-018) - 28 Hz CSI streaming, flash & provision -A single ESP32-S3 board (~$9) captures WiFi signal data 28 times per second and streams it over UDP. A host server can visualize and record the data, but the ESP32 can also run on its own — detecting presence, measuring breathing and heart rate, and alerting on falls without any server at all. +A single ESP32-S3 board (~$9) captures WiFi signal data 28 times per second and streams it over UDP. A host server can visualize and record the data, but the ESP32 can also run on its own - detecting presence, measuring breathing and heart rate, and alerting on falls without any server at all. ``` ESP32-S3 node UDP/5005 Host server (optional) @@ -1044,24 +1044,24 @@ ESP32-S3 node UDP/5005 Host server (optional) ### Flash and provision -Download a pre-built binary — no build toolchain needed: +Download a pre-built binary - no build toolchain needed: | Release | What's included | Tag | |---------|-----------------|-----| -| [v0.5.0](https://github.com/ruvnet/RuView/releases/tag/v0.5.0-esp32) | **Stable** — mmWave sensor fusion ([ADR-063](docs/adr/ADR-063-mmwave-sensor-fusion.md)), auto-detect MR60BHA2/LD2410, 48-byte fused vitals, all v0.4.3.1 fixes | `v0.5.0-esp32` | +| [v0.5.0](https://github.com/ruvnet/RuView/releases/tag/v0.5.0-esp32) | **Stable** - mmWave sensor fusion ([ADR-063](docs/adr/ADR-063-mmwave-sensor-fusion.md)), auto-detect MR60BHA2/LD2410, 48-byte fused vitals, all v0.4.3.1 fixes | `v0.5.0-esp32` | | [v0.4.3.1](https://github.com/ruvnet/RuView/releases/tag/v0.4.3.1-esp32) | Fall detection fix ([#263](https://github.com/ruvnet/RuView/issues/263)), 4MB flash ([#265](https://github.com/ruvnet/RuView/issues/265)), watchdog fix ([#266](https://github.com/ruvnet/RuView/issues/266)) | `v0.4.3.1-esp32` | | [v0.4.1](https://github.com/ruvnet/RuView/releases/tag/v0.4.1-esp32) | CSI build fix, compile guard, 
AMOLED display, edge intelligence ([ADR-057](docs/adr/ADR-057-firmware-csi-build-guard.md)) | `v0.4.1-esp32` | -| [v0.3.0-alpha](https://github.com/ruvnet/RuView/releases/tag/v0.3.0-alpha-esp32) | Alpha — adds on-device edge intelligence and WASM modules ([ADR-039](docs/adr/ADR-039-esp32-edge-intelligence.md), [ADR-040](docs/adr/ADR-040-wasm-programmable-sensing.md)) | `v0.3.0-alpha-esp32` | +| [v0.3.0-alpha](https://github.com/ruvnet/RuView/releases/tag/v0.3.0-alpha-esp32) | Alpha - adds on-device edge intelligence and WASM modules ([ADR-039](docs/adr/ADR-039-esp32-edge-intelligence.md), [ADR-040](docs/adr/ADR-040-wasm-programmable-sensing.md)) | `v0.3.0-alpha-esp32` | | [v0.2.0](https://github.com/ruvnet/RuView/releases/tag/v0.2.0-esp32) | Raw CSI streaming, multi-node TDM, channel hopping | `v0.2.0-esp32` | ```bash -# 1. Flash the firmware to your ESP32-S3 (8MB flash — most boards) +# 1. Flash the firmware to your ESP32-S3 (8MB flash - most boards) python -m esptool --chip esp32s3 --port COM7 --baud 460800 \ write_flash --flash-mode dio --flash-size 8MB --flash-freq 80m \ 0x0 bootloader.bin 0x8000 partition-table.bin \ 0xf000 ota_data_initial.bin 0x20000 esp32-csi-node.bin -# 1b. For 4MB flash boards (e.g. ESP32-S3 SuperMini 4MB) — use the 4MB binaries: +# 1b. For 4MB flash boards (e.g. ESP32-S3 SuperMini 4MB) - use the 4MB binaries: python -m esptool --chip esp32s3 --port COM7 --baud 460800 \ write_flash --flash-mode dio --flash-size 4MB --flash-freq 80m \ 0x0 bootloader.bin 0x8000 partition-table-4mb.bin \ @@ -1072,7 +1072,7 @@ python firmware/esp32-csi-node/provision.py --port COM7 \ --ssid "YourWiFi" --password "secret" --target-ip 192.168.1.20 # 3. 
(Optional) Start the host server to visualize data -cargo run -p wifi-densepose-sensing-server -- --http-port 3000 --source auto +cargo run -p wifi-densepose-sensing-server -- --http-port 3000 --source auto # Open http://localhost:3000 ``` @@ -1092,20 +1092,20 @@ python firmware/esp32-csi-node/provision.py --port COM8 \ --node-id 1 --tdm-slot 1 --tdm-total 3 ``` -Nodes can also hop across WiFi channels (1, 6, 11) to increase sensing bandwidth — configured via [ADR-029](docs/adr/ADR-029-ruvsense-multistatic-sensing-mode.md) channel hopping. +Nodes can also hop across WiFi channels (1, 6, 11) to increase sensing bandwidth - configured via [ADR-029](docs/adr/ADR-029-ruvsense-multistatic-sensing-mode.md) channel hopping. ### On-device intelligence (v0.3.0-alpha) -The alpha firmware can analyze signals locally and send compact results instead of raw data. This means the ESP32 works standalone — no server needed for basic sensing. Disabled by default for backward compatibility. +The alpha firmware can analyze signals locally and send compact results instead of raw data. This means the ESP32 works standalone - no server needed for basic sensing. Disabled by default for backward compatibility. 
| Tier | What it does | RAM used | |------|-------------|----------| -| **0** | Off — streams raw CSI only (same as v0.2.0) | 0 KB | +| **0** | Off - streams raw CSI only (same as v0.2.0) | 0 KB | | **1** | Cleans up signals, picks the best subcarriers, compresses data (saves 30-50% bandwidth) | ~30 KB | | **2** | Everything in Tier 1 + detects presence, measures breathing and heart rate, detects falls | ~33 KB | | **3** | Everything in Tier 2 + runs custom WASM modules (gesture recognition, intrusion detection, and [63 more](docs/edge-modules/README.md)) | ~160 KB/module | -Enable without reflashing — just reprovision: +Enable without reflashing - just reprovision: ```bash # Turn on Tier 2 (vitals) on an already-flashed node @@ -1125,7 +1125,7 @@ See [firmware/esp32-csi-node/README.md](firmware/esp32-csi-node/README.md), [ADR
-🦀 Rust Implementation (v2) — 810x faster, 54K fps pipeline +🦀 Rust Implementation (v2) - 810x faster, 54K fps pipeline ### Performance Benchmarks (Validated) @@ -1155,7 +1155,7 @@ cargo bench --package wifi-densepose-signal
-💓 Vital Sign Detection (ADR-021) — Breathing and heartbeat via FFT +💓 Vital Sign Detection (ADR-021) - Breathing and heartbeat via FFT | Capability | Range | Method | |------------|-------|--------| @@ -1174,7 +1174,7 @@ See [ADR-021](docs/adr/ADR-021-vital-sign-detection-rvdna-pipeline.md).
-📡 WiFi Scan Domain Layer (ADR-022/025) — 8-stage RSSI pipeline for Windows, macOS, and Linux WiFi +📡 WiFi Scan Domain Layer (ADR-022/025) - 8-stage RSSI pipeline for Windows, macOS, and Linux WiFi | Stage | Purpose | |-------|---------| @@ -1196,9 +1196,9 @@ See [ADR-022](docs/adr/ADR-022-windows-wifi-enhanced-fidelity-ruvector.md) and [
-🚨 WiFi-Mat: Disaster Response — Search & rescue, START triage, 3D localization +🚨 WiFi-Mat: Disaster Response - Search & rescue, START triage, 3D localization -WiFi signals penetrate non-metallic debris (concrete, wood, drywall) where cameras and thermal sensors cannot reach. The WiFi-Mat module (`wifi-densepose-mat`, 139 tests) uses CSI analysis to detect survivors trapped under rubble, classify their condition using the START triage protocol, and estimate their 3D position — giving rescue teams actionable intelligence within seconds of deployment. +WiFi signals penetrate non-metallic debris (concrete, wood, drywall) where cameras and thermal sensors cannot reach. The WiFi-Mat module (`wifi-densepose-mat`, 139 tests) uses CSI analysis to detect survivors trapped under rubble, classify their condition using the START triage protocol, and estimate their 3D position - giving rescue teams actionable intelligence within seconds of deployment. | Capability | How It Works | Performance Target | |------------|-------------|-------------------| @@ -1241,18 +1241,18 @@ response.start_scanning().await?;
-🔬 SOTA Signal Processing (ADR-014) — 6 research-grade algorithms +🔬 SOTA Signal Processing (ADR-014) - 6 research-grade algorithms -The signal processing layer bridges the gap between raw commodity WiFi hardware output and research-grade sensing accuracy. Each algorithm addresses a specific limitation of naive CSI processing — from hardware-induced phase corruption to environment-dependent multipath interference. All six are implemented in `wifi-densepose-signal/src/` with deterministic tests and no mock data. +The signal processing layer bridges the gap between raw commodity WiFi hardware output and research-grade sensing accuracy. Each algorithm addresses a specific limitation of naive CSI processing - from hardware-induced phase corruption to environment-dependent multipath interference. All six are implemented in `wifi-densepose-signal/src/` with deterministic tests and no mock data. | Algorithm | What It Does | Why It Matters | Math | Source | |-----------|-------------|----------------|------|--------| -| **Conjugate Multiplication** | Multiplies CSI antenna pairs: `H₁[k] × conj(H₂[k])` | Cancels CFO, SFO, and packet detection delay that corrupt raw phase — preserves only environment-caused phase differences | `CSI_ratio[k] = H₁[k] * conj(H₂[k])` | [SpotFi](https://dl.acm.org/doi/10.1145/2789168.2790124) (SIGCOMM 2015) | +| **Conjugate Multiplication** | Multiplies CSI antenna pairs: `H₁[k] × conj(H₂[k])` | Cancels CFO, SFO, and packet detection delay that corrupt raw phase - preserves only environment-caused phase differences | `CSI_ratio[k] = H₁[k] * conj(H₂[k])` | [SpotFi](https://dl.acm.org/doi/10.1145/2789168.2790124) (SIGCOMM 2015) | | **Hampel Filter** | Replaces outliers using running median ± scaled MAD | Z-score uses mean/std which are corrupted by the very outliers it detects (masking effect). 
Hampel uses median/MAD, resisting up to 50% contamination | `σ̂ = 1.4826 × MAD` | Standard DSP; WiGest (2015) | | **Fresnel Zone Model** | Models signal variation from chest displacement crossing Fresnel zone boundaries | Zero-crossing counting fails in multipath-rich environments. Fresnel predicts *where* breathing should appear based on TX-RX-body geometry | `ΔΦ = 2π × 2Δd / λ`, `A = \|sin(ΔΦ/2)\|` | [FarSense](https://dl.acm.org/doi/10.1145/3300061.3345431) (MobiCom 2019) | | **CSI Spectrogram** | Sliding-window FFT (STFT) per subcarrier → 2D time-frequency matrix | Breathing = 0.2-0.4 Hz band, walking = 1-2 Hz, static = noise. 2D structure enables CNN spatial pattern recognition that 1D features miss | `S[t,f] = \|Σₙ x[n] w[n-t] e^{-j2πfn}\|²` | Standard since 2018 | -| **Subcarrier Selection** | Ranks subcarriers by motion sensitivity (variance ratio) and selects top-K | Not all subcarriers respond to motion — some sit in multipath nulls. Selecting the 10-20 most sensitive improves SNR by 6-10 dB | `sensitivity[k] = var_motion / var_static` | [WiDance](https://dl.acm.org/doi/10.1145/3117811.3117826) (MobiCom 2017) | -| **Body Velocity Profile** | Extracts velocity distribution from Doppler shifts across subcarriers | BVP is domain-independent — same velocity profile regardless of room layout, furniture, or AP placement. Basis for cross-environment recognition | `BVP[v,t] = Σₖ \|STFTₖ[v,t]\|` | [Widar 3.0](https://dl.acm.org/doi/10.1145/3328916) (MobiSys 2019) | +| **Subcarrier Selection** | Ranks subcarriers by motion sensitivity (variance ratio) and selects top-K | Not all subcarriers respond to motion - some sit in multipath nulls. 
Selecting the 10-20 most sensitive improves SNR by 6-10 dB | `sensitivity[k] = var_motion / var_static` | [WiDance](https://dl.acm.org/doi/10.1145/3117811.3117826) (MobiCom 2017) | +| **Body Velocity Profile** | Extracts velocity distribution from Doppler shifts across subcarriers | BVP is domain-independent - same velocity profile regardless of room layout, furniture, or AP placement. Basis for cross-environment recognition | `BVP[v,t] = Σₖ \|STFTₖ[v,t]\|` | [Widar 3.0](https://dl.acm.org/doi/10.1145/3328916) (MobiSys 2019) | **Processing pipeline order:** Raw CSI → Conjugate multiplication (phase cleaning) → Hampel filter (outlier removal) → Subcarrier selection (top-K) → CSI spectrogram (time-frequency) → Fresnel model (breathing) + BVP (activity) @@ -1265,11 +1265,11 @@ See [ADR-014](docs/adr/ADR-014-sota-signal-processing.md) for full mathematical ## 🧠 Models & Training
-🤖 AI Backbone: RuVector — Attention, graph algorithms, and edge-AI compression powering the sensing pipeline +🤖 AI Backbone: RuVector - Attention, graph algorithms, and edge-AI compression powering the sensing pipeline Raw WiFi signals are noisy, redundant, and environment-dependent. [RuVector](https://github.com/ruvnet/ruvector) is the AI intelligence layer that transforms them into clean, structured input for the DensePose neural network. It uses **attention mechanisms** to learn which signals to trust, **graph algorithms** that automatically discover which WiFi channels are sensitive to body motion, and **compressed representations** that make edge inference possible on an $8 microcontroller. -Without RuVector, WiFi DensePose would need hand-tuned thresholds, brute-force matrix math, and 4x more memory — making real-time edge inference impossible. +Without RuVector, WiFi DensePose would need hand-tuned thresholds, brute-force matrix math, and 4x more memory - making real-time edge inference impossible. ``` Raw WiFi CSI (56 subcarriers, noisy) @@ -1300,9 +1300,9 @@ See [issue #67](https://github.com/ruvnet/RuView/issues/67) for a deep dive with
-📦 RVF Model Container — Single-file deployment with progressive loading +📦 RVF Model Container - Single-file deployment with progressive loading -The [RuVector Format (RVF)](https://github.com/ruvnet/ruvector/tree/main/crates/rvf) packages an entire trained model — weights, HNSW indexes, quantization codebooks, SONA adaptation deltas, and WASM inference runtime — into a single self-contained binary file. No external dependencies are needed at deployment time. +The [RuVector Format (RVF)](https://github.com/ruvnet/ruvector/tree/main/crates/rvf) packages an entire trained model - weights, HNSW indexes, quantization codebooks, SONA adaptation deltas, and WASM inference runtime - into a single self-contained binary file. No external dependencies are needed at deployment time. **Container structure:** @@ -1342,8 +1342,8 @@ The [RuVector Format (RVF)](https://github.com/ruvnet/ruvector/tree/main/crates/ |----------|--------| | **Format** | Segment-based binary, 20+ segment types, CRC32 integrity per segment | | **Progressive Loading** | **Layer A** (<5ms): manifest + entry points → **Layer B** (100ms-1s): hot weights + adjacency → **Layer C** (seconds): full graph | -| **Signing** | Ed25519 training proofs for verifiable provenance — chain of custody from training data to deployed model | -| **Quantization** | Per-segment temperature-tiered: f32 (full), f16 (half), u8 (int8), int4 — with SIMD-accelerated distance computation | +| **Signing** | Ed25519 training proofs for verifiable provenance - chain of custody from training data to deployed model | +| **Quantization** | Per-segment temperature-tiered: f32 (full), f16 (half), u8 (int8), int4 - with SIMD-accelerated distance computation | | **CLI** | `--export-rvf` (generate), `--load-rvf` (config), `--save-rvf` (persist), `--model` (inference), `--progressive` (3-layer load) | ```bash @@ -1362,9 +1362,9 @@ Built on the [rvf](https://github.com/ruvnet/ruvector/tree/main/crates/rvf) crat
-🧬 Training & Fine-Tuning — MM-Fi/Wi-Pose pre-training, SONA adaptation +🧬 Training & Fine-Tuning - MM-Fi/Wi-Pose pre-training, SONA adaptation -The training pipeline implements 8 phases in pure Rust (7,832 lines, zero external ML dependencies). It trains a graph transformer with cross-attention to map CSI feature matrices to 17 COCO body keypoints and DensePose UV coordinates — following the approach from the CMU "DensePose From WiFi" paper ([arXiv:2301.00250](https://arxiv.org/abs/2301.00250)). RuVector crates provide the core building blocks: [ruvector-attention](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-attention) for cross-attention layers, [ruvector-mincut](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-mincut) for multi-person matching, and [ruvector-temporal-tensor](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-temporal-tensor) for CSI buffer compression. +The training pipeline implements 8 phases in pure Rust (7,832 lines, zero external ML dependencies). It trains a graph transformer with cross-attention to map CSI feature matrices to 17 COCO body keypoints and DensePose UV coordinates - following the approach from the CMU "DensePose From WiFi" paper ([arXiv:2301.00250](https://arxiv.org/abs/2301.00250)). RuVector crates provide the core building blocks: [ruvector-attention](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-attention) for cross-attention layers, [ruvector-mincut](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-mincut) for multi-person matching, and [ruvector-temporal-tensor](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-temporal-tensor) for CSI buffer compression. 
**Three-tier data strategy:** @@ -1385,9 +1385,9 @@ The training pipeline implements 8 phases in pure Rust (7,832 lines, zero extern | 5 | `sparse_inference.rs` (753 lines) | NeuronProfiler hot/cold partitioning, SparseLinear (skip cold rows), INT8/FP16 quantization | [ruvector-sparse-inference](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-sparse-inference) | | 6 | `rvf_pipeline.rs` (1,027 lines) | Progressive 3-layer loader, HNSW index, OverlayGraph, `RvfModelBuilder` | [ruvector-core](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-core) (HNSW) | | 7 | `rvf_container.rs` (914 lines) | Binary container format, 6+ segment types, CRC32 integrity | [rvf](https://github.com/ruvnet/ruvector/tree/main/crates/rvf) | -| 8 | `main.rs` integration | `--train`, `--model`, `--progressive` CLI flags, REST endpoints | — | +| 8 | `main.rs` integration | `--train`, `--model`, `--progressive` CLI flags, REST endpoints | - | -**SONA (Self-Optimizing Neural Architecture)** — the continuous adaptation system: +**SONA (Self-Optimizing Neural Architecture)** - the continuous adaptation system: | Component | What It Does | Why It Matters | |-----------|-------------|----------------| @@ -1413,14 +1413,14 @@ See [ADR-023](docs/adr/ADR-023-trained-densepose-model-ruvector-pipeline.md) ·
-🔩 RuVector Crates — 11 vendored signal intelligence crates from github.com/ruvnet/ruvector +🔩 RuVector Crates - 11 vendored signal intelligence crates from github.com/ruvnet/ruvector **5 directly-used crates** (v2.0.4, declared in `Cargo.toml`, 7 integration points): | Crate | What It Does | Where It's Used in WiFi-DensePose | Source | |-------|-------------|-----------------------------------|--------| | [`ruvector-attention`](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-attention) | Scaled dot-product attention, MoE routing, sparse attention | `model.rs` (spatial attention), `bvp.rs` (sensitivity-weighted velocity profiles) | [crate](https://crates.io/crates/ruvector-attention) | -| [`ruvector-mincut`](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-mincut) | Subpolynomial dynamic min-cut O(n^1.5 log n) | `metrics.rs` (DynamicPersonMatcher — multi-person assignment), `subcarrier_selection.rs` (sensitive/insensitive split) | [crate](https://crates.io/crates/ruvector-mincut) | +| [`ruvector-mincut`](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-mincut) | Subpolynomial dynamic min-cut O(n^1.5 log n) | `metrics.rs` (DynamicPersonMatcher - multi-person assignment), `subcarrier_selection.rs` (sensitive/insensitive split) | [crate](https://crates.io/crates/ruvector-mincut) | | [`ruvector-attn-mincut`](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-attn-mincut) | Attention-gated spectrogram noise suppression | `model.rs` (antenna attention gating), `spectrogram.rs` (gate noisy time-frequency bins) | [crate](https://crates.io/crates/ruvector-attn-mincut) | | [`ruvector-solver`](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-solver) | Sparse Neumann series solver O(sqrt(n)) | `fresnel.rs` (TX-body-RX geometry), `triangulation.rs` (3D localization), `subcarrier.rs` (sparse interpolation 114→56) | [crate](https://crates.io/crates/ruvector-solver) | | 
[`ruvector-temporal-tensor`](https://github.com/ruvnet/ruvector/tree/main/crates/ruvector-temporal-tensor) | Tiered temporal compression (8/7/5/3-bit) | `dataset.rs` (CSI buffer compression), `breathing.rs` + `heartbeat.rs` (compressed vital sign spectrograms) | [crate](https://crates.io/crates/ruvector-temporal-tensor) | @@ -1441,16 +1441,16 @@ The full RuVector ecosystem includes 90+ crates. See [github.com/ruvnet/ruvector
-🧠 rUv Neural — Brain topology analysis ecosystem for neural decoding and medical sensing +🧠 rUv Neural - Brain topology analysis ecosystem for neural decoding and medical sensing -[**rUv Neural**](rust-port/wifi-densepose-rs/crates/ruv-neural/README.md) is a 12-crate Rust ecosystem that extends RuView's signal processing into brain network topology analysis. It transforms neural magnetic field measurements from quantum sensors (NV diamond magnetometers, optically pumped magnetometers) into dynamic connectivity graphs, using minimum cut algorithms to detect cognitive state transitions in real time. The ecosystem includes crates for signal processing (`ruv-neural-signal`), graph construction (`ruv-neural-graph`), HNSW-indexed pattern memory (`ruv-neural-memory`), graph embeddings (`ruv-neural-embed`), cognitive state decoding (`ruv-neural-decoder`), and ESP32/WASM edge targets. Medical and research applications include early neurological disease detection via topology signatures, brain-computer interfaces, clinical neurofeedback, and non-invasive biomedical sensing -- bridging RuView's RF sensing architecture with the emerging field of quantum biomedical diagnostics. +[**rUv Neural**](rust-port/wifi-densepose-rs/crates/ruv-neural/README.md) is a 12-crate Rust ecosystem that extends RuView's signal processing into brain network topology analysis. It transforms neural magnetic field measurements from quantum sensors (NV diamond magnetometers, optically pumped magnetometers) into dynamic connectivity graphs, using minimum cut algorithms to detect cognitive state transitions in real time. The ecosystem includes crates for signal processing (`ruv-neural-signal`), graph construction (`ruv-neural-graph`), HNSW-indexed pattern memory (`ruv-neural-memory`), graph embeddings (`ruv-neural-embed`), cognitive state decoding (`ruv-neural-decoder`), and ESP32/WASM edge targets. 
Medical and research applications include early neurological disease detection via topology signatures, brain-computer interfaces, clinical neurofeedback, and non-invasive biomedical sensing - bridging RuView's RF sensing architecture with the emerging field of quantum biomedical diagnostics.
---
-🏗️ System Architecture — End-to-end data flow from CSI capture to REST/WebSocket API +🏗️ System Architecture - End-to-end data flow from CSI capture to REST/WebSocket API ### End-to-End Pipeline @@ -1470,7 +1470,7 @@ graph TB BRIDGE["Bridge
I/Q → amplitude + phase"] end - subgraph SIGNAL ["🔬 Signal Processing — RuVector v2.0.4"] + subgraph SIGNAL ["🔬 Signal Processing - RuVector v2.0.4"] direction TB PHASE["Phase Sanitization
SpotFi conjugate multiply"] HAMPEL["Hampel Filter
Outlier rejection · σ=3"] @@ -1628,7 +1628,7 @@ graph TB ## 🖥️ CLI Usage
-Rust Sensing Server — Primary CLI interface +Rust Sensing Server - Primary CLI interface ```bash # Start with simulated data (no hardware) @@ -1672,7 +1672,7 @@ graph TB
-REST API & WebSocket — Endpoints reference +REST API & WebSocket - Endpoints reference #### REST API (Rust Sensing Server) @@ -1692,7 +1692,7 @@ WebSocket: `ws://localhost:3001/ws/sensing` (real-time sensing + vital signs)
-Hardware Support — Devices, cost, and guides +Hardware Support - Devices, cost, and guides | Hardware | CSI | Cost | Guide | |----------|-----|------|-------| @@ -1706,7 +1706,7 @@ WebSocket: `ws://localhost:3001/ws/sensing` (real-time sensing + vital signs)
-QEMU Firmware Testing (ADR-061) — 9-Layer Platform +QEMU Firmware Testing (ADR-061) - 9-Layer Platform Test ESP32-S3 firmware without physical hardware using Espressif's QEMU fork. The platform provides 9 layers of testing capability: @@ -1782,7 +1782,7 @@ See [ADR-062](docs/adr/ADR-062-qemu-swarm-configurator.md) and the [User Guide](
-Python Legacy CLI — v1 API server commands +Python Legacy CLI - v1 API server commands ```bash wifi-densepose start # Start API server @@ -1800,7 +1800,7 @@ wifi-densepose tasks list # List background tasks
Documentation Links -- [User Guide](docs/user-guide.md) — installation, first run, API, hardware setup, QEMU testing +- [User Guide](docs/user-guide.md) - installation, first run, API, hardware setup, QEMU testing - [WiFi-Mat User Guide](docs/wifi-mat-user-guide.md) | [Domain Model](docs/ddd/wifi-mat-domain-model.md) - [ADR-061](docs/adr/ADR-061-qemu-esp32s3-firmware-testing.md) QEMU platform | [ADR-062](docs/adr/ADR-062-qemu-swarm-configurator.md) Swarm configurator - [ADR-021](docs/adr/ADR-021-vital-sign-detection-rvdna-pipeline.md) | [ADR-022](docs/adr/ADR-022-windows-wifi-enhanced-fidelity-ruvector.md) | [ADR-023](docs/adr/ADR-023-trained-densepose-model-ruvector-pipeline.md) @@ -1812,10 +1812,10 @@ wifi-densepose tasks list # List background tasks ## 🧪 Testing
-542+ tests across 7 suites — zero mocks, hardware-free simulation +542+ tests across 7 suites - zero mocks, hardware-free simulation ```bash -# Rust tests (primary — 542+ tests) +# Rust tests (primary - 542+ tests) cd rust-port/wifi-densepose-rs cargo test --workspace @@ -1849,7 +1849,7 @@ python -m pytest v1/tests/ -v ## 🚀 Deployment
-Docker deployment — Production setup with docker-compose +Docker deployment - Production setup with docker-compose ```bash # Rust sensing server (132 MB) @@ -1883,7 +1883,7 @@ POSE_MAX_PERSONS=10 # Max tracked individuals ## 📊 Performance Metrics
-Measured benchmarks — Rust sensing server, validated via cargo bench +Measured benchmarks - Rust sensing server, validated via cargo bench ### Rust Sensing Server @@ -1944,66 +1944,66 @@ pre-commit install
Release history -### v3.2.0 — 2026-03-03 +### v3.2.0 - 2026-03-03 Edge intelligence: 24 hot-loadable WASM modules for on-device CSI processing on ESP32-S3. -- **ADR-041 Edge Intelligence Modules** — 24 `no_std` Rust modules compiled to `wasm32-unknown-unknown`, loaded via WASM3 on ESP32; 8 categories covering signal intelligence, adaptive learning, spatial reasoning, temporal analysis, AI security, quantum-inspired, autonomous systems, and exotic algorithms -- **Vendor Integration** — Algorithms ported from `midstream` (DTW, attractors, Flash Attention, min-cut, optimal transport) and `sublinear-time-solver` (PageRank, HNSW, sparse recovery, spiking NN) -- **On-device gesture learning** — User-teachable DTW gesture recognition with 3-rehearsal protocol and 16 template slots -- **Lifelong learning (EWC++)** — Elastic Weight Consolidation prevents catastrophic forgetting when learning new tasks -- **AI security modules** — FNV-1a replay detection, injection/jamming detection, 6D behavioral anomaly profiling with Mahalanobis scoring -- **Self-healing mesh** — 8-node mesh with health tracking, degradation/recovery hysteresis, and coverage redistribution -- **Common utility library** — `vendor_common.rs` shared across all 24 modules: CircularBuffer, EMA, WelfordStats, DTW, FixedPriorityQueue, vector math -- **243 tests passing** — All modules include comprehensive inline tests; 0 failures -- **Security audit** — 15 findings addressed (1 critical, 3 high, 6 medium, 5 low) - -### v3.1.0 — 2026-03-02 - -Multistatic sensing, persistent field model, and cross-viewpoint fusion — the biggest capability jump since v2.0. 
- -- **Project RuvSense (ADR-029)** — Multistatic mesh: TDM protocol, channel hopping (ch1/6/11), multi-band frame fusion, coherence gating, 17-keypoint Kalman tracker with re-ID; 10 new signal modules (5,300+ lines) -- **RuvSense Persistent Field Model (ADR-030)** — 7 exotic sensing tiers: field normal modes (SVD), RF tomography, longitudinal drift detection, intention prediction, cross-room identity, gesture classification, adversarial detection -- **Project RuView (ADR-031)** — Cross-viewpoint attention with geometric bias, Geometric Diversity Index, viewpoint fusion orchestrator; 5 new ruvector modules (2,200+ lines) -- **TDM Hardware Protocol** — ESP32 sensing coordinator: sync beacons, slot scheduling, clock drift compensation (±10ppm), 20 Hz aggregate rate -- **Channel-Hopping Firmware** — ESP32 firmware extended with hop table, timer-driven channel switching, NDP injection stub; NVS config for all TDM parameters; fully backward-compatible -- **DDD Domain Model** — 6 bounded contexts, ubiquitous language, aggregate roots, domain events, full event bus specification -- **`ruvector-crv` 6-stage CRV signal-line integration (ADR-033)** — Maps Coordinate Remote Viewing methodology to WiFi CSI: gestalt classification, sensory encoding, GNN topology, SNN coherence gating, differentiable search, MinCut partitioning; cross-session convergence for multi-room identity continuity -- **ADR-032 multistatic mesh security hardening** — HMAC-SHA256 beacon auth, SipHash-2-4 frame integrity, NDP rate limiter, coherence gate timeout, bounded buffers, NVS credential zeroing, atomic firmware state -- **ADR-032a QUIC transport layer** — `midstreamer-quic` TLS 1.3 AEAD for aggregator nodes, dual-mode security (ManualCrypto/QuicTransport), QUIC stream mapping, connection migration, congestion control -- **ADR-033 CRV signal-line sensing integration** — Architecture decision record for the 6-stage CRV pipeline mapping to ruvector components -- **Temporal gesture matching** — 
`midstreamer-temporal-compare` DTW/LCS/edit-distance gesture classification with quantized feature comparison -- **Attractor drift analysis** — `midstreamer-attractor` Takens' theorem phase-space embedding with Lyapunov exponent regime detection (Stable/Periodic/Chaotic) -- **v0.3.0 published** — All 15 workspace crates published to [crates.io](https://crates.io/crates/wifi-densepose-core) with updated dependencies +- **ADR-041 Edge Intelligence Modules** - 24 `no_std` Rust modules compiled to `wasm32-unknown-unknown`, loaded via WASM3 on ESP32; 8 categories covering signal intelligence, adaptive learning, spatial reasoning, temporal analysis, AI security, quantum-inspired, autonomous systems, and exotic algorithms +- **Vendor Integration** - Algorithms ported from `midstream` (DTW, attractors, Flash Attention, min-cut, optimal transport) and `sublinear-time-solver` (PageRank, HNSW, sparse recovery, spiking NN) +- **On-device gesture learning** - User-teachable DTW gesture recognition with 3-rehearsal protocol and 16 template slots +- **Lifelong learning (EWC++)** - Elastic Weight Consolidation prevents catastrophic forgetting when learning new tasks +- **AI security modules** - FNV-1a replay detection, injection/jamming detection, 6D behavioral anomaly profiling with Mahalanobis scoring +- **Self-healing mesh** - 8-node mesh with health tracking, degradation/recovery hysteresis, and coverage redistribution +- **Common utility library** - `vendor_common.rs` shared across all 24 modules: CircularBuffer, EMA, WelfordStats, DTW, FixedPriorityQueue, vector math +- **243 tests passing** - All modules include comprehensive inline tests; 0 failures +- **Security audit** - 15 findings addressed (1 critical, 3 high, 6 medium, 5 low) + +### v3.1.0 - 2026-03-02 + +Multistatic sensing, persistent field model, and cross-viewpoint fusion - the biggest capability jump since v2.0. 
+ +- **Project RuvSense (ADR-029)** - Multistatic mesh: TDM protocol, channel hopping (ch1/6/11), multi-band frame fusion, coherence gating, 17-keypoint Kalman tracker with re-ID; 10 new signal modules (5,300+ lines) +- **RuvSense Persistent Field Model (ADR-030)** - 7 exotic sensing tiers: field normal modes (SVD), RF tomography, longitudinal drift detection, intention prediction, cross-room identity, gesture classification, adversarial detection +- **Project RuView (ADR-031)** - Cross-viewpoint attention with geometric bias, Geometric Diversity Index, viewpoint fusion orchestrator; 5 new ruvector modules (2,200+ lines) +- **TDM Hardware Protocol** - ESP32 sensing coordinator: sync beacons, slot scheduling, clock drift compensation (±10ppm), 20 Hz aggregate rate +- **Channel-Hopping Firmware** - ESP32 firmware extended with hop table, timer-driven channel switching, NDP injection stub; NVS config for all TDM parameters; fully backward-compatible +- **DDD Domain Model** - 6 bounded contexts, ubiquitous language, aggregate roots, domain events, full event bus specification +- **`ruvector-crv` 6-stage CRV signal-line integration (ADR-033)** - Maps Coordinate Remote Viewing methodology to WiFi CSI: gestalt classification, sensory encoding, GNN topology, SNN coherence gating, differentiable search, MinCut partitioning; cross-session convergence for multi-room identity continuity +- **ADR-032 multistatic mesh security hardening** - HMAC-SHA256 beacon auth, SipHash-2-4 frame integrity, NDP rate limiter, coherence gate timeout, bounded buffers, NVS credential zeroing, atomic firmware state +- **ADR-032a QUIC transport layer** - `midstreamer-quic` TLS 1.3 AEAD for aggregator nodes, dual-mode security (ManualCrypto/QuicTransport), QUIC stream mapping, connection migration, congestion control +- **ADR-033 CRV signal-line sensing integration** - Architecture decision record for the 6-stage CRV pipeline mapping to ruvector components +- **Temporal gesture matching** - 
`midstreamer-temporal-compare` DTW/LCS/edit-distance gesture classification with quantized feature comparison +- **Attractor drift analysis** - `midstreamer-attractor` Takens' theorem phase-space embedding with Lyapunov exponent regime detection (Stable/Periodic/Chaotic) +- **v0.3.0 published** - All 15 workspace crates published to [crates.io](https://crates.io/crates/wifi-densepose-core) with updated dependencies - **28,000+ lines of new Rust code** across 26 modules with 400+ tests -- **Security hardened** — Bounded buffers, NaN guards, no panics in public APIs, input validation at all boundaries +- **Security hardened** - Bounded buffers, NaN guards, no panics in public APIs, input validation at all boundaries -### v3.0.0 — 2026-03-01 +### v3.0.0 - 2026-03-01 Major release: AETHER contrastive embedding model, AI signal processing backbone, cross-platform adapters, Docker Hub images, and comprehensive README overhaul. -- **Project AETHER (ADR-024)** — Self-supervised contrastive learning for WiFi CSI fingerprinting, similarity search, and anomaly detection; 55 KB model fits on ESP32 -- **AI Backbone (`wifi-densepose-ruvector`)** — 7 RuVector integration points replacing hand-tuned thresholds with attention, graph algorithms, and smart compression; [published to crates.io](https://crates.io/crates/wifi-densepose-ruvector) -- **Cross-platform RSSI adapters** — macOS CoreWLAN and Linux `iw` Rust adapters with `#[cfg(target_os)]` gating (ADR-025) -- **Docker images published** — `ruvnet/wifi-densepose:latest` (132 MB Rust) and `:python` (569 MB) -- **Project MERIDIAN (ADR-027)** — Cross-environment domain generalization: gradient reversal, geometry-conditioned FiLM, virtual domain augmentation, contrastive test-time training; zero-shot room transfer -- **10-phase DensePose training pipeline (ADR-023/027)** — Graph transformer, 6-term composite loss, SONA adaptation, RVF packaging, hardware normalization, domain-adversarial training -- **Vital sign detection 
(ADR-021)** — FFT-based breathing (6-30 BPM) and heartbeat (40-120 BPM), 11,665 fps -- **WiFi scan domain layer (ADR-022/025)** — 8-stage signal intelligence pipeline for Windows, macOS, and Linux -- **700+ Rust tests** — All passing, zero mocks +- **Project AETHER (ADR-024)** - Self-supervised contrastive learning for WiFi CSI fingerprinting, similarity search, and anomaly detection; 55 KB model fits on ESP32 +- **AI Backbone (`wifi-densepose-ruvector`)** - 7 RuVector integration points replacing hand-tuned thresholds with attention, graph algorithms, and smart compression; [published to crates.io](https://crates.io/crates/wifi-densepose-ruvector) +- **Cross-platform RSSI adapters** - macOS CoreWLAN and Linux `iw` Rust adapters with `#[cfg(target_os)]` gating (ADR-025) +- **Docker images published** - `ruvnet/wifi-densepose:latest` (132 MB Rust) and `:python` (569 MB) +- **Project MERIDIAN (ADR-027)** - Cross-environment domain generalization: gradient reversal, geometry-conditioned FiLM, virtual domain augmentation, contrastive test-time training; zero-shot room transfer +- **10-phase DensePose training pipeline (ADR-023/027)** - Graph transformer, 6-term composite loss, SONA adaptation, RVF packaging, hardware normalization, domain-adversarial training +- **Vital sign detection (ADR-021)** - FFT-based breathing (6-30 BPM) and heartbeat (40-120 BPM), 11,665 fps +- **WiFi scan domain layer (ADR-022/025)** - 8-stage signal intelligence pipeline for Windows, macOS, and Linux +- **700+ Rust tests** - All passing, zero mocks -### v2.0.0 — 2026-02-28 +### v2.0.0 - 2026-02-28 Complete Rust sensing server, SOTA signal processing, WiFi-Mat disaster response, ESP32 hardware, RuVector integration, guided installer, and security hardening. 
-- **Rust sensing server** — Axum REST API + WebSocket, 810x speedup over Python, 54K fps pipeline -- **RuVector integration** — 11 vendored crates for HNSW, attention, GNN, temporal compression, min-cut, solver -- **6 SOTA signal algorithms (ADR-014)** — SpotFi, Hampel, Fresnel, spectrogram, subcarrier selection, BVP -- **WiFi-Mat disaster response** — START triage, 3D localization, priority alerts — 139 tests -- **ESP32 CSI hardware** — Binary frame parsing, $54 starter kit, 20 Hz streaming -- **Guided installer** — 7-step hardware detection, 8 install profiles -- **Three.js visualization** — 3D body model, 17 joints, real-time WebSocket -- **Security hardening** — 10 vulnerabilities fixed +- **Rust sensing server** - Axum REST API + WebSocket, 810x speedup over Python, 54K fps pipeline +- **RuVector integration** - 11 vendored crates for HNSW, attention, GNN, temporal compression, min-cut, solver +- **6 SOTA signal algorithms (ADR-014)** - SpotFi, Hampel, Fresnel, spectrogram, subcarrier selection, BVP +- **WiFi-Mat disaster response** - START triage, 3D localization, priority alerts - 139 tests +- **ESP32 CSI hardware** - Binary frame parsing, $54 starter kit, 20 Hz streaming +- **Guided installer** - 7-step hardware detection, 8 install profiles +- **Three.js visualization** - 3D body model, 17 joints, real-time WebSocket +- **Security hardening** - 10 vulnerabilities fixed
@@ -2011,7 +2011,7 @@ Complete Rust sensing server, SOTA signal processing, WiFi-Mat disaster response ## 📄 License -MIT License — see [LICENSE](LICENSE) for details. +MIT License - see [LICENSE](LICENSE) for details. ## 📞 Support @@ -2019,4 +2019,4 @@ MIT License — see [LICENSE](LICENSE) for details. --- -**WiFi DensePose** — Privacy-preserving human pose estimation through WiFi signals. +**WiFi DensePose** - Privacy-preserving human pose estimation through WiFi signals. diff --git a/docs/WITNESS-LOG-028.md b/docs/WITNESS-LOG-028.md index 78ea16f13..f02c12494 100644 --- a/docs/WITNESS-LOG-028.md +++ b/docs/WITNESS-LOG-028.md @@ -1,4 +1,4 @@ -# Witness Verification Log — ADR-028 ESP32 Capability Audit +# Witness Verification Log - ADR-028 ESP32 Capability Audit > **Purpose:** Machine-verifiable attestation of repository capabilities at a specific commit. > Third parties can re-run these checks to confirm or refute each claim independently. @@ -32,7 +32,7 @@ cd wifi-densepose git checkout 96b01008 ``` -### Step 2: Rust Workspace — Full Test Suite +### Step 2: Rust Workspace - Full Test Suite ```bash cd rust-port/wifi-densepose-rs @@ -94,12 +94,12 @@ cargo test -p wifi-densepose-hardware --no-default-features ``` **Expected:** 32 tests pass, including: -- `parse_valid_frame` — validates magic 0xC5110001, field extraction -- `parse_invalid_magic` — rejects non-CSI data -- `parse_insufficient_data` — rejects truncated frames -- `multi_antenna_frame` — handles MIMO configurations -- `amplitude_phase_conversion` — I/Q → (amplitude, phase) math -- `bridge_from_known_iq` — hardware→signal crate bridge +- `parse_valid_frame` - validates magic 0xC5110001, field extraction +- `parse_invalid_magic` - rejects non-CSI data +- `parse_insufficient_data` - rejects truncated frames +- `multi_antenna_frame` - handles MIMO configurations +- `amplitude_phase_conversion` - I/Q → (amplitude, phase) math +- `bridge_from_known_iq` - hardware→signal crate bridge ### Step 7: Verify Signal 
Processing Algorithms @@ -122,13 +122,13 @@ cargo test -p wifi-densepose-train --no-default-features ``` **Expected:** 174+ tests pass, including ADR-027 modules: -- `domain_within_configured_ranges` — virtual domain parameter bounds -- `augment_frame_preserves_length` — output shape correctness -- `augment_frame_identity_domain_approx_input` — identity transform ≈ input -- `deterministic_same_seed_same_output` — reproducibility -- `adapt_empty_buffer_returns_error` — no panic on empty input -- `adapt_zero_rank_returns_error` — no panic on invalid config -- `buffer_cap_evicts_oldest` — bounded memory (max 10,000 frames) +- `domain_within_configured_ranges` - virtual domain parameter bounds +- `augment_frame_preserves_length` - output shape correctness +- `augment_frame_identity_domain_approx_input` - identity transform ≈ input +- `deterministic_same_seed_same_output` - reproducibility +- `adapt_empty_buffer_returns_error` - no panic on empty input +- `adapt_zero_rank_returns_error` - no panic on invalid config +- `buffer_cap_evicts_oldest` - bounded memory (max 10,000 frames) ### Step 9: Verify Python Proof System @@ -201,7 +201,7 @@ Each row is independently verifiable. Status reflects audit-time findings. 
| 20 | Contrastive self-supervised learning (ADR-024) | Yes | **YES** | Projection head, InfoNCE + VICReg in `model.rs` | | 21 | Vital sign detection (breathing + heartbeat) | Yes | **YES** | `vitals` crate (1,863 lines), 6-30 BPM / 40-120 BPM | | 22 | WiFi-MAT disaster response (START triage) | Yes | **YES** | `mat` crate, 153 tests, detection+localization+alerting | -| 23 | Deterministic proof system (SHA-256) | Yes | **YES** | PASS — hash `8c0680d7...` matches (numpy 2.4.2, scipy 1.17.1) | +| 23 | Deterministic proof system (SHA-256) | Yes | **YES** | PASS - hash `8c0680d7...` matches (numpy 2.4.2, scipy 1.17.1) | | 24 | 15 crates published on crates.io @ v0.2.0 | Yes | **YES** | All published 2026-03-01 | | 25 | Docker images on Docker Hub | Yes | **YES** | `ruvnet/wifi-densepose:latest` (132 MB), `:python` (569 MB) | | 26 | WASM browser deployment | Yes | **YES** | `wifi-densepose-wasm` crate, wasm-bindgen, Three.js | @@ -236,7 +236,7 @@ Each row is independently verifiable. Status reflects audit-time findings. ### For Reviewers / Due Diligence 1. Run Steps 2-10 (no hardware needed) to confirm all software claims -2. Check the attestation matrix — rows marked **YES** have passing test evidence +2. Check the attestation matrix - rows marked **YES** have passing test evidence 3. Rows marked **NO** or **NOT MEASURED** are honest gaps, not hidden 4. The proof system (Step 9) demonstrates commitment to verifiability diff --git a/docs/adr/.issue-177-body.md b/docs/adr/.issue-177-body.md index 09a5464d7..32f3cb452 100644 --- a/docs/adr/.issue-177-body.md +++ b/docs/adr/.issue-177-body.md @@ -1,8 +1,8 @@ ## Introduction -RuView is a WiFi-based human pose estimation system built on ESP32 CSI (Channel State Information). 
Today, managing a RuView deployment requires juggling **6+ disconnected CLI tools**: `esptool.py` for flashing, `provision.py` for NVS configuration, `curl` for OTA and WASM management, `cargo run` for the sensing server, a browser for visualization, and manual IP tracking for node discovery. There is no single tool that provides a unified view of the entire deployment — from ESP32 hardware through the sensing pipeline to pose visualization. +RuView is a WiFi-based human pose estimation system built on ESP32 CSI (Channel State Information). Today, managing a RuView deployment requires juggling **6+ disconnected CLI tools**: `esptool.py` for flashing, `provision.py` for NVS configuration, `curl` for OTA and WASM management, `cargo run` for the sensing server, a browser for visualization, and manual IP tracking for node discovery. There is no single tool that provides a unified view of the entire deployment - from ESP32 hardware through the sensing pipeline to pose visualization. -This issue tracks the implementation of **RuView Desktop** — a Tauri v2 cross-platform desktop application that replaces all of these tools with a single, cohesive interface. The application is designed as the **control plane** for the RuView platform, managing the full lifecycle: discover, flash, provision, OTA, load WASM, observe sensing. +This issue tracks the implementation of **RuView Desktop** - a Tauri v2 cross-platform desktop application that replaces all of these tools with a single, cohesive interface. The application is designed as the **control plane** for the RuView platform, managing the full lifecycle: discover, flash, provision, OTA, load WASM, observe sensing. 
 
### Why Tauri (Not Electron/Flutter/Web)

diff --git a/docs/adr/ADR-002-ruvector-rvf-integration-strategy.md b/docs/adr/ADR-002-ruvector-rvf-integration-strategy.md index 7b07fd7b6..3861f495a 100644 --- a/docs/adr/ADR-002-ruvector-rvf-integration-strategy.md +++ b/docs/adr/ADR-002-ruvector-rvf-integration-strategy.md @@ -138,7 +138,7 @@ crates/wifi-densepose-rvf/ ruvector-mincut = "2.0.4" # Dynamic min-cut, O(n^1.5 log n) graph partitioning ruvector-attn-mincut = "2.0.4" # Attention + mincut gating in one pass ruvector-temporal-tensor = "2.0.4" # Tiered temporal compression (50-75% memory reduction) -ruvector-solver = "2.0.4" # NeumannSolver — O(√n) Neumann series convergence +ruvector-solver = "2.0.4" # NeumannSolver - O(√n) Neumann series convergence ruvector-attention = "2.0.4" # ScaledDotProductAttention ```

diff --git a/docs/adr/ADR-012-esp32-csi-sensor-mesh.md b/docs/adr/ADR-012-esp32-csi-sensor-mesh.md index 54f417858..46de9ff3d 100644 --- a/docs/adr/ADR-012-esp32-csi-sensor-mesh.md +++ b/docs/adr/ADR-012-esp32-csi-sensor-mesh.md @@ -1,7 +1,7 @@ # ADR-012: ESP32 CSI Sensor Mesh for Distributed Sensing ## Status -Accepted — Partially Implemented (firmware + aggregator working, see ADR-018) +Accepted - Partially Implemented (firmware + aggregator working, see ADR-018) ## Date 2026-02-28 @@ -273,7 +273,7 @@ python scripts/provision.py --port COM7 \ --ssid "YourWiFi" --password "secret" --target-ip 192.168.1.20 # Run aggregator -cargo run -p wifi-densepose-hardware --bin aggregator -- --bind 0.0.0.0:5005 --verbose +cargo run -p wifi-densepose-hardware --bin aggregator -- --bind 0.0.0.0:5005 --verbose ``` **Option B: Build from source with Docker (no ESP-IDF install needed)** @@ -295,7 +295,7 @@ python -m esptool --chip esp32s3 --port COM7 --baud 460800 \ 0x10000 esp32-csi-node.bin # Step 4: Run aggregator -cargo run -p wifi-densepose-hardware --bin aggregator -- --bind 0.0.0.0:5005 --verbose +cargo run -p wifi-densepose-hardware --bin aggregator -- --bind 
0.0.0.0:5005 --verbose ``` **Verified**: 20 Hz CSI streaming, 64/128/192 subcarrier frames, RSSI -47 to -88 dBm. diff --git a/docs/adr/ADR-013-feature-level-sensing-commodity-gear.md b/docs/adr/ADR-013-feature-level-sensing-commodity-gear.md index 40a6ae28b..e3e2dba70 100644 --- a/docs/adr/ADR-013-feature-level-sensing-commodity-gear.md +++ b/docs/adr/ADR-013-feature-level-sensing-commodity-gear.md @@ -1,7 +1,7 @@ # ADR-013: Feature-Level Sensing on Commodity Gear (Option 3) ## Status -Accepted — Implemented (36/36 unit tests pass, see `v1/src/sensing/` and `v1/tests/unit/test_sensing.py`) +Accepted - Implemented (36/36 unit tests pass, see `v1/src/sensing/` and `v1/tests/unit/test_sensing.py`) ## Date 2026-02-28 @@ -384,7 +384,7 @@ The full commodity sensing pipeline is implemented in `v1/src/sensing/`: | Classifier | `classifier.py` | `PresenceClassifier` with ABSENT/PRESENT_STILL/ACTIVE levels, confidence scoring | | Backend | `backend.py` | `CommodityBackend` wiring collector → extractor → classifier, reports PRESENCE + MOTION capabilities | -**Test coverage**: 36 tests in `v1/tests/unit/test_sensing.py` — all passing: +**Test coverage**: 36 tests in `v1/tests/unit/test_sensing.py` - all passing: - `TestRingBuffer` (4), `TestSimulatedCollector` (5), `TestFeatureExtractor` (8), `TestCusum` (4), `TestPresenceClassifier` (7), `TestCommodityBackend` (6), `TestBandPower` (2) **Dependencies**: `numpy`, `scipy` (for FFT and spectral analysis) diff --git a/docs/adr/ADR-014-sota-signal-processing.md b/docs/adr/ADR-014-sota-signal-processing.md index 3319a1aaf..cb4d725cb 100644 --- a/docs/adr/ADR-014-sota-signal-processing.md +++ b/docs/adr/ADR-014-sota-signal-processing.md @@ -31,7 +31,7 @@ each with deterministic tests and no mock data. **What:** Multiply CSI from antenna pair (i,j) as `H_i * conj(H_j)` to cancel carrier frequency offset (CFO), sampling frequency offset (SFO), and packet -detection delay — all of which corrupt raw phase measurements. 
+detection delay - all of which corrupt raw phase measurements. **Why:** Raw CSI phase from commodity hardware (ESP32, Intel 5300) includes random offsets that change per packet. Conjugate multiplication preserves only @@ -111,7 +111,7 @@ Select top-K subcarriers by sensitivity score. subcarriers. BVP is a 2D representation (velocity × time) that encodes how different body parts move at different speeds. -**Why:** BVP is domain-independent — the same velocity profile appears regardless +**Why:** BVP is domain-independent - the same velocity profile appears regardless of room layout, furniture, or AP placement. This makes it the basis for cross-environment gesture and activity recognition. @@ -124,12 +124,12 @@ subcarriers: `BVP[v,t] = Σ_k |STFT_k[v,t]|` where v maps to velocity via ## Implementation All algorithms implemented in `wifi-densepose-signal/src/` as new modules: -- `csi_ratio.rs` — Conjugate multiplication -- `hampel.rs` — Hampel filter -- `fresnel.rs` — Fresnel zone breathing model -- `spectrogram.rs` — CSI spectrogram generation -- `subcarrier_selection.rs` — Sensitivity-based selection -- `bvp.rs` — Body Velocity Profile extraction +- `csi_ratio.rs` - Conjugate multiplication +- `hampel.rs` - Hampel filter +- `fresnel.rs` - Fresnel zone breathing model +- `spectrogram.rs` - CSI spectrogram generation +- `subcarrier_selection.rs` - Sensitivity-based selection +- `bvp.rs` - Body Velocity Profile extraction Each module has: - Deterministic unit tests with known input/output diff --git a/docs/adr/ADR-015-public-dataset-training-strategy.md b/docs/adr/ADR-015-public-dataset-training-strategy.md index 474282720..554c19f71 100644 --- a/docs/adr/ADR-015-public-dataset-training-strategy.md +++ b/docs/adr/ADR-015-public-dataset-training-strategy.md @@ -171,10 +171,10 @@ interpolation as a first-class operation with tests proving correctness. 
## References -- Yang et al., "MM-Fi: Multi-Modal Non-Intrusive 4D Human Dataset" (NeurIPS 2023) — arXiv:2305.10345 +- Yang et al., "MM-Fi: Multi-Modal Non-Intrusive 4D Human Dataset" (NeurIPS 2023) - arXiv:2305.10345 - Geng et al., "DensePose From WiFi" (CMU, arXiv:2301.00250, 2023) - Yan et al., "Person-in-WiFi 3D" (CVPR 2024) -- NjtechCVLab, "Wi-Pose Dataset" — github.com/NjtechCVLab/Wi-PoseDataset +- NjtechCVLab, "Wi-Pose Dataset" - github.com/NjtechCVLab/Wi-PoseDataset - ADR-012: ESP32 CSI Sensor Mesh (hardware target) - ADR-013: Feature-Level Sensing on Commodity Gear - ADR-014: SOTA Signal Processing Algorithms diff --git a/docs/adr/ADR-016-ruvector-integration.md b/docs/adr/ADR-016-ruvector-integration.md index f50033476..cc8551d26 100644 --- a/docs/adr/ADR-016-ruvector-integration.md +++ b/docs/adr/ADR-016-ruvector-integration.md @@ -53,10 +53,10 @@ mincut.cut_edges() -> Vec // edges crossing the cut ``` `MinCutResult` contains: -- `value: f64` — minimum cut weight +- `value: f64` - minimum cut weight - `is_exact: bool` - `approximation_ratio: f64` -- `partition: Option<(Vec, Vec)>` — S and T node sets +- `partition: Option<(Vec, Vec)>` - S and T node sets #### ruvector-attn-mincut @@ -285,7 +285,7 @@ important for multi-person scenarios. ### Files unchanged -`config.rs`, `losses.rs`, `trainer.rs`, `proof.rs`, `error.rs` — no change needed. +`config.rs`, `losses.rs`, `trainer.rs`, `proof.rs`, `error.rs` - no change needed. ### Feature gating diff --git a/docs/adr/ADR-017-ruvector-signal-mat-integration.md b/docs/adr/ADR-017-ruvector-signal-mat-integration.md index 810c02f88..f181a3223 100644 --- a/docs/adr/ADR-017-ruvector-signal-mat-integration.md +++ b/docs/adr/ADR-017-ruvector-signal-mat-integration.md @@ -15,13 +15,13 @@ ADR-016 integrated all five published ruvector v2.0.4 crates into the Two production crates that pre-date ADR-016 remain without ruvector integration despite having concrete, high-value integration points: -1. 
**`wifi-densepose-signal`** — SOTA signal processing algorithms (ADR-014): +1. **`wifi-densepose-signal`** - SOTA signal processing algorithms (ADR-014): conjugate multiplication, Hampel filter, Fresnel zone breathing model, CSI spectrogram, subcarrier sensitivity selection, Body Velocity Profile (BVP). These algorithms perform independent element-wise operations or brute-force exhaustive search without subpolynomial optimization. -2. **`wifi-densepose-mat`** — Disaster detection (ADR-001): multi-AP +2. **`wifi-densepose-mat`** - Disaster detection (ADR-001): multi-AP triangulation, breathing/heartbeat waveform detection, triage classification. Time-series data is uncompressed and localization uses closed-form geometry without iterative system solving. @@ -80,7 +80,7 @@ and edges encode variance-ratio similarity (|sensitivity_i − sensitivity_j|^ `DynamicMinCut` finds the minimum bisection separating high-sensitivity (motion-responsive) from low-sensitivity (noise-dominated) subcarriers. As new static/motion measurements arrive, `insert_edge`/`delete_edge` incrementally -update the partition in O(n^1.5 log n) amortized — no full re-sort needed. +update the partition in O(n^1.5 log n) amortized - no full re-sort needed. ```rust use ruvector_mincut::{DynamicMinCut, MinCutBuilder}; @@ -136,7 +136,7 @@ matrix [freq_bins × time_frames]. All bins weighted equally for downstream CNN. **ruvector integration:** After STFT, treat each time frame as a sequence token (d = n_freq_bins, seq_len = n_time_frames). Apply `attn_mincut` to gate which -time-frequency cells contribute to the spectrogram output — suppressing noise +time-frequency cells contribute to the spectrogram output - suppressing noise frames and multipath artifacts while amplifying body-motion periods. ```rust @@ -230,7 +230,7 @@ automatically, without requiring manual selection or a separate sensitivity step **Current approach:** Closed-form Fresnel zone radius formula assuming known TX-RX-body geometry. 
In practice, exact distances d1 (TX→body) and d2 -(body→RX) are unknown — only the TX-RX straight-line distance D is known from +(body→RX) are unknown - only the TX-RX straight-line distance D is known from AP placement. **ruvector integration:** When multiple subcarriers observe different Fresnel @@ -291,7 +291,7 @@ pub fn solve_fresnel_geometry( **Advantage:** Converts the Fresnel model from a single fixed-geometry formula into a data-driven geometry estimator. With 3+ observations (subcarriers at -different frequencies), NeumannSolver converges in O(√n) iterations — critical +different frequencies), NeumannSolver converges in O(√n) iterations - critical for real-time breathing detection at 100 Hz. --- @@ -365,7 +365,7 @@ pub fn solve_triangulation( **Advantage:** For a disaster site with 5–20 APs, the TDoA system has N×(N-1)/2 = 10–190 measurements but only 2 unknowns (x, y). The normal equations are 2×2 regardless of N. NeumannSolver converges in O(1) iterations for well-conditioned -2×2 systems — eliminating Gaussian elimination overhead. +2×2 systems - eliminating Gaussian elimination overhead. --- @@ -445,7 +445,7 @@ handle 2–4× more concurrent zones. **Current approach:** Heartbeat detection uses micro-Doppler spectrograms: sliding STFT of CSI amplitude time-series. Each zone stores a spectrogram of shape [n_freq_bins=128, n_time=600] (60 seconds at 10 Hz output rate): -128 × 600 × 4 bytes = **307 KB per zone**. With 16 zones: 4.9 MB — acceptable, +128 × 600 × 4 bytes = **307 KB per zone**. With 16 zones: 4.9 MB - acceptable, but heartbeat spectrograms are the most access-intensive (queried at every triage update). 
@@ -533,7 +533,7 @@ ruvector-attention = { workspace = true } ADR-002's dependency strategy section specifies non-existent crates: ```toml -# WRONG (ADR-002 original — these crates do not exist at crates.io) +# WRONG (ADR-002 original - these crates do not exist at crates.io) ruvector-core = { version = "0.1", features = ["hnsw", "sona", "gnn"] } ruvector-data-framework = { version = "0.1", features = ["rvf", "witness", "crypto"] } ruvector-consensus = { version = "0.1", features = ["raft"] } diff --git a/docs/adr/ADR-018-esp32-dev-implementation.md b/docs/adr/ADR-018-esp32-dev-implementation.md index 6cb70f3db..2d18d836a 100644 --- a/docs/adr/ADR-018-esp32-dev-implementation.md +++ b/docs/adr/ADR-018-esp32-dev-implementation.md @@ -10,7 +10,7 @@ Proposed ADR-012 established the ESP32 CSI Sensor Mesh architecture: hardware rationale, firmware file structure, `csi_feature_frame_t` C struct, aggregator design, clock-drift handling via feature-level fusion, and a $54 starter BOM. That ADR answers *what* to build and *why*. -This ADR answers *how* to build it — the concrete development sequence, the specific integration points in existing code, and how to test each layer before hardware is in hand. +This ADR answers *how* to build it - the concrete development sequence, the specific integration points in existing code, and how to test each layer before hardware is in hand. 
### Current State @@ -18,12 +18,12 @@ This ADR answers *how* to build it — the concrete development sequence, the sp | Component | Location | Status | |-----------|----------|--------| -| Binary frame parser | `wifi-densepose-hardware/src/esp32_parser.rs` | Complete — `Esp32CsiParser::parse_frame()`, `parse_stream()`, 7 passing tests | -| Frame types | `wifi-densepose-hardware/src/csi_frame.rs` | Complete — `CsiFrame`, `CsiMetadata`, `SubcarrierData`, `to_amplitude_phase()` | -| Parse error types | `wifi-densepose-hardware/src/error.rs` | Complete — `ParseError` enum with 6 variants | -| Signal processing pipeline | `wifi-densepose-signal` crate | Complete — Hampel, Fresnel, BVP, Doppler, spectrogram | -| CSI extractor (Python) | `v1/src/hardware/csi_extractor.py` | Stub — `_read_raw_data()` raises `NotImplementedError` | -| Router interface (Python) | `v1/src/hardware/router_interface.py` | Stub — `_parse_csi_response()` raises `RouterConnectionError` | +| Binary frame parser | `wifi-densepose-hardware/src/esp32_parser.rs` | Complete - `Esp32CsiParser::parse_frame()`, `parse_stream()`, 7 passing tests | +| Frame types | `wifi-densepose-hardware/src/csi_frame.rs` | Complete - `CsiFrame`, `CsiMetadata`, `SubcarrierData`, `to_amplitude_phase()` | +| Parse error types | `wifi-densepose-hardware/src/error.rs` | Complete - `ParseError` enum with 6 variants | +| Signal processing pipeline | `wifi-densepose-signal` crate | Complete - Hampel, Fresnel, BVP, Doppler, spectrogram | +| CSI extractor (Python) | `v1/src/hardware/csi_extractor.py` | Stub - `_read_raw_data()` raises `NotImplementedError` | +| Router interface (Python) | `v1/src/hardware/router_interface.py` | Stub - `_parse_csi_response()` raises `RouterConnectionError` | **Not yet implemented:** @@ -59,7 +59,7 @@ The firmware must write frames in this exact format. 
The parser already validate We will implement the ESP32 development stack in four sequential layers, each independently testable before hardware is available. -### Layer 1 — ESP-IDF Firmware (`firmware/esp32-csi-node/`) +### Layer 1 - ESP-IDF Firmware (`firmware/esp32-csi-node/`) Implement the C firmware project per the file structure in ADR-012. Key design decisions deferred from ADR-012: @@ -113,7 +113,7 @@ CONFIG_FREERTOS_HZ=1000 **Build toolchain**: ESP-IDF v5.2+ (pinned). Docker image: `espressif/idf:v5.2` for reproducible CI. -### Layer 2 — UDP Aggregator (`crates/wifi-densepose-hardware/src/aggregator/`) +### Layer 2 - UDP Aggregator (`crates/wifi-densepose-hardware/src/aggregator/`) New module within the hardware crate. Entry point: `aggregator_main()` callable as a binary target. @@ -153,7 +153,7 @@ impl Esp32Aggregator { let _ = self.tx.try_send(frame); // drop if pipeline is full } Err(e) => { - // Log and continue — never crash on bad UDP packet + // Log and continue - never crash on bad UDP packet eprintln!("aggregator: parse error: {e}"); } } @@ -164,7 +164,7 @@ impl Esp32Aggregator { **Testable without hardware**: The test suite generates frames using `build_test_frame()` (same helper pattern as `esp32_parser.rs` tests) and sends them over a loopback UDP socket. The aggregator receives and forwards them identically to real hardware frames. -### Layer 3 — CsiFrame → CsiData Bridge +### Layer 3 - CsiFrame → CsiData Bridge Bridge from `wifi-densepose-hardware::CsiFrame` to the signal processing type `wifi_densepose_signal::CsiData` (or a compatible intermediate type consumed by the Rust pipeline). @@ -209,7 +209,7 @@ impl From for CsiData { The bridge test: parse a known binary frame, convert to `CsiData`, assert `amplitude[0]` = √(I₀² + Q₀²) to within f64 precision. 
-### Layer 4 — Python `_read_raw_data()` Real Implementation +### Layer 4 - Python `_read_raw_data()` Real Implementation Replace the `NotImplementedError` stub in `v1/src/hardware/csi_extractor.py` with a UDP socket reader. This allows the Python pipeline to receive real CSI from the aggregator while the Rust pipeline is being integrated. @@ -240,7 +240,7 @@ class CSIExtractor: return data except _socket.timeout: raise CSIExtractionError( - "No CSI data received within timeout — " + "No CSI data received within timeout - " "is the ESP32 aggregator running?" ) ``` @@ -250,7 +250,7 @@ This is tested with a mock UDP server in the unit tests (existing `test_csi_extr ## Development Sequence ``` -Phase 1 (Firmware + Aggregator — no pipeline integration needed): +Phase 1 (Firmware + Aggregator - no pipeline integration needed): 1. Write firmware/esp32-csi-node/ C project (ESP-IDF v5.2) 2. Flash to one ESP32-S3-DevKitC board 3. Verify binary frames arrive on laptop UDP socket using Wireshark @@ -312,8 +312,8 @@ The existing `esp32_parser.rs` test suite already validates parsing of correctly - [Espressif ESP-CSI Repository](https://github.com/espressif/esp-csi) - [ESP-IDF WiFi CSI API Reference](https://docs.espressif.com/projects/esp-idf/en/stable/esp32/api-guides/wifi.html#wi-fi-channel-state-information) -- `wifi-densepose-hardware/src/esp32_parser.rs` — binary frame parser implementation -- `wifi-densepose-hardware/src/csi_frame.rs` — `CsiFrame`, `to_amplitude_phase()` +- `wifi-densepose-hardware/src/esp32_parser.rs` - binary frame parser implementation +- `wifi-densepose-hardware/src/csi_frame.rs` - `CsiFrame`, `to_amplitude_phase()` - ADR-012: ESP32 CSI Sensor Mesh (architecture) - ADR-011: Python Proof-of-Reality and Mock Elimination - ADR-014: SOTA Signal Processing diff --git a/docs/adr/ADR-019-sensing-only-ui-mode.md b/docs/adr/ADR-019-sensing-only-ui-mode.md index 3a102ab02..abe1f852a 100644 --- a/docs/adr/ADR-019-sensing-only-ui-mode.md +++ 
b/docs/adr/ADR-019-sensing-only-ui-mode.md @@ -11,7 +11,7 @@ The WiFi-DensePose UI was originally built to require the full FastAPI DensePose backend (`localhost:8000`) for all functionality. This backend depends on heavy Python packages (PyTorch ~2GB, torchvision, OpenCV, SQLAlchemy, Redis) making it impractical for lightweight sensing-only deployments where the user simply wants to visualize live WiFi signal data from ESP32 CSI or Windows RSSI collectors. -A Rust port exists (`rust-port/wifi-densepose-rs`) using Axum with lighter runtime footprint (~10MB binary, ~5MB RAM), but it still requires libtorch C++ bindings and OpenBLAS for compilation—a non-trivial build. +A Rust port exists (`rust-port/wifi-densepose-rs`) using Axum with lighter runtime footprint (~10MB binary, ~5MB RAM), but it still requires libtorch C++ bindings and OpenBLAS for compilation - a non-trivial build. Users need a way to run the UI with **only the sensing pipeline** active, without installing the full DensePose backend stack. 
@@ -99,10 +99,10 @@ Windows WiFi RSSI ───┘ │ │ ## Consequences ### Positive -- UI works with zero heavy dependencies—only `pip install websockets` (+ numpy/scipy already installed) +- UI works with zero heavy dependencies - only `pip install websockets` (+ numpy/scipy already installed) - ESP32 CSI data flows end-to-end without PyTorch, OpenCV, or database - Existing DensePose tabs still work when the full backend is running -- Clean console output—no `ERR_CONNECTION_REFUSED` spam in sensing-only mode +- Clean console output - no `ERR_CONNECTION_REFUSED` spam in sensing-only mode ### Negative - Two separate WebSocket endpoints: `:8765` (sensing) and `:8000/api/v1/stream/pose` (DensePose) diff --git a/docs/adr/ADR-020-rust-ruvector-ai-model-migration.md b/docs/adr/ADR-020-rust-ruvector-ai-model-migration.md index e954b1839..0a613aff0 100644 --- a/docs/adr/ADR-020-rust-ruvector-ai-model-migration.md +++ b/docs/adr/ADR-020-rust-ruvector-ai-model-migration.md @@ -76,14 +76,14 @@ ESP32 (UDP :5005) ──▶ Rust Axum server (:8000) ──▶ UI (browser) ├── /health/* (health checks) ├── /api/v1/pose/* (pose estimation) ├── /api/v1/stream/* (WebSocket pose stream) - ├── /ws/sensing (sensing WebSocket — replaces :8765) + ├── /ws/sensing (sensing WebSocket - replaces :8765) └── /ws/mat/stream (MAT domain events) ``` ### Build Configuration ```toml -# Lightweight build — no libtorch, no OpenBLAS +# Lightweight build - no libtorch, no OpenBLAS cargo build --release -p wifi-densepose-mat --no-default-features --features "std,api,onnx" # Full build with all backends diff --git a/docs/adr/ADR-021-vital-sign-detection-rvdna-pipeline.md b/docs/adr/ADR-021-vital-sign-detection-rvdna-pipeline.md index 378479580..9ec05a421 100644 --- a/docs/adr/ADR-021-vital-sign-detection-rvdna-pipeline.md +++ b/docs/adr/ADR-021-vital-sign-detection-rvdna-pipeline.md @@ -11,7 +11,7 @@ ### The Need for Vital Sign Detection -WiFi-based vital sign monitoring is a rapidly maturing field. 
Channel State Information (CSI) captures fine-grained multipath propagation changes caused by physiological movements -- chest displacement from respiration (1-5 mm amplitude, 0.1-0.5 Hz) and body surface displacement from cardiac activity (0.1-0.5 mm, 0.8-2.0 Hz). Our existing WiFi-DensePose project already implements motion detection, presence sensing, and body velocity profiling (BVP), but lacks a dedicated vital sign extraction pipeline. +WiFi-based vital sign monitoring is a rapidly maturing field. Channel State Information (CSI) captures fine-grained multipath propagation changes caused by physiological movements - chest displacement from respiration (1-5 mm amplitude, 0.1-0.5 Hz) and body surface displacement from cardiac activity (0.1-0.5 mm, 0.8-2.0 Hz). Our existing WiFi-DensePose project already implements motion detection, presence sensing, and body velocity profiling (BVP), but lacks a dedicated vital sign extraction pipeline. Vital sign detection extends the project's value from occupancy sensing into health monitoring, enabling contactless respiratory rate and heart rate estimation for applications in eldercare, sleep monitoring, disaster survivor detection (ADR-001), and clinical triage. 
@@ -21,7 +21,7 @@ The `vendor/ruvector` codebase provides a rich set of signal processing primitiv | Crate | Key Primitives | Vital Sign Relevance | |-------|---------------|---------------------| -| `ruvector-temporal-tensor` | `TemporalTensorCompressor`, `TieredStore`, `TierPolicy`, tiered quantization (8/7/5/3-bit) | Stores compressed CSI temporal streams with adaptive precision -- hot (real-time vital signs) at 8-bit, warm (historical) at 5-bit, cold (archive) at 3-bit | +| `ruvector-temporal-tensor` | `TemporalTensorCompressor`, `TieredStore`, `TierPolicy`, tiered quantization (8/7/5/3-bit) | Stores compressed CSI temporal streams with adaptive precision - hot (real-time vital signs) at 8-bit, warm (historical) at 5-bit, cold (archive) at 3-bit | | `ruvector-nervous-system` | `PredictiveLayer`, `OscillatoryRouter`, `GlobalWorkspace`, `DVSEvent`, `EventRingBuffer`, `ShardedEventBus`, `EpropSynapse`, `Dendrite`, `ModernHopfield` | Predictive coding suppresses static CSI components (90-99% bandwidth reduction), oscillatory routing isolates respiratory vs cardiac frequency bands, event bus handles high-throughput CSI streams | | `ruvector-attention` | `ScaledDotProductAttention`, Mixture of Experts (MoE), PDE attention, sparse attention | Attention-weighted subcarrier selection for vital sign sensitivity, already used in BVP extraction | | `ruvector-coherence` | `SpectralCoherenceScore`, `HnswHealthMonitor`, spectral gap estimation, Fiedler value | Spectral analysis of CSI time series, coherence between subcarrier pairs for breathing/heartbeat isolation | @@ -29,8 +29,8 @@ The `vendor/ruvector` codebase provides a rich set of signal processing primitiv | `ruvector-core` | `VectorDB`, HNSW index, SIMD distance, quantization | Fingerprint-based pattern matching of vital sign waveform templates | | `sona` | `SonaEngine`, `TrajectoryBuilder`, micro-LoRA, EWC++ | Self-optimizing adaptation of vital sign extraction parameters per environment | | 
`ruvector-sparse-inference` | Sparse model execution, precision management | Efficient inference on edge devices with constrained compute | -| `ruQu` | `FilterPipeline` (Structural/Shift/Evidence), `AdaptiveThresholds` (Welford, EMA, CUSUM-style), `DriftDetector` (step-change, variance expansion, oscillation), `QuantumFabric` (256-tile parallel processing) | **Three-filter decision pipeline** for vital sign gating -- structural filter detects signal partition/degradation, shift filter catches distribution drift in vital sign baselines, evidence filter provides anytime-valid statistical rigor. `DriftDetector` directly detects respiratory/cardiac parameter drift. `AdaptiveThresholds` self-tunes anomaly thresholds with outcome feedback (precision/recall/F1). 256-tile fabric maps to parallel subcarrier processing. | -| DNA example (`examples/dna`) | `BiomarkerProfile`, `StreamProcessor`, `RingBuffer`, `BiomarkerReading`, z-score anomaly detection, CUSUM changepoint detection, EMA, trend analysis | Direct analog -- the biomarker streaming engine processes time-series health data with anomaly detection, which maps exactly to vital sign monitoring | +| `ruQu` | `FilterPipeline` (Structural/Shift/Evidence), `AdaptiveThresholds` (Welford, EMA, CUSUM-style), `DriftDetector` (step-change, variance expansion, oscillation), `QuantumFabric` (256-tile parallel processing) | **Three-filter decision pipeline** for vital sign gating - structural filter detects signal partition/degradation, shift filter catches distribution drift in vital sign baselines, evidence filter provides anytime-valid statistical rigor. `DriftDetector` directly detects respiratory/cardiac parameter drift. `AdaptiveThresholds` self-tunes anomaly thresholds with outcome feedback (precision/recall/F1). 256-tile fabric maps to parallel subcarrier processing. 
| +| DNA example (`examples/dna`) | `BiomarkerProfile`, `StreamProcessor`, `RingBuffer`, `BiomarkerReading`, z-score anomaly detection, CUSUM changepoint detection, EMA, trend analysis | Direct analog - the biomarker streaming engine processes time-series health data with anomaly detection, which maps exactly to vital sign monitoring | ### Current Project State @@ -144,7 +144,7 @@ The vital sign module adds a **PredictiveLayer** gate from `ruvector-nervous-sys use ruvector_nervous_system::routing::PredictiveLayer; pub struct CsiVitalPreprocessor { - /// Predictive coding layer -- suppresses static CSI components. + /// Predictive coding layer - suppresses static CSI components. /// Only transmits residuals (changes) exceeding threshold. /// Achieves 90-99% bandwidth reduction on stable environments. predictive: PredictiveLayer, @@ -699,7 +699,7 @@ Router::new() The existing SensingTab.js Gaussian splat visualization (ADR-019) is extended with: -- **Breathing ring**: Already prototyped in `generate_signal_field()` as the `breath_ring` variable -- amplitude modulated by `variance` and `tick`. This is replaced with the actual breathing waveform from the vital sign extractor. +- **Breathing ring**: Already prototyped in `generate_signal_field()` as the `breath_ring` variable - amplitude modulated by `variance` and `tick`. This is replaced with the actual breathing waveform from the vital sign extractor. - **Heart rate indicator**: Pulsing opacity overlay synced to estimated heart rate. - **Vital sign panel**: Side panel showing HR/RR values, trend sparklines, and anomaly alerts. 
@@ -992,10 +992,10 @@ The current Windows WiFi mode (`--source wifi`) uses `netsh wlan show interfaces | Capability | Mechanism | Quality | |---|---|---| -| **Presence detection** | RSSI variance over time via `DriftDetector` | Good -- ruQu detects StepChange when a person enters/leaves | -| **Coarse breathing estimate** | RSSI temporal modulation at 0.1-0.5 Hz | Fair -- single-signal source, needs 30+ seconds of stationary RSSI | -| **Environmental drift** | `AdaptiveThresholds` + `DriftDetector` on RSSI series | Good -- detects linear trends, step changes, oscillating interference | -| **Signal quality gating** | ruQu `FilterPipeline` gates unreliable readings | Good -- suppresses false readings during WiFi fluctuations | +| **Presence detection** | RSSI variance over time via `DriftDetector` | Good - ruQu detects StepChange when a person enters/leaves | +| **Coarse breathing estimate** | RSSI temporal modulation at 0.1-0.5 Hz | Fair - single-signal source, needs 30+ seconds of stationary RSSI | +| **Environmental drift** | `AdaptiveThresholds` + `DriftDetector` on RSSI series | Good - detects linear trends, step changes, oscillating interference | +| **Signal quality gating** | ruQu `FilterPipeline` gates unreliable readings | Good - suppresses false readings during WiFi fluctuations | ### What Does NOT Work in Windows WiFi Mode diff --git a/docs/adr/ADR-022-windows-wifi-enhanced-fidelity-ruvector.md b/docs/adr/ADR-022-windows-wifi-enhanced-fidelity-ruvector.md index 3196db96a..2fbd4e5d2 100644 --- a/docs/adr/ADR-022-windows-wifi-enhanced-fidelity-ruvector.md +++ b/docs/adr/ADR-022-windows-wifi-enhanced-fidelity-ruvector.md @@ -20,7 +20,7 @@ The current Windows WiFi mode in `wifi-densepose-sensing-server` (`:main.rs:382- - **~2 Hz effective sampling rate** (process spawn overhead) - **No spatial diversity** (single observation point) -This is insufficient for any meaningful DensePose estimation. 
The ESP32 path provides 56 subcarriers with I/Q data at 100+ Hz, while the Windows path provides 1 scalar at 2 Hz -- a **2,800x data deficit**. +This is insufficient for any meaningful DensePose estimation. The ESP32 path provides 56 subcarriers with I/Q data at 100+ Hz, while the Windows path provides 1 scalar at 2 Hz - a **2,800x data deficit**. ### 1.2 The Opportunity: Multi-BSSID Spatial Diversity @@ -379,7 +379,7 @@ pub struct WindowsWifiConfig { pub enable_fingerprint: bool, /// Enable SONA adaptation (default: true) pub enable_adaptation: bool, - /// Breathing band (Hz) — relaxed for low sample rate + /// Breathing band (Hz) - relaxed for low sample rate pub breathing_band: (f64, f64), /// Motion variance threshold for presence detection pub motion_threshold: f64, @@ -1310,7 +1310,7 @@ let posture = self.fingerprint.classify_embedding(&bssid_embeddings); ## Implementation Status (2026-02-28) -### Phase 1: Domain Model -- COMPLETE +### Phase 1: Domain Model - COMPLETE - `wifi-densepose-wifiscan` crate created with DDD bounded contexts - `MultiApFrame` value object with amplitudes, phases, variances, histories - `BssidRegistry` aggregate root with Welford running statistics (capacity 32, 30s expiry) @@ -1318,7 +1318,7 @@ let posture = self.fingerprint.classify_embedding(&bssid_embeddings); - `EnhancedSensingResult` output type with motion, breathing, posture, quality - Hexagonal architecture: `WlanScanPort` trait for adapter abstraction -### Phase 2: Signal Intelligence Pipeline -- COMPLETE +### Phase 2: Signal Intelligence Pipeline - COMPLETE 8-stage pure-Rust pipeline with 125 passing tests: | Stage | Module | Implementation | @@ -1334,12 +1334,12 @@ let posture = self.fingerprint.classify_embedding(&bssid_embeddings); Performance: ~2.1M frames/sec (debug), ~12M frames/sec (release). 
-### Phase 3: Server Integration -- IN PROGRESS +### Phase 3: Server Integration - IN PROGRESS - Wiring `WindowsWifiPipeline` into `wifi-densepose-sensing-server` - Tier 2 `WlanApiScanner` async adapter stub (upgrade path to native WLAN API) - Extended `SensingUpdate` with enhanced motion, breathing, posture, quality fields -### Phase 4: Tier 2 Native WLAN API -- PLANNED +### Phase 4: Tier 2 Native WLAN API - PLANNED - Native `wlanapi.dll` FFI for 10-20 Hz scan rates - SONA adaptation layer for per-environment tuning - Multi-environment benchmarking diff --git a/docs/adr/ADR-023-trained-densepose-model-ruvector-pipeline.md b/docs/adr/ADR-023-trained-densepose-model-ruvector-pipeline.md index b648df1e4..e07dcf64d 100644 --- a/docs/adr/ADR-023-trained-densepose-model-ruvector-pipeline.md +++ b/docs/adr/ADR-023-trained-densepose-model-ruvector-pipeline.md @@ -15,7 +15,7 @@ The WiFi-DensePose system currently operates in two distinct modes: 1. **WiFi CSI sensing** (working): ESP32 streams CSI frames → Rust aggregator → feature extraction → presence/motion classification. 41 tests passing, verified at ~20 Hz with real hardware. -2. **Heuristic pose derivation** (working but approximate): The Rust sensing server generates 17 COCO keypoints from WiFi signal properties using hand-crafted rules (`derive_pose_from_sensing()` in `sensing-server/src/main.rs`). This is not a trained model — keypoint positions are derived from signal amplitude, phase variance, and motion metrics rather than learned from labeled data. +2. **Heuristic pose derivation** (working but approximate): The Rust sensing server generates 17 COCO keypoints from WiFi signal properties using hand-crafted rules (`derive_pose_from_sensing()` in `sensing-server/src/main.rs`). This is not a trained model - keypoint positions are derived from signal amplitude, phase variance, and motion metrics rather than learned from labeled data. Neither mode produces **DensePose-quality** body surface estimation. 
The CMU "DensePose From WiFi" paper (arXiv:2301.00250) demonstrated that a neural network trained on paired WiFi CSI + camera pose data can produce dense body surface UV coordinates from WiFi alone. However, that approach requires: @@ -67,8 +67,8 @@ The `vendor/ruvector/` subtree provides 90+ crates. The following are directly r ### RVF Container Format The RuVector Format (RVF) is a segment-based binary container format designed to package -intelligence artifacts — embeddings, HNSW indexes, quantized weights, WASM runtimes, witness -proofs, and metadata — into a single self-contained file. Key properties: +intelligence artifacts - embeddings, HNSW indexes, quantized weights, WASM runtimes, witness +proofs, and metadata - into a single self-contained file. Key properties: - **64-byte segment headers** (`SegmentHeader`, magic `0x52564653` "RVFS") with type discriminator, content hash, compression, and timestamp - **Progressive loading**: Layer A (entry points, <5ms) → Layer B (hot adjacency, 100ms–1s) → Layer C (full graph, seconds) @@ -79,7 +79,7 @@ proofs, and metadata — into a single self-contained file. Key properties: The trained DensePose model will be packaged as an `.rvf` container, making it a single self-contained artifact that includes model weights, HNSW-indexed embedding tables, min-cut graph overlays, quantization codebooks, SONA adaptation deltas, and the WASM inference -runtime — deployable to any host without external dependencies. +runtime - deployable to any host without external dependencies. ## Decision @@ -192,7 +192,7 @@ CSI features [B, T*tx*rx, sub] Visual features [B, 3, 48, 48] ``` -**RuVector enhancement**: Replace standard multi-head self-attention in the bottleneck with `ruvector-graph-transformer`. The graph structure encodes the physical antenna topology — nodes that are closer in space (adjacent ESP32 nodes in the mesh) or time (consecutive frames) have stronger edge weights. 
This injects domain-specific inductive bias that standard attention lacks. +**RuVector enhancement**: Replace standard multi-head self-attention in the bottleneck with `ruvector-graph-transformer`. The graph structure encodes the physical antenna topology - nodes that are closer in space (adjacent ESP32 nodes in the mesh) or time (consecutive frames) have stronger edge weights. This injects domain-specific inductive bias that standard attention lacks. #### 2b. GNN Body Graph Reasoning @@ -227,7 +227,7 @@ KeypointHead DensePoseHead ConfidenceHead heatmaps parts + UV quality score ``` -**RuVector enhancement**: `ruvector-gnn` replaces the flat spatial decoder with a graph neural network that operates on the human body graph. WiFi CSI is inherently noisy — GNN message passing between anatomically connected joints enforces that predicted keypoints maintain plausible body structure even when individual joint predictions are uncertain. +**RuVector enhancement**: `ruvector-gnn` replaces the flat spatial decoder with a graph neural network that operates on the human body graph. WiFi CSI is inherently noisy - GNN message passing between anatomically connected joints enforces that predicted keypoints maintain plausible body structure even when individual joint predictions are uncertain. #### 2c. Sparse Inference for Edge Deployment @@ -259,9 +259,9 @@ Trained model weights (full precision) #### 3a. Dataset Loading and Preprocessing -Primary dataset: **MM-Fi** (NeurIPS 2023) — 40 subjects, 27 actions, 114 subcarriers, 3 RX antennas, 17 COCO keypoints + DensePose UV annotations. +Primary dataset: **MM-Fi** (NeurIPS 2023) - 40 subjects, 27 actions, 114 subcarriers, 3 RX antennas, 17 COCO keypoints + DensePose UV annotations. -Secondary dataset: **Wi-Pose** — 12 subjects, 12 actions, 30 subcarriers, 3×3 antenna array, 18 keypoints. +Secondary dataset: **Wi-Pose** - 12 subjects, 12 actions, 30 subcarriers, 3×3 antenna array, 18 keypoints. 
``` ┌──────────────────────────────────────────────────────────┐ @@ -287,7 +287,7 @@ For samples with 3D keypoints but no DensePose UV maps: 1. Run Detectron2 DensePose R-CNN on paired RGB frames (one-time preprocessing step on GPU workstation) 2. Generate `(part_labels [H,W], u_coords [H,W], v_coords [H,W])` pseudo-labels 3. Cache as `.npy` alongside original data -4. Teacher model is discarded after label generation — inference uses WiFi only +4. Teacher model is discarded after label generation - inference uses WiFi only #### 3c. Loss Function @@ -409,7 +409,7 @@ Total inference budget: **<15ms per frame** at 20 Hz on x86, **<50ms** on ESP32- ### Stage 6: RVF Model Container Format The trained model is packaged as a single `.rvf` file that contains everything needed for -inference — no external weight files, no ONNX runtime, no Python dependencies. +inference - no external weight files, no ONNX runtime, no Python dependencies. #### RVF DensePose Container Layout @@ -424,7 +424,7 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) │ ├── Segment directory (offsets to all segments) │ │ └── Level-1 TLV manifest with metadata tags │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 1: Vec (0x01) — Model Weight Embeddings │ +│ SEGMENT 1: Vec (0x01) - Model Weight Embeddings │ │ ├── ModalityTranslator weights [64→128→256→3, Conv1D+ConvT] │ │ ├── ResNet18 backbone weights [3→64→128→256, residual blocks] │ │ ├── KeypointHead weights [256→17, deconv layers] │ @@ -434,17 +434,17 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) │ Format: flat f32 vectors, 768-dim per weight tensor │ │ Total: ~5M parameters → ~20MB f32, ~10MB f16, ~5MB INT8 │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 2: Index (0x02) — HNSW Embedding Index │ +│ SEGMENT 2: Index (0x02) - HNSW Embedding Index │ │ ├── Layer A: Entry points + coarse routing centroids │ │ │ (loaded first, <5ms, enables approximate search) │ │ ├── Layer B: Hot region 
adjacency for frequently │ │ │ accessed weight clusters (100ms load) │ │ └── Layer C: Full adjacency graph for exact nearest │ │ neighbor lookup across all weight partitions │ -│ Use: Fast weight lookup for sparse inference — │ +│ Use: Fast weight lookup for sparse inference - │ │ only load hot neurons, skip cold neurons via HNSW routing │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 3: Overlay (0x03) — Dynamic Min-Cut Graph │ +│ SEGMENT 3: Overlay (0x03) - Dynamic Min-Cut Graph │ │ ├── Subcarrier partition graph (sensitive vs insensitive) │ │ ├── Min-cut witnesses from ruvector-mincut │ │ ├── Antenna topology graph (ESP32 mesh spatial layout) │ @@ -453,15 +453,15 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) │ Dynamic updates via ruvector-mincut insert/delete_edge │ │ as environment changes (furniture moves, new obstacles) │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 4: Quant (0x06) — Quantization Codebooks │ +│ SEGMENT 4: Quant (0x06) - Quantization Codebooks │ │ ├── INT8 codebook for backbone (4x memory reduction) │ │ ├── FP16 scale factors for translator + heads │ │ ├── Binary quantization tables for SIMD distance compute │ │ └── Per-layer calibration statistics (min, max, zero-point) │ -│ Use: rvf-quant temperature-tiered quantization — │ +│ Use: rvf-quant temperature-tiered quantization - │ │ hot layers stay f16, warm layers u8, cold layers binary │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 5: Witness (0x0A) — Training Proof Chain │ +│ SEGMENT 5: Witness (0x0A) - Training Proof Chain │ │ ├── Deterministic training proof (seed, loss curve, hash) │ │ ├── Dataset provenance (MM-Fi commit hash, download URL) │ │ ├── Validation metrics (PCK@0.2, OKS mAP, GPS scores) │ @@ -471,7 +471,7 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) │ training run. Anyone can re-run training with same seed │ │ and verify the weight hash matches the witness. 
│ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 6: Meta (0x07) — Model Metadata │ +│ SEGMENT 6: Meta (0x07) - Model Metadata │ │ ├── COCO keypoint names and skeleton connectivity │ │ ├── DensePose body part labels (24 parts + background) │ │ ├── UV coordinate range and resolution │ @@ -479,7 +479,7 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) │ ├── RuVector crate versions used during training │ │ └── Environment calibration profiles (named, per-room) │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 7: AggregateWeights (0x36) — SONA LoRA Deltas │ +│ SEGMENT 7: AggregateWeights (0x36) - SONA LoRA Deltas │ │ ├── Per-environment LoRA adaptation matrices (A, B per layer)│ │ ├── EWC++ Fisher information diagonal │ │ ├── Optimal θ* reference parameters │ @@ -488,7 +488,7 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) │ Use: Multiple environment adaptations stored in one file. │ │ Server loads the matching profile or creates a new one. 
│ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 8: Profile (0x0B) — RVDNA Domain Profile │ +│ SEGMENT 8: Profile (0x0B) - RVDNA Domain Profile │ │ ├── Domain: "wifi-csi-densepose" │ │ ├── Input spec: [B, T*ant, sub] CSI tensor format │ │ ├── Output spec: keypoints [B,17,H,W], parts [B,25,H,W], │ @@ -496,20 +496,20 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) │ ├── Hardware requirements: min RAM, recommended GPU │ │ └── Supported data sources: esp32, wifi-rssi, simulation │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 9: Crypto (0x0C) — Signature and Keys │ +│ SEGMENT 9: Crypto (0x0C) - Signature and Keys │ │ ├── Ed25519 public key for model publisher │ │ ├── Signature over all segment content hashes │ │ └── Certificate chain (optional, for enterprise deployment) │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 10: Wasm (0x10) — Self-Bootstrapping Runtime │ +│ SEGMENT 10: Wasm (0x10) - Self-Bootstrapping Runtime │ │ ├── Compiled WASM inference engine │ │ │ (ruvector-sparse-inference-wasm) │ │ ├── WASM microkernel for RVF segment parsing │ │ └── Browser-compatible: load .rvf → run inference in-browser │ -│ Use: The .rvf file is fully self-contained — a WASM host │ +│ Use: The .rvf file is fully self-contained - a WASM host │ │ can execute inference without any external dependencies. │ ├───────────────────────────────────────────────────────────────┤ -│ SEGMENT 11: Dashboard (0x11) — Embedded Visualization │ +│ SEGMENT 11: Dashboard (0x11) - Embedded Visualization │ │ ├── Three.js-based pose visualization (HTML/JS/CSS) │ │ ├── Gaussian splat renderer for signal field │ │ └── Served at http://localhost:8080/ when model is loaded │ @@ -540,7 +540,7 @@ wifi-densepose-v1.rvf (single file, ~15-30 MB) **Progressive availability**: Inference begins after step 6 (~5ms) with approximate results. Full accuracy is reached by step 9 (~500ms). 
This enables instant startup
-with gradually improving quality — critical for real-time applications.
+with gradually improving quality - critical for real-time applications.

#### RVF Build Pipeline

@@ -548,7 +548,7 @@ After training completes, the model is packaged into an `.rvf` file:

```bash
# Build the RVF container from trained checkpoint
-cargo run -p wifi-densepose-train --bin build-rvf -- \
+cargo run -p wifi-densepose-train --bin build-rvf -- \
  --checkpoint checkpoints/best-pck.pt \
  --quantize int8,fp16 \
  --hnsw-build \
@@ -558,7 +558,7 @@ cargo run -p wifi-densepose-train --bin build-rvf -- \
  --output wifi-densepose-v1.rvf

# Verify the built container
-cargo run -p wifi-densepose-train --bin verify-rvf -- \
+cargo run -p wifi-densepose-train --bin verify-rvf -- \
  --input wifi-densepose-v1.rvf \
  --verify-signature \
  --verify-witness \
@@ -727,13 +727,13 @@ let dashboard = container.load_dashboard()?;

### Positive

- **Trained model produces accurate DensePose**: Moves from heuristic keypoints to learned body surface estimation backed by public dataset evaluation
-- **RuVector signal intelligence is a differentiator**: Graph transformers on antenna topology and GNN body reasoning are novel — no prior WiFi pose system uses these techniques
-- **SONA enables zero-shot deployment**: New environments don't require full retraining — LoRA adaptation with <50 gradient steps converges in seconds
+- **RuVector signal intelligence is a differentiator**: Graph transformers on antenna topology and GNN body reasoning are novel - no prior WiFi pose system uses these techniques
+- **SONA enables zero-shot deployment**: New environments don't require full retraining - LoRA adaptation with <50 gradient steps converges in seconds
- **Sparse inference enables edge deployment**: PowerInfer-style neuron partitioning brings DensePose inference to ESP32-class hardware
-- **Graceful degradation**: Server falls back to heuristic pose when no model file is present — existing
functionality is preserved -- **Single-file deployment via RVF**: Trained model, embeddings, HNSW index, quantization codebooks, SONA adaptation profiles, WASM runtime, and dashboard UI packaged in one `.rvf` file — deploy by copying a single file +- **Graceful degradation**: Server falls back to heuristic pose when no model file is present - existing functionality is preserved +- **Single-file deployment via RVF**: Trained model, embeddings, HNSW index, quantization codebooks, SONA adaptation profiles, WASM runtime, and dashboard UI packaged in one `.rvf` file - deploy by copying a single file - **Progressive loading**: RVF Layer A loads in <5ms for instant startup; full accuracy reached in ~500ms as remaining segments load -- **Verifiable provenance**: RVF Witness segment contains deterministic training proof with Ed25519 signature — anyone can re-run training and verify weight hash +- **Verifiable provenance**: RVF Witness segment contains deterministic training proof with Ed25519 signature - anyone can re-run training and verify weight hash - **Self-bootstrapping**: RVF Wasm segment enables browser-based inference with no server-side dependencies - **Open evaluation**: PCK, OKS, GPS metrics on public MM-Fi dataset provide reproducible, comparable results @@ -772,21 +772,21 @@ let dashboard = container.load_dashboard()?; **ruQu** ("Classical nervous system for quantum machines") provides real-time coherence assessment via dynamic min-cut. While primarily designed for quantum error correction -(syndrome decoding, surface code arbitration), its core primitive — the `CoherenceGate` — +(syndrome decoding, surface code arbitration), its core primitive - the `CoherenceGate` - is architecturally relevant to WiFi CSI processing: - **CoherenceGate** uses `ruvector-mincut` to make real-time gate/pass decisions on signal streams based on structural coherence thresholds. In quantum computing, this gates qubit syndrome streams. 
For WiFi CSI, the same mechanism could gate CSI - subcarrier streams — passing only subcarriers whose coherence (phase stability across + subcarrier streams - passing only subcarriers whose coherence (phase stability across antennas) exceeds a dynamic threshold. - **Syndrome filtering** (`filters.rs`) implements Kalman-like adaptive filters that - could be repurposed for CSI noise filtering — treating each subcarrier's amplitude + could be repurposed for CSI noise filtering - treating each subcarrier's amplitude drift as a "syndrome" stream. - **Min-cut gated transformer** integration (optional feature) provides coherence-optimized - attention with 50% FLOP reduction — directly applicable to the `ModalityTranslator` + attention with 50% FLOP reduction - directly applicable to the `ModalityTranslator` bottleneck. **Decision**: ruQu is not included in the initial pipeline (Phase 1-8) but is marked as a @@ -800,9 +800,9 @@ The pipeline supports three data sources for training, used in combination: | Source | Subcarriers | Pose Labels | Volume | Cost | When | |--------|-------------|-------------|--------|------|------| -| **MM-Fi** (public) | 114 → 56 (interpolated) | 17 COCO + DensePose UV | 40 subjects, 320K frames | Free (CC BY-NC) | Phase 1 — bootstrap | -| **Wi-Pose** (public) | 30 → 56 (zero-padded) | 18 keypoints | 12 subjects, 166K packets | Free (research) | Phase 1 — diversity | -| **ESP32 self-collected** | 56 (native) | Teacher-student from camera | Unlimited, environment-specific | Hardware only ($54) | Phase 4+ — fine-tuning | +| **MM-Fi** (public) | 114 → 56 (interpolated) | 17 COCO + DensePose UV | 40 subjects, 320K frames | Free (CC BY-NC) | Phase 1 - bootstrap | +| **Wi-Pose** (public) | 30 → 56 (zero-padded) | 18 keypoints | 12 subjects, 166K packets | Free (research) | Phase 1 - diversity | +| **ESP32 self-collected** | 56 (native) | Teacher-student from camera | Unlimited, environment-specific | Hardware only ($54) | Phase 4+ - fine-tuning | 
**Recommended approach: Both public + ESP32 data.** diff --git a/docs/adr/ADR-024-contrastive-csi-embedding-model.md b/docs/adr/ADR-024-contrastive-csi-embedding-model.md index a7c9b4712..1cbb49943 100644 --- a/docs/adr/ADR-024-contrastive-csi-embedding-model.md +++ b/docs/adr/ADR-024-contrastive-csi-embedding-model.md @@ -1,11 +1,11 @@ -# ADR-024: Project AETHER -- Contrastive CSI Embedding Model via CsiToPoseTransformer Backbone +# ADR-024: Project AETHER - Contrastive CSI Embedding Model via CsiToPoseTransformer Backbone | Field | Value | |-------|-------| | **Status** | Proposed | | **Date** | 2026-03-01 | | **Deciders** | ruv | -| **Codename** | **AETHER** -- Ambient Electromagnetic Topology for Hierarchical Embedding and Recognition | +| **Codename** | **AETHER** - Ambient Electromagnetic Topology for Hierarchical Embedding and Recognition | | **Relates to** | ADR-004 (HNSW Fingerprinting), ADR-005 (SONA Self-Learning), ADR-006 (GNN-Enhanced CSI), ADR-014 (SOTA Signal Processing), ADR-015 (Public Datasets), ADR-016 (RuVector Integration), ADR-023 (Trained DensePose Pipeline) | --- @@ -20,7 +20,7 @@ These representations are currently **task-coupled**: they exist only as transie 1. **Extract and persist** these representations as reusable, queryable embedding vectors 2. **Compare** CSI observations via learned similarity ("is this the same room?" / "is this the same person?") -3. **Pretrain** the backbone in a self-supervised manner from unlabeled CSI streams -- the most abundant data source +3. **Pretrain** the backbone in a self-supervised manner from unlabeled CSI streams - the most abundant data source 4. **Transfer** learned representations across WiFi hardware, environments, or deployment sites 5. **Feed** semantically meaningful vectors into HNSW indices (ADR-004) instead of hand-crafted feature encodings @@ -28,7 +28,7 @@ The gap between what the transformer *internally knows* and what the system *ext ### 1.2 Why "AETHER"? 
-The name reflects the historical concept of the luminiferous aether -- the invisible medium through which electromagnetic waves were once theorized to propagate. In our context, WiFi signals propagate through physical space, and AETHER extracts a latent geometric understanding of that space from the signals themselves. The name captures three core ideas: +The name reflects the historical concept of the luminiferous aether - the invisible medium through which electromagnetic waves were once theorized to propagate. In our context, WiFi signals propagate through physical space, and AETHER extracts a latent geometric understanding of that space from the signals themselves. The name captures three core ideas: - **Ambient**: Works with the WiFi signals already present in any indoor environment - **Electromagnetic Topology**: Captures the topological structure of multipath propagation @@ -40,10 +40,10 @@ We evaluated and rejected a generative "RuvLLM" approach. The GOAP analysis: | Factor | Generative (Autoregressive) | Contrastive (AETHER) | |--------|---------------------------|---------------------| -| **Domain fit** | CSI is 56 continuous floats at 20 Hz -- not a discrete token vocabulary. Autoregressive generation is architecturally mismatched. | Contrastive learning on continuous sensor data is the established SOTA (SimCLR, BYOL, VICReg, CAPC). | +| **Domain fit** | CSI is 56 continuous floats at 20 Hz - not a discrete token vocabulary. Autoregressive generation is architecturally mismatched. | Contrastive learning on continuous sensor data is the established SOTA (SimCLR, BYOL, VICReg, CAPC). | | **Model size** | Generative transformers need millions of parameters for meaningful sequence modeling. | Reuses existing 28K-param CsiToPoseTransformer + 25K projection head = 53K total. | | **Edge deployment** | Cannot run on ESP32 (240 MHz, 520 KB SRAM). | INT8-quantized 53K params = ~53 KB. 10% of ESP32 SRAM. 
| -| **Training data** | Requires massive CSI corpus for autoregressive pretraining to converge. | Self-supervised augmentations work with any CSI stream -- even minutes of data. | +| **Training data** | Requires massive CSI corpus for autoregressive pretraining to converge. | Self-supervised augmentations work with any CSI stream - even minutes of data. | | **Inference** | Autoregressive decoding is sequential; violates 20 Hz real-time constraint. | Single forward pass: <2 ms at INT8. | | **Infrastructure** | New model architecture, tokenizer, trainer, quantizer, RVF packaging. | One new module (`embedding.rs`), one new loss term, one new RVF segment type. | | **Collapse risk** | Mode collapse in generation manifests as repetitive outputs. | Embedding collapse is detectable (variance monitoring) and preventable (VICReg regularization). | @@ -70,7 +70,7 @@ Recent advances that directly inform AETHER's design: - **IdentiFi** (2025): Contrastive learning for WiFi-based person identification using latent CSI representations. Demonstrates that contrastive pretraining in the signal domain produces identity-discriminative embeddings without requiring spatial position labels. - **WhoFi** (2025): Transformer-based WiFi CSI encoding for person re-identification achieving 95.5% accuracy on NTU-Fi. Validates that transformer backbones learn re-identification-quality features from CSI. -- **CAPC** (2024): Context-Aware Predictive Coding for WiFi sensing -- integrates CPC and Barlow Twins to learn temporally and contextually consistent representations from unlabeled WiFi data. +- **CAPC** (2024): Context-Aware Predictive Coding for WiFi sensing - integrates CPC and Barlow Twins to learn temporally and contextually consistent representations from unlabeled WiFi data. - **SSL for WiFi HAR Survey** (2025, arXiv:2506.12052): Comprehensive evaluation of SimCLR, VICReg, Barlow Twins, and SimSiam on WiFi CSI for human activity recognition. 
VICReg achieves best downstream accuracy but requires careful hyperparameter tuning; SimCLR shows more stable training. - **ContraWiMAE** (2024-2025): Masked autoencoder + contrastive pretraining for wireless channel representation learning, demonstrating that hybrid SSL objectives outperform pure contrastive or pure reconstructive approaches. - **Wi-PER81** (2025): Benchmark dataset of 162K wireless packets for WiFi-based person re-identification using Siamese networks on signal amplitude heatmaps. @@ -120,7 +120,7 @@ CSI Frame(s) [n_pairs x n_subcarriers] 2. **128-dim output**: Standard in contrastive learning literature (SimCLR, MoCo, CLIP). Large enough for high-recall HNSW search, small enough for edge deployment. L2-normalized to the unit hypersphere for cosine similarity. -3. **BatchNorm1D in projection head**: Prevents representation collapse by maintaining feature variance across the batch dimension. Acts as an implicit contrastive mechanism (VICReg insight) -- decorrelates embedding dimensions. +3. **BatchNorm1D in projection head**: Prevents representation collapse by maintaining feature variance across the batch dimension. Acts as an implicit contrastive mechanism (VICReg insight) - decorrelates embedding dimensions. 4. **Shared backbone, independent heads**: The backbone (csi_embed, cross-attention, GNN) is shared between pose regression and embedding extraction. This enables multi-task training where contrastive and supervised signals co-regularize the backbone. 
@@ -202,7 +202,7 @@ This ensures that CSI embeddings of the same pose are close in embedding space, ### 2.3 Training Strategy: Three-Phase Pipeline -#### Phase A -- Self-Supervised Pretraining (No Labels) +#### Phase A - Self-Supervised Pretraining (No Labels) ``` Raw CSI Window W (any stream, any environment) @@ -220,7 +220,7 @@ Raw CSI Window W (any stream, any environment) - **Epochs**: 100-200 (convergence monitored via embedding uniformity and alignment metrics) - **Monitoring**: Track `alignment = E[||z_i - z_j||^2]` for positive pairs (should decrease) and `uniformity = log(E[exp(-2 * ||z_i - z_j||^2)])` over all pairs (should decrease, indicating uniform distribution on hypersphere) -#### Phase B -- Supervised Fine-Tuning (Labeled Data) +#### Phase B - Supervised Fine-Tuning (Labeled Data) After pretraining, attach `xyz_head` and `conf_head` and fine-tune with the existing 6-term composite loss (ADR-023 Phase 4), optionally keeping the contrastive loss as a regularizer: @@ -232,7 +232,7 @@ lambda_c = 0.1 (contrastive acts as regularizer, not primary objective) The pretrained backbone starts with representations that already understand CSI spatial structure, typically requiring 3-10x fewer labeled samples for equivalent pose accuracy. -#### Phase C -- Cross-Modal Alignment (Optional, requires paired data) +#### Phase C - Cross-Modal Alignment (Optional, requires paired data) Adds `L_cross` to align CSI and pose embedding spaces. Only applicable when paired CSI + camera pose data is available (MM-Fi provides this). @@ -511,7 +511,7 @@ pub struct TrainerConfig { // New method on Trainer: impl Trainer { /// Self-supervised pretraining epoch using AETHER contrastive loss. - /// No pose labels required -- only raw CSI windows. + /// No pose labels required - only raw CSI windows. pub fn pretrain_epoch( &mut self, csi_windows: &[Vec>], @@ -716,7 +716,7 @@ The `embed()` method already exists and returns `[17 x d_model]`. 
No modificatio ### Positive - **Self-supervised pretraining from unlabeled CSI**: Any WiFi CSI stream (no cameras, no annotations) can pretrain the embedding backbone, radically reducing labeled data requirements. This is the single most impactful capability: WiFi signals are ubiquitous and free. -- **Reuses 100% of existing infrastructure**: No new model architecture -- extends the existing CsiToPoseTransformer with one module, one loss term, one RVF segment type. +- **Reuses 100% of existing infrastructure**: No new model architecture - extends the existing CsiToPoseTransformer with one module, one loss term, one RVF segment type. - **HNSW-ready embeddings**: 128-dim L2-normalized vectors plug directly into the HNSW indices proposed in ADR-004, fulfilling that ADR's "vector encode" pipeline gap. - **Multi-use embeddings**: Same model produces pose keypoints AND embedding vectors in a single forward pass. Two capabilities for the price of one inference. - **Anomaly detection without task-specific models**: OOD CSI frames produce embeddings distant from the training distribution. Fall detection, intrusion detection, and environment change detection emerge as byproducts of the embedding space geometry. @@ -815,7 +815,7 @@ mod integration_tests { --- -## 6. Phase 7: Deep RuVector Integration — MicroLoRA + EWC++ + Library Losses +## 6. 
Phase 7: Deep RuVector Integration - MicroLoRA + EWC++ + Library Losses **Status**: Required (promoted from Future Work after capability audit) @@ -841,11 +841,11 @@ pub struct ProjectionHead { - **Total: 1,792 params/env** vs 24,832 full ProjectionHead = **93% reduction** **Methods to add:** -- `ProjectionHead::with_lora(rank: usize)` — constructor with LoRA adapters +- `ProjectionHead::with_lora(rank: usize)` - constructor with LoRA adapters - `ProjectionHead::forward()` modified: `out = base_out + lora.forward(input)` when adapters present -- `ProjectionHead::merge_lora()` / `unmerge_lora()` — for fast environment switching -- `ProjectionHead::freeze_base()` — freeze base weights, train only LoRA -- `ProjectionHead::lora_params() -> Vec` — flatten only LoRA weights for checkpoint +- `ProjectionHead::merge_lora()` / `unmerge_lora()` - for fast environment switching +- `ProjectionHead::freeze_base()` - freeze base weights, train only LoRA +- `ProjectionHead::lora_params() -> Vec` - flatten only LoRA weights for checkpoint **Environment switching workflow:** 1. 
Compute `z_csi` for incoming CSI diff --git a/docs/adr/ADR-025-macos-corewlan-wifi-sensing.md b/docs/adr/ADR-025-macos-corewlan-wifi-sensing.md index 491ecea68..d46bee07a 100644 --- a/docs/adr/ADR-025-macos-corewlan-wifi-sensing.md +++ b/docs/adr/ADR-025-macos-corewlan-wifi-sensing.md @@ -5,7 +5,7 @@ | **Status** | Proposed | | **Date** | 2026-03-01 | | **Deciders** | ruv | -| **Codename** | **ORCA** — OS-native Radio Channel Acquisition | +| **Codename** | **ORCA** - OS-native Radio Channel Acquisition | | **Relates to** | ADR-013 (Feature-Level Sensing Commodity Gear), ADR-022 (Windows WiFi Enhanced Fidelity), ADR-014 (SOTA Signal Processing), ADR-018 (ESP32 Dev Implementation) | | **Issue** | [#56](https://github.com/ruvnet/wifi-densepose/issues/56) | | **Build/Test Target** | Mac Mini (M2 Pro, macOS 26.3) | @@ -16,7 +16,7 @@ ### 1.1 The Gap: macOS Is a Silent Fallback -The `--source auto` path in `sensing-server` probes for ESP32 UDP, then Windows `netsh`, then falls back to simulated mode. macOS users hit the simulation path silently — there is no macOS WiFi adapter. This is the only major desktop platform without real WiFi sensing support. +The `--source auto` path in `sensing-server` probes for ESP32 UDP, then Windows `netsh`, then falls back to simulated mode. macOS users hit the simulation path silently - there is no macOS WiFi adapter. This is the only major desktop platform without real WiFi sensing support. ### 1.2 Platform Constraints (macOS 26.3+) @@ -39,7 +39,7 @@ Same principle as ADR-022 (Windows): visible APs serve as pseudo-subcarriers. A | Windows `netsh` (ADR-022) | 10-30 BSSIDs | ~2 Hz | Presence, motion, coarse breathing | | **macOS CoreWLAN (this ADR)** | **10-30 SSIDs** | **~0.3-0.5 Hz** | **Presence, motion** | -The lower scan rate vs Windows is offset by higher signal quality — CoreWLAN returns calibrated dBm (not percentage) plus noise floor, enabling proper SNR computation. 
+The lower scan rate vs Windows is offset by higher signal quality - CoreWLAN returns calibrated dBm (not percentage) plus noise floor, enabling proper SNR computation. ### 1.4 Why Swift Subprocess (Not FFI) @@ -50,7 +50,7 @@ The lower scan rate vs Windows is offset by higher signal quality — CoreWLAN r | `objc2` crate (Rust ObjC bridge) | High | CoreWLAN not in upstream `objc2-frameworks` | Requires manual class definitions | Rejected | | `swift-bridge` crate | High | Young ecosystem, async bridging unsupported | Requires Swift build integration in Cargo | Rejected | -The `Command::new()` + parse JSON pattern is proven — it's exactly what `NetshBssidScanner` does for Windows. The subprocess boundary also isolates Apple framework dependencies from the Rust build graph. +The `Command::new()` + parse JSON pattern is proven - it's exactly what `NetshBssidScanner` does for Windows. The subprocess boundary also isolates Apple framework dependencies from the Rust build graph. ### 1.5 SOTA: Platform-Adaptive WiFi Sensing @@ -68,11 +68,11 @@ Implement a **macOS CoreWLAN sensing adapter** as a Swift helper binary + Rust a ### 2.1 Design Principles -1. **Subprocess isolation** — Swift binary is a standalone tool, built and versioned independently of the Rust workspace. -2. **Same domain types** — macOS adapter produces `Vec`, identical to the Windows path. All downstream processing reuses as-is. -3. **SSID:channel as synthetic BSSID** — When real BSSIDs are redacted (no Location Services), `sha256(ssid + channel)[:12]` generates a stable pseudo-BSSID. Documented limitation: same-SSID same-channel APs collapse to one observation. -4. **`#[cfg(target_os = "macos")]` gating** — macOS-specific code compiles only on macOS. Windows and Linux builds are unaffected. -5. **Graceful degradation** — If the Swift helper is not found or fails, `--source auto` skips macOS WiFi and falls back to simulated mode with a clear warning. +1. 
**Subprocess isolation** - Swift binary is a standalone tool, built and versioned independently of the Rust workspace. +2. **Same domain types** - macOS adapter produces `Vec`, identical to the Windows path. All downstream processing reuses as-is. +3. **SSID:channel as synthetic BSSID** - When real BSSIDs are redacted (no Location Services), `sha256(ssid + channel)[:12]` generates a stable pseudo-BSSID. Documented limitation: same-SSID same-channel APs collapse to one observation. +4. **`#[cfg(target_os = "macos")]` gating** - macOS-specific code compiles only on macOS. Windows and Linux builds are unaffected. +5. **Graceful degradation** - If the Swift helper is not found or fails, `--source auto` skips macOS WiFi and falls back to simulated mode with a clear warning. --- @@ -229,8 +229,8 @@ The existing 8-stage `WindowsWifiPipeline` (ADR-022) operates entirely on `Bssid ### 4.3 No New Rust Dependencies -- `std::process::Command` — subprocess spawning (stdlib) -- `serde_json` — JSON parsing (already in workspace) +- `std::process::Command` - subprocess spawning (stdlib) +- `serde_json` - JSON parsing (already in workspace) - No changes to `Cargo.toml` --- @@ -288,7 +288,7 @@ All verification on Mac Mini (M2 Pro, macOS 26.3). | **Slow scan rate** (~0.3 Hz) | Breathing extraction unreliable (below Nyquist) | Motion/presence still work. Breathing marked low-confidence. Future: cache + connected AP fast-poll hybrid. | | **Requires Swift helper in PATH** | Extra build step for source builds | `build.sh` provided. Docker image pre-bundles it. Clear error message when missing. | | **Location Services for BSSID** | Full BSSID requires user permission prompt | System degrades gracefully to SSID:channel pseudo-BSSID without permission. | -| **No CSI** | Cannot match ESP32 pose estimation accuracy | Expected — this is RSSI-tier sensing (presence + motion). Same limitation as Windows. 
| +| **No CSI** | Cannot match ESP32 pose estimation accuracy | Expected - this is RSSI-tier sensing (presence + motion). Same limitation as Windows. | --- @@ -307,9 +307,9 @@ All verification on Mac Mini (M2 Pro, macOS 26.3). ## 8. References - [Apple CoreWLAN Documentation](https://developer.apple.com/documentation/corewlan) -- [CWWiFiClient](https://developer.apple.com/documentation/corewlan/cwwificlient) — Primary WiFi interface API -- [CWNetwork](https://developer.apple.com/documentation/corewlan/cwnetwork) — Scan result type (SSID, RSSI, channel, noise) -- [macOS 15 airport removal](https://developer.apple.com/forums/thread/732431) — Apple Developer Forums +- [CWWiFiClient](https://developer.apple.com/documentation/corewlan/cwwificlient) - Primary WiFi interface API +- [CWNetwork](https://developer.apple.com/documentation/corewlan/cwnetwork) - Scan result type (SSID, RSSI, channel, noise) +- [macOS 15 airport removal](https://developer.apple.com/forums/thread/732431) - Apple Developer Forums - ADR-022: Windows WiFi Enhanced Fidelity (analogous platform adapter) - ADR-013: Feature-Level Sensing from Commodity Gear - Issue [#56](https://github.com/ruvnet/wifi-densepose/issues/56): macOS support request diff --git a/docs/adr/ADR-026-survivor-track-lifecycle.md b/docs/adr/ADR-026-survivor-track-lifecycle.md index d76e5313a..a03bb5c7c 100644 --- a/docs/adr/ADR-026-survivor-track-lifecycle.md +++ b/docs/adr/ADR-026-survivor-track-lifecycle.md @@ -3,7 +3,7 @@ **Status:** Accepted **Date:** 2026-03-01 **Deciders:** WiFi-DensePose Core Team -**Domain:** MAT (Mass Casualty Assessment Tool) — `wifi-densepose-mat` +**Domain:** MAT (Mass Casualty Assessment Tool) - `wifi-densepose-mat` **Supersedes:** None **Related:** ADR-001 (WiFi-MAT disaster detection), ADR-017 (ruvector signal/MAT integration) @@ -15,19 +15,19 @@ The MAT crate's `Survivor` entity has `SurvivorStatus` states (`Active / Rescued / Lost / Deceased / FalsePositive`) and `is_stale()` / `mark_lost()` 
methods, but these are insufficient for real operational use: -1. **Manually driven state transitions** — no controller automatically fires +1. **Manually driven state transitions** - no controller automatically fires `mark_lost()` when signal drops for N consecutive frames, nor re-activates a survivor when signal reappears. -2. **Frame-local assignment only** — `DynamicPersonMatcher` (metrics.rs) solves +2. **Frame-local assignment only** - `DynamicPersonMatcher` (metrics.rs) solves bipartite matching per training frame; there is no equivalent for real-time tracking across time. -3. **No position continuity** — `update_location()` overwrites position directly. +3. **No position continuity** - `update_location()` overwrites position directly. Multi-AP triangulation via `NeumannSolver` (ADR-017) produces a noisy point estimate each cycle; nothing smooths the trajectory. -4. **No re-identification** — when `SurvivorStatus::Lost`, reappearance of the +4. **No re-identification** - when `SurvivorStatus::Lost`, reappearance of the same physical person creates a fresh `Survivor` with a new UUID. Vital-sign history is lost and survivor count is inflated. @@ -47,7 +47,7 @@ The MAT crate's `Survivor` entity has `SurvivorStatus` states Add a **`tracking` bounded context** within `wifi-densepose-mat` at `src/tracking/`, implementing three collaborating components: -### 1. Kalman Filter — Constant-Velocity 3-D Model (`kalman.rs`) +### 1. Kalman Filter - Constant-Velocity 3-D Model (`kalman.rs`) State vector `x = [px, py, pz, vx, vy, vz]` (position + velocity in metres / m·s⁻¹). @@ -100,13 +100,13 @@ Per-tick algorithm: ``` update(observations, dt_secs): - 1. Predict — advance Kalman state for all Active + Lost tracks - 2. Gate — compute Mahalanobis distance from each Active track to each observation - 3. Associate — greedy nearest-neighbour (gated); Hungarian for N ≤ 10 - 4. Re-ID — unmatched observations vs Lost tracks via CsiFingerprint - 5. 
Birth — still-unmatched observations → new Tentative tracks - 6. Update — matched tracks: Kalman update + vitals update + lifecycle.hit() - 7. Lifecycle — unmatched tracks: lifecycle.miss(); transitions Lost→Terminated + 1. Predict - advance Kalman state for all Active + Lost tracks + 2. Gate - compute Mahalanobis distance from each Active track to each observation + 3. Associate - greedy nearest-neighbour (gated); Hungarian for N ≤ 10 + 4. Re-ID - unmatched observations vs Lost tracks via CsiFingerprint + 5. Birth - still-unmatched observations → new Tentative tracks + 6. Update - matched tracks: Kalman update + vitals update + lifecycle.hit() + 7. Lifecycle - unmatched tracks: lifecycle.miss(); transitions Lost→Terminated ``` --- @@ -117,11 +117,11 @@ update(observations, dt_secs): ``` tracking/ -├── mod.rs — public API re-exports -├── kalman.rs — KalmanState value object -├── fingerprint.rs — CsiFingerprint value object -├── lifecycle.rs — TrackState enum, TrackLifecycle entity, TrackerConfig -└── tracker.rs — SurvivorTracker aggregate root +├── mod.rs - public API re-exports +├── kalman.rs - KalmanState value object +├── fingerprint.rs - CsiFingerprint value object +├── lifecycle.rs - TrackState enum, TrackLifecycle entity, TrackerConfig +└── tracker.rs - SurvivorTracker aggregate root TrackedSurvivor entity (wraps Survivor + tracking state) DetectionObservation value object AssociationResult value object @@ -170,7 +170,7 @@ tracking/ ### Risk Mitigation -- **Conservative re-ID**: threshold 0.35 (not 0.5) — prefer new survivor record +- **Conservative re-ID**: threshold 0.35 (not 0.5) - prefer new survivor record over incorrect merge. Operators can manually merge via the API if needed. - **Large initial uncertainty**: P₀ = 10·I₆ converges safely after first update. - **`Terminated` is unrecoverable**: prevents runaway re-linking. 
@@ -184,7 +184,7 @@ tracking/ |-------------|-----------------| | **DeepSORT** (appearance embedding + Kalman) | Requires visual features; not applicable to WiFi CSI | | **Particle filter** | Better for nonlinear dynamics; overkill for slow-moving rubble survivors | -| **Pure frame-local assignment** | Current state — insufficient; causes all described problems | +| **Pure frame-local assignment** | Current state - insufficient; causes all described problems | | **IoU-based tracking** | Requires bounding boxes from camera; WiFi gives only positions | --- diff --git a/docs/adr/ADR-027-cross-environment-domain-generalization.md b/docs/adr/ADR-027-cross-environment-domain-generalization.md index 03b249803..ab1775f2d 100644 --- a/docs/adr/ADR-027-cross-environment-domain-generalization.md +++ b/docs/adr/ADR-027-cross-environment-domain-generalization.md @@ -1,11 +1,11 @@ -# ADR-027: Project MERIDIAN -- Cross-Environment Domain Generalization for WiFi Pose Estimation +# ADR-027: Project MERIDIAN - Cross-Environment Domain Generalization for WiFi Pose Estimation | Field | Value | |-------|-------| | **Status** | Proposed | | **Date** | 2026-03-01 | | **Deciders** | ruv | -| **Codename** | **MERIDIAN** -- Multi-Environment Robust Inference via Domain-Invariant Alignment Networks | +| **Codename** | **MERIDIAN** - Multi-Environment Robust Inference via Domain-Invariant Alignment Networks | | **Relates to** | ADR-005 (SONA Self-Learning), ADR-014 (SOTA Signal Processing), ADR-015 (Public Datasets), ADR-016 (RuVector Integration), ADR-023 (Trained DensePose Pipeline), ADR-024 (AETHER Contrastive Embeddings) | --- @@ -14,7 +14,7 @@ ### 1.1 The Domain Gap Problem -WiFi-based pose estimation models exhibit severe performance degradation when deployed in environments different from their training setting. 
A model trained in Room A with a specific transceiver layout, wall material composition, and furniture arrangement can lose 40-70% accuracy when moved to Room B -- even in the same building. This brittleness is the single largest barrier to real-world WiFi sensing deployment. +WiFi-based pose estimation models exhibit severe performance degradation when deployed in environments different from their training setting. A model trained in Room A with a specific transceiver layout, wall material composition, and furniture arrangement can lose 40-70% accuracy when moved to Room B - even in the same building. This brittleness is the single largest barrier to real-world WiFi sensing deployment. The root cause is three-fold: @@ -50,7 +50,7 @@ Five concurrent lines of research have converged on the domain generalization pr | Current Capability | Gap | MERIDIAN Addition | |-------------------|-----|------------------| -| AETHER embeddings (ADR-024) | Embeddings encode environment identity -- useful for fingerprinting but harmful for cross-environment transfer | Environment-disentangled embeddings with explicit factorization | +| AETHER embeddings (ADR-024) | Embeddings encode environment identity - useful for fingerprinting but harmful for cross-environment transfer | Environment-disentangled embeddings with explicit factorization | | SONA LoRA adapters (ADR-005) | Adapters must be manually created per environment; no mechanism to generate them from few-shot data | Zero-shot environment adaptation via geometry-conditioned inference | | MM-Fi/Wi-Pose training (ADR-015) | Single-environment train/eval; no cross-domain protocol | Multi-domain training protocol with environment augmentation | | SpotFi phase correction (ADR-014) | Hardware-specific phase calibration | Hardware-invariant CSI normalization layer | @@ -130,7 +130,7 @@ Total loss: + lambda_env * L_env_recon ``` -The GRL reverses the gradient flowing from `L_domain` into `PoseEncoder`, meaning the PoseEncoder is 
trained to **maximize** domain classification error -- forcing `h_pose` to shed all environment-specific information. +The GRL reverses the gradient flowing from `L_domain` into `PoseEncoder`, meaning the PoseEncoder is trained to **maximize** domain classification error - forcing `h_pose` to shed all environment-specific information. **Key hyperparameters:** - `lambda_adv`: Adversarial weight, annealed from 0.0 to 1.0 over first 20 epochs using the schedule `lambda_adv(p) = 2 / (1 + exp(-10 * p)) - 1` where `p = epoch / max_epochs` @@ -291,7 +291,7 @@ This leverages the existing SONA infrastructure (ADR-005) to generate environmen ## 4. Implementation -### 4.1 Phase 1 -- Hardware Normalizer (Week 1) +### 4.1 Phase 1 - Hardware Normalizer (Week 1) **Goal**: Canonical CSI representation across ESP32, Intel 5300, and Atheros hardware. @@ -308,7 +308,7 @@ This leverages the existing SONA infrastructure (ADR-005) to generate environmen - [ ] Phase sanitization removes linear trend (validated against SpotFi output) - [ ] Unit tests with synthetic ESP32 (64 sub) and Intel 5300 (30 sub) frames -### 4.2 Phase 2 -- Domain Factorizer + GRL (Week 2-3) +### 4.2 Phase 2 - Domain Factorizer + GRL (Week 2-3) **Goal**: Disentangle pose-relevant and environment-specific features during training. @@ -318,7 +318,7 @@ This leverages the existing SONA infrastructure (ADR-005) to generate environmen - `crates/wifi-densepose-train/src/trainer.rs` (add L_domain to composite loss, GRL annealing) - `crates/wifi-densepose-train/src/dataset.rs` (add domain labels to DataPipeline) -**Key implementation detail -- Gradient Reversal Layer:** +**Key implementation detail - Gradient Reversal Layer:** ```rust /// Gradient Reversal Layer: identity in forward pass, negates gradient in backward. 
@@ -345,7 +345,7 @@ impl GradientReversalLayer { - [ ] Pose accuracy on source domains degrades <5% vs non-adversarial baseline - [ ] Cross-domain pose accuracy improves >20% on held-out environment -### 4.3 Phase 3 -- Geometry Encoder + FiLM Conditioning (Week 3-4) +### 4.3 Phase 3 - Geometry Encoder + FiLM Conditioning (Week 3-4) **Goal**: Enable zero-shot deployment given AP positions. @@ -360,7 +360,7 @@ impl GradientReversalLayer { - [ ] FiLM conditioning reduces cross-layout MPJPE by >30% vs unconditioned baseline - [ ] Inference overhead <100us per frame (geometry encoding is amortized per-session) -### 4.4 Phase 4 -- Virtual Domain Augmentation (Week 4-5) +### 4.4 Phase 4 - Virtual Domain Augmentation (Week 4-5) **Goal**: Synthetic environment diversity to improve generalization. @@ -377,7 +377,7 @@ impl GradientReversalLayer { - [ ] Training with virtual augmentation improves unseen-environment accuracy by >15% - [ ] No regression on seen-environment accuracy (within 2%) -### 4.5 Phase 5 -- Few-Shot Rapid Adaptation (Week 5-6) +### 4.5 Phase 5 - Few-Shot Rapid Adaptation (Week 5-6) **Goal**: 10-second calibration enables environment-specific fine-tuning without labels. @@ -392,7 +392,7 @@ impl GradientReversalLayer { - [ ] Calibration completes in <5 seconds on x86 (including contrastive TTT) - [ ] Adapted LoRA weights serializable to RVF container (ADR-023 Segment type) -### 4.6 Phase 6 -- Cross-Domain Evaluation Protocol (Week 6-7) +### 4.6 Phase 6 - Cross-Domain Evaluation Protocol (Week 6-7) **Goal**: Rigorous multi-domain evaluation using MM-Fi's scene/subject splits. 
@@ -411,7 +411,7 @@ impl GradientReversalLayer { | **Domain gap ratio** | cross-domain / in-domain MPJPE (lower = better; target <1.5) | | **Adaptation speedup** | Labeled samples saved vs training from scratch (target >5x) | -### 4.7 Phase 7 -- RVF Container + Deployment (Week 7-8) +### 4.7 Phase 7 - RVF Container + Deployment (Week 7-8) **Goal**: Package MERIDIAN-enhanced models for edge deployment. @@ -432,18 +432,18 @@ impl GradientReversalLayer { ```bash # Train with MERIDIAN domain generalization -cargo run -p wifi-densepose-sensing-server -- \ +cargo run -p wifi-densepose-sensing-server -- \ --train --dataset data/mmfi/ --epochs 100 \ --meridian --n-virtual-domains 3 \ --save-rvf model-meridian.rvf # Deploy with geometry conditioning (zero-shot) -cargo run -p wifi-densepose-sensing-server -- \ +cargo run -p wifi-densepose-sensing-server -- \ --model model-meridian.rvf \ --ap-positions "0,0,2.5;3.5,0,2.5;1.75,4,2.5" # Calibrate in new environment (few-shot, 10 seconds) -cargo run -p wifi-densepose-sensing-server -- \ +cargo run -p wifi-densepose-sensing-server -- \ --model model-meridian.rvf --calibrate --calibrate-duration 10 ``` @@ -485,7 +485,7 @@ ADRs 002-011 were proposed during the initial architecture phase. MERIDIAN direc | Proposed ADR | Gap | How MERIDIAN Closes It | |-------------|-----|----------------------| -| **ADR-004**: HNSW Vector Search Fingerprinting | CSI fingerprints are environment-specific — a fingerprint learned in Room A is useless in Room B | MERIDIAN's `DomainFactorizer` produces **environment-disentangled embeddings** (`h_pose`). When fed into ADR-024's `FingerprintIndex`, these embeddings match across rooms because environment information has been factored out. The `h_env` path captures room identity separately, enabling both cross-room matching AND room identification in a single model.
| +| **ADR-004**: HNSW Vector Search Fingerprinting | CSI fingerprints are environment-specific - a fingerprint learned in Room A is useless in Room B | MERIDIAN's `DomainFactorizer` produces **environment-disentangled embeddings** (`h_pose`). When fed into ADR-024's `FingerprintIndex`, these embeddings match across rooms because environment information has been factored out. The `h_env` path captures room identity separately, enabling both cross-room matching AND room identification in a single model. | | **ADR-005**: SONA Self-Learning for Pose Estimation | SONA LoRA adapters must be manually created per environment with labeled data | MERIDIAN Phase 5 (`RapidAdaptation`) extends SONA with **unsupervised adapter generation**: 10 seconds of unlabeled WiFi data + contrastive test-time training automatically produces a per-room LoRA adapter. No labels, no manual intervention. The existing `SonaProfile` in `sona.rs` gains a `meridian_calibration` field for storing adaptation state. | | **ADR-006**: GNN-Enhanced CSI Pattern Recognition | GNN treats each environment's patterns independently; no cross-environment transfer | MERIDIAN's domain-adversarial training regularizes the GCN layers (ADR-023's `GnnStack`) to learn **structure-preserving, environment-invariant** graph features. The gradient reversal layer forces the GCN to shed room-specific multipath patterns while retaining body-pose-relevant spatial relationships between keypoints. | @@ -501,7 +501,7 @@ These ADRs remain independent tracks but MERIDIAN creates enabling infrastructur | Proposed ADR | Gap | How MERIDIAN Enables It | |-------------|-----|------------------------| -| **ADR-003**: RVF Cognitive Containers | CSI pipeline stages produce ephemeral data; no persistent cognitive state across sessions | MERIDIAN's RVF container extensions (Phase 7: `GEOM`, `DOMAIN`, `HWSTATS` segments) establish the pattern for **environment-aware model packaging**. 
A cognitive container could store per-room adaptation history, geometry profiles, and domain statistics — building on MERIDIAN's segment format. The `h_env` embeddings are natural candidates for persistent environment memory. | +| **ADR-003**: RVF Cognitive Containers | CSI pipeline stages produce ephemeral data; no persistent cognitive state across sessions | MERIDIAN's RVF container extensions (Phase 7: `GEOM`, `DOMAIN`, `HWSTATS` segments) establish the pattern for **environment-aware model packaging**. A cognitive container could store per-room adaptation history, geometry profiles, and domain statistics - building on MERIDIAN's segment format. The `h_env` embeddings are natural candidates for persistent environment memory. | | **ADR-008**: Distributed Consensus for Multi-AP | Multiple APs need coordinated sensing; no agreement protocol for conflicting observations | MERIDIAN's `GeometryEncoder` already models variable-count AP positions via permutation-invariant `DeepSets`. This provides the **geometric foundation** for multi-AP fusion: each AP's CSI is geometry-conditioned independently, then fused. A consensus layer (Raft or BFT) would sit above MERIDIAN to reconcile conflicting pose estimates from different AP vantage points. The `HardwareNormalizer` ensures mixed hardware (ESP32 + Intel 5300 across APs) produces comparable features. | | **ADR-009**: RVF WASM Runtime for Edge | Self-contained WASM model execution without server dependency | MERIDIAN's +12K parameter overhead (67K total) remains within the WASM size budget. The `HardwareNormalizer` is critical for WASM deployment: browser-based inference must handle whatever CSI format the connected hardware provides. WASM builds should include the geometry conditioning path so users can specify AP layout in the browser UI. 
| @@ -511,9 +511,9 @@ These ADRs address orthogonal concerns and should be pursued separately: | Proposed ADR | Gap | Recommendation | |-------------|-----|----------------| -| **ADR-007**: Post-Quantum Cryptography | WiFi sensing data reveals presence, health, and activity — quantum computers could break current encryption of sensing streams | **Pursue independently.** MERIDIAN does not address data-in-transit security. PQC should be applied to WebSocket streams (`/ws/sensing`, `/ws/mat/stream`) and RVF model containers (replace Ed25519 signing with ML-DSA/Dilithium). Priority: medium — no imminent quantum threat, but healthcare deployments may require PQC compliance for long-term data retention. | -| **ADR-010**: Witness Chains for Audit Trail | Disaster triage decisions (ADR-001) need tamper-proof audit trails for legal/regulatory compliance | **Pursue independently.** MERIDIAN's domain adaptation improves triage accuracy in unfamiliar environments (rubble, collapsed buildings), which reduces the need for audit trail corrections. But the audit trail itself — hash chains, Merkle proofs, timestamped triage events — is a separate integrity concern. Priority: high for disaster response deployments. | -| **ADR-011**: Python Proof-of-Reality (URGENT) | Python v1 contains mock/placeholder code that undermines credibility; `verify.py` exists but mock paths remain | **Pursue independently.** This is a Python v1 code quality issue, not an ML/architecture concern. The Rust port (v2+) has no mock code — all 542+ tests run against real algorithm implementations. Recommendation: either complete the mock elimination in Python v1 or formally deprecate Python v1 in favor of the Rust stack. Priority: high for credibility. | +| **ADR-007**: Post-Quantum Cryptography | WiFi sensing data reveals presence, health, and activity - quantum computers could break current encryption of sensing streams | **Pursue independently.** MERIDIAN does not address data-in-transit security. 
PQC should be applied to WebSocket streams (`/ws/sensing`, `/ws/mat/stream`) and RVF model containers (replace Ed25519 signing with ML-DSA/Dilithium). Priority: medium - no imminent quantum threat, but healthcare deployments may require PQC compliance for long-term data retention. | +| **ADR-010**: Witness Chains for Audit Trail | Disaster triage decisions (ADR-001) need tamper-proof audit trails for legal/regulatory compliance | **Pursue independently.** MERIDIAN's domain adaptation improves triage accuracy in unfamiliar environments (rubble, collapsed buildings), which reduces the need for audit trail corrections. But the audit trail itself - hash chains, Merkle proofs, timestamped triage events - is a separate integrity concern. Priority: high for disaster response deployments. | +| **ADR-011**: Python Proof-of-Reality (URGENT) | Python v1 contains mock/placeholder code that undermines credibility; `verify.py` exists but mock paths remain | **Pursue independently.** This is a Python v1 code quality issue, not an ML/architecture concern. The Rust port (v2+) has no mock code - all 542+ tests run against real algorithm implementations. Recommendation: either complete the mock elimination in Python v1 or formally deprecate Python v1 in favor of the Rust stack. Priority: high for credibility. | ### 6.5 Gap Closure Summary diff --git a/docs/adr/ADR-028-esp32-capability-audit.md b/docs/adr/ADR-028-esp32-capability-audit.md index 8836b7ef1..0d4dab7f0 100644 --- a/docs/adr/ADR-028-esp32-capability-audit.md +++ b/docs/adr/ADR-028-esp32-capability-audit.md @@ -13,7 +13,7 @@ ## 1. Purpose -This ADR records a comprehensive, independently audited inventory of the wifi-densepose repository's ESP32 hardware capabilities, signal processing stack, neural network architectures, deployment infrastructure, and security posture. It serves as a **witness record** — a point-in-time attestation that third parties can use to verify what the codebase actually contains vs. what is claimed. 
+This ADR records a comprehensive, independently audited inventory of the wifi-densepose repository's ESP32 hardware capabilities, signal processing stack, neural network architectures, deployment infrastructure, and security posture. It serves as a **witness record** - a point-in-time attestation that third parties can use to verify what the codebase actually contains vs. what is claimed. --- @@ -31,7 +31,7 @@ Three parallel research agents examined the full repository simultaneously: --- -## 3. ESP32 Hardware — Confirmed Capabilities +## 3. ESP32 Hardware - Confirmed Capabilities ### 3.1 Firmware (C, ESP-IDF v5.2) @@ -133,7 +133,7 @@ python scripts/provision.py --port COM7 \ --- -## 4. Signal Processing — Confirmed Algorithms +## 4. Signal Processing - Confirmed Algorithms ### 4.1 SOTA Algorithms (ADR-014, wifi-densepose-signal) @@ -208,7 +208,7 @@ python scripts/provision.py --port COM7 \ --- -## 5. Deployment Infrastructure — Confirmed +## 5. Deployment Infrastructure - Confirmed ### 5.1 Published Artifacts diff --git a/docs/adr/ADR-029-ruvsense-multistatic-sensing-mode.md b/docs/adr/ADR-029-ruvsense-multistatic-sensing-mode.md index 45e1c781c..b7ba21e53 100644 --- a/docs/adr/ADR-029-ruvsense-multistatic-sensing-mode.md +++ b/docs/adr/ADR-029-ruvsense-multistatic-sensing-mode.md @@ -1,11 +1,11 @@ -# ADR-029: Project RuvSense -- Sensing-First RF Mode for Multistatic WiFi DensePose +# ADR-029: Project RuvSense - Sensing-First RF Mode for Multistatic WiFi DensePose | Field | Value | |-------|-------| | **Status** | Proposed | | **Date** | 2026-03-02 | | **Deciders** | ruv | -| **Codename** | **RuvSense** -- RuVector-Enhanced Sensing for Multistatic Fidelity | +| **Codename** | **RuvSense** - RuVector-Enhanced Sensing for Multistatic Fidelity | | **Relates to** | ADR-012 (ESP32 Mesh), ADR-014 (SOTA Signal Processing), ADR-016 (RuVector Training), ADR-017 (RuVector Signal+MAT), ADR-018 (ESP32 Implementation), ADR-024 (AETHER Embeddings), ADR-026 (Survivor Track 
Lifecycle), ADR-027 (MERIDIAN Generalization) | --- @@ -74,7 +74,7 @@ static uint32_t s_dwell_ms = 50; // 50ms per channel At 100 Hz raw CSI rate with 50 ms dwell across 3 channels, each channel yields ~33 frames/second. The existing ADR-018 binary frame format already carries `channel_freq_mhz` at offset 8, so no wire format change is needed. -> **Note (Issue #127 fix):** In promiscuous mode, CSI callbacks fire 100-500+ times/sec — far exceeding the channel dwell rate. The firmware now rate-limits UDP sends to 50 Hz and applies a 100 ms ENOMEM backoff if lwIP buffers are exhausted. This is essential for stable channel hopping under load. +> **Note (Issue #127 fix):** In promiscuous mode, CSI callbacks fire 100-500+ times/sec - far exceeding the channel dwell rate. The firmware now rate-limits UDP sends to 50 Hz and applies a 100 ms ENOMEM backoff if lwIP buffers are exhausted. This is essential for stable channel hopping under load. **NDP frame injection:** `esp_wifi_80211_tx()` injects deterministic Null Data Packet frames (preamble-only, no payload, ~24 us airtime) at GPIO-triggered intervals. This is sensing-first: the primary RF emission purpose is CSI measurement, not data communication. @@ -116,7 +116,7 @@ With N ESP32 nodes, collect N `MultiBandCsiFrame` per time slot and fuse with ge | 1 | Node B | A | C | D | 4 ms | | 2 | Node C | A | B | D | 4 ms | | 3 | Node D | A | B | C | 4 ms | -| 4 | -- | Processing + fusion | | | 30 ms | +| 4 | - | Processing + fusion | | | 30 ms | | **Total** | | | | | **50 ms = 20 Hz** | Synchronization: GPIO pulse from aggregator node at cycle start. Clock drift at ±10ppm over 50 ms is ~0.5 us, well within the 1 ms guard interval. @@ -348,10 +348,10 @@ No new workspace dependencies. 
All ruvector crates are already in the workspace - **12 independent viewpoints** from 4 commodity $10 nodes (C(4,2) × 2 links) - **20 Hz update rate** with Kalman-smoothed output for sub-30mm torso jitter - **Days-long stability** via coherence gating + SONA recalibration -- **All five ruvector crates exercised** — consistent algorithmic foundation -- **$73-91 total BOM** — accessible for research and production -- **802.11bf forward-compatible** — investment protected as commercial sensing arrives -- **Cognitum upgrade path** — same software stack, swap ESP32 for higher-bandwidth front end +- **All five ruvector crates exercised** - consistent algorithmic foundation +- **$73-91 total BOM** - accessible for research and production +- **802.11bf forward-compatible** - investment protected as commercial sensing arrives +- **Cognitum upgrade path** - same software stack, swap ESP32 for higher-bandwidth front end ### 9.2 Negative diff --git a/docs/adr/ADR-030-ruvsense-persistent-field-model.md b/docs/adr/ADR-030-ruvsense-persistent-field-model.md index 52cccb829..bd73ffac1 100644 --- a/docs/adr/ADR-030-ruvsense-persistent-field-model.md +++ b/docs/adr/ADR-030-ruvsense-persistent-field-model.md @@ -1,11 +1,11 @@ -# ADR-030: RuvSense Persistent Field Model — Longitudinal Drift Detection and Exotic Sensing Tiers +# ADR-030: RuvSense Persistent Field Model - Longitudinal Drift Detection and Exotic Sensing Tiers | Field | Value | |-------|-------| | **Status** | Proposed | | **Date** | 2026-03-02 | | **Deciders** | ruv | -| **Codename** | **RuvSense Field** — Persistent Electromagnetic World Model | +| **Codename** | **RuvSense Field** - Persistent Electromagnetic World Model | | **Relates to** | ADR-029 (RuvSense Multistatic), ADR-005 (SONA Self-Learning), ADR-024 (AETHER Embeddings), ADR-016 (RuVector Integration), ADR-026 (Survivor Track Lifecycle), ADR-027 (MERIDIAN Generalization) | --- @@ -20,19 +20,19 @@ The most exotic capabilities come from this shift in 
abstraction level: - The room is the model, not the person - People are structured perturbations to a baseline - Changes are deltas from a known state, not raw measurements -- Time is a first-class dimension — the system remembers days, not frames +- Time is a first-class dimension - the system remembers days, not frames ### 1.2 The Seven Capability Tiers | Tier | Capability | Foundation | |------|-----------|-----------| -| 1 | **Field Normal Modes** — Room electromagnetic eigenstructure | Baseline calibration + SVD | -| 2 | **Coarse RF Tomography** — 3D occupancy volume from link attenuations | Sparse tomographic inversion | -| 3 | **Intention Lead Signals** — Pre-movement prediction (200-500ms lead) | Temporal embedding trajectory analysis | -| 4 | **Longitudinal Biomechanics Drift** — Personal baseline deviation over days | Welford statistics + HNSW memory | -| 5 | **Cross-Room Continuity** — Identity persistence across spaces without optics | Environment fingerprinting + transition graph | -| 6 | **Invisible Interaction Layer** — Multi-user gesture control through walls/darkness | Per-person CSI perturbation classification | -| 7 | **Adversarial Detection** — Physically impossible signal identification | Multi-link consistency + field model constraints | +| 1 | **Field Normal Modes** - Room electromagnetic eigenstructure | Baseline calibration + SVD | +| 2 | **Coarse RF Tomography** - 3D occupancy volume from link attenuations | Sparse tomographic inversion | +| 3 | **Intention Lead Signals** - Pre-movement prediction (200-500ms lead) | Temporal embedding trajectory analysis | +| 4 | **Longitudinal Biomechanics Drift** - Personal baseline deviation over days | Welford statistics + HNSW memory | +| 5 | **Cross-Room Continuity** - Identity persistence across spaces without optics | Environment fingerprinting + transition graph | +| 6 | **Invisible Interaction Layer** - Multi-user gesture control through walls/darkness | Per-person CSI perturbation classification 
| +| 7 | **Adversarial Detection** - Physically impossible signal identification | Multi-link consistency + field model constraints | ### 1.3 Signals, Not Diagnoses @@ -133,7 +133,7 @@ wifi-densepose-signal/src/ruvsense/ ### 2.3 Field Normal Modes (Tier 1) -**What it is:** The room's electromagnetic eigenstructure — the stable propagation paths, reflection coefficients, and interference patterns when nobody is present. +**What it is:** The room's electromagnetic eigenstructure - the stable propagation paths, reflection coefficients, and interference patterns when nobody is present. **How it works:** 1. During quiet periods (empty room, overnight), collect 10 minutes of CSI across all links @@ -260,7 +260,7 @@ Longitudinal biomechanics tracker for rehabilitation and occupational health. ### 3.5 Vertical Recommendation for First Hardware SKU -**Invisible Guardian** — the elderly care wellness monitor. Rationale: +**Invisible Guardian** - the elderly care wellness monitor. Rationale: 1. Largest addressable market with immediate revenue (aging population, care facility demand) 2. Lowest regulatory bar (consumer wellness, no diagnostic claims) 3. 
Privacy advantage over cameras is a selling point, not a limitation @@ -314,9 +314,9 @@ All five crates are exercised across the exotic tiers: - **Room becomes self-sensing**: Field normal modes provide a persistent baseline that explains change as structured deltas - **7-day autonomous operation**: Coherence gating + SONA adaptation + longitudinal memory eliminate manual tuning -- **Privacy by design**: No images, no audio, no reconstructable data — only embeddings and statistical summaries +- **Privacy by design**: No images, no audio, no reconstructable data - only embeddings and statistical summaries - **Traceable evidence**: Every drift alert links to stored embeddings, timestamps, and graph constraints -- **Multiple product categories**: Same software stack, different packaging — Guardian, Twin, Interaction, Drift Monitor +- **Multiple product categories**: Same software stack, different packaging - Guardian, Twin, Interaction, Drift Monitor - **Regulatory clarity**: Consumer wellness first, clinical decision support later with accumulated dataset - **Security primitive**: Coherence gating detects adversarial injection, not just quality issues diff --git a/docs/adr/ADR-031-ruview-sensing-first-rf-mode.md b/docs/adr/ADR-031-ruview-sensing-first-rf-mode.md index eb1ad6c63..08d07adaa 100644 --- a/docs/adr/ADR-031-ruview-sensing-first-rf-mode.md +++ b/docs/adr/ADR-031-ruview-sensing-first-rf-mode.md @@ -1,11 +1,11 @@ -# ADR-031: Project RuView -- Sensing-First RF Mode for Multistatic Fidelity Enhancement +# ADR-031: Project RuView - Sensing-First RF Mode for Multistatic Fidelity Enhancement | Field | Value | |-------|-------| | **Status** | Proposed | | **Date** | 2026-03-02 | | **Deciders** | ruv | -| **Codename** | **RuView** -- RuVector Viewpoint-Integrated Enhancement | +| **Codename** | **RuView** - RuVector Viewpoint-Integrated Enhancement | | **Relates to** | ADR-012 (ESP32 Mesh), ADR-014 (SOTA Signal), ADR-016 (RuVector Integration), ADR-017 (RuVector 
Signal+MAT), ADR-021 (Vital Signs), ADR-024 (AETHER Embeddings), ADR-027 (MERIDIAN Cross-Environment) | --- @@ -20,7 +20,7 @@ Current WiFi DensePose operates with a single transmitter-receiver pair (or sing - **Depth ambiguity**: Motion along the RF propagation axis (toward/away from receiver) produces minimal phase change. - **Multi-person confusion**: Two people at similar range but different angles create overlapping CSI signatures. -The ESP32 mesh (ADR-012) partially addresses this via feature-level fusion across 3-6 nodes, but feature-level fusion cannot learn optimal fusion weights -- it uses hand-crafted aggregation (max, mean, coherent sum). +The ESP32 mesh (ADR-012) partially addresses this via feature-level fusion across 3-6 nodes, but feature-level fusion cannot learn optimal fusion weights - it uses hand-crafted aggregation (max, mean, coherent sum). ### 1.2 Three Fidelity Levers @@ -252,10 +252,10 @@ pub enum ViewpointFusionEvent { | File | Purpose | RuVector Crate | |------|---------|---------------| -| `crates/wifi-densepose-ruvector/src/viewpoint/mod.rs` | Module root, re-exports | -- | +| `crates/wifi-densepose-ruvector/src/viewpoint/mod.rs` | Module root, re-exports | - | | `crates/wifi-densepose-ruvector/src/viewpoint/attention.rs` | Cross-viewpoint scaled dot-product attention with geometric bias | ruvector-attention | | `crates/wifi-densepose-ruvector/src/viewpoint/geometry.rs` | GeometricDiversityIndex, Cramer-Rao bound estimation | ruvector-solver | -| `crates/wifi-densepose-ruvector/src/viewpoint/coherence.rs` | Coherence gating for environment stability | -- (pure math) | +| `crates/wifi-densepose-ruvector/src/viewpoint/coherence.rs` | Coherence gating for environment stability | - (pure math) | | `crates/wifi-densepose-ruvector/src/viewpoint/fusion.rs` | MultistaticArray aggregate, orchestrates fusion pipeline | ruvector-attention + ruvector-attn-mincut | ### 4.2 Phase 2: Signal Processing Extension @@ -268,7 +268,7 @@ pub enum 
ViewpointFusionEvent { | File | Purpose | RuVector Crate | |------|---------|---------------| -| `crates/wifi-densepose-hardware/src/esp32/tdm.rs` | TDM sensing protocol coordinator | -- (protocol logic) | +| `crates/wifi-densepose-hardware/src/esp32/tdm.rs` | TDM sensing protocol coordinator | - (protocol logic) | ### 4.4 Phase 4: Training and Metrics @@ -325,10 +325,10 @@ pub enum ViewpointFusionEvent { ### 6.1 Positive -- **Fundamental geometric improvement**: Viewpoint diversity reduces body self-occlusion and depth ambiguity -- these are physics, not model, limitations. +- **Fundamental geometric improvement**: Viewpoint diversity reduces body self-occlusion and depth ambiguity - these are physics, not model, limitations. - **Uses existing silicon**: ESP32-S3, commodity WiFi, no custom RF hardware required for Silver tier. - **Learned fusion weights**: Embedding-level fusion (Tier 3) outperforms hand-crafted feature-level fusion (Tier 2). -- **Composes with existing ADRs**: AETHER (per-viewpoint), MERIDIAN (cross-environment), and RuView (cross-viewpoint) are orthogonal -- they compose freely. +- **Composes with existing ADRs**: AETHER (per-viewpoint), MERIDIAN (cross-environment), and RuView (cross-viewpoint) are orthogonal - they compose freely. - **IEEE 802.11bf aligned**: TDM protocol maps to 802.11bf sensing sessions, enabling future migration to standard-compliant APs. - **Commodity price point**: $84 for 6-node Silver-tier deployment. diff --git a/docs/adr/ADR-032-multistatic-mesh-security-hardening.md b/docs/adr/ADR-032-multistatic-mesh-security-hardening.md index 168a5f3b7..90f3e4a5a 100644 --- a/docs/adr/ADR-032-multistatic-mesh-security-hardening.md +++ b/docs/adr/ADR-032-multistatic-mesh-security-hardening.md @@ -17,9 +17,9 @@ A security audit of the RuvSense multistatic sensing stack (ADR-029 through ADR- The findings fall into three categories: -1. 
**Missing cryptographic authentication** -- The TDM SyncBeacon and CSI frame formats lack any message authentication, allowing rogue nodes to inject spoofed beacons or frames into the mesh. -2. **Unbounded or unprotected resources** -- The NDP injection path has no rate limiter, the coherence gate recalibration state has no timeout cap, and the cross-room transition log grows without bound. -3. **Memory safety on embedded targets** -- NVS credential buffers are not zeroed after use, and static mutable globals in the CSI collector are accessed from both ESP32-S3 cores without synchronization. +1. **Missing cryptographic authentication** - The TDM SyncBeacon and CSI frame formats lack any message authentication, allowing rogue nodes to inject spoofed beacons or frames into the mesh. +2. **Unbounded or unprotected resources** - The NDP injection path has no rate limiter, the coherence gate recalibration state has no timeout cap, and the cross-room transition log grows without bound. +3. **Memory safety on embedded targets** - NVS credential buffers are not zeroed after use, and static mutable globals in the CSI collector are accessed from both ESP32-S3 cores without synchronization. ### 1.2 Threat Model @@ -38,8 +38,8 @@ The primary threat actor is a rogue ESP32 node on the same LAN subnet or within ### 1.3 Design Constraints - ESP32-S3 has limited CPU budget: cryptographic operations must complete within the 1 ms guard interval between TDM slots. -- HMAC-SHA256 on ESP32-S3 (hardware-accelerated via `mbedtls`) completes in approximately 15 us for 24-byte payloads -- well within budget. -- SipHash-2-4 completes in approximately 2 us for 64-byte payloads on ESP32-S3 -- suitable for per-frame MAC. +- HMAC-SHA256 on ESP32-S3 (hardware-accelerated via `mbedtls`) completes in approximately 15 us for 24-byte payloads - well within budget. +- SipHash-2-4 completes in approximately 2 us for 64-byte payloads on ESP32-S3 - suitable for per-frame MAC. 
- No TLS or TCP is available on the sensing data path (UDP broadcast for latency). - Pre-shared key (PSK) model is acceptable because all nodes in a mesh deployment are provisioned by the same operator. @@ -79,7 +79,7 @@ tag = HMAC-SHA256(key, message)[0..8] (truncated to 8 bytes) - Each receiver maintains a `last_accepted_nonce` per sender. A beacon is accepted only if `nonce > last_accepted_nonce - REPLAY_WINDOW`, where `REPLAY_WINDOW = 16` (accounts for packet reordering over UDP). - Nonce overflow (after 2^32 beacons at 20 Hz = ~6.8 years) triggers a mandatory key rotation. -**Implementation location:** `crates/wifi-densepose-hardware/src/esp32/tdm.rs` -- extend `SyncBeacon::to_bytes()` and `SyncBeacon::from_bytes()` to produce/consume the 28-byte authenticated format. Add `SyncBeacon::verify()` method. +**Implementation location:** `crates/wifi-densepose-hardware/src/esp32/tdm.rs` - extend `SyncBeacon::to_bytes()` and `SyncBeacon::from_bytes()` to produce/consume the 28-byte authenticated format. Add `SyncBeacon::verify()` method. ### 2.2 CSI Frame Integrity (M-3) @@ -110,8 +110,8 @@ siphash_key = HMAC-SHA256(mesh_key, "csi-frame-siphash")[0..16] The SipHash key is derived once at boot from the mesh key and cached in memory. **Implementation locations:** -- `firmware/esp32-csi-node/main/csi_collector.c` -- compute SipHash tag in `csi_serialize_frame()`, bump magic constant. -- `crates/wifi-densepose-hardware/src/esp32/` -- add frame verification in the aggregator's frame parser. +- `firmware/esp32-csi-node/main/csi_collector.c` - compute SipHash tag in `csi_serialize_frame()`, bump magic constant. +- `crates/wifi-densepose-hardware/src/esp32/` - add frame verification in the aggregator's frame parser. ### 2.3 NDP Injection Rate Limiter (M-4) @@ -135,7 +135,7 @@ typedef struct { `csi_inject_ndp_frame()` returns `ESP_ERR_NOT_ALLOWED` when the bucket is empty. The rate limiter parameters are configurable via NVS keys `ndp_max_tokens` and `ndp_refill_hz`. 
-**Implementation location:** `firmware/esp32-csi-node/main/csi_collector.c` -- add `ndp_rate_limiter_t` state and check in `csi_inject_ndp_frame()`. +**Implementation location:** `firmware/esp32-csi-node/main/csi_collector.c` - add `ndp_rate_limiter_t` state and check in `csi_inject_ndp_frame()`. ### 2.4 Coherence Gate Recalibration Timeout (M-5) @@ -166,7 +166,7 @@ pub struct GatePolicyConfig { } ``` -**Implementation location:** `crates/wifi-densepose-signal/src/ruvsense/coherence_gate.rs` -- extend `GateDecision` enum, modify `GatePolicy::evaluate()`. +**Implementation location:** `crates/wifi-densepose-signal/src/ruvsense/coherence_gate.rs` - extend `GateDecision` enum, modify `GatePolicy::evaluate()`. ### 2.5 Bounded Transition Log (L-1) @@ -184,7 +184,7 @@ pub struct CrossRoomConfig { The ring buffer is implemented as a `VecDeque` with a capacity check on push. When `transitions.len() >= max_transitions`, `transitions.pop_front()` before pushing. This preserves the append-only audit trail semantics (events are never mutated, only evicted by age). -**Implementation location:** `crates/wifi-densepose-signal/src/ruvsense/cross_room.rs` -- change `transitions: Vec` to `transitions: VecDeque`, add eviction logic in `match_entry()`. +**Implementation location:** `crates/wifi-densepose-signal/src/ruvsense/cross_room.rs` - change `transitions: Vec` to `transitions: VecDeque`, add eviction logic in `match_entry()`. ### 2.6 NVS Password Buffer Zeroing (L-4) @@ -205,7 +205,7 @@ static void secure_zero(void *ptr, size_t len) { Apply to all three `nvs_get_str` call sites in `nvs_config_load()` (ssid, password, target_ip). -**Implementation location:** `firmware/esp32-csi-node/main/nvs_config.c` -- add `explicit_bzero(buf, sizeof(buf))` after each `nvs_get_str` block. +**Implementation location:** `firmware/esp32-csi-node/main/nvs_config.c` - add `explicit_bzero(buf, sizeof(buf))` after each `nvs_get_str` block. 
### 2.7 Atomic Access for Static Mutable State (L-5) @@ -228,7 +228,7 @@ static SemaphoreHandle_t s_hop_mutex = NULL; The mutex is created in `csi_collector_init()` and taken/released around hop table reads in `csi_hop_next_channel()` and writes in `csi_collector_set_hop_table()`. -**Implementation location:** `firmware/esp32-csi-node/main/csi_collector.c` -- add `_Atomic` qualifiers, create and use `s_hop_mutex`. +**Implementation location:** `firmware/esp32-csi-node/main/csi_collector.c` - add `_Atomic` qualifiers, create and use `s_hop_mutex`. ### 2.8 Key Management @@ -453,11 +453,11 @@ Three dedicated QUIC streams separate traffic by priority: Beyond QUIC transport, three additional midstreamer crates enhance the sensing pipeline: -1. **`midstreamer-scheduler` v0.1.0** -- Replaces manual timer-based TDM slot scheduling with an ultra-low-latency real-time task scheduler. Provides deterministic slot firing with sub-microsecond jitter. +1. **`midstreamer-scheduler` v0.1.0** - Replaces manual timer-based TDM slot scheduling with an ultra-low-latency real-time task scheduler. Provides deterministic slot firing with sub-microsecond jitter. -2. **`midstreamer-temporal-compare` v0.1.0** -- Enhances gesture DTW matching (ADR-030 Tier 6) with temporal sequence comparison primitives. Provides optimized Sakoe-Chiba band DTW, LCS, and edit-distance kernels. +2. **`midstreamer-temporal-compare` v0.1.0** - Enhances gesture DTW matching (ADR-030 Tier 6) with temporal sequence comparison primitives. Provides optimized Sakoe-Chiba band DTW, LCS, and edit-distance kernels. -3. **`midstreamer-attractor` v0.1.0** -- Enhances longitudinal drift detection (ADR-030 Tier 4) with dynamical systems analysis. Detects phase-space attractor shifts that indicate biomechanical regime changes before they manifest as simple metric drift. +3. **`midstreamer-attractor` v0.1.0** - Enhances longitudinal drift detection (ADR-030 Tier 4) with dynamical systems analysis. 
Detects phase-space attractor shifts that indicate biomechanical regime changes before they manifest as simple metric drift. ### 6.5 Fallback Strategy @@ -500,8 +500,8 @@ The QUIC transport layer is additive, not a replacement: 4. Espressif. "ESP32-S3 Technical Reference Manual." Section 26: SHA Accelerator. 5. Turner, J. (2006). "Token Bucket Rate Limiting." RFC 2697 (adapted). 6. ADR-029 through ADR-031 (internal). -7. `midstreamer-quic` v0.1.0 -- QUIC multi-stream support. crates.io. -8. `midstreamer-scheduler` v0.1.0 -- Ultra-low-latency real-time task scheduler. crates.io. -9. `midstreamer-temporal-compare` v0.1.0 -- Temporal sequence comparison. crates.io. -10. `midstreamer-attractor` v0.1.0 -- Dynamical systems analysis. crates.io. +7. `midstreamer-quic` v0.1.0 - QUIC multi-stream support. crates.io. +8. `midstreamer-scheduler` v0.1.0 - Ultra-low-latency real-time task scheduler. crates.io. +9. `midstreamer-temporal-compare` v0.1.0 - Temporal sequence comparison. crates.io. +10. `midstreamer-attractor` v0.1.0 - Dynamical systems analysis. crates.io. 11. Iyengar, J. & Thomson, M. (2021). "QUIC: A UDP-Based Multiplexed and Secure Transport." RFC 9000. 
diff --git a/docs/adr/ADR-033-crv-signal-line-sensing-integration.md b/docs/adr/ADR-033-crv-signal-line-sensing-integration.md index c7b644b20..a5339cc18 100644 --- a/docs/adr/ADR-033-crv-signal-line-sensing-integration.md +++ b/docs/adr/ADR-033-crv-signal-line-sensing-integration.md @@ -1,11 +1,11 @@ -# ADR-033: CRV Signal Line Sensing Integration -- Mapping 6-Stage Coordinate Remote Viewing to WiFi-DensePose Pipeline +# ADR-033: CRV Signal Line Sensing Integration - Mapping 6-Stage Coordinate Remote Viewing to WiFi-DensePose Pipeline | Field | Value | |-------|-------| | **Status** | Proposed | | **Date** | 2026-03-01 | | **Deciders** | ruv | -| **Codename** | **CRV-Sense** -- Coordinate Remote Viewing Signal Line for WiFi Sensing | +| **Codename** | **CRV-Sense** - Coordinate Remote Viewing Signal Line for WiFi Sensing | | **Relates to** | ADR-016 (RuVector Integration), ADR-017 (RuVector Signal+MAT), ADR-024 (AETHER Embeddings), ADR-029 (RuvSense Multistatic), ADR-030 (Persistent Field Model), ADR-031 (RuView Viewpoint Fusion), ADR-032 (Mesh Security) | --- @@ -18,12 +18,12 @@ Coordinate Remote Viewing (CRV) is a structured 6-stage protocol that progressiv The WiFi-DensePose sensing pipeline follows a strikingly similar progressive refinement: -1. Raw CSI arrives as an undifferentiated signal -- the system must first classify the gestalt character of the RF environment. -2. Per-subcarrier amplitude/phase/frequency features are extracted -- analogous to sensory impressions. -3. The AP mesh forms a spatial topology with node positions and link geometry -- a dimensional sketch. -4. Coherence gating separates valid signal from noise and interference -- analytically overlaid artifacts must be detected and removed. -5. Pose estimation queries earlier CSI features for cross-referencing -- interrogation of the accumulated evidence. -6. Final multi-person partitioning produces the composite DensePose output -- the 3D model. +1. 
Raw CSI arrives as an undifferentiated signal - the system must first classify the gestalt character of the RF environment. +2. Per-subcarrier amplitude/phase/frequency features are extracted - analogous to sensory impressions. +3. The AP mesh forms a spatial topology with node positions and link geometry - a dimensional sketch. +4. Coherence gating separates valid signal from noise and interference - analytically overlaid artifacts must be detected and removed. +5. Pose estimation queries earlier CSI features for cross-referencing - interrogation of the accumulated evidence. +6. Final multi-person partitioning produces the composite DensePose output - the 3D model. This structural isomorphism is not accidental. Both CRV and WiFi sensing solve the same abstract problem: extract structured information from a noisy, high-dimensional signal space through progressive refinement with explicit noise separation. @@ -33,15 +33,15 @@ The `ruvector-crv` crate provides the following public API: | Component | Purpose | Upstream Dependency | |-----------|---------|-------------------| -| `CrvSessionManager` | Session lifecycle: create, add stage data, convergence analysis | -- | -| `StageIEncoder` | Poincare ball hyperbolic embeddings for gestalt primitives | -- (internal hyperbolic math) | +| `CrvSessionManager` | Session lifecycle: create, add stage data, convergence analysis | - | +| `StageIEncoder` | Poincare ball hyperbolic embeddings for gestalt primitives | - (internal hyperbolic math) | | `StageIIEncoder` | Multi-head attention for sensory vectors | `ruvector-attention` | | `StageIIIEncoder` | GNN graph topology encoding | `ruvector-gnn` | -| `StageIVEncoder` | SNN temporal encoding for AOL (Analytical Overlay) detection | -- (internal SNN) | -| `StageVEngine` | Differentiable search and cross-referencing | -- (internal soft attention) | +| `StageIVEncoder` | SNN temporal encoding for AOL (Analytical Overlay) detection | - (internal SNN) | +| `StageVEngine` | 
Differentiable search and cross-referencing | - (internal soft attention) | | `StageVIModeler` | MinCut partitioning for composite model | `ruvector-mincut` | -| `ConvergenceResult` | Cross-session agreement analysis | -- | -| `CrvConfig` | Configuration (384-d default, curvature, AOL threshold, SNN params) | -- | +| `ConvergenceResult` | Cross-session agreement analysis | - | +| `CrvConfig` | Configuration (384-d default, curvature, AOL threshold, SNN params) | - | Key types: `GestaltType` (Manmade/Natural/Movement/Energy/Water/Land), `SensoryModality` (Texture/Color/Temperature/Sound/...), `AOLDetection` (content + anomaly score), `SignalLineProbe` (query + attention weights), `TargetPartition` (MinCut cluster + centroid). @@ -63,7 +63,7 @@ The `wifi-densepose-ruvector` crate already depends on `ruvector-crv` in its `Ca ### 1.4 The Key Insight: Cross-Session Convergence = Cross-Room Identity -CRV's convergence analysis compares independent sessions targeting the same coordinate to find agreement in their embeddings. In WiFi-DensePose, different AP clusters in different rooms are independent "viewers" of the same person. When a person moves from Room A to Room B, the CRV convergence mechanism can find agreement between the Room A embedding trail and the Room B initial embeddings -- establishing identity continuity without cameras. +CRV's convergence analysis compares independent sessions targeting the same coordinate to find agreement in their embeddings. In WiFi-DensePose, different AP clusters in different rooms are independent "viewers" of the same person. When a person moves from Room A to Room B, the CRV convergence mechanism can find agreement between the Room A embedding trail and the Room B initial embeddings - establishing identity continuity without cameras. 
--- @@ -264,7 +264,7 @@ impl MeshTopologyEncoder { ### 2.5 Stage IV: Coherence Gating as AOL Detection -**CRV mapping:** Stage IV detects Analytical Overlay (AOL) -- moments when the analytical mind contaminates the raw signal with pre-existing assumptions. In WiFi sensing, the coherence gate (ADR-030/032) serves the same function: it detects when environmental interference, multipath changes, or hardware artifacts contaminate the CSI signal, and flags those frames for exclusion. +**CRV mapping:** Stage IV detects Analytical Overlay (AOL) - moments when the analytical mind contaminates the raw signal with pre-existing assumptions. In WiFi sensing, the coherence gate (ADR-030/032) serves the same function: it detects when environmental interference, multipath changes, or hardware artifacts contaminate the CSI signal, and flags those frames for exclusion. | CRV AOL Concept | WiFi Coherence Analog | |-----------------|---------------------| @@ -305,7 +305,7 @@ impl CoherenceAolDetector { ### 2.6 Stage V: Pose Interrogation via Differentiable Search -**CRV mapping:** Stage V is the interrogation phase -- probing earlier stage data with specific queries to extract targeted information. In WiFi sensing, this maps to querying the accumulated CSI feature history with a pose hypothesis to find supporting or contradicting evidence. +**CRV mapping:** Stage V is the interrogation phase - probing earlier stage data with specific queries to extract targeted information. In WiFi sensing, this maps to querying the accumulated CSI feature history with a pose hypothesis to find supporting or contradicting evidence. **WiFi domain types:** @@ -334,11 +334,11 @@ impl PoseInterrogator { } ``` -**Integration point:** `ruvsense/field_model.rs` maintains the persistent electromagnetic field model (ADR-030). 
The `PoseInterrogator` wraps this with CRV Stage V semantics -- the field model's history becomes the corpus that `StageVEngine` searches over, and the pose hypothesis becomes the probe query. +**Integration point:** `ruvsense/field_model.rs` maintains the persistent electromagnetic field model (ADR-030). The `PoseInterrogator` wraps this with CRV Stage V semantics - the field model's history becomes the corpus that `StageVEngine` searches over, and the pose hypothesis becomes the probe query. ### 2.7 Stage VI: Multi-Person Partitioning via MinCut -**CRV mapping:** Stage VI produces the composite 3D model by clustering accumulated data into distinct target partitions via MinCut. In WiFi sensing, this maps to multi-person separation -- partitioning the accumulated CSI embeddings into person-specific clusters. +**CRV mapping:** Stage VI produces the composite 3D model by clustering accumulated data into distinct target partitions via MinCut. In WiFi sensing, this maps to multi-person separation - partitioning the accumulated CSI embeddings into person-specific clusters. 
**WiFi domain types:** @@ -458,27 +458,27 @@ impl WifiCrvSession { | File | Purpose | Upstream Dependency | |------|---------|-------------------| -| `crates/wifi-densepose-ruvector/src/crv/mod.rs` | Module root, re-exports all CRV-Sense types | -- | +| `crates/wifi-densepose-ruvector/src/crv/mod.rs` | Module root, re-exports all CRV-Sense types | - | | `crates/wifi-densepose-ruvector/src/crv/config.rs` | `WifiCrvConfig` extending `CrvConfig` with WiFi-specific defaults (128-d instead of 384-d to match AETHER) | `ruvector-crv` | | `crates/wifi-densepose-ruvector/src/crv/session.rs` | `WifiCrvSession` wrapping `CrvSessionManager` | `ruvector-crv` | -| `crates/wifi-densepose-ruvector/src/crv/output.rs` | `WifiCrvOutput` struct with per-stage embeddings and diagnostics | -- | +| `crates/wifi-densepose-ruvector/src/crv/output.rs` | `WifiCrvOutput` struct with per-stage embeddings and diagnostics | - | ### 3.2 Phase 2: Stage Encoders (New Files) | File | Purpose | Upstream Dependency | |------|---------|-------------------| -| `crates/wifi-densepose-ruvector/src/crv/gestalt.rs` | `CsiGestaltClassifier` -- Stage I Poincare ball embedding | `ruvector-crv::StageIEncoder` | -| `crates/wifi-densepose-ruvector/src/crv/sensory.rs` | `CsiSensoryEncoder` -- Stage II multi-head attention | `ruvector-crv::StageIIEncoder`, `ruvector-attention` | -| `crates/wifi-densepose-ruvector/src/crv/topology.rs` | `MeshTopologyEncoder` -- Stage III GNN topology | `ruvector-crv::StageIIIEncoder`, `ruvector-gnn` | -| `crates/wifi-densepose-ruvector/src/crv/coherence.rs` | `CoherenceAolDetector` -- Stage IV SNN temporal encoding | `ruvector-crv::StageIVEncoder` | -| `crates/wifi-densepose-ruvector/src/crv/interrogation.rs` | `PoseInterrogator` -- Stage V differentiable search | `ruvector-crv::StageVEngine` | -| `crates/wifi-densepose-ruvector/src/crv/partition.rs` | `PersonPartitioner` -- Stage VI MinCut partitioning | `ruvector-crv::StageVIModeler`, `ruvector-mincut` | +| 
`crates/wifi-densepose-ruvector/src/crv/gestalt.rs` | `CsiGestaltClassifier` - Stage I Poincare ball embedding | `ruvector-crv::StageIEncoder` | +| `crates/wifi-densepose-ruvector/src/crv/sensory.rs` | `CsiSensoryEncoder` - Stage II multi-head attention | `ruvector-crv::StageIIEncoder`, `ruvector-attention` | +| `crates/wifi-densepose-ruvector/src/crv/topology.rs` | `MeshTopologyEncoder` - Stage III GNN topology | `ruvector-crv::StageIIIEncoder`, `ruvector-gnn` | +| `crates/wifi-densepose-ruvector/src/crv/coherence.rs` | `CoherenceAolDetector` - Stage IV SNN temporal encoding | `ruvector-crv::StageIVEncoder` | +| `crates/wifi-densepose-ruvector/src/crv/interrogation.rs` | `PoseInterrogator` - Stage V differentiable search | `ruvector-crv::StageVEngine` | +| `crates/wifi-densepose-ruvector/src/crv/partition.rs` | `PersonPartitioner` - Stage VI MinCut partitioning | `ruvector-crv::StageVIModeler`, `ruvector-mincut` | ### 3.3 Phase 3: Cross-Session Convergence | File | Purpose | Upstream Dependency | |------|---------|-------------------| -| `crates/wifi-densepose-ruvector/src/crv/convergence.rs` | `MultiViewerConvergence` -- cross-room identity matching | `ruvector-crv::CrvSessionManager` | +| `crates/wifi-densepose-ruvector/src/crv/convergence.rs` | `MultiViewerConvergence` - cross-room identity matching | `ruvector-crv::CrvSessionManager` | ### 3.4 Phase 4: Integration with Existing Modules (Edits to Existing Files) @@ -563,7 +563,7 @@ pub enum CrvSensingEvent { ### 4.2 Integration with Existing Bounded Contexts -**Signal (wifi-densepose-signal):** New traits `CrvGestaltSource` and `CrvSensorySource` allow the CRV module to consume signal processing outputs without tight coupling. The signal crate does not depend on the CRV crate -- the dependency flows one direction only. +**Signal (wifi-densepose-signal):** New traits `CrvGestaltSource` and `CrvSensorySource` allow the CRV module to consume signal processing outputs without tight coupling. 
The signal crate does not depend on the CRV crate - the dependency flows one direction only. **Training (wifi-densepose-train):** The `PersonPartitioner` (Stage VI) produces the same MinCut partitions as the existing `DynamicPersonMatcher`. A shared trait `PersonSeparator` allows both to be used interchangeably. @@ -577,13 +577,13 @@ All seven `ruvector` crates exercised by the CRV-Sense integration: | CRV Stage | ruvector Crate | API Used | WiFi-DensePose Role | |-----------|---------------|----------|-------------------| -| I (Gestalt) | -- (internal Poincare math) | `StageIEncoder::encode()` | Hyperbolic embedding of CSI gestalt taxonomy | +| I (Gestalt) | - (internal Poincare math) | `StageIEncoder::encode()` | Hyperbolic embedding of CSI gestalt taxonomy | | II (Sensory) | `ruvector-attention` | `StageIIEncoder::encode()` | Multi-head attention over subcarrier features | | III (Dimensional) | `ruvector-gnn` | `StageIIIEncoder::encode()` | GNN encoding of AP mesh topology | -| IV (AOL) | -- (internal SNN) | `StageIVEncoder::encode()` | SNN temporal encoding of coherence violations | -| V (Interrogation) | -- (internal soft attention) | `StageVEngine::search()` | Differentiable search over field model history | +| IV (AOL) | - (internal SNN) | `StageIVEncoder::encode()` | SNN temporal encoding of coherence violations | +| V (Interrogation) | - (internal soft attention) | `StageVEngine::search()` | Differentiable search over field model history | | VI (Composite) | `ruvector-mincut` | `StageVIModeler::partition()` | MinCut person separation | -| Convergence | -- (cosine similarity) | `CrvSessionManager::find_convergence()` | Cross-room identity matching | +| Convergence | - (cosine similarity) | `CrvSessionManager::find_convergence()` | Cross-room identity matching | Additionally, the CRV module benefits from existing ruvector integrations already in the workspace: diff --git a/docs/adr/ADR-034-expo-mobile-app.md b/docs/adr/ADR-034-expo-mobile-app.md index 
c0d7036e8..69978df2f 100644 --- a/docs/adr/ADR-034-expo-mobile-app.md +++ b/docs/adr/ADR-034-expo-mobile-app.md @@ -5,7 +5,7 @@ | **Status** | Accepted | | **Date** | 2026-03-02 | | **Deciders** | MaTriXy, rUv | -| **Codename** | **FieldView** -- Mobile Companion for WiFi-DensePose Field Deployment | +| **Codename** | **FieldView** - Mobile Companion for WiFi-DensePose Field Deployment | | **Relates to** | ADR-019 (Sensing-Only UI Mode), ADR-021 (Vital Sign Detection), ADR-026 (Survivor Track Lifecycle), ADR-029 (RuvSense Multistatic), ADR-031 (RuView Sensing-First RF), ADR-032 (Mesh Security) | --- @@ -40,8 +40,8 @@ In all three scenarios, the mobile device does not communicate with ESP32 nodes The desktop web UI (`ui/`) and the mobile app share no code at the component level, but they consume the same backend APIs: -- **WebSocket**: `ws://host:3001/ws/sensing` -- streaming SensingFrame JSON -- **REST**: `http://host:3000/api/v1/...` -- configuration, history, health +- **WebSocket**: `ws://host:3001/ws/sensing` - streaming SensingFrame JSON +- **REST**: `http://host:3000/api/v1/...` - configuration, history, health The mobile app's Three.js Gaussian splat viewer (LiveScreen) loads the same splat HTML bundle used by the desktop UI, rendered inside a WebView (native) or iframe (web). diff --git a/docs/adr/ADR-035-live-sensing-ui-accuracy.md b/docs/adr/ADR-035-live-sensing-ui-accuracy.md index 3f818a985..b36899bec 100644 --- a/docs/adr/ADR-035-live-sensing-ui-accuracy.md +++ b/docs/adr/ADR-035-live-sensing-ui-accuracy.md @@ -10,10 +10,10 @@ Accepted Issue #86 reported that the live demo shows a static/barely-animated stick figure and the sensing page displays inaccurate data, despite a working ESP32 sending real CSI frames. Investigation revealed three root causes: -1. **Docker defaults to `--source simulated`** — even with a real ESP32 connected, the server generates synthetic sine-wave data instead of reading UDP frames. -2. 
**Live demo pose is analytically computed** — `derive_pose_from_sensing()` generates keypoints using `sin(tick)` math unrelated to actual signal content. No trained `.rvf` model is loaded by default. -3. **Sensing feature extraction is oversimplified** — the server uses single-frame thresholds for motion detection and has no temporal analysis (breathing FFT, sliding window variance, frame history). -4. **No data source indicator** — users cannot tell whether they are seeing real or simulated data. +1. **Docker defaults to `--source simulated`** - even with a real ESP32 connected, the server generates synthetic sine-wave data instead of reading UDP frames. +2. **Live demo pose is analytically computed** - `derive_pose_from_sensing()` generates keypoints using `sin(tick)` math unrelated to actual signal content. No trained `.rvf` model is loaded by default. +3. **Sensing feature extraction is oversimplified** - the server uses single-frame thresholds for motion detection and has no temporal analysis (breathing FFT, sliding window variance, frame history). +4. **No data source indicator** - users cannot tell whether they are seeing real or simulated data. ## Decision @@ -50,7 +50,7 @@ Issue #86 reported that the live demo shows a static/barely-animated stick figur ### Positive - Users with real ESP32 hardware get real data by default (auto-detect). -- Simulated data is clearly labeled — no more confusion about data authenticity. +- Simulated data is clearly labeled - no more confusion about data authenticity. - Pose skeleton visually responds to actual signal changes (motion, breathing, variance). - Feature extraction produces physiologically meaningful metrics (breathing rate via Goertzel, temporal motion detection). - Setup guide manages expectations about what each hardware configuration provides. 
@@ -58,7 +58,7 @@ Issue #86 reported that the live demo shows a static/barely-animated stick figur ### Negative - Signal-derived pose is still an approximation, not neural network inference. Per-limb tracking requires a trained `.rvf` model + 4+ ESP32 nodes. - Goertzel filter bank adds ~O(9×N) computation per frame (negligible at 100 frames). -- Users with only 1 ESP32 may still be disappointed that arm tracking doesn't work — but the UI now explains why. +- Users with only 1 ESP32 may still be disappointed that arm tracking doesn't work - but the UI now explains why. ### 5. Dark mode consistency - Live Demo tab converted from light theme to dark mode matching the rest of the UI. @@ -72,7 +72,7 @@ All four render modes in the pose visualization dropdown now produce distinct vi | **Skeleton** | Green lines connecting joints + red keypoint dots | | **Keypoints** | Large colored dots with glow and labels, no connecting lines | | **Heatmap** | Gaussian radial blobs per keypoint (hue per person), faint skeleton overlay at 25% opacity | -| **Dense** | Body region segmentation with colored filled polygons — head (red), torso (blue), left arm (green), right arm (orange), left leg (purple), right leg (yellow) | +| **Dense** | Body region segmentation with colored filled polygons - head (red), torso (blue), left arm (green), right arm (orange), left leg (purple), right leg (yellow) | Previously heatmap and dense were stubs that fell back to skeleton mode. @@ -80,19 +80,19 @@ Previously heatmap and dense were stubs that fell back to skeleton mode. The `pose_source` field from the WebSocket message was being dropped in `convertZoneDataToRestFormat()` in `pose.service.js`. Now passed through so the Estimation Mode badge displays correctly. 
## Files Changed -- `docker/Dockerfile.rust` — `CSI_SOURCE=auto` env, shell entrypoint for variable expansion -- `docker/docker-compose.yml` — `CSI_SOURCE=${CSI_SOURCE:-auto}`, shell command string -- `wifi-densepose-sensing-server/src/main.rs` — frame history buffer, Goertzel breathing estimation, temporal motion score, signal-driven pose derivation, pose_source field, 100ms tick default -- `ui/services/sensing.service.js` — `dataSource` state, delayed simulation fallback, `_simulated` marker -- `ui/services/pose.service.js` — `pose_source` passthrough in data conversion -- `ui/components/SensingTab.js` — data source banner, "About This Data" card -- `ui/components/LiveDemoTab.js` — estimation mode badge, setup guide panel, dark mode theme -- `ui/utils/pose-renderer.js` — heatmap (Gaussian blobs) and dense (body region segmentation) render modes -- `ui/style.css` — banner, badge, guide panel, and about-text styles -- `README.md` — live pose detection screenshot -- `assets/screen.png` — screenshot asset +- `docker/Dockerfile.rust` - `CSI_SOURCE=auto` env, shell entrypoint for variable expansion +- `docker/docker-compose.yml` - `CSI_SOURCE=${CSI_SOURCE:-auto}`, shell command string +- `wifi-densepose-sensing-server/src/main.rs` - frame history buffer, Goertzel breathing estimation, temporal motion score, signal-driven pose derivation, pose_source field, 100ms tick default +- `ui/services/sensing.service.js` - `dataSource` state, delayed simulation fallback, `_simulated` marker +- `ui/services/pose.service.js` - `pose_source` passthrough in data conversion +- `ui/components/SensingTab.js` - data source banner, "About This Data" card +- `ui/components/LiveDemoTab.js` - estimation mode badge, setup guide panel, dark mode theme +- `ui/utils/pose-renderer.js` - heatmap (Gaussian blobs) and dense (body region segmentation) render modes +- `ui/style.css` - banner, badge, guide panel, and about-text styles +- `README.md` - live pose detection screenshot +- 
`assets/screen.png` - screenshot asset ## References - Issue: https://github.com/ruvnet/wifi-densepose/issues/86 -- ADR-029: RuvSense multistatic sensing mode (proposed — full pipeline integration) +- ADR-029: RuvSense multistatic sensing mode (proposed - full pipeline integration) - ADR-014: SOTA signal processing diff --git a/docs/adr/ADR-036-rvf-training-pipeline-ui.md b/docs/adr/ADR-036-rvf-training-pipeline-ui.md index 467c64968..b138e1668 100644 --- a/docs/adr/ADR-036-rvf-training-pipeline-ui.md +++ b/docs/adr/ADR-036-rvf-training-pipeline-ui.md @@ -8,19 +8,19 @@ Proposed ## Context -The wifi-densepose system currently operates in **signal-derived** mode — `derive_pose_from_sensing()` maps aggregate CSI features (motion power, breathing rate, variance) to keypoint positions using deterministic math. This gives whole-body presence and gross motion but cannot track individual limbs. +The wifi-densepose system currently operates in **signal-derived** mode - `derive_pose_from_sensing()` maps aggregate CSI features (motion power, breathing rate, variance) to keypoint positions using deterministic math. This gives whole-body presence and gross motion but cannot track individual limbs. The infrastructure for **model inference** mode exists but is disconnected: -1. **RVF container format** (`rvf_container.rs`, 1,102 lines) — a 64-byte-aligned binary format supporting model weights (`SEG_VEC`), metadata (`SEG_MANIFEST`), quantization (`SEG_QUANT`), LoRA profiles (`SEG_LORA`), contrastive embeddings (`SEG_EMBED`), and witness audit trails (`SEG_WITNESS`). Builder and reader are fully implemented with CRC32 integrity checks. +1. **RVF container format** (`rvf_container.rs`, 1,102 lines) - a 64-byte-aligned binary format supporting model weights (`SEG_VEC`), metadata (`SEG_MANIFEST`), quantization (`SEG_QUANT`), LoRA profiles (`SEG_LORA`), contrastive embeddings (`SEG_EMBED`), and witness audit trails (`SEG_WITNESS`). 
Builder and reader are fully implemented with CRC32 integrity checks. -2. **Training crate** (`wifi-densepose-train`) — AdamW optimizer, PCK@0.2/OKS metrics, LR scheduling with warmup, early stopping, CSV logging, and checkpoint export. Supports `CsiDataset` trait with planned MM-Fi (114→56 subcarrier interpolation) and Wi-Pose (30→56 zero-pad) loaders per ADR-015. +2. **Training crate** (`wifi-densepose-train`) - AdamW optimizer, PCK@0.2/OKS metrics, LR scheduling with warmup, early stopping, CSV logging, and checkpoint export. Supports `CsiDataset` trait with planned MM-Fi (114→56 subcarrier interpolation) and Wi-Pose (30→56 zero-pad) loaders per ADR-015. -3. **NN inference crate** (`wifi-densepose-nn`) — ONNX Runtime backend with CPU/GPU support, dynamic tensor shapes, thread-safe `OnnxBackend` wrapper, model info inspection, and warmup. +3. **NN inference crate** (`wifi-densepose-nn`) - ONNX Runtime backend with CPU/GPU support, dynamic tensor shapes, thread-safe `OnnxBackend` wrapper, model info inspection, and warmup. -4. **Sensing server CLI** (`--model `, `--train`, `--pretrain`, `--embed`) — flags exist for model loading, training mode, and embedding extraction, but the end-to-end path from raw CSI → trained `.rvf` → live inference is not wired together. +4. **Sensing server CLI** (`--model `, `--train`, `--pretrain`, `--embed`) - flags exist for model loading, training mode, and embedding extraction, but the end-to-end path from raw CSI → trained `.rvf` → live inference is not wired together. -5. **UI gaps** — No model management, training progress visualization, LoRA profile switching, or embedding inspection. The Settings panel lacks model configuration. The Live Demo has no way to load a trained model or compare signal-derived vs model-inference output side-by-side. +5. **UI gaps** - No model management, training progress visualization, LoRA profile switching, or embedding inspection. The Settings panel lacks model configuration. 
The Live Demo has no way to load a trained model or compare signal-derived vs model-inference output side-by-side. ### What users need @@ -97,14 +97,14 @@ On training completion: - `POST /api/v1/train/lora { base_model_id, dataset_ids[], profile_name, rank: 8, epochs: 20 }`. #### 3.2 Profile Switching -- `POST /api/v1/models/lora/activate { model_id, profile_name }` — hot-swap LoRA weights without reloading base model. +- `POST /api/v1/models/lora/activate { model_id, profile_name }` - hot-swap LoRA weights without reloading base model. - UI dropdown lists available profiles per loaded model. ### Phase 4: UI Integration #### 4.1 Model Management Panel (new: `ui/components/ModelPanel.js`) - **Model Library**: List loaded and available `.rvf` models with metadata (version, dataset, PCK score, size, created date). -- **Model Inspector**: Show RVF segment breakdown — weight count, quantization type, LoRA profiles, embedding config, witness hash. +- **Model Inspector**: Show RVF segment breakdown - weight count, quantization type, LoRA profiles, embedding config, witness hash. - **Load/Unload**: One-click model loading with progress bar. - **Compare**: Side-by-side signal-derived vs model-inference toggle in Live Demo. @@ -152,24 +152,24 @@ When a `.rvf` model is loaded: ### Positive - Users can train a model on **their own environment** without external tools or Python dependencies. - LoRA profiles mean a single base model adapts to multiple rooms in minutes, not hours. -- Training progress is visible in real-time — no black-box waiting. +- Training progress is visible in real-time - no black-box waiting. - A/B comparison lets users see the quality jump from signal-derived to model-inference. - RVF container bundles everything (weights, metadata, LoRA, witness) in one portable file. -- Self-supervised pretraining requires no labels — just leave ESP32s running. -- Progressive loading means the UI is never "loading..." — signal-derived kicks in immediately. 
+- Self-supervised pretraining requires no labels - just leave ESP32s running. +- Progressive loading means the UI is never "loading..." - signal-derived kicks in immediately. ### Negative - Training requires significant compute: GPU recommended for supervised training (CPU possible but 10-50x slower). -- MM-Fi and Wi-Pose datasets must be downloaded separately (10-50 GB each) — cannot be bundled. +- MM-Fi and Wi-Pose datasets must be downloaded separately (10-50 GB each) - cannot be bundled. - LoRA rank must be tuned per environment; too low loses expressiveness, too high overfits. - ONNX Runtime adds ~50 MB to the binary size when GPU support is enabled. -- Real-time inference at 10 FPS requires ~10ms per frame — tight budget on CPU. +- Real-time inference at 10 FPS requires ~10ms per frame - tight budget on CPU. - Teacher-student labeling (camera → pose labels → CSI training) requires camera access, which may conflict with the privacy-first premise. ### Mitigations - Provide pre-trained base `.rvf` model downloadable from releases (trained on MM-Fi + Wi-Pose). - INT8 quantization (`SEG_QUANT`) reduces model size 4x and speeds inference ~2x on CPU. -- Camera-based labeling is **optional** — self-supervised pretraining works without camera. +- Camera-based labeling is **optional** - self-supervised pretraining works without camera. - Training API validates VRAM availability before starting GPU training; falls back to CPU with warning. 
## Implementation Order @@ -196,29 +196,29 @@ When a `.rvf` model is loaded: ## Files to Create/Modify ### New Files -- `ui/components/ModelPanel.js` — Model library, inspector, load/unload controls -- `ui/components/TrainingPanel.js` — Recording controls, training progress, metric charts -- `rust-port/.../sensing-server/src/recording.rs` — CSI recording API handlers -- `rust-port/.../sensing-server/src/training_api.rs` — Training API handlers + WS progress stream -- `rust-port/.../sensing-server/src/model_manager.rs` — Model loading, hot-swap, 32LoRA activation -- `data/models/` — Default model storage directory +- `ui/components/ModelPanel.js` - Model library, inspector, load/unload controls +- `ui/components/TrainingPanel.js` - Recording controls, training progress, metric charts +- `rust-port/.../sensing-server/src/recording.rs` - CSI recording API handlers +- `rust-port/.../sensing-server/src/training_api.rs` - Training API handlers + WS progress stream +- `rust-port/.../sensing-server/src/model_manager.rs` - Model loading, hot-swap, 32LoRA activation +- `data/models/` - Default model storage directory ### Modified Files -- `rust-port/.../sensing-server/src/main.rs` — Wire recording, training, and model APIs -- `rust-port/.../train/src/trainer.rs` — Add WebSocket progress callback, LoRA training mode -- `rust-port/.../train/src/dataset.rs` — MM-Fi and Wi-Pose dataset loaders -- `rust-port/.../nn/src/onnx.rs` — LoRA weight injection, INT8 quantization support -- `ui/components/LiveDemoTab.js` — Model selector, LoRA dropdown, A/B spsplit view -- `ui/components/SettingsPanel.js` — Model and training configuration sections -- `ui/components/PoseDetectionCanvas.js` — Pose trail rendering, confidence heatmap overlay -- `ui/services/pose.service.js` — Model-inference keypoint processing -- `ui/index.html` — Add Training tabhee -- `ui/style.css` — Styles for new panels +- `rust-port/.../sensing-server/src/main.rs` - Wire recording, training, and model APIs +- 
`rust-port/.../train/src/trainer.rs` - Add WebSocket progress callback, LoRA training mode +- `rust-port/.../train/src/dataset.rs` - MM-Fi and Wi-Pose dataset loaders +- `rust-port/.../nn/src/onnx.rs` - LoRA weight injection, INT8 quantization support +- `ui/components/LiveDemoTab.js` - Model selector, LoRA dropdown, A/B split view +- `ui/components/SettingsPanel.js` - Model and training configuration sections +- `ui/components/PoseDetectionCanvas.js` - Pose trail rendering, confidence heatmap overlay +- `ui/services/pose.service.js` - Model-inference keypoint processing +- `ui/index.html` - Add Training tab +- `ui/style.css` - Styles for new panels ## References - ADR-015: MM-Fi + Wi-Pose training datasets - ADR-016: RuVector training pipeline integration -- ADR-024: Project AETHER — contrastive CSI embedding model +- ADR-024: Project AETHER - contrastive CSI embedding model - ADR-029: RuvSense multistatic sensing mode - ADR-031: RuView sensing-first RF mode (progressive loading) - ADR-035: Live sensing UI accuracy & data source transparency diff --git a/docs/adr/ADR-037-multi-person-pose-detection.md b/docs/adr/ADR-037-multi-person-pose-detection.md index f710aa06b..307126408 100644 --- a/docs/adr/ADR-037-multi-person-pose-detection.md +++ b/docs/adr/ADR-037-multi-person-pose-detection.md @@ -24,7 +24,7 @@ Estimate occupancy count from CSI signal statistics without decomposition. **Approach**: Eigenvalue analysis of the CSI covariance matrix across subcarriers. 
- Compute the 56×56 covariance matrix of CSI amplitudes over a sliding window (e.g., 50 frames / 5 seconds) -- Count eigenvalues above a noise threshold — each significant eigenvalue corresponds to an independent scatterer (person or static object) +- Count eigenvalues above a noise threshold - each significant eigenvalue corresponds to an independent scatterer (person or static object) - Subtract the static environment baseline (estimated during calibration or from the field model's SVD eigenstructure) - The residual significant eigenvalue count estimates person count @@ -77,12 +77,12 @@ Train a dedicated multi-person model using the RVF pipeline (ADR-036). - Enables room occupancy counting (Phase 1 alone is useful) - Distinct pose tracking per person enables activity recognition per individual -- Progressive approach — each phase delivers incremental value +- Progressive approach - each phase delivers incremental value - Reuses existing infrastructure (field model SVD, Kalman tracker, AETHER, RVF pipeline) ### Negative -- Single ESP32 node has fundamental spatial resolution limits — separating 2 people standing close together (< 0.5m) will be unreliable +- Single ESP32 node has fundamental spatial resolution limits - separating 2 people standing close together (< 0.5m) will be unreliable - NMF decomposition adds ~5-10ms latency per frame - Person count estimation will have false positives from large moving objects (pets, fans) - Phase 4 neural model requires multi-person training data collection @@ -90,7 +90,7 @@ Train a dedicated multi-person model using the RVF pipeline (ADR-036). 
### Neutral - Multi-node multistatic mesh (ADR-029) dramatically improves multi-person separation but is a separate effort -- UI already supports multi-person rendering — no frontend changes needed for the `persons[]` array +- UI already supports multi-person rendering - no frontend changes needed for the `persons[]` array ## Affected Components @@ -115,7 +115,7 @@ Train a dedicated multi-person model using the RVF pipeline (ADR-036). ## Alternatives Considered -1. **Camera fusion**: Use a camera for person detection and WiFi for pose — rejected because the project goal is camera-free sensing. -2. **Multiple single-person models**: Run N independent pose estimators — rejected because they would produce correlated outputs from the same CSI data. -3. **Spatial filtering (beamforming)**: Use antenna array beamforming to isolate directions — rejected because single ESP32 has only 1 antenna; viable with multistatic mesh (ADR-029). -4. **Skip signal-derived, go straight to neural**: Train an end-to-end multi-person model — rejected because signal-derived provides faster iteration and interpretability for the early phases. +1. **Camera fusion**: Use a camera for person detection and WiFi for pose - rejected because the project goal is camera-free sensing. +2. **Multiple single-person models**: Run N independent pose estimators - rejected because they would produce correlated outputs from the same CSI data. +3. **Spatial filtering (beamforming)**: Use antenna array beamforming to isolate directions - rejected because single ESP32 has only 1 antenna; viable with multistatic mesh (ADR-029). +4. **Skip signal-derived, go straight to neural**: Train an end-to-end multi-person model - rejected because signal-derived provides faster iteration and interpretability for the early phases. 
diff --git a/docs/adr/ADR-038-sublinear-goal-oriented-action-planning.md b/docs/adr/ADR-038-sublinear-goal-oriented-action-planning.md index 7d8bf7f74..c7c523435 100644 --- a/docs/adr/ADR-038-sublinear-goal-oriented-action-planning.md +++ b/docs/adr/ADR-038-sublinear-goal-oriented-action-planning.md @@ -29,16 +29,16 @@ Manually navigating this decision space is error-prone. The developer must hold Goal-Oriented Action Planning (GOAP), originally developed for game AI by Jeff Orkin (2003), models the world as a set of boolean/numeric state properties and defines actions with typed preconditions and effects. A planner searches from the current world state to a goal state, producing an optimal action sequence. GOAP is a natural fit for this problem because: 1. **ADR implementations are actions** with clear preconditions (which other ADRs/hardware must exist) and effects (which capabilities are unlocked). -2. **The world state is observable** -- we can query cargo test results, check hardware connections, read crate manifests, and measure accuracy metrics. -3. **Goals are declarative** -- "I want multi-person tracking at 20 Hz" translates to `{multi_person_tracking: true, update_rate_hz: 20}`. -4. **Replanning is cheap** -- when hardware becomes available or a user changes goals, the planner re-runs in milliseconds. +2. **The world state is observable** - we can query cargo test results, check hardware connections, read crate manifests, and measure accuracy metrics. +3. **Goals are declarative** - "I want multi-person tracking at 20 Hz" translates to `{multi_person_tracking: true, update_rate_hz: 20}`. +4. **Replanning is cheap** - when hardware becomes available or a user changes goals, the planner re-runs in milliseconds. ### 1.3 Why Sublinear The naive GOAP planner uses A* search over the full action-state graph. With 37 ADRs, each potentially having multiple phases (ADR-037 has 4 phases, ADR-029 has 9 actions), the raw action count exceeds 80. 
The full state space is `2^N` for N boolean properties. Exhaustive search is wasteful because: - Most actions are irrelevant to any given goal (the user asking for vital signs does not need WebAssembly deployment actions in the search). -- The dependency graph is sparse -- most actions depend on 1-3 prerequisites, not all other actions. +- The dependency graph is sparse - most actions depend on 1-3 prerequisites, not all other actions. - Many state properties are independent (vital sign detection does not interact with WebAssembly compilation). A sublinear approach avoids exploring the full state space by exploiting this sparsity. @@ -173,7 +173,7 @@ pub enum Effect { ### 2.3 Goal Specification -Goals are expressed as partial world states -- a set of conditions that must be satisfied. +Goals are expressed as partial world states - a set of conditions that must be satisfied. ```rust pub struct Goal { @@ -411,7 +411,7 @@ When the user has not specified a single goal but asks "what should I work on ne 1. Construct the adjacency matrix where `A[i][j] = 1` if action j depends on action i (i.e., completing i unblocks j). 2. Run PageRank with damping factor 0.85. -3. Actions with the highest PageRank scores are the most "load-bearing" -- they unblock the most downstream work. +3. Actions with the highest PageRank scores are the most "load-bearing" - they unblock the most downstream work. 4. Filter to actions whose preconditions are currently satisfiable. 5. Return the top-K actions ranked by `PageRank_score * (1 / cost_days)` (value per effort). 
@@ -427,13 +427,13 @@ The GOAP planner is implemented as a TypeScript module within the claude-flow co ``` .claude-flow/goap/ - state.ts -- World state model and observation - actions.ts -- Action catalog (all ~80 actions) - planner.ts -- Sublinear A* planner with backward pruning - goals.ts -- Goal templates and user goal parser - executor.ts -- Swarm dispatch and action lifecycle - pagerank.ts -- Dependency graph prioritization - visualize.ts -- DOT graph export + state.ts - World state model and observation + actions.ts - Action catalog (all ~80 actions) + planner.ts - Sublinear A* planner with backward pruning + goals.ts - Goal templates and user goal parser + executor.ts - Swarm dispatch and action lifecycle + pagerank.ts - Dependency graph prioritization + visualize.ts - DOT graph export ``` ### 3.2 CLI Integration diff --git a/docs/adr/ADR-039-esp32-edge-intelligence.md b/docs/adr/ADR-039-esp32-edge-intelligence.md index 0eec7604b..ea76e48c5 100644 --- a/docs/adr/ADR-039-esp32-edge-intelligence.md +++ b/docs/adr/ADR-039-esp32-edge-intelligence.md @@ -17,16 +17,16 @@ WiFi-DensePose captures Channel State Information (CSI) from ESP32-S3 nodes and Implement a tiered edge processing pipeline on the ESP32-S3 that performs signal processing locally and sends compact results: -### Tier 0 — Raw Passthrough (default, backward compatible) +### Tier 0 - Raw Passthrough (default, backward compatible) No on-device processing. CSI frames streamed as-is (magic `0xC5110001`). 
-### Tier 1 — Basic Signal Processing +### Tier 1 - Basic Signal Processing - Phase extraction and unwrapping from I/Q pairs - Welford running variance per subcarrier - Top-K subcarrier selection by variance - Delta compression (XOR + RLE) for 30-50% bandwidth reduction (magic `0xC5110003`) -### Tier 2 — Full Edge Intelligence +### Tier 2 - Full Edge Intelligence All of Tier 1, plus: - Biquad IIR bandpass filters: breathing (0.1-0.5 Hz), heart rate (0.8-2.0 Hz) - Zero-crossing BPM estimation @@ -115,7 +115,7 @@ All configurable via `provision.py --edge-tier 2 --pres-thresh 0.05 ...` ### Negative - Firmware complexity increases (edge_processing.c is ~750 lines) - ESP32-S3 RAM usage increases ~12 KB for ring buffer + filter state -- Binary size increases from ~550 KB to ~925 KB with full WASM3 Tier 3 (10% free in 1 MB partition — see ADR-040) +- Binary size increases from ~550 KB to ~925 KB with full WASM3 Tier 3 (10% free in 1 MB partition - see ADR-040) ### Risks - BPM accuracy depends on subject distance and movement; needs real-world validation @@ -124,15 +124,15 @@ All configurable via `provision.py --edge-tier 2 --pres-thresh 0.05 ...` ## Implementation -- `firmware/esp32-csi-node/main/edge_processing.c` — DSP pipeline (~750 lines) -- `firmware/esp32-csi-node/main/edge_processing.h` — Types and API -- `firmware/esp32-csi-node/main/ota_update.c/h` — HTTP OTA endpoint -- `firmware/esp32-csi-node/main/power_mgmt.c/h` — Power management -- `rust-port/.../wifi-densepose-sensing-server/src/main.rs` — Vitals parser + REST endpoint -- `scripts/provision.py` — Edge config CLI arguments -- `.github/workflows/firmware-ci.yml` — CI build + size gate (updated to 950 KB for Tier 3) +- `firmware/esp32-csi-node/main/edge_processing.c` - DSP pipeline (~750 lines) +- `firmware/esp32-csi-node/main/edge_processing.h` - Types and API +- `firmware/esp32-csi-node/main/ota_update.c/h` - HTTP OTA endpoint +- `firmware/esp32-csi-node/main/power_mgmt.c/h` - Power management +- 
`rust-port/.../wifi-densepose-sensing-server/src/main.rs` - Vitals parser + REST endpoint +- `scripts/provision.py` - Edge config CLI arguments +- `.github/workflows/firmware-ci.yml` - CI build + size gate (updated to 950 KB for Tier 3) -### Tier 3 — WASM Programmable Sensing (ADR-040, ADR-041) +### Tier 3 - WASM Programmable Sensing (ADR-040, ADR-041) See [ADR-040](ADR-040-wasm-programmable-sensing.md) for hot-loadable WASM modules compiled from Rust, executed via WASM3 interpreter on-device. Core modules: @@ -140,9 +140,9 @@ gesture recognition, coherence monitoring, adversarial detection. [ADR-041](ADR-041-wasm-module-collection.md) defines the curated module collection (37 modules across 6 categories). Phase 1 implemented modules: -- `vital_trend.rs` — Clinical vital sign trend analysis (bradypnea, tachypnea, apnea) -- `intrusion.rs` — State-machine intrusion detection (calibrate-monitor-arm-alert) -- `occupancy.rs` — Spatial occupancy zone detection with per-zone variance analysis +- `vital_trend.rs` - Clinical vital sign trend analysis (bradypnea, tachypnea, apnea) +- `intrusion.rs` - State-machine intrusion detection (calibrate-monitor-arm-alert) +- `occupancy.rs` - Spatial occupancy zone detection with per-zone variance analysis ## Hardware Benchmark (RuView ESP32-S3) @@ -204,8 +204,8 @@ Measured on ESP32-S3 (QFN56 rev v0.2, 8 MB flash, 160 MHz, ESP-IDF v5.2). ### Findings -1. **Fall detection threshold too low** — default `fall_thresh=2000` (2.0 rad/s²) triggers 6.7 false positives/s in static indoor environment. Recommend increasing to 5000-8000 for typical deployments. -2. **No PSRAM on test board** — WASM arena falls back to internal heap. Boards with PSRAM would support larger modules. -3. **CSI rate exceeds spec** — measured 28.5 Hz vs. expected ~20 Hz. Performance headroom is better than estimated. -4. **WiFi-to-Ethernet isolation** — some routers block UDP between WiFi and wired clients. Recommend same-subnet verification in deployment guide. -5. 
**sendto ENOMEM crash (Issue #127)** — CSI callbacks in promiscuous mode fire 100-500+ times/sec, exhausting the lwIP pbuf pool and causing a guru meditation crash. Fixed with a dual approach: 50 Hz rate limiter in `csi_collector.c` (20 ms minimum send interval) and a 100 ms ENOMEM backoff in `stream_sender.c`. Binary size with fix: 947 KB. Hardware-verified stable for 200+ CSI callbacks with zero ENOMEM errors. +1. **Fall detection threshold too low** - default `fall_thresh=2000` (2.0 rad/s²) triggers 6.7 false positives/s in static indoor environment. Recommend increasing to 5000-8000 for typical deployments. +2. **No PSRAM on test board** - WASM arena falls back to internal heap. Boards with PSRAM would support larger modules. +3. **CSI rate exceeds spec** - measured 28.5 Hz vs. expected ~20 Hz. Performance headroom is better than estimated. +4. **WiFi-to-Ethernet isolation** - some routers block UDP between WiFi and wired clients. Recommend same-subnet verification in deployment guide. +5. **sendto ENOMEM crash (Issue #127)** - CSI callbacks in promiscuous mode fire 100-500+ times/sec, exhausting the lwIP pbuf pool and causing a guru meditation crash. Fixed with a dual approach: 50 Hz rate limiter in `csi_collector.c` (20 ms minimum send interval) and a 100 ms ENOMEM backoff in `stream_sender.c`. Binary size with fix: 947 KB. Hardware-verified stable for 200+ CSI callbacks with zero ENOMEM errors. 
diff --git a/docs/adr/ADR-040-wasm-programmable-sensing.md b/docs/adr/ADR-040-wasm-programmable-sensing.md index 351cb36f0..423e975bf 100644 --- a/docs/adr/ADR-040-wasm-programmable-sensing.md +++ b/docs/adr/ADR-040-wasm-programmable-sensing.md @@ -8,10 +8,10 @@ ADR-039 implemented Tiers 0-2 of the ESP32-S3 edge intelligence pipeline: - **Tier 0**: Raw CSI passthrough (magic `0xC5110001`) -- **Tier 1**: Basic DSP — phase unwrap, Welford stats, top-K, delta compression -- **Tier 2**: Full pipeline — vitals, presence, fall detection, multi-person +- **Tier 1**: Basic DSP - phase unwrap, Welford stats, top-K, delta compression +- **Tier 2**: Full pipeline - vitals, presence, fall detection, multi-person -The firmware uses ~820 KB of flash, leaving ~80 KB headroom in the 1 MB OTA partition. The ESP32-S3 has 8 MB PSRAM available for runtime data. New sensing algorithms (gesture recognition, signal coherence monitoring, adversarial detection) currently require a full firmware reflash — impractical for deployed sensor networks. +The firmware uses ~820 KB of flash, leaving ~80 KB headroom in the 1 MB OTA partition. The ESP32-S3 has 8 MB PSRAM available for runtime data. New sensing algorithms (gesture recognition, signal coherence monitoring, adversarial detection) currently require a full firmware reflash - impractical for deployed sensor networks. The project already has 35+ RuVector WASM crates and 28 pre-built `.wasm` binaries, but none are integrated into the ESP32 firmware. 
@@ -113,12 +113,12 @@ Core 1 (DSP Task) | Component | SRAM | PSRAM | Flash | |-----------|------|-------|-------| -| WASM3 interpreter | ~10 KB | — | ~100 KB | -| WASM module storage (×4) | — | 512 KB | — | -| WASM execution stack | 8 KB | — | — | -| Host API bindings | 2 KB | — | ~15 KB | -| HTTP upload handler | 1 KB | — | ~8 KB | -| RVF parser + verifier | 1 KB | — | ~6 KB | +| WASM3 interpreter | ~10 KB | - | ~100 KB | +| WASM module storage (×4) | - | 512 KB | - | +| WASM execution stack | 8 KB | - | - | +| Host API bindings | 2 KB | - | ~15 KB | +| HTTP upload handler | 1 KB | - | ~8 KB | +| RVF parser + verifier | 1 KB | - | ~6 KB | | **Total Tier 3** | **~22 KB** | **512 KB** | **~129 KB** | | **Running total (Tier 0-3)** | **~34 KB** | **512 KB** | **~925 KB** | @@ -130,13 +130,13 @@ Core 1 (DSP Task) |-----|------|---------|-------------| | `wasm_max` | u8 | 4 | Maximum concurrent WASM modules | | `wasm_verify` | u8 | 1 | Require signature verification (secure-by-default) | -| `wasm_pubkey` | blob(32) | — | Signing public key for WASM verification | +| `wasm_pubkey` | blob(32) | - | Signing public key for WASM verification | ## Consequences ### Positive - Deploy new sensing algorithms to 1000+ nodes without reflashing firmware -- 20-year extensibility horizon — new algorithms via .wasm uploads +- 20-year extensibility horizon - new algorithms via .wasm uploads - Algorithms developed/tested in Rust, compiled to portable WASM - PSRAM utilization (previously unused 8 MB) for module storage - Hot-swap algorithms for A/B testing in production deployments @@ -152,7 +152,7 @@ Core 1 (DSP Task) | Risk | Mitigation | |------|------------| -| WASM3 memory management may fragment PSRAM over time | Fixed 160 KB arenas pre-allocated at boot per slot — no runtime malloc/free cycles | +| WASM3 memory management may fragment PSRAM over time | Fixed 160 KB arenas pre-allocated at boot per slot - no runtime malloc/free cycles | | Complex WASM modules (>64 KB) may cause 
stack overflow in interpreter | `WASM_STACK_SIZE` = 8 KB, `d_m3MaxFunctionStackHeight` = 128; modules validated at load time | | HTTP upload endpoint requires network security | Ed25519 signature verification enabled by default (`wasm_verify=1`); disable only via NVS for lab/dev | | Runaway WASM module blocks DSP pipeline | Per-frame budget guard (10 ms default); module auto-stopped after 10 consecutive faults | @@ -160,13 +160,13 @@ Core 1 (DSP Task) ## Implementation -- `firmware/esp32-csi-node/components/wasm3/CMakeLists.txt` — WASM3 ESP-IDF component -- `firmware/esp32-csi-node/main/wasm_runtime.c/h` — Runtime host with 12 API bindings + manifest -- `firmware/esp32-csi-node/main/wasm_upload.c/h` — HTTP REST endpoints (RVF-aware) -- `firmware/esp32-csi-node/main/rvf_parser.c/h` — RVF container parser and verifier -- `rust-port/.../wifi-densepose-wasm-edge/` — Rust WASM crate (gesture, coherence, adversarial, rvf, occupancy, vital_trend, intrusion) -- `rust-port/.../wifi-densepose-sensing-server/src/main.rs` — `0xC5110004` parser -- `docs/adr/ADR-039-esp32-edge-intelligence.md` — Updated with Tier 3 reference +- `firmware/esp32-csi-node/components/wasm3/CMakeLists.txt` - WASM3 ESP-IDF component +- `firmware/esp32-csi-node/main/wasm_runtime.c/h` - Runtime host with 12 API bindings + manifest +- `firmware/esp32-csi-node/main/wasm_upload.c/h` - HTTP REST endpoints (RVF-aware) +- `firmware/esp32-csi-node/main/rvf_parser.c/h` - RVF container parser and verifier +- `rust-port/.../wifi-densepose-wasm-edge/` - Rust WASM crate (gesture, coherence, adversarial, rvf, occupancy, vital_trend, intrusion) +- `rust-port/.../wifi-densepose-sensing-server/src/main.rs` - `0xC5110004` parser +- `docs/adr/ADR-039-esp32-edge-intelligence.md` - Updated with Tier 3 reference --- @@ -179,7 +179,7 @@ The initial Tier 3 implementation addresses five production-readiness concerns: Dynamic `heap_caps_malloc` / `free` cycles on PSRAM fragment memory over days of continuous operation. 
Instead, each module slot pre-allocates a **160 KB fixed arena** at boot (`WASM_ARENA_SIZE`). The WASM binary and WASM3 runtime heap both live inside -this arena. Unloading a module zeroes the arena but never frees it — the slot is +this arena. Unloading a module zeroes the arena but never frees it - the slot is reused on the next `wasm_runtime_load()`. ``` @@ -321,7 +321,7 @@ maintained by Tier 1/2 DSP. Subcarriers are nodes; edge weights are pairwise Pearson correlation magnitudes over the Welford window. The algebraic connectivity (Fiedler value λ₂) of this graph's Laplacian approximates the mincut value. On ESP32-S3 with K=8 subcarriers, this -is an 8×8 eigenvalue problem — solvable with power iteration in <100 μs. +is an 8×8 eigenvalue problem - solvable with power iteration in <100 μs. ### B.5 Spiking and Sparse Optimizations @@ -337,13 +337,13 @@ When the budget is tight (Δλ ≈ 0, quiet room), WASM modules should: ### B.6 Thermal and Power Hooks ESP32-S3 provides: -- `temp_sensor_read()` — on-chip temperature (°C) +- `temp_sensor_read()` - on-chip temperature (°C) - ADC reading of battery voltage (if wired) -Thermal pressure: `T = clamp((temp_celsius - 60) / 20, 0, 1)` — ramps +Thermal pressure: `T = clamp((temp_celsius - 60) / 20, 0, 1)` - ramps from 0 at 60°C to 1.0 at 80°C (thermal throttle zone). -Battery pressure: `P = clamp((3.3 - battery_volts) / 0.6, 0, 1)` — ramps +Battery pressure: `P = clamp((3.3 - battery_volts) / 0.6, 0, 1)` - ramps from 0 at 3.3V to 1.0 at 2.7V (brownout zone). ### B.7 Transport Strategy @@ -394,15 +394,15 @@ board without PSRAM). WiFi connected to AP at RSSI -25 dBm, channel 5 BW20. ### Known Issues -1. **Fall threshold too sensitive** — default 2.0 rad/s² produces 6.7 false positives/s in static environment. Recommend 5.0-8.0 for deployment. -2. **No PSRAM on test board** — WASM arenas fall back to internal heap (316 KiB total). Production boards with 8 MB PSRAM will use dedicated PSRAM arenas. -3. 
**WiFi-Ethernet isolation** — some consumer routers block bridging between WiFi and wired clients. Verify network path during deployment. +1. **Fall threshold too sensitive** - default 2.0 rad/s² produces 6.7 false positives/s in static environment. Recommend 5.0-8.0 for deployment. +2. **No PSRAM on test board** - WASM arenas fall back to internal heap (316 KiB total). Production boards with 8 MB PSRAM will use dedicated PSRAM arenas. +3. **WiFi-Ethernet isolation** - some consumer routers block bridging between WiFi and wired clients. Verify network path during deployment. ### B.8 Implementation Plan | Step | Scope | Effort | |------|-------|--------| -| 1 | Add `edge_compute_fiedler()` in `edge_processing.c` — power iteration on 8×8 Laplacian | ~50 lines C | +| 1 | Add `edge_compute_fiedler()` in `edge_processing.c` - power iteration on 8×8 Laplacian | ~50 lines C | | 2 | Add budget controller struct and update formula in `wasm_runtime.c` | ~30 lines C | | 3 | Wire thermal/battery sensors into budget inputs | ~20 lines C | | 4 | Add delta-export dead-band filter in `wasm_runtime_on_frame()` | ~15 lines C | @@ -414,10 +414,10 @@ Total: ~125 lines of C, no new files. All constants configurable via NVS. | Failure | Behavior | |---------|----------| -| Δλ estimate wrong (correlation noise) | Budget oscillates — clamped by B_min/B_max | +| Δλ estimate wrong (correlation noise) | Budget oscillates - clamped by B_min/B_max | | Thermal sensor absent | T defaults to 0 (no throttle) | | Battery ADC not wired | P defaults to 0 (always-on mode) | -| All WASM modules budget-faulted | DSP pipeline runs Tier 2 only — graceful degradation | +| All WASM modules budget-faulted | DSP pipeline runs Tier 2 only - graceful degradation | --- @@ -457,17 +457,17 @@ Total overhead: 32 (header) + 96 (manifest) + 64 (signature) = **192 bytes**. 
| Offset | Size | Type | Field | |--------|------|------|-------| -| 0 | 32 | char[] | `module_name` — null-terminated ASCII | -| 32 | 2 | u16 | `required_host_api` — version (1 = current) | -| 34 | 4 | u32 | `capabilities` — RVF_CAP_* bitmask | -| 38 | 4 | u32 | `max_frame_us` — requested per-frame budget (0 = use default) | -| 42 | 2 | u16 | `max_events_per_sec` — rate limit (0 = unlimited) | -| 44 | 2 | u16 | `memory_limit_kb` — max WASM heap (0 = use default) | -| 46 | 2 | u16 | `event_schema_version` — for receiver compatibility | -| 48 | 32 | [u8;32] | `build_hash` — SHA-256 of WASM payload | -| 80 | 2 | u16 | `min_subcarriers` — minimum required (0 = any) | -| 82 | 2 | u16 | `max_subcarriers` — maximum expected (0 = any) | -| 84 | 10 | char[] | `author` — null-padded ASCII | +| 0 | 32 | char[] | `module_name` - null-terminated ASCII | +| 32 | 2 | u16 | `required_host_api` - version (1 = current) | +| 34 | 4 | u32 | `capabilities` - RVF_CAP_* bitmask | +| 38 | 4 | u32 | `max_frame_us` - requested per-frame budget (0 = use default) | +| 42 | 2 | u16 | `max_events_per_sec` - rate limit (0 = unlimited) | +| 44 | 2 | u16 | `memory_limit_kb` - max WASM heap (0 = use default) | +| 46 | 2 | u16 | `event_schema_version` - for receiver compatibility | +| 48 | 32 | [u8;32] | `build_hash` - SHA-256 of WASM payload | +| 80 | 2 | u16 | `min_subcarriers` - minimum required (0 = any) | +| 82 | 2 | u16 | `max_subcarriers` - maximum expected (0 = any) | +| 84 | 10 | char[] | `author` - null-padded ASCII | | 94 | 2 | [u8;2] | reserved (0) | ### C.4 Capability Bitmask @@ -483,7 +483,7 @@ Total overhead: 32 (header) + 96 (manifest) + 64 (signature) = **192 bytes**. | 6 | `LOG` | `csi_log` | Modules declare which host APIs they need. Future firmware versions may -refuse to link imports that aren't declared in capabilities — defense in +refuse to link imports that aren't declared in capabilities - defense in depth against supply-chain attacks. 
### C.5 On-Device Flow diff --git a/docs/adr/ADR-041-wasm-module-collection.md b/docs/adr/ADR-041-wasm-module-collection.md index 9422be1c3..7fef9a8dc 100644 --- a/docs/adr/ADR-041-wasm-module-collection.md +++ b/docs/adr/ADR-041-wasm-module-collection.md @@ -1,4 +1,4 @@ -# ADR-041: WASM Module Collection -- Curated Sensing Algorithm Registry +# ADR-041: WASM Module Collection - Curated Sensing Algorithm Registry **Status**: Accepted (Phase 1 implemented, hardware-validated on RuView ESP32-S3) **Date**: 2026-03-02 @@ -15,12 +15,12 @@ budget control. Three flagship modules were defined (gesture, coherence, adversarial) as proof of capability. A runtime without a library of modules is an empty platform. The difference -between a product and a platform is the ecosystem -- and the ecosystem is the +between a product and a platform is the ecosystem - and the ecosystem is the module collection. Three strategic dynamics make a curated collection essential: **1. Platform flywheel effect.** Each new module increases the value of every deployed ESP32 node. A node purchased for sleep apnea monitoring becomes a -fall detector, an intrusion sensor, and an occupancy counter -- all via OTA +fall detector, an intrusion sensor, and an occupancy counter - all via OTA WASM uploads. This multiplies the addressable market without multiplying hardware SKUs. @@ -108,7 +108,7 @@ emits an apnea alert with duration. It also tracks apnea-hypopnea index | 102 | `AHI_UPDATE` | Events per hour (float) | **Estimated .wasm size**: 4 KB -**Budget tier**: L (lightweight, < 2 ms) -- primarily threshold checks on Tier 2 vitals +**Budget tier**: L (lightweight, < 2 ms) - primarily threshold checks on Tier 2 vitals **Difficulty**: Easy --- @@ -136,7 +136,7 @@ RMSSD exceeds 3 standard deviations from baseline. 
| 113 | `HRV_ANOMALY` | RMSSD value | **Estimated .wasm size**: 8 KB -**Budget tier**: S (standard, < 5 ms) -- requires phase history windowing +**Budget tier**: S (standard, < 5 ms) - requires phase history windowing **Difficulty**: Hard --- @@ -164,7 +164,7 @@ detection uses autocorrelation of the breathing amplitude envelope over a | 123 | `RESP_DISTRESS_LEVEL` | Severity 0.0--1.0 | **Estimated .wasm size**: 10 KB -**Budget tier**: H (heavy, < 10 ms) -- autocorrelation over 60 s window +**Budget tier**: H (heavy, < 10 ms) - autocorrelation over 60 s window **Difficulty**: Hard --- @@ -194,7 +194,7 @@ biomechanics literature. | 134 | `FESTINATION` | Acceleration pattern flag | **Estimated .wasm size**: 12 KB -**Budget tier**: H (heavy, < 10 ms) -- windowed periodicity analysis +**Budget tier**: H (heavy, < 10 ms) - windowed periodicity analysis **Difficulty**: Hard --- @@ -224,7 +224,7 @@ onset, and post-ictal stillness. | 143 | `POST_ICTAL` | Stillness duration (seconds) | **Estimated .wasm size**: 10 KB -**Budget tier**: S (standard, < 5 ms) -- frequency analysis on motion energy +**Budget tier**: S (standard, < 5 ms) - frequency analysis on motion energy **Difficulty**: Hard --- @@ -253,7 +253,7 @@ across on_timer calls. | 153 | `BASELINE_ESTABLISHED` | Hours of data collected | **Estimated .wasm size**: 6 KB -**Budget tier**: L (lightweight, < 2 ms) -- EWMA updates are O(1) +**Budget tier**: L (lightweight, < 2 ms) - EWMA updates are O(1) **Difficulty**: Medium --- @@ -325,7 +325,7 @@ Metallic objects have significantly higher RF reflectivity than biological tissue, creating distinctive amplitude spikes on specific subcarrier groups when a person carrying metal passes through the sensing field. 
The module computes a metal-presence index from the ratio of amplitude -variance to phase variance -- pure tissue produces coupled amplitude/phase +variance to phase variance - pure tissue produces coupled amplitude/phase changes, while metallic reflectors produce disproportionate amplitude perturbation. **Experimental: requires controlled environment calibration and should not be used as a sole security measure.** @@ -469,7 +469,7 @@ presence with a configurable arrival debounce (default: 10 seconds) and a departure timeout (default: 5 minutes). The departure timeout ensures HVAC does not shut down during brief absences (bathroom break, coffee run). Also reports an activity level (sedentary/active) for adaptive -comfort control -- sedentary occupants may prefer different temperature +comfort control - sedentary occupants may prefer different temperature setpoints. **Host API dependencies**: `csi_get_presence`, `csi_get_motion_energy`, @@ -683,7 +683,7 @@ components. Emits count deltas and periodic summaries. | 423 | `HOURLY_TRAFFIC` | Total passages in last hour | **Estimated .wasm size**: 8 KB -**Budget tier**: S (standard, < 5 ms) -- phase gradient computation +**Budget tier**: S (standard, < 5 ms) - phase gradient computation **Difficulty**: Medium --- @@ -753,7 +753,7 @@ produce a distinctive CSI signature: high-amplitude, low-frequency (< 0.3 Hz) phase modulation from the large metal body moving slowly, combined with engine/motor vibration harmonics. When this signature co-occurs with a human motion signature, a proximity alert fires. -Priority: CRITICAL -- this is a life-safety module. +Priority: CRITICAL - this is a life-safety module. **Host API dependencies**: `csi_get_phase`, `csi_get_amplitude`, `csi_get_variance`, `csi_get_motion_energy`, `csi_get_presence`, @@ -867,7 +867,7 @@ stall). Configurable for species via initialization parameters. earthquake P-wave early arrival, and structural stress. 
In a static environment with no human presence, CSI phase should be stable to within the noise floor (~0.02 rad). Structural vibration causes coherent -phase oscillation across all subcarriers simultaneously -- unlike +phase oscillation across all subcarriers simultaneously - unlike human movement which affects subcarrier groups selectively. The module maintains a vibration spectral density estimate and alerts on: seismic activity (broadband > 1 Hz), mechanical resonance (narrowband harmonics @@ -888,7 +888,7 @@ change indicating settlement or thermal expansion). | 543 | `VIBRATION_SPECTRUM` | Encoded spectral peaks | **Estimated .wasm size**: 10 KB -**Budget tier**: H (heavy, < 10 ms) -- spectral density estimation +**Budget tier**: H (heavy, < 10 ms) - spectral density estimation **Difficulty**: Hard --- @@ -896,7 +896,7 @@ change indicating settlement or thermal expansion). ## Category 6: Exotic & Research (Event IDs 600--699) These modules push WiFi CSI sensing into territory that sounds like science -fiction -- but every one is grounded in published peer-reviewed research. +fiction - but every one is grounded in published peer-reviewed research. WiFi signals at 2.4/5 GHz have wavelengths (12.5 cm / 6 cm) that interact with the human body at a resolution sufficient to detect chest wall displacement of 0.1 mm (breathing), wrist pulse of 0.01 mm (heartbeat), @@ -946,7 +946,7 @@ Just WiFi signals reflecting off a sleeping body. | 603 | `DEEP_SLEEP_RATIO` | % of total sleep | **Estimated .wasm size**: 14 KB -**Budget tier**: H (heavy, < 10 ms) -- multi-feature state machine +**Budget tier**: H (heavy, < 10 ms) - multi-feature state machine **Difficulty**: Hard --- @@ -1010,7 +1010,7 @@ produce measurable phase shifts of 0.1--0.5 radians. Published research per-letter recognition accuracy of >90% at distances up to 2 meters. 
This is an accessibility breakthrough: a deaf person can fingerspell -words in the air and have them recognized by WiFi -- no camera required, +words in the air and have them recognized by WiFi - no camera required, works through visual obstructions, and preserves privacy since no images are captured. @@ -1028,7 +1028,7 @@ are captured. | 623 | `GESTURE_REJECTED` | Non-letter movement detected | **Estimated .wasm size**: 18 KB (includes 26 DTW templates) -**Budget tier**: H (heavy, < 10 ms) -- DTW matching against 26 templates +**Budget tier**: H (heavy, < 10 ms) - DTW matching against 26 templates **Difficulty**: Hard --- @@ -1041,12 +1041,12 @@ arm movement), dynamics (forte/piano from motion amplitude), and basic gesture vocabulary (downbeat, upbeat, cutoff, fermata) from CSI phase patterns. The conducting pattern at 4/4 time produces a characteristic phase trajectory: strong downbeat, lateral second beat, higher third -beat, rebounding fourth beat -- each with distinct subcarrier signatures. +beat, rebounding fourth beat - each with distinct subcarrier signatures. The module outputs BPM, beat position (1-2-3-4), and dynamic level as events. A host application can map these to MIDI clock and CC messages for controlling synthesizers, lighting rigs, or interactive installations. -This is an air instrument -- conduct an orchestra with WiFi. +This is an air instrument - conduct an orchestra with WiFi. **Host API dependencies**: `csi_get_phase`, `csi_get_amplitude`, `csi_get_motion_energy`, `csi_get_phase_history`, `csi_get_variance`, @@ -1101,7 +1101,7 @@ the next room using only WiFi reflections off leaves. | 643 | `WATERING_EVENT` | Rapid amplitude recovery detected | **Estimated .wasm size**: 6 KB -**Budget tier**: L (lightweight, < 2 ms) -- only updates EWMA +**Budget tier**: L (lightweight, < 2 ms) - only updates EWMA **Difficulty**: Medium --- @@ -1165,7 +1165,7 @@ and identifies cessation. 
Works because the ESP32 node is physically mounted to the building structure, coupling rainfall vibrations into the RF path. -This is weather sensing without any outdoor sensors -- the WiFi signal +This is weather sensing without any outdoor sensors - the WiFi signal inside the building feels the rain on the roof. **Host API dependencies**: `csi_get_phase`, `csi_get_variance`, @@ -1189,7 +1189,7 @@ inside the building feels the rain on the roof. ### 6.8 `wdp-exo-breathing-sync` **Description**: Detects when multiple people's breathing patterns -synchronize -- a real phenomenon observed in meditation groups, sleeping +synchronize - a real phenomenon observed in meditation groups, sleeping couples, and audience/performer interactions. When two or more people are in the same CSI field, their individual breathing signatures appear as superimposed periodic components in the phase signal. The module performs @@ -1206,13 +1206,13 @@ people's breathing using WiFi CSI. Applications include: - **Couple sleep monitoring**: Detect when partners' breathing aligns during sleep (associated with deeper sleep quality). - **Crowd resonance**: Large-group breathing synchronization at concerts, - sports events, or religious gatherings -- a measurable indicator of + sports events, or religious gatherings - a measurable indicator of collective emotional engagement. - **Therapeutic monitoring**: Breathing synchronization between therapist and patient (rapport indicator). -The social coherence metric -- a number that quantifies how in-sync a -group of humans is breathing -- is something that was unmeasurable before +The social coherence metric - a number that quantifies how in-sync a +group of humans is breathing - is something that was unmeasurable before contactless sensing. WiFi CSI makes the invisible visible. **Host API dependencies**: `csi_get_bpm_breathing`, `csi_get_phase`, @@ -1229,7 +1229,7 @@ contactless sensing. WiFi CSI makes the invisible visible. 
| 673 | `SYNC_LOST` | Desynchronization event | **Estimated .wasm size**: 10 KB -**Budget tier**: S (standard, < 5 ms) -- cross-correlation of breathing components +**Budget tier**: S (standard, < 5 ms) - cross-correlation of breathing components **Difficulty**: Hard --- @@ -1557,7 +1557,7 @@ reverts changes that increase false positives. model influence propagation in multi-person sensing fields. Each detected person is a node; edge weights represent CSI cross-correlation between person-associated subcarrier groups. PageRank scores identify the -"dominant mover" -- the person whose motion most affects the CSI channel. +"dominant mover" - the person whose motion most affects the CSI channel. Useful for multi-person scenarios where you need to track the primary actor (e.g., a nurse in a patient room, a presenter in a meeting). @@ -1681,7 +1681,7 @@ invariants on sensing outputs using Linear Temporal Logic (LTL). Example rules: "Globally(presence=0 implies no fall_alert)" prevents false fall alarms in empty rooms. "Finally(intrusion implies alert within 10s)" ensures alerts are timely. The module monitors the event stream from -other modules and flags LTL violations -- detecting impossible event +other modules and flags LTL violations - detecting impossible event combinations that indicate sensor malfunction or adversarial tampering. **Vendor source**: `midstream/temporal-neural-solver` (LTL verification) @@ -1737,7 +1737,7 @@ to optimize the limited 4-slot runtime. The ESP32 becomes self-directing. **Description**: Adapts Midstream's AIMDS (AI Manipulation Defense System) pattern matcher for CSI event stream integrity. Detects adversarial -manipulation of CSI signals designed to trigger false events -- e.g., +manipulation of CSI signals designed to trigger false events - e.g., a replay attack that plays back recorded CSI to fake "empty room" while someone is present. 
The module compares incoming CSI statistical fingerprints against known attack patterns (replay, injection, jamming) @@ -1928,7 +1928,7 @@ frame distributions. Unlike variance-based motion detection that loses spatial information, optimal transport preserves the geometry of how energy moves across subcarriers between frames. Detects subtle motions (hand gestures, typing) that variance-based methods miss because the -total variance doesn't change -- only the distribution shifts. +total variance doesn't change - only the distribution shifts. **Vendor source**: `ruvector-math` (transport/sliced_wasserstein.rs) @@ -2090,7 +2090,7 @@ than Euclidean embedding for tree-structured spatial hierarchies. ### Vendor Module Implementation Priority -#### Phase 2a -- Vendor Quick Wins (Q2--Q3 2026) +#### Phase 2a - Vendor Quick Wins (Q2--Q3 2026) Modules that wrap existing, well-tested vendor algorithms with minimal adaptation. These deliver advanced capabilities with low implementation risk. @@ -2104,7 +2104,7 @@ adaptation. These deliver advanced capabilities with low implementation risk. | `wdp-spt-micro-hnsw` | ruvector | Smallest WASM footprint (11.8 KB); enables on-device fingerprinting | | `wdp-tmp-pattern-sequence` | midstream | LCS/DTW are mature algorithms; high user value for routine detection | -#### Phase 2b -- Vendor Advanced (Q3--Q4 2026) +#### Phase 2b - Vendor Advanced (Q3--Q4 2026) | Module | Vendor | Rationale | |--------|--------|-----------| @@ -2115,7 +2115,7 @@ adaptation. These deliver advanced capabilities with low implementation risk. | `wdp-spt-pagerank-influence` | sublinear | Novel approach to multi-person scene understanding | | `wdp-ais-behavioral-profiler` | midstream | Long-term security through learned baselines | -#### Phase 3 -- Vendor Frontier (2027+) +#### Phase 3 - Vendor Frontier (2027+) | Module | Vendor | Rationale | |--------|--------|-----------| @@ -2369,7 +2369,7 @@ sha256 = "def456..." 1. 
**Market multiplier**: A single $8 ESP32-S3 node becomes a multi-purpose sensing platform. A hospital buys one SKU and deploys sleep apnea detection in the ICU, fall detection in geriatrics, and queue management - in the ER -- all via WASM module uploads. No hardware changes, no + in the ER - all via WASM module uploads. No hardware changes, no reflashing. With vendor-integrated modules, the same node gains adaptive learning, autonomous planning, and quantum-inspired analysis. @@ -2403,7 +2403,7 @@ sha256 = "def456..." point of failure in the module collection. 7. **Vendor algorithm leverage**: The 24 vendor-integrated modules bring - algorithms that would take years to develop from scratch -- sublinear + algorithms that would take years to develop from scratch - sublinear solvers, attention mechanisms, temporal logic verification, spiking neural networks, quantum-inspired search. By wrapping existing battle-tested code behind the Host API, we convert library value @@ -2461,7 +2461,7 @@ sha256 = "def456..." ## Implementation Priority -### Phase 1 -- Ship First (Q2 2026) +### Phase 1 - Ship First (Q2 2026) These modules deliver immediate value with low implementation risk. They form the "launch collection" for the WASM module marketplace. @@ -2474,7 +2474,7 @@ They form the "launch collection" for the WASM module marketplace. | `wdp-ret-queue-length` | Planned | Retail deployments already in pipeline; queue analytics requested | | `wdp-med-vital-trend` | **Implemented** (`vital_trend.rs`) | Leverages existing vitals data; needed for clinical pilot | -### Phase 2 -- Community (Q3-Q4 2026) +### Phase 2 - Community (Q3-Q4 2026) These modules are medium-difficulty and designed for community contribution. Each has a well-defined scope and clear test criteria. @@ -2490,7 +2490,7 @@ Each has a well-defined scope and clear test criteria. 
| `wdp-exo-ghost-hunter` | Community enthusiasm driver; good PR and engagement |
| `wdp-exo-rain-detect` | Simple and delightful; demonstrates CSI versatility |

-### Phase 3 -- Research Frontier (2027+)
+### Phase 3 - Research Frontier (2027+)

 These modules push the boundaries of WiFi CSI sensing and require
 specialized expertise, larger datasets, and possibly new Host API
@@ -2646,14 +2646,14 @@ The repository provides a mock Host API for desktop testing:
 cargo test --target x86_64-unknown-linux-gnu

 # Run against recorded CSI data (if available)
-cargo run --example replay -- --input ../../data/recordings/test.csv
+cargo run --example replay -- --input ../../data/recordings/test.csv
 ```

 **6. Package as RVF.**

 ```bash
 # Build the RVF container (requires the wasm-edge CLI tool)
-cargo run -p wifi-densepose-wasm-edge --features std -- \
+cargo run -p wifi-densepose-wasm-edge --features std -- \
   rvf pack \
   --wasm target/wasm32-unknown-unknown/release/my_module.wasm \
   --manifest wdp-exo-my-module.toml \
@@ -2720,11 +2720,11 @@ Unsigned modules can still be loaded on nodes with `wasm_verify=0`
 - ADR-039: ESP32-S3 Edge Intelligence Pipeline
 - ADR-040: WASM Programmable Sensing (Tier 3)
-- `vendor/ruvector/` -- 76 crates: attention, min-cut, solvers, temporal
+- `vendor/ruvector/` - 76 crates: attention, min-cut, solvers, temporal
   tensor, spiking networks, HNSW, quantum circuits, coherence gating
-- `vendor/midstream/` -- 10 crates: AIMDS threat detection, DTW/LCS
+- `vendor/midstream/` - 10 crates: AIMDS threat detection, DTW/LCS
   temporal comparison, attractor dynamics, LTL verification, meta-learning
-- `vendor/sublinear-time-solver/` -- 11 crates: O(log n) solvers,
+- `vendor/sublinear-time-solver/` - 11 crates: O(log n) solvers,
   PageRank, GOAP planning, psycho-symbolic reasoning, WASM neural inference
 - Liu et al., "Monitoring Vital Signs and Postures During Sleep Using
   WiFi Signals," MobiCom 2020
diff --git a/docs/adr/ADR-042-coherent-human-channel-imaging.md
b/docs/adr/ADR-042-coherent-human-channel-imaging.md index 5a2949506..d52e1a493 100644 --- a/docs/adr/ADR-042-coherent-human-channel-imaging.md +++ b/docs/adr/ADR-042-coherent-human-channel-imaging.md @@ -1,4 +1,4 @@ -# ADR-042: Coherent Human Channel Imaging (CHCI) — Beyond WiFi CSI +# ADR-042: Coherent Human Channel Imaging (CHCI) - Beyond WiFi CSI **Status**: Proposed **Date**: 2026-03-03 @@ -10,7 +10,7 @@ ## Context -WiFi-DensePose currently relies on passive Channel State Information (CSI) extracted from standard 802.11 traffic frames. CSI is one specific way of estimating a channel response, but it is fundamentally constrained by a protocol designed for throughput and interoperability — not for sensing. +WiFi-DensePose currently relies on passive Channel State Information (CSI) extracted from standard 802.11 traffic frames. CSI is one specific way of estimating a channel response, but it is fundamentally constrained by a protocol designed for throughput and interoperability - not for sensing. ### Fundamental Limitations of Passive WiFi CSI @@ -27,7 +27,7 @@ These constraints impose a hard floor on sensing fidelity. Breathing detection ( ### What We Actually Want -The real objective is **coherent multipath sensing** — measuring the complex-valued impulse response of the human-occupied channel with sufficient phase stability and temporal resolution to reconstruct body surface geometry and sub-millimeter physiological motion. +The real objective is **coherent multipath sensing** - measuring the complex-valued impulse response of the human-occupied channel with sufficient phase stability and temporal resolution to reconstruct body surface geometry and sub-millimeter physiological motion. WiFi is optimized for throughput and interoperability. DensePose is optimized for phase stability and micro-Doppler fidelity. Those goals are not aligned. @@ -35,7 +35,7 @@ WiFi is optimized for throughput and interoperability. 
DensePose is optimized fo IEEE Std 802.11bf-2025 was published on September 26, 2025, defining WLAN Sensing as a first-class MAC/PHY capability. Key provisions: -- **Null Data PPDU (NDP) sounding**: Deterministic, known waveforms with no data payload — purpose-built for channel measurement +- **Null Data PPDU (NDP) sounding**: Deterministic, known waveforms with no data payload - purpose-built for channel measurement - **Sensing Measurement Setup (SMS)**: Negotiation protocol between sensing initiator and responder with unique session IDs - **Trigger-Based Sensing Measurement Exchange (TB SME)**: AP-coordinated sounding with Sensing Availability Windows (SAW) - **Multiband support**: Sub-7 GHz (2.4, 5, 6 GHz) plus 60 GHz mmWave @@ -67,13 +67,13 @@ This proves the hardware architecture described in this ADR is feasible at the E | Vayyar 4D imaging | 3–81 GHz | High (4D imaging) | Room-scale | $200+ | | Novelda X4 UWB | 7.29/8.748 GHz | Sub-mm | 0.4–10 m | $15–50 | -The gap between passive WiFi CSI (~0.87 mm) and coherent phase processing (~0.1 mm) represents a 9x improvement in displacement sensitivity — the difference between marginal and reliable heartbeat detection at ISM bands. +The gap between passive WiFi CSI (~0.87 mm) and coherent phase processing (~0.1 mm) represents a 9x improvement in displacement sensitivity - the difference between marginal and reliable heartbeat detection at ISM bands. --- ## Decision -We define **Coherent Human Channel Imaging (CHCI)** — a purpose-built coherent RF sensing protocol optimized for structural human motion, vital sign extraction, and body surface reconstruction. CHCI is not WiFi in the traditional sense. It is a sensing protocol that operates within ISM band regulatory constraints and can optionally maintain backward compatibility with 802.11bf. 
+We define **Coherent Human Channel Imaging (CHCI)** - a purpose-built coherent RF sensing protocol optimized for structural human motion, vital sign extraction, and body surface reconstruction. CHCI is not WiFi in the traditional sense. It is a sensing protocol that operates within ISM band regulatory constraints and can optionally maintain backward compatibility with 802.11bf. ### Architecture Overview @@ -256,7 +256,7 @@ This puts heartbeat detection (0.2–0.5 mm chest displacement) well within the └─────────────────────┘ ``` -**Key insight**: Lower frequency penetrates better (through-wall sensing, NLOS paths). Higher frequency provides finer spatial resolution. By treating each band as a projection of the same physical scene, the fusion model can achieve super-resolution beyond any single band — using body model priors (known human dimensions, joint angle constraints) to constrain the phase relationships across bands. +**Key insight**: Lower frequency penetrates better (through-wall sensing, NLOS paths). Higher frequency provides finer spatial resolution. By treating each band as a projection of the same physical scene, the fusion model can achieve super-resolution beyond any single band - using body model priors (known human dimensions, joint angle constraints) to constrain the phase relationships across bands. **Integration with existing code**: Extends `multiband.rs` from independent per-channel fusion to coherent cross-band phase alignment. The existing `CrossViewpointAttention` mechanism in `ruvector/src/viewpoint/attention.rs` provides the attention-weighted fusion foundation. @@ -345,7 +345,7 @@ Proposed CHCI (sensing-optimized): | Geometry | Dual-antenna diversity | Linear or L-shaped phased array | | Target signal | Far-field plane wave | Near-field chest wall displacement | -**Virtual aperture synthesis**: With 4 nodes × 4 antennas = 16 physical elements, MIMO virtual aperture provides 16 × 16 = 256 virtual channels. 
Combined with MUSIC or ESPRIT algorithms, this enables sub-degree angle-of-arrival estimation — sufficient to resolve individual body segments. +**Virtual aperture synthesis**: With 4 nodes × 4 antennas = 16 physical elements, MIMO virtual aperture provides 16 × 16 = 256 virtual channels. Combined with MUSIC or ESPRIT algorithms, this enables sub-degree angle-of-arrival estimation - sufficient to resolve individual body segments. ### 6. Cognitive Waveform Adaptation @@ -401,13 +401,13 @@ Proposed CHCI (sensing-optimized): └───────────────────────────────────────────────────────────────┘ ``` -**Power efficiency**: Cognitive adaptation reduces average power consumption by 60–80% compared to constant full-rate sounding. In IDLE mode (1 Hz, single band, low power), the system draws <10 mA from the ESP32-S3 radio — enabling battery-powered deployment. +**Power efficiency**: Cognitive adaptation reduces average power consumption by 60–80% compared to constant full-rate sounding. In IDLE mode (1 Hz, single band, low power), the system draws <10 mA from the ESP32-S3 radio - enabling battery-powered deployment. **Integration with ADR-039**: The cognitive waveform modes map directly to ADR-039 edge processing tiers. Tier 0 (raw CSI) corresponds to IDLE/ALERT. Tier 1 (phase unwrap, stats) corresponds to ACTIVE. Tier 2 (vitals, fall detection) corresponds to VITAL/SLEEP. The cognitive engine adds the waveform adaptation feedback loop that ADR-039 lacks. ### 7. Coherent Diffraction Tomography -**What changes**: Current tomography (`tomography.rs`) uses amplitude-only attenuation for voxel reconstruction. With coherent phase data from CHCI, we upgrade to diffraction tomography — resolving body surfaces rather than volumetric shadows. +**What changes**: Current tomography (`tomography.rs`) uses amplitude-only attenuation for voxel reconstruction. With coherent phase data from CHCI, we upgrade to diffraction tomography - resolving body surfaces rather than volumetric shadows. 
**Mathematical foundation**: @@ -426,7 +426,7 @@ Proposed (coherent diffraction tomography): Output: complex permittivity contrast per voxel (body surface) ``` -**Key advantage**: Diffraction tomography produces body surface geometry, not just occupancy maps. This directly feeds the DensePose UV mapping pipeline with geometric constraints — reducing the neural network's burden from "guess the surface from shadows" to "refine the surface from holographic reconstruction." +**Key advantage**: Diffraction tomography produces body surface geometry, not just occupancy maps. This directly feeds the DensePose UV mapping pipeline with geometric constraints - reducing the neural network's burden from "guess the surface from shadows" to "refine the surface from holographic reconstruction." **Performance projection** (based on ESPARGOS results and multi-band coverage): @@ -467,11 +467,11 @@ CHCI supports a **dual-mode architecture**: | **802.11bf NDP** | Standard-compliant NDP sounding | WiFi AP supports 802.11bf, moderate improvement over legacy | | **CHCI Native** | Full coherent sounding with shared clock | Purpose-deployed sensing mesh, maximum fidelity | -The firmware can switch between modes at runtime. The signal processing pipeline (`signal/src/ruvsense/`) accepts CSI from any mode — the coherent processing path activates when shared-clock metadata is present in the CSI frame header. +The firmware can switch between modes at runtime. The signal processing pipeline (`signal/src/ruvsense/`) accepts CSI from any mode - the coherent processing path activates when shared-clock metadata is present in the CSI frame header. **Question 2: Are you willing to own both transmitter and receiver hardware?** -Yes. CHCI requires owning both TX and RX to achieve phase coherence. The system is deployed as a self-contained sensing mesh — not parasitic on existing WiFi infrastructure. This is the fundamental architectural trade: compatibility for control. 
For sensing, that is a good trade. +Yes. CHCI requires owning both TX and RX to achieve phase coherence. The system is deployed as a self-contained sensing mesh - not parasitic on existing WiFi infrastructure. This is the fundamental architectural trade: compatibility for control. For sensing, that is a good trade. ### Hardware Bill of Materials (per CHCI node) @@ -535,8 +535,8 @@ New and modified crates: - **Standards-compliant path**: 802.11bf NDP sounding is a published IEEE standard (September 2025), providing regulatory clarity - **10× cost advantage**: $4.25/node vs $50+ for nearest comparable coherent sensing platform - **Through-wall preservation**: Operates at 2.4/5 GHz ISM bands, maintaining the through-wall sensing advantage that mmWave systems lack -- **Backward compatible**: Dual-mode firmware supports legacy CSI, 802.11bf NDP, and native CHCI — deployable incrementally -- **Privacy-preserving**: No cameras, no audio — same RF-only sensing paradigm as current WiFi-DensePose +- **Backward compatible**: Dual-mode firmware supports legacy CSI, 802.11bf NDP, and native CHCI - deployable incrementally +- **Privacy-preserving**: No cameras, no audio - same RF-only sensing paradigm as current WiFi-DensePose - **Power-efficient**: Cognitive waveform adaptation reduces average power 60–80% vs constant-rate sounding - **Body surface reconstruction**: Coherent diffraction tomography produces geometric constraints for DensePose, reducing neural network inference burden - **Proven feasibility**: ESPARGOS demonstrates phase-coherent WiFi sensing at ESP32 cost point (IEEE 2024) @@ -544,8 +544,8 @@ New and modified crates: ### Negative - **Custom hardware required**: Cannot parasitically sense from existing WiFi routers in CHCI Native mode (802.11bf mode can use compliant APs) -- **PCB design needed**: Reference clock distribution requires custom PCB — not a pure firmware upgrade -- **Calibration burden**: Coherent diffraction tomography requires empty-room reference 
scan — adds deployment friction +- **PCB design needed**: Reference clock distribution requires custom PCB - not a pure firmware upgrade +- **Calibration burden**: Coherent diffraction tomography requires empty-room reference scan - adds deployment friction - **Clock distribution complexity**: Coaxial cable distribution limits deployment flexibility vs fully wireless mesh - **Two-phase deployment**: Full CHCI requires Phases 1–6 (~24 weeks). Intermediate modes (NDP-only, Phase 1) provide incremental value. @@ -566,7 +566,7 @@ New and modified crates: ### Standards -1. IEEE Std 802.11bf-2025, "Standard for Information Technology — Telecommunications and Information Exchange between Systems — Local and Metropolitan Area Networks — Specific Requirements — Part 11: Wireless LAN Medium Access Control (MAC) and Physical Layer (PHY) Specifications — Amendment: Enhancements for Wireless Local Area Network (WLAN) Sensing," IEEE, September 2025. +1. IEEE Std 802.11bf-2025, "Standard for Information Technology - Telecommunications and Information Exchange between Systems - Local and Metropolitan Area Networks - Specific Requirements - Part 11: Wireless LAN Medium Access Control (MAC) and Physical Layer (PHY) Specifications - Amendment: Enhancements for Wireless Local Area Network (WLAN) Sensing," IEEE, September 2025. 2. ETSI EN 300 328 V2.2.2, "Wideband transmission systems; Data transmission equipment operating in the 2.4 GHz band," ETSI, July 2019. 3. FCC 47 CFR Part 15.247, "Operation within the bands 902–928 MHz, 2400–2483.5 MHz, and 5725–5850 MHz." 
diff --git a/docs/adr/ADR-043-sensing-server-ui-api-completion.md b/docs/adr/ADR-043-sensing-server-ui-api-completion.md index 7bb93d251..0f6717d2b 100644 --- a/docs/adr/ADR-043-sensing-server-ui-api-completion.md +++ b/docs/adr/ADR-043-sensing-server-ui-api-completion.md @@ -10,7 +10,7 @@ ## Context -The WiFi-DensePose sensing server (`wifi-densepose-sensing-server`) is a single-binary Axum server that receives ESP32 CSI frames via UDP, processes them through the RuVector signal pipeline, and serves both a web UI at `/ui/` and a REST/WebSocket API. The UI provides tabs for live sensing visualization, model management, CSI recording, and training -- all designed to operate without external dependencies. +The WiFi-DensePose sensing server (`wifi-densepose-sensing-server`) is a single-binary Axum server that receives ESP32 CSI frames via UDP, processes them through the RuVector signal pipeline, and serves both a web UI at `/ui/` and a REST/WebSocket API. The UI provides tabs for live sensing visualization, model management, CSI recording, and training - all designed to operate without external dependencies. However, the UI's JavaScript expected several backend endpoints that were not yet implemented in the Rust server. Opening the browser console revealed persistent 404 errors for model, recording, and training API routes. Three categories of functionality were broken: @@ -28,7 +28,7 @@ The Training tab calls `POST /api/v1/train/start` to launch a background trainin ### 4. Sensing Service Not Started on App Init -The web UI's `sensingService` singleton (which manages the WebSocket connection to `/ws/sensing`) was only started lazily when the user navigated to the Sensing tab (`SensingTab.js:182`). However, the Dashboard and Live Demo tabs both read `sensingService.dataSource` at load time — and since the service was never started, the status permanently showed **"RECONNECTING"** with no WebSocket connection attempt and no console errors. 
This silent failure affected the first-load experience for every user. +The web UI's `sensingService` singleton (which manages the WebSocket connection to `/ws/sensing`) was only started lazily when the user navigated to the Sensing tab (`SensingTab.js:182`). However, the Dashboard and Live Demo tabs both read `sensingService.dataSource` at load time - and since the service was never started, the status permanently showed **"RECONNECTING"** with no WebSocket connection attempt and no console errors. This silent failure affected the first-load experience for every user. ### 5. Mobile App Defects @@ -53,8 +53,8 @@ All 14 new handler functions are implemented directly in `main.rs` as async func │ Sensing Server (main.rs) │ │ │ │ Router::new() │ -│ ├── /api/v1/sensing/* (existing — CSI streaming) │ -│ ├── /api/v1/pose/* (existing — pose estimation) │ +│ ├── /api/v1/sensing/* (existing - CSI streaming) │ +│ ├── /api/v1/pose/* (existing - pose estimation) │ │ ├── /api/v1/models GET list_models (NEW) │ │ ├── /api/v1/models/active GET get_active_model (NEW) │ │ ├── /api/v1/models/load POST load_model (NEW) │ @@ -69,8 +69,8 @@ All 14 new handler functions are implemented directly in `main.rs` as async func │ ├── /api/v1/train/status GET train_status (NEW) │ │ ├── /api/v1/train/start POST train_start (NEW) │ │ ├── /api/v1/train/stop POST train_stop (NEW) │ -│ ├── /ws/sensing (existing — sensing WebSocket) │ -│ └── /ui/* (existing — static file serving) │ +│ ├── /ws/sensing (existing - sensing WebSocket) │ +│ └── /ui/* (existing - static file serving) │ │ │ │ AppStateInner (new fields) │ │ ├── discovered_models: Vec │ @@ -95,34 +95,34 @@ Routes are registered individually in the `http_app` Router before the static UI | Method | Path | Request Body | Response | Description | |--------|------|-------------|----------|-------------| -| `GET` | `/api/v1/models` | -- | `{ models: ModelInfo[], count: usize }` | Scan `data/models/` for `.rvf` files and return manifest metadata | -| 
`GET` | `/api/v1/models/{id}` | -- | `ModelInfo` | Detailed info for a single model (version, PCK score, LoRA profiles, segment count) | -| `GET` | `/api/v1/models/active` | -- | `ActiveModelInfo \| { status: "no_model" }` | Active model with runtime stats (avg inference ms, frames processed) | +| `GET` | `/api/v1/models` | - | `{ models: ModelInfo[], count: usize }` | Scan `data/models/` for `.rvf` files and return manifest metadata | +| `GET` | `/api/v1/models/{id}` | - | `ModelInfo` | Detailed info for a single model (version, PCK score, LoRA profiles, segment count) | +| `GET` | `/api/v1/models/active` | - | `ActiveModelInfo \| { status: "no_model" }` | Active model with runtime stats (avg inference ms, frames processed) | | `POST` | `/api/v1/models/load` | `{ model_id: string }` | `{ status: "loaded", model_id, weight_count }` | Load model weights into memory via `RvfReader`, set `model_loaded = true` | -| `POST` | `/api/v1/models/unload` | -- | `{ status: "unloaded", model_id }` | Drop loaded weights, set `model_loaded = false` | +| `POST` | `/api/v1/models/unload` | - | `{ status: "unloaded", model_id }` | Drop loaded weights, set `model_loaded = false` | | `POST` | `/api/v1/models/lora/activate` | `{ model_id, profile_name }` | `{ status: "activated", profile_name }` | Activate a LoRA adapter profile on the loaded model | -| `GET` | `/api/v1/models/lora/profiles` | -- | `{ model_id, profiles: string[], active }` | List LoRA profiles available in the loaded model | +| `GET` | `/api/v1/models/lora/profiles` | - | `{ model_id, profiles: string[], active }` | List LoRA profiles available in the loaded model | #### CSI Recording (`recording.rs`) | Method | Path | Request Body | Response | Description | |--------|------|-------------|----------|-------------| | `POST` | `/api/v1/recording/start` | `{ session_name, label?, duration_secs? 
}` | `{ status: "recording", session_id, file_path }` | Create a new `.csi.jsonl` file and begin appending frames | -| `POST` | `/api/v1/recording/stop` | -- | `{ status: "stopped", session_id, frame_count }` | Stop the active recording, write companion `.meta.json` | -| `GET` | `/api/v1/recording/list` | -- | `{ recordings: RecordingSession[], count }` | List all recordings by scanning `.meta.json` files | -| `GET` | `/api/v1/recording/download/{id}` | -- | `application/x-ndjson` file | Download the raw JSONL recording file | -| `DELETE` | `/api/v1/recording/{id}` | -- | `{ status: "deleted", deleted_files }` | Remove `.csi.jsonl` and `.meta.json` files | +| `POST` | `/api/v1/recording/stop` | - | `{ status: "stopped", session_id, frame_count }` | Stop the active recording, write companion `.meta.json` | +| `GET` | `/api/v1/recording/list` | - | `{ recordings: RecordingSession[], count }` | List all recordings by scanning `.meta.json` files | +| `GET` | `/api/v1/recording/download/{id}` | - | `application/x-ndjson` file | Download the raw JSONL recording file | +| `DELETE` | `/api/v1/recording/{id}` | - | `{ status: "deleted", deleted_files }` | Remove `.csi.jsonl` and `.meta.json` files | #### Training Pipeline (`training_api.rs`) | Method | Path | Request Body | Response | Description | |--------|------|-------------|----------|-------------| | `POST` | `/api/v1/train/start` | `TrainingConfig { epochs, batch_size, learning_rate, ... }` | `{ status: "started", run_id }` | Launch background training task against recorded CSI data | -| `POST` | `/api/v1/train/stop` | -- | `{ status: "stopped", run_id }` | Cancel the active training run via a stop signal | -| `GET` | `/api/v1/train/status` | -- | `TrainingStatus { phase, epoch, loss, ... 
}` | Current training state (idle, training, complete, failed) | +| `POST` | `/api/v1/train/stop` | - | `{ status: "stopped", run_id }` | Cancel the active training run via a stop signal | +| `GET` | `/api/v1/train/status` | - | `TrainingStatus { phase, epoch, loss, ... }` | Current training state (idle, training, complete, failed) | | `POST` | `/api/v1/train/pretrain` | `{ epochs?, learning_rate? }` | `{ status: "started", mode: "pretrain" }` | Start self-supervised contrastive pretraining (ADR-024) | | `POST` | `/api/v1/train/lora` | `{ profile_name, epochs?, rank? }` | `{ status: "started", mode: "lora" }` | Start LoRA fine-tuning on a loaded base model | -| `WS` | `/ws/train/progress` | -- | Streaming `TrainingProgress` JSON | Epoch-level progress with loss, metrics, and ETA | +| `WS` | `/ws/train/progress` | - | Streaming `TrainingProgress` JSON | Epoch-level progress with loss, metrics, and ETA | ### State Management @@ -203,13 +203,13 @@ The alternative would be to run a separate Python training service and proxy req 1. **Single-binary deployment**: WiFi-DensePose targets edge deployments (disaster response, building security, healthcare monitoring per ADR-034) where installing Python, pip, and PyTorch is impractical. A single Rust binary that handles sensing, recording, training, and inference is the correct architecture for field use. -2. **Zero-configuration UI**: The web UI is served by the same binary that exposes the API. When a user opens `http://server:8080/`, everything works -- no additional services to start, no ports to configure, no CORS to manage. +2. **Zero-configuration UI**: The web UI is served by the same binary that exposes the API. When a user opens `http://server:8080/`, everything works - no additional services to start, no ports to configure, no CORS to manage. 3. **Data locality**: CSI frames arrive via UDP, are processed for real-time display, and can simultaneously be written to disk for training. 
The recording module hooks directly into the CSI processing loop via `maybe_record_frame()`, avoiding any serialization overhead or inter-process communication. ### Why fix mobile in the same change? -The mobile app's WebSocket failure was caused by the same root problem -- assumptions about server port layout that did not match reality. Fixing the server API without fixing the mobile client would leave a broken user experience. The test fixes were included because the placeholder tests masked the WebSocket URL bug during development. +The mobile app's WebSocket failure was caused by the same root problem - assumptions about server port layout that did not match reality. Fixing the server API without fixing the mobile client would leave a broken user experience. The test fixes were included because the placeholder tests masked the WebSocket URL bug during development. --- @@ -218,7 +218,7 @@ The mobile app's WebSocket failure was caused by the same root problem -- assump ### Positive - **UI loads with zero console errors**: All model, recording, and training tabs render correctly and receive real data from the server -- **End-to-end workflow**: Users can record CSI data, train a model, load it, and see pose estimation results -- all from the web UI without any external tools +- **End-to-end workflow**: Users can record CSI data, train a model, load it, and see pose estimation results - all from the web UI without any external tools - **LoRA fine-tuning support**: Users can adapt a base model to new environments via LoRA profiles, activated through the UI - **Mobile app connects reliably**: The WebSocket URL builder uses same-origin port derivation, working correctly regardless of which port the server runs on - **25 real mobile tests**: Provide actual regression protection for utils, services, stores, components, hooks, and screens @@ -290,7 +290,7 @@ Startup creates `data/models/` and `data/recordings/` directories and populates ```bash # 1. 
Start sensing server with auto source (simulated fallback) cd rust-port/wifi-densepose-rs -cargo run -p wifi-densepose-sensing-server -- --http-port 3000 --source auto +cargo run -p wifi-densepose-sensing-server -- --http-port 3000 --source auto # 2. Verify model endpoints return 200 curl -s http://localhost:3000/api/v1/models | jq '.count' @@ -308,7 +308,7 @@ curl -s http://localhost:3000/api/v1/train/status | jq '.phase' # 5. Verify LoRA endpoints return 200 curl -s http://localhost:3000/api/v1/models/lora/profiles | jq '.' -# 6. Open UI — check browser console for zero 404 errors +# 6. Open UI - check browser console for zero 404 errors # Navigate to http://localhost:3000/ui/ # 7. Run mobile tests @@ -329,6 +329,6 @@ cargo test --workspace --no-default-features - ADR-039: ESP32-S3 Edge Intelligence Pipeline (CSI frame format and processing tiers) - ADR-040: WASM Programmable Sensing (Tier 3 edge compute) - ADR-041: WASM Module Collection (module catalog) -- `crates/wifi-densepose-sensing-server/src/main.rs` -- all 14 new handler functions (model, recording, training) -- `ui/app.js` -- sensing service early initialization fix -- `ui/mobile/src/services/ws.service.ts` -- mobile WebSocket URL fix +- `crates/wifi-densepose-sensing-server/src/main.rs` - all 14 new handler functions (model, recording, training) +- `ui/app.js` - sensing service early initialization fix +- `ui/mobile/src/services/ws.service.ts` - mobile WebSocket URL fix diff --git a/docs/adr/ADR-044-provisioning-tool-enhancements.md b/docs/adr/ADR-044-provisioning-tool-enhancements.md index 9713c1662..c4cdebcc4 100644 --- a/docs/adr/ADR-044-provisioning-tool-enhancements.md +++ b/docs/adr/ADR-044-provisioning-tool-enhancements.md @@ -194,19 +194,19 @@ The target users are embedded developers and field operators who are already run | Phase | Effort | Impact | Priority | |-------|--------|--------|----------| -| Phase 1: Complete NVS coverage | Small (1 file, ~50 lines) | High — closes feature gap | P0 |
-| Phase 2: Config file + mesh | Medium (~100 lines) | High — biggest UX win | P1 | -| Phase 3: Presets | Small (~40 lines) | Medium — convenience | P2 | -| Phase 4: Read-back + verify | Medium (~150 lines) | Medium — debugging aid | P2 | -| Phase 5: Auto-detect | Small (~30 lines) | Low — minor convenience | P3 | +| Phase 1: Complete NVS coverage | Small (1 file, ~50 lines) | High - closes feature gap | P0 | +| Phase 2: Config file + mesh | Medium (~100 lines) | High - biggest UX win | P1 | +| Phase 3: Presets | Small (~40 lines) | Medium - convenience | P2 | +| Phase 4: Read-back + verify | Medium (~150 lines) | Medium - debugging aid | P2 | +| Phase 5: Auto-detect | Small (~30 lines) | Low - minor convenience | P3 | --- ## References -- `firmware/esp32-csi-node/main/nvs_config.h` — NVS config struct (20 fields) -- `firmware/esp32-csi-node/main/nvs_config.c` — NVS read logic (20 keys) -- `firmware/esp32-csi-node/provision.py` — Current provisioning script (13 of 20 keys) +- `firmware/esp32-csi-node/main/nvs_config.h` - NVS config struct (20 fields) +- `firmware/esp32-csi-node/main/nvs_config.c` - NVS read logic (20 keys) +- `firmware/esp32-csi-node/provision.py` - Current provisioning script (13 of 20 keys) - ADR-029: RuvSense multistatic sensing mode (TDM, channel hopping) - ADR-032: Multistatic mesh security hardening (mesh keys) - ADR-039: ESP32-S3 edge intelligence (edge tiers, vitals) diff --git a/docs/adr/ADR-045-amoled-display-support.md b/docs/adr/ADR-045-amoled-display-support.md index be6744007..506878ca8 100644 --- a/docs/adr/ADR-045-amoled-display-support.md +++ b/docs/adr/ADR-045-amoled-display-support.md @@ -10,14 +10,14 @@ The ESP32-S3 board (LilyGO T-Display-S3 AMOLED) has an integrated RM67162 QSPI A ### Constraints -- Binary was 947 KB in a 1 MB partition — needed 8MB flash + custom partition table +- Binary was 947 KB in a 1 MB partition - needed 8MB flash + custom partition table - SPIRAM was disabled in sdkconfig despite hardware having 8MB 
PSRAM -- Core 1 is pinned to DSP (edge processing) — display must use Core 0 +- Core 1 is pinned to DSP (edge processing) - display must use Core 0 - Existing CSI pipeline must not be affected ### Available APIs -Thread-safe edge APIs already exist (`edge_get_vitals()`, `edge_get_multi_person()`) — the display task only reads from these, no new synchronization needed. +Thread-safe edge APIs already exist (`edge_get_vitals()`, `edge_get_multi_person()`) - the display task only reads from these, no new synchronization needed. ## Decision @@ -43,7 +43,7 @@ Add optional AMOLED display support with the following architecture: ### Compile-Time Control - `CONFIG_DISPLAY_ENABLE=y` (default): compiles display code, auto-detects hardware at boot -- `CONFIG_DISPLAY_ENABLE=n`: zero-cost — no display code compiled +- `CONFIG_DISPLAY_ENABLE=n`: zero-cost - no display code compiled - `CONFIG_SPIRAM_IGNORE_NOTFOUND=y`: boots fine on boards without PSRAM ### Flash Layout @@ -59,7 +59,7 @@ Add optional AMOLED display support with the following architecture: |------|------|----------|--------| | WiFi/LwIP | 0 | 18-23 | unchanged | | OTA httpd | 0 | 5 | unchanged | -| **display_task** | **0** | **1** | **NEW — lowest priority** | +| **display_task** | **0** | **1** | **NEW - lowest priority** | | edge_task (DSP) | 1 | 5 | unchanged | ### Dependencies diff --git a/docs/adr/ADR-046-android-tv-box-armbian-deployment.md b/docs/adr/ADR-046-android-tv-box-armbian-deployment.md index 380d493e7..33e8e6957 100644 --- a/docs/adr/ADR-046-android-tv-box-armbian-deployment.md +++ b/docs/adr/ADR-046-android-tv-box-armbian-deployment.md @@ -73,7 +73,7 @@ This ADR covers Phase 1 (TV box as aggregator) and Phase 2 (custom WiFi firmware 5. 
**New Rust compilation target** in workspace CI: - Add `aarch64-unknown-linux-gnu` to cross-compilation matrix - Binary size target: <15 MB stripped (fits easily in SD card) - - No GPU dependency — CPU-only inference using `candle` or ONNX Runtime for ARM + - No GPU dependency - CPU-only inference using `candle` or ONNX Runtime for ARM ### Phase 2: Custom WiFi Firmware for CSI Extraction (Future) @@ -81,9 +81,9 @@ This ADR covers Phase 1 (TV box as aggregator) and Phase 2 (custom WiFi firmware | Chipset | Driver | CSI Support | Monitor Mode | Effort | |---------|--------|-------------|--------------|--------| - | Broadcom BCM43455 | brcmfmac | **Proven** (Nexmon CSI) | Yes | Low — patches exist | - | Realtek RTL8822CS | rtw88 | **Moderate** — driver is open-source, CSI hooks need adding | Yes (patched) | Medium | - | MediaTek MT7661 | mt76 | **Unknown** — MediaTek has released CSI tools for some chips | Yes | Medium-High | + | Broadcom BCM43455 | brcmfmac | **Proven** (Nexmon CSI) | Yes | Low - patches exist | + | Realtek RTL8822CS | rtw88 | **Moderate** - driver is open-source, CSI hooks need adding | Yes (patched) | Medium | + | MediaTek MT7661 | mt76 | **Unknown** - MediaTek has released CSI tools for some chips | Yes | Medium-High | 2. 
**CSI extraction architecture** (Linux kernel driver modification): @@ -174,7 +174,7 @@ Total system cost: $55-65 (3 ESP32 nodes + 1 TV box) Central dashboard: aggregate all rooms via REST API ``` -### Standalone Mode (Phase 2 — Custom WiFi FW) +### Standalone Mode (Phase 2 - Custom WiFi FW) ``` ┌──────────────────────────────────────┐ @@ -222,9 +222,9 @@ Total system cost: $55-65 (3 ESP32 nodes + 1 TV box) ### Neutral -- **No changes to existing ESP32 firmware** — TV box receives the same UDP frames -- **No changes to sensing server protocol** — Phase 2 CSI output uses same binary format -- **Existing web UI works as-is** — Chromium kiosk mode or any browser on the LAN +- **No changes to existing ESP32 firmware** - TV box receives the same UDP frames +- **No changes to sensing server protocol** - Phase 2 CSI output uses same binary format +- **Existing web UI works as-is** - Chromium kiosk mode or any browser on the LAN ## Implementation Plan @@ -253,11 +253,11 @@ Total system cost: $55-65 (3 ESP32 nodes + 1 TV box) ## References -- [Nexmon CSI](https://github.com/seemoo-lab/nexmon_csi) — Broadcom WiFi CSI extraction (BCM43455, BCM4339, BCM4358) -- [Armbian](https://www.armbian.com/) — Debian/Ubuntu for ARM SBCs and TV boxes -- [rtw88 driver](https://github.com/torvalds/linux/tree/master/drivers/net/wireless/realtek/rtw88) — Mainline Linux driver for Realtek 802.11ac chips -- [mt76 driver](https://github.com/torvalds/linux/tree/master/drivers/net/wireless/mediatek/mt76) — Mainline Linux driver for MediaTek WiFi chips -- [cross](https://github.com/cross-rs/cross) — Zero-setup Rust cross-compilation -- [ADR-018: ESP32 CSI Binary Protocol](ADR-018-dev-implementation.md) — Binary frame format reused for Phase 2 CSI extraction -- [ADR-039: Edge Intelligence](ADR-039-esp32-edge-intelligence.md) — On-device processing tiers -- [ADR-043: Sensing Server](ADR-043-sensing-server-ui-api-completion.md) — Single-binary deployment target +- [Nexmon 
CSI](https://github.com/seemoo-lab/nexmon_csi) - Broadcom WiFi CSI extraction (BCM43455, BCM4339, BCM4358) +- [Armbian](https://www.armbian.com/) - Debian/Ubuntu for ARM SBCs and TV boxes +- [rtw88 driver](https://github.com/torvalds/linux/tree/master/drivers/net/wireless/realtek/rtw88) - Mainline Linux driver for Realtek 802.11ac chips +- [mt76 driver](https://github.com/torvalds/linux/tree/master/drivers/net/wireless/mediatek/mt76) - Mainline Linux driver for MediaTek WiFi chips +- [cross](https://github.com/cross-rs/cross) - Zero-setup Rust cross-compilation +- [ADR-018: ESP32 CSI Binary Protocol](ADR-018-dev-implementation.md) - Binary frame format reused for Phase 2 CSI extraction +- [ADR-039: Edge Intelligence](ADR-039-esp32-edge-intelligence.md) - On-device processing tiers +- [ADR-043: Sensing Server](ADR-043-sensing-server-ui-api-completion.md) - Single-binary deployment target diff --git a/docs/adr/ADR-047-psychohistory-observatory-visualization.md b/docs/adr/ADR-047-psychohistory-observatory-visualization.md index c412915ab..70d62cbe2 100644 --- a/docs/adr/ADR-047-psychohistory-observatory-visualization.md +++ b/docs/adr/ADR-047-psychohistory-observatory-visualization.md @@ -1,4 +1,4 @@ -# ADR-047: RuView Observatory — Immersive Three.js WiFi Sensing Visualization +# ADR-047: RuView Observatory - Immersive Three.js WiFi Sensing Visualization ## Status @@ -12,13 +12,13 @@ Accepted (Implemented) The project has a functional tabbed dashboard UI (`ui/index.html`) with existing Three.js components (body model, gaussian splats, signal visualization, environment). While effective for monitoring, it lacks a cinematic, immersive visualization suitable for demonstrations and stakeholder presentations. -We need an immersive Three.js room-based visualization with practical WiFi sensing data overlays — human wireframe pose, dot-matrix body mass, vital signs HUD, signal field heatmap — powered by ESP32 CSI data (demo mode with live WebSocket path). 
+We need an immersive Three.js room-based visualization with practical WiFi sensing data overlays - human wireframe pose, dot-matrix body mass, vital signs HUD, signal field heatmap - powered by ESP32 CSI data (demo mode with live WebSocket path). ## Decision ### Standalone Page Architecture -`ui/observatory.html` is a standalone full-screen entry point, separate from the tabbed dashboard. Linked via "Observatory" nav tab in `ui/index.html`. No build step — vanilla JS modules with Three.js r160 via CDN importmap. +`ui/observatory.html` is a standalone full-screen entry point, separate from the tabbed dashboard. Linked via "Observatory" nav tab in `ui/index.html`. No build step - vanilla JS modules with Three.js r160 via CDN importmap. ### Room-Based Visualization @@ -147,6 +147,6 @@ EffectComposer chain: RenderPass → UnrealBloomPass → custom VignetteShader - ADR-045: AMOLED display support - ADR-046: Android TV / Armbian deployment -- Existing `ui/components/scene.js` — Three.js scene pattern -- Existing `ui/components/gaussian-splats.js` — ShaderMaterial pattern -- Existing `ui/services/sensing.service.js` — WebSocket data contract +- Existing `ui/components/scene.js` - Three.js scene pattern +- Existing `ui/components/gaussian-splats.js` - ShaderMaterial pattern +- Existing `ui/services/sensing.service.js` - WebSocket data contract diff --git a/docs/adr/ADR-048-adaptive-csi-classifier.md b/docs/adr/ADR-048-adaptive-csi-classifier.md index 86fa00661..34b8ca1c2 100644 --- a/docs/adr/ADR-048-adaptive-csi-classifier.md +++ b/docs/adr/ADR-048-adaptive-csi-classifier.md @@ -9,7 +9,7 @@ ## Context -WiFi-based activity classification using ESP32 Channel State Information (CSI) relies on hand-tuned thresholds to distinguish between activity states (absent, present_still, present_moving, active). 
These static thresholds are brittle — they don't account for: +WiFi-based activity classification using ESP32 Channel State Information (CSI) relies on hand-tuned thresholds to distinguish between activity states (absent, present_still, present_moving, active). These static thresholds are brittle - they don't account for: - **Environment-specific signal patterns**: Room geometry, furniture, wall materials, and ESP32 placement all affect how CSI signals respond to human activity. - **Temporal noise characteristics**: Real ESP32 CSI data at ~10 FPS has significant frame-to-frame jitter that causes classification to jump between states. @@ -47,21 +47,21 @@ A Rust-native environment-tuned classifier that learns from labeled JSONL record #### Feature Extraction (15 features) | # | Feature | Source | Discriminative Power | |---|---------|--------|---------------------| -| 0 | variance | Server | Medium — temporal CSI spread | -| 1 | motion_band_power | Server | Medium — high-frequency subcarrier energy | -| 2 | breathing_band_power | Server | Low — respiratory band energy | -| 3 | spectral_power | Server | Low — mean squared amplitude | -| 4 | dominant_freq_hz | Server | Low — peak subcarrier index | -| 5 | change_points | Server | Medium — threshold crossing count | -| 6 | mean_rssi | Server | Low — received signal strength | -| 7 | amp_mean | Subcarrier | Medium — mean amplitude across 56 subcarriers | -| 8 | amp_std | Subcarrier | **High** — amplitude spread (motion increases spread) | -| 9 | amp_skew | Subcarrier | Medium — asymmetry of amplitude distribution | -| 10 | amp_kurt | Subcarrier | **High** — peakedness (presence creates peaks) | -| 11 | amp_iqr | Subcarrier | Medium — inter-quartile range | -| 12 | amp_entropy | Subcarrier | **High** — spectral entropy (motion increases disorder) | -| 13 | amp_max | Subcarrier | Medium — peak amplitude value | -| 14 | amp_range | Subcarrier | Medium — amplitude dynamic range | +| 0 | variance | Server | Medium - temporal 
CSI spread | +| 1 | motion_band_power | Server | Medium - high-frequency subcarrier energy | +| 2 | breathing_band_power | Server | Low - respiratory band energy | +| 3 | spectral_power | Server | Low - mean squared amplitude | +| 4 | dominant_freq_hz | Server | Low - peak subcarrier index | +| 5 | change_points | Server | Medium - threshold crossing count | +| 6 | mean_rssi | Server | Low - received signal strength | +| 7 | amp_mean | Subcarrier | Medium - mean amplitude across 56 subcarriers | +| 8 | amp_std | Subcarrier | **High** - amplitude spread (motion increases spread) | +| 9 | amp_skew | Subcarrier | Medium - asymmetry of amplitude distribution | +| 10 | amp_kurt | Subcarrier | **High** - peakedness (presence creates peaks) | +| 11 | amp_iqr | Subcarrier | Medium - inter-quartile range | +| 12 | amp_entropy | Subcarrier | **High** - spectral entropy (motion increases disorder) | +| 13 | amp_max | Subcarrier | Medium - peak amplitude value | +| 14 | amp_range | Subcarrier | Medium - amplitude dynamic range | #### Training Algorithm - **Multiclass logistic regression** with softmax output @@ -100,7 +100,7 @@ A Rust-native environment-tuned classifier that learns from labeled JSONL record |-----------|-------|-----------| | Median window | 21 frames | ~2s of history, robust to transients | | Aggregation | Trimmed mean (middle 50%) | More stable than pure median, less noisy than raw mean | -| EMA alpha | 0.02 | ~5s time constant — readings change very slowly | +| EMA alpha | 0.02 | ~5s time constant - readings change very slowly | | HR dead-band | ±2 BPM | Prevents display creep from micro-fluctuations | | BR dead-band | ±0.5 BPM | Same for breathing rate | | HR max jump | 8 BPM/frame | Outlier rejection threshold | diff --git a/docs/adr/ADR-049-cross-platform-wifi-interface-detection.md b/docs/adr/ADR-049-cross-platform-wifi-interface-detection.md index f8003d4ea..5f39d4a08 100644 --- a/docs/adr/ADR-049-cross-platform-wifi-interface-detection.md +++ 
b/docs/adr/ADR-049-cross-platform-wifi-interface-detection.md @@ -52,7 +52,7 @@ def create_collector( - macOS: MacosWifiCollector (CoreWLAN) 3. SimulatedCollector (always available) - Raises nothing — always returns a usable collector. + Raises nothing - always returns a usable collector. """ ``` @@ -81,7 +81,7 @@ The existing `_validate_interface()` continues to raise `RuntimeError` for direc When auto-detection skips a collector, log at `WARNING` level with actionable context: ``` -WiFi collector: LinuxWifiCollector unavailable (/proc/net/wireless not found — likely Docker/WSL). +WiFi collector: LinuxWifiCollector unavailable (/proc/net/wireless not found - likely Docker/WSL). WiFi collector: Falling back to SimulatedCollector. For real sensing, connect ESP32 nodes via UDP:5005. ``` @@ -93,18 +93,18 @@ Remove duplicated platform-detection logic from `ws_server.py` and `install.sh`. ### Positive -- **Zero-crash startup**: `create_collector("auto")` never raises — Docker, WSL, and headless users get `SimulatedCollector` automatically with a clear log message. +- **Zero-crash startup**: `create_collector("auto")` never raises - Docker, WSL, and headless users get `SimulatedCollector` automatically with a clear log message. - **Single detection path**: Platform logic lives in one place (`rssi_collector.py`), reducing drift between `ws_server.py`, `install.sh`, and future entry points. - **Better DX**: Error messages explain *why* a collector is unavailable and *what to do* (connect ESP32, install WiFi driver, etc.). ### Negative - **SimulatedCollector may mask hardware issues**: Users with real WiFi hardware that fails detection might unknowingly run on simulated data. Mitigated by the `WARNING`-level log. -- **Breaking change for direct `LinuxWifiCollector` callers**: Code that catches `RuntimeError` from `_validate_interface()` as a signal needs to migrate to `is_available()` or `create_collector()`. This is a minor change — there are no known external consumers. 
+- **Breaking change for direct `LinuxWifiCollector` callers**: Code that catches `RuntimeError` from `_validate_interface()` as a signal needs to migrate to `is_available()` or `create_collector()`. This is a minor change - there are no known external consumers. ### Neutral -- `_validate_interface()` behavior is unchanged for existing direct callers — this is additive. +- `_validate_interface()` behavior is unchanged for existing direct callers - this is additive. ## Implementation Notes diff --git a/docs/adr/ADR-050-quality-engineering-security-hardening.md b/docs/adr/ADR-050-quality-engineering-security-hardening.md index c37145eb2..138d4d49b 100644 --- a/docs/adr/ADR-050-quality-engineering-security-hardening.md +++ b/docs/adr/ADR-050-quality-engineering-security-hardening.md @@ -1,4 +1,4 @@ -# ADR-050: Quality Engineering Response — Security Hardening & Code Quality +# ADR-050: Quality Engineering Response - Security Hardening & Code Quality | Field | Value | |-------|-------| @@ -16,17 +16,17 @@ An independent quality engineering analysis ([issue #170](https://github.com/ruv | # | Finding | Location | Verified | |---|---------|----------|----------| -| 1 | Fake HMAC in `secure_tdm.rs` — XOR fold with hardcoded key | `hardware/src/esp32/secure_tdm.rs:253` | YES — comments say "sufficient for testing" | -| 2 | `sensing-server/main.rs` is 3,741 lines — CC=65, god object | `sensing-server/src/main.rs` | YES — confirmed 3,741 lines | -| 3 | WebSocket server has zero authentication | Rust WS codebase | YES — no auth/token checks found | -| 4 | Zero security tests in Rust codebase | Entire workspace | YES — no auth/injection/tampering tests | -| 5 | 54K fps claim has no supporting benchmark | No criterion benchmarks | YES — no benchmarks exist | +| 1 | Fake HMAC in `secure_tdm.rs` - XOR fold with hardcoded key | `hardware/src/esp32/secure_tdm.rs:253` | YES - comments say "sufficient for testing" | +| 2 | `sensing-server/main.rs` is 3,741 lines - CC=65, god object | 
`sensing-server/src/main.rs` | YES - confirmed 3,741 lines | +| 3 | WebSocket server has zero authentication | Rust WS codebase | YES - no auth/token checks found | +| 4 | Zero security tests in Rust codebase | Entire workspace | YES - no auth/injection/tampering tests | +| 5 | 54K fps claim has no supporting benchmark | No criterion benchmarks | YES - no benchmarks exist | ### Findings Requiring Further Investigation | # | Finding | Status | |---|---------|--------| -| 6 | Unauthenticated OTA firmware endpoint | Not found in Rust code — may be ESP32 C firmware level | +| 6 | Unauthenticated OTA firmware endpoint | Not found in Rust code - may be ESP32 C firmware level | | 7 | WASM upload without mandatory signatures | Needs review of WASM loader | | 8 | O(n^2) autocorrelation in heart rate detection | Needs profiling to confirm impact | @@ -87,7 +87,7 @@ Address findings in 3 priority sprints as recommended by the report. ### Neutral -- The report correctly identifies that life-safety claims (disaster detection, vital signs) require rigorous verification — this is an ongoing process, not a single sprint +- The report correctly identifies that life-safety claims (disaster detection, vital signs) require rigorous verification - this is an ongoing process, not a single sprint ## Acknowledgment diff --git a/docs/adr/ADR-052-ddd-bounded-contexts.md b/docs/adr/ADR-052-ddd-bounded-contexts.md index 39093fcab..538ac57a2 100644 --- a/docs/adr/ADR-052-ddd-bounded-contexts.md +++ b/docs/adr/ADR-052-ddd-bounded-contexts.md @@ -1,4 +1,4 @@ -# ADR-052 Appendix: DDD Bounded Contexts — Tauri Desktop Frontend +# ADR-052 Appendix: DDD Bounded Contexts - Tauri Desktop Frontend This document maps out the domain model for the RuView Tauri desktop application described in ADR-052. It defines bounded contexts, their aggregates, entities, @@ -54,7 +54,7 @@ discovered via multiple strategies, the most recent data wins. 
**Persistence**: The registry is persisted to `~/.ruview/nodes.db` (SQLite via `rusqlite`). On startup, all previously known nodes are loaded as `Offline` and reconciled against a fresh discovery scan. This means the app **remembers the -mesh** across restarts — critical for field deployments where nodes may be +mesh** across restarts - critical for field deployments where nodes may be temporarily powered off. #### `Node` (Entity) @@ -74,11 +74,11 @@ temporarily powered off. ### Value Objects -- `MacAddress` — 6-byte hardware address, formatted as `AA:BB:CC:DD:EE:FF` -- `HealthStatus` — enum: `Online`, `Offline`, `Degraded(reason: String)` -- `DiscoveryMethod` — enum: `Mdns`, `UdpProbe`, `HttpSweep`, `Manual` -- `TdmConfig` — `{ slot_index: u8, total_nodes: u8 }` -- `SemVer` — semantic version `major.minor.patch` +- `MacAddress` - 6-byte hardware address, formatted as `AA:BB:CC:DD:EE:FF` +- `HealthStatus` - enum: `Online`, `Offline`, `Degraded(reason: String)` +- `DiscoveryMethod` - enum: `Mdns`, `UdpProbe`, `HttpSweep`, `Manual` +- `TdmConfig` - `{ slot_index: u8, total_nodes: u8 }` +- `SemVer` - semantic version `major.minor.patch` ### Domain Events @@ -103,7 +103,7 @@ fn translate_ota_status(raw: &serde_json::Value) -> Result firmware_version: raw["version"].as_str().map(SemVer::parse).transpose()?, uptime_secs: raw["uptime_s"].as_u64(), free_heap: raw["free_heap"].as_u64(), - // Firmware may add fields in future versions — unknown fields are ignored + // Firmware may add fields in future versions - unknown fields are ignored } } ``` @@ -182,21 +182,21 @@ concurrently. Even-slot nodes update first, then odd-slot nodes. 
**Lifecycle**: `Planning → InProgress → Completed | PartialFailure | Aborted` -- `BatchNodeState` — enum: `Queued`, `Uploading(Progress)`, `Rebooting`, `Verifying`, `Done`, `Failed(String)`, `Skipped` -- `OtaStrategy` — enum: - - `Sequential` — one node at a time, wait for rejoin - - `TdmSafe` — update non-adjacent slots to maintain sensing coverage - - `Parallel` — all at once (development only) +- `BatchNodeState` - enum: `Queued`, `Uploading(Progress)`, `Rebooting`, `Verifying`, `Done`, `Failed(String)`, `Skipped` +- `OtaStrategy` - enum: + - `Sequential` - one node at a time, wait for rejoin + - `TdmSafe` - update non-adjacent slots to maintain sensing coverage + - `Parallel` - all at once (development only) ### Value Objects -- `SerialPort` — `{ name: String, vid: u16, pid: u16, manufacturer: Option }` -- `ChipType` — enum: `Esp32`, `Esp32s3`, `Esp32c3` -- `FlashPhase` — enum: `Connecting`, `Erasing`, `Writing`, `Verifying`, `Completed`, `Failed` -- `OtaPhase` — enum: `Uploading`, `Rebooting`, `Verifying`, `Completed`, `Failed` -- `Progress` — `{ bytes_done: u64, bytes_total: u64, speed_bps: u64 }` -- `Sha256Hash` — 32-byte hash -- `SecureString` — zeroized-on-drop string for PSK tokens +- `SerialPort` - `{ name: String, vid: u16, pid: u16, manufacturer: Option }` +- `ChipType` - enum: `Esp32`, `Esp32s3`, `Esp32c3` +- `FlashPhase` - enum: `Connecting`, `Erasing`, `Writing`, `Verifying`, `Completed`, `Failed` +- `OtaPhase` - enum: `Uploading`, `Rebooting`, `Verifying`, `Completed`, `Failed` +- `Progress` - `{ bytes_done: u64, bytes_total: u64, speed_bps: u64 }` +- `Sha256Hash` - 32-byte hash +- `SecureString` - zeroized-on-drop string for PSK tokens ### Domain Events @@ -241,7 +241,7 @@ impl From for FlashProgress { ## 3. 
Configuration / Provisioning Context -**Purpose**: Manage NVS configuration for ESP32 nodes — WiFi credentials, network +**Purpose**: Manage NVS configuration for ESP32 nodes - WiFi credentials, network targets, TDM mesh settings, edge intelligence parameters, WASM security keys. **Downstream of**: Device Discovery (needs serial port), Firmware Management (post-flash provisioning) @@ -316,9 +316,9 @@ pub struct MeshNodeEntry { ### Value Objects -- `ProvisionPhase` — enum: `Generating`, `Flashing`, `Verifying`, `Completed`, `Failed` -- `Direction` — enum: `Read`, `Write` -- `Preset` — enum: `Basic`, `Vitals`, `Mesh3`, `Mesh6Vitals` (ADR-044 Phase 3) +- `ProvisionPhase` - enum: `Generating`, `Flashing`, `Verifying`, `Completed`, `Failed` +- `Direction` - enum: `Read`, `Write` +- `Preset` - enum: `Basic`, `Vitals`, `Mesh3`, `Mesh6Vitals` (ADR-044 Phase 3) ### Domain Events @@ -369,11 +369,11 @@ An active connection to the sensing server's WebSocket for receiving real-time d ### Value Objects -- `ServerState` — enum: `Stopped`, `Starting`, `Running`, `Stopping`, `Crashed(exit_code: i32)` -- `ServerConfig` — `{ http_port: u16, ws_port: u16, udp_port: u16, model_dir: PathBuf, log_level: Level }` -- `LogEntry` — `{ timestamp: DateTime, level: Level, target: String, message: String }` -- `DataChannel` — enum: `CsiFrames`, `PoseUpdates`, `VitalSigns`, `ActivityClassification` -- `WsState` — enum: `Connecting`, `Connected`, `Disconnected(reason: String)` +- `ServerState` - enum: `Stopped`, `Starting`, `Running`, `Stopping`, `Crashed(exit_code: i32)` +- `ServerConfig` - `{ http_port: u16, ws_port: u16, udp_port: u16, model_dir: PathBuf, log_level: Level }` +- `LogEntry` - `{ timestamp: DateTime, level: Level, target: String, message: String }` +- `DataChannel` - enum: `CsiFrames`, `PoseUpdates`, `VitalSigns`, `ActivityClassification` +- `WsState` - enum: `Connecting`, `Connected`, `Disconnected(reason: String)` ### Domain Events @@ -421,8 +421,8 @@ Tracks all WASM modules 
across all nodes. ### Value Objects -- `ModuleId` — string identifier assigned by the node firmware -- `ModuleStatus` — enum: `Loaded`, `Running`, `Stopped`, `Error(String)` +- `ModuleId` - string identifier assigned by the node firmware +- `ModuleStatus` - enum: `Loaded`, `Running`, `Stopped`, `Error(String)` ### Domain Events @@ -465,7 +465,7 @@ fn translate_wasm_list(raw: &[serde_json::Value]) -> Vec { ## 6. Visualization Context -**Purpose**: Render real-time and historical sensing data — CSI heatmaps, pose +**Purpose**: Render real-time and historical sensing data - CSI heatmaps, pose skeletons, vital sign charts, mesh topology graphs. **Downstream of**: Sensing Pipeline (receives data events), Device Discovery (needs @@ -476,7 +476,7 @@ transforms domain events from other contexts into visual representations. ### Aggregates -None — this context is a **Query Model** (CQRS read side). It subscribes to +None - this context is a **Query Model** (CQRS read side). It subscribes to domain events and projects them into view models. ### View Models @@ -562,7 +562,7 @@ Device Discovery ──────────────────── communication without coupling contexts directly. 2. **State Isolation**: Each bounded context maintains its own `State<'_, T>` - managed by Tauri. Contexts do not share mutable state directly — they + managed by Tauri. Contexts do not share mutable state directly - they communicate exclusively through events. 3. 
**Module Organization**: Each bounded context maps to a Rust module under diff --git a/docs/adr/ADR-052-tauri-desktop-frontend.md b/docs/adr/ADR-052-tauri-desktop-frontend.md index d8ee87279..30fe67a96 100644 --- a/docs/adr/ADR-052-tauri-desktop-frontend.md +++ b/docs/adr/ADR-052-tauri-desktop-frontend.md @@ -1,4 +1,4 @@ -# ADR-052: Tauri Desktop Frontend — RuView Hardware Management & Visualization +# ADR-052: Tauri Desktop Frontend - RuView Hardware Management & Visualization | Field | Value | |-------|-------| @@ -23,16 +23,16 @@ RuView currently requires users to interact with multiple disconnected tools to | Mesh topology | Mental model | No visualization of TDM slots, sync, health | | Node discovery | Manual IP tracking | No mDNS/UDP broadcast discovery | -There is no single tool that provides a unified view of the entire deployment — from ESP32 hardware through the sensing pipeline to pose visualization. Field operators deploying multi-node meshes must context-switch between terminals, browsers, and serial monitors. +There is no single tool that provides a unified view of the entire deployment - from ESP32 hardware through the sensing pipeline to pose visualization. Field operators deploying multi-node meshes must context-switch between terminals, browsers, and serial monitors. ### Why a Desktop App A browser-based UI cannot access serial ports (for flashing), raw UDP sockets (for node discovery), or the local filesystem (for firmware binaries). A desktop application is required for hardware management. Tauri v2 is the natural choice because: -1. **Rust backend** — integrates directly with the existing Rust workspace (`wifi-densepose-rs`). Crates like `wifi-densepose-hardware` (serial port parsing), `wifi-densepose-config`, and `wifi-densepose-sensing-server` can be linked as library dependencies. -2. **Small binary** — Tauri bundles the system webview rather than shipping Chromium (~150 MB savings vs Electron). -3. 
**Cross-platform** — Windows, macOS, Linux from the same codebase. -4. **Security model** — Tauri's capability-based permissions system restricts frontend access to explicitly allowed Rust commands. +1. **Rust backend** - integrates directly with the existing Rust workspace (`wifi-densepose-rs`). Crates like `wifi-densepose-hardware` (serial port parsing), `wifi-densepose-config`, and `wifi-densepose-sensing-server` can be linked as library dependencies. +2. **Small binary** - Tauri bundles the system webview rather than shipping Chromium (~150 MB savings vs Electron). +3. **Cross-platform** - Windows, macOS, Linux from the same codebase. +4. **Security model** - Tauri's capability-based permissions system restricts frontend access to explicitly allowed Rust commands. ### Why Not Electron / Flutter / Native @@ -55,7 +55,7 @@ Add a new crate to the workspace: rust-port/wifi-densepose-rs/ Cargo.toml # Add "crates/wifi-densepose-desktop" to members crates/ - wifi-densepose-desktop/ # NEW — Tauri app crate + wifi-densepose-desktop/ # NEW - Tauri app crate Cargo.toml tauri.conf.json capabilities/ @@ -122,7 +122,7 @@ rust-port/wifi-densepose-rs/ types.ts # Shared TypeScript types ``` -### 2. Rust Backend — Tauri Commands +### 2. Rust Backend - Tauri Commands #### 2.1 Node Discovery @@ -130,7 +130,7 @@ rust-port/wifi-densepose-rs/ // commands/discovery.rs /// Discover ESP32 CSI nodes on the local network. -/// Strategy 1: mDNS — nodes announce _ruview._tcp service +/// Strategy 1: mDNS - nodes announce _ruview._tcp service /// Strategy 2: UDP broadcast probe on port 5005 (CSI aggregator port) /// Strategy 3: HTTP health check sweep on port 8032 (OTA server) #[tauri::command] @@ -224,7 +224,7 @@ async fn ota_update( #[tauri::command] async fn ota_status(node_ip: String, psk: Option) -> Result; -/// Batch OTA update — push firmware to multiple nodes sequentially. +/// Batch OTA update - push firmware to multiple nodes sequentially. 
/// Skips nodes already running the target version. #[tauri::command] async fn ota_batch_update( @@ -391,19 +391,19 @@ pub struct NvsConfig { +------------------------------------------+ Nav items: - [D] Dashboard — overview of all nodes and server - [F] Flash — firmware flashing wizard - [W] WASM — edge module management - [S] Sensing — live sensing data view - [M] Mesh — topology visualization - [T] Settings — ports, paths, preferences + [D] Dashboard - overview of all nodes and server + [F] Flash - firmware flashing wizard + [W] WASM - edge module management + [S] Sensing - live sensing data view + [M] Mesh - topology visualization + [T] Settings - ports, paths, preferences ``` #### 3.3 Dashboard Page The dashboard is the primary landing page showing: -1. **Node Grid** — cards for each discovered ESP32 node showing: +1. **Node Grid** - cards for each discovered ESP32 node showing: - IP address and hostname - Firmware version (with update indicator if newer available) - Node ID and TDM slot assignment @@ -412,20 +412,20 @@ The dashboard is the primary landing page showing: - Health status (online/offline/degraded) - Quick actions: OTA update, configure, view logs -2. **Sensing Server Panel** — start/stop button, port configuration, log tail +2. **Sensing Server Panel** - start/stop button, port configuration, log tail -3. **Discovery Controls** — scan button, auto-discovery toggle, network range filter +3. **Discovery Controls** - scan button, auto-discovery toggle, network range filter #### 3.4 Flash Firmware Page A wizard-style flow: -1. **Select Port** — dropdown of detected serial ports with chip info -2. **Select Firmware** — file picker for `.bin` files, or select from bundled builds -3. **Configure** — chip type, baud rate, flash mode -4. **Flash** — progress bar with phase indicators (connecting, erasing, writing, verifying) -5. **Provision** — optional NVS provisioning form (WiFi, target IP, TDM, edge tier) -6. 
**Verify** — serial monitor showing boot log, success/fail indicator +1. **Select Port** - dropdown of detected serial ports with chip info +2. **Select Firmware** - file picker for `.bin` files, or select from bundled builds +3. **Configure** - chip type, baud rate, flash mode +4. **Flash** - progress bar with phase indicators (connecting, erasing, writing, verifying) +5. **Provision** - optional NVS provisioning form (WiFi, target IP, TDM, edge tier) +6. **Verify** - serial monitor showing boot log, success/fail indicator #### 3.5 WASM Module Manager Page @@ -449,11 +449,11 @@ Embeds the existing web UI (`ui/`) via an iframe pointing at the sensing server' - Offline access to recorded data Key visualization components: -- **CSI Heatmap** — subcarrier amplitude over time -- **Signal Field** — 2D signal strength visualization -- **Pose Skeleton** — detected body keypoints and connections -- **Vital Signs** — real-time breathing rate and heart rate charts -- **Activity Classification** — current activity label with confidence +- **CSI Heatmap** - subcarrier amplitude over time +- **Signal Field** - 2D signal strength visualization +- **Pose Skeleton** - detected body keypoints and connections +- **Vital Signs** - real-time breathing rate and heart rate charts +- **Activity Classification** - current activity label with confidence #### 3.7 Mesh Topology Page @@ -639,7 +639,7 @@ cargo build --release -p wifi-densepose-sensing-server ### 9. Persistent Node Registry -Discovery alone is transient — nodes appear when they broadcast, disappear when they don't. A persistent local registry transforms discovery into **reconciliation**. +Discovery alone is transient - nodes appear when they broadcast, disappear when they don't. A persistent local registry transforms discovery into **reconciliation**. 
``` ~/.ruview/nodes.db (SQLite via rusqlite) @@ -649,16 +649,16 @@ Discovery alone is transient — nodes appear when they broadcast, disappear whe ```sql CREATE TABLE nodes ( - mac TEXT PRIMARY KEY, -- e.g. "AA:BB:CC:DD:EE:FF" - last_ip TEXT, -- last known IP - last_seen INTEGER NOT NULL, -- Unix timestamp - firmware TEXT, -- e.g. "0.3.1" - chip TEXT DEFAULT 'esp32s3', -- esp32, esp32s3, esp32c3 - mesh_role TEXT DEFAULT 'node', -- 'coordinator' | 'node' | 'aggregator' - tdm_slot INTEGER, -- assigned TDM slot index - capabilities TEXT, -- JSON: {"wasm": true, "ota": true, "csi": true} - friendly_name TEXT, -- user-assigned label - notes TEXT -- free-form notes + mac TEXT PRIMARY KEY, -- e.g. "AA:BB:CC:DD:EE:FF" + last_ip TEXT, -- last known IP + last_seen INTEGER NOT NULL, -- Unix timestamp + firmware TEXT, -- e.g. "0.3.1" + chip TEXT DEFAULT 'esp32s3', -- esp32, esp32s3, esp32c3 + mesh_role TEXT DEFAULT 'node', -- 'coordinator' | 'node' | 'aggregator' + tdm_slot INTEGER, -- assigned TDM slot index + capabilities TEXT, -- JSON: {"wasm": true, "ota": true, "csi": true} + friendly_name TEXT, -- user-assigned label + notes TEXT -- free-form notes ); ``` @@ -672,7 +672,7 @@ CREATE TABLE nodes ( This means the desktop app **remembers the mesh** across restarts, which is critical for field deployments where nodes may be offline temporarily. -### 10. OTA Safety Gate — Rolling Updates +### 10. OTA Safety Gate - Rolling Updates Mesh deployments cannot tolerate all nodes rebooting simultaneously. The OTA subsystem includes a **rolling update mode** that preserves sensing continuity: @@ -766,21 +766,21 @@ Total estimated effort: ~11 weeks for a single developer. 
### Positive -- **Single pane of glass** — all hardware management, sensing, and visualization in one app -- **No Python dependency** — Rust-native `espflash` replaces `esptool.py` for firmware flashing -- **Replaces 6+ CLI tools** — flash, provision, OTA, WASM management, server control, visualization -- **Accessible to non-developers** — GUI replaces CLI flags and curl commands -- **Cross-platform** — one codebase for Windows, macOS, Linux -- **Workspace integration** — shares types, config, and hardware crates with sensing server -- **Small binary** — ~15-20 MB vs ~150 MB for Electron equivalent +- **Single pane of glass** - all hardware management, sensing, and visualization in one app +- **No Python dependency** - Rust-native `espflash` replaces `esptool.py` for firmware flashing +- **Replaces 6+ CLI tools** - flash, provision, OTA, WASM management, server control, visualization +- **Accessible to non-developers** - GUI replaces CLI flags and curl commands +- **Cross-platform** - one codebase for Windows, macOS, Linux +- **Workspace integration** - shares types, config, and hardware crates with sensing server +- **Small binary** - ~15-20 MB vs ~150 MB for Electron equivalent ### Negative -- **New frontend dependency** — introduces Node.js/npm build step into the Rust workspace -- **Tauri version churn** — Tauri v2 is recent; API stability is not yet proven at scale -- **webkit2gtk on Linux** — depends on system webview version; old distros may have stale webkit -- **espflash limitations** — the `espflash` crate may not support all chip variants or flash modes that `esptool.py` handles; fallback to bundled Python is needed -- **Maintenance surface** — adds ~5,000 lines of TypeScript and ~2,000 lines of Rust +- **New frontend dependency** - introduces Node.js/npm build step into the Rust workspace +- **Tauri version churn** - Tauri v2 is recent; API stability is not yet proven at scale +- **webkit2gtk on Linux** - depends on system webview version; old distros 
may have stale webkit +- **espflash limitations** - the `espflash` crate may not support all chip variants or flash modes that `esptool.py` handles; fallback to bundled Python is needed +- **Maintenance surface** - adds ~5,000 lines of TypeScript and ~2,000 lines of Rust ### Risks @@ -801,10 +801,10 @@ Total estimated effort: ~11 weeks for a single developer. - ADR-039: ESP32 Edge Intelligence - ADR-040: WASM Programmable Sensing - ADR-044: Provisioning Tool Enhancements -- ADR-050: Quality Engineering — Security Hardening +- ADR-050: Quality Engineering - Security Hardening - ADR-051: Sensing Server Decomposition -- `firmware/esp32-csi-node/` — ESP32 firmware source -- `firmware/esp32-csi-node/provision.py` — Current provisioning script -- `rust-port/wifi-densepose-rs/crates/wifi-densepose-sensing-server/` — Sensing server -- `rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/` — Hardware crate -- `ui/` — Existing web UI +- `firmware/esp32-csi-node/` - ESP32 firmware source +- `firmware/esp32-csi-node/provision.py` - Current provisioning script +- `rust-port/wifi-densepose-rs/crates/wifi-densepose-sensing-server/` - Sensing server +- `rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/` - Hardware crate +- `ui/` - Existing web UI diff --git a/docs/adr/ADR-053-ui-design-system.md b/docs/adr/ADR-053-ui-design-system.md index bea42aafd..bea007ecf 100644 --- a/docs/adr/ADR-053-ui-design-system.md +++ b/docs/adr/ADR-053-ui-design-system.md @@ -1,4 +1,4 @@ -# ADR-053: UI Design System — Dark Professional + Unity-Inspired Interface +# ADR-053: UI Design System - Dark Professional + Unity-Inspired Interface | Field | Value | |-------|-------| @@ -9,13 +9,13 @@ ## Context -RuView Desktop (ADR-052) needs a UI design system that communicates precision and control — befitting a hardware management control plane for embedded sensing infrastructure. 
The interface must handle dense data (CSI heatmaps, node registries, log streams, mesh topologies) without feeling overwhelming, while remaining usable by both engineers and field operators. +RuView Desktop (ADR-052) needs a UI design system that communicates precision and control - befitting a hardware management control plane for embedded sensing infrastructure. The interface must handle dense data (CSI heatmaps, node registries, log streams, mesh topologies) without feeling overwhelming, while remaining usable by both engineers and field operators. Two design inspirations: -1. **Data-first professional tools** — Dense information displays where data speaks for itself. Clean typography, structured layouts, and deliberate use of color for status. The interface shows what matters and hides what doesn't. Think: network monitoring dashboards, embedded systems IDEs, infrastructure control panels. +1. **Data-first professional tools** - Dense information displays where data speaks for itself. Clean typography, structured layouts, and deliberate use of color for status. The interface shows what matters and hides what doesn't. Think: network monitoring dashboards, embedded systems IDEs, infrastructure control panels. -2. **Unity Editor** — Dockable panel system, inspector/hierarchy/scene separation, property grids, dark professional theme, and dense-but-organized data display. Unity's UI is purpose-built for managing complex real-time systems — exactly what RuView needs. +2. **Unity Editor** - Dockable panel system, inspector/hierarchy/scene separation, property grids, dark professional theme, and dense-but-organized data display. Unity's UI is purpose-built for managing complex real-time systems - exactly what RuView needs. The combination yields a professional control panel for WiFi sensing infrastructure. Data is organized into scannable panels with clear hierarchy. Status is communicated through consistent color coding. 
The layout adapts from high-level overview down to individual node details through progressive disclosure. @@ -23,13 +23,13 @@ The combination yields a professional control panel for WiFi sensing infrastruct ### Design Principles -1. **Data is the interface** — The system reveals patterns through visualization, not through explanation. Every pixel earns its place. -2. **Precision typography** — Typography is clean and authoritative. Technical values are displayed without ambiguity. Labels are concise. -3. **Panel-based layout** — Dockable regions inspired by Unity's panel system. The operator can see the entire mesh at a glance, then drill into any node. -4. **Status through color** — Deliberate color coding: green (online), amber (degraded), red (offline/failed), blue (scanning/new). No gratuitous color. -5. **Progressive disclosure** — Dashboard shows the overview. Clicking a node reveals its details. Summary first, detail on interaction. -6. **Dual typography** — Monospace for all technical values (MAC addresses, firmware versions, CSI amplitudes). Sans-serif for labels and descriptions. The contrast signals "data vs. context." -7. **Powered by rUv** — Subtle branding: footer tagline, about dialog, splash screen. +1. **Data is the interface** - The system reveals patterns through visualization, not through explanation. Every pixel earns its place. +2. **Precision typography** - Typography is clean and authoritative. Technical values are displayed without ambiguity. Labels are concise. +3. **Panel-based layout** - Dockable regions inspired by Unity's panel system. The operator can see the entire mesh at a glance, then drill into any node. +4. **Status through color** - Deliberate color coding: green (online), amber (degraded), red (offline/failed), blue (scanning/new). No gratuitous color. +5. **Progressive disclosure** - Dashboard shows the overview. Clicking a node reveals its details. Summary first, detail on interaction. +6. 
**Dual typography** - Monospace for all technical values (MAC addresses, firmware versions, CSI amplitudes). Sans-serif for labels and descriptions. The contrast signals "data vs. context." +7. **Powered by rUv** - Subtle branding: footer tagline, about dialog, splash screen. ### Color System @@ -54,7 +54,7 @@ The combination yields a professional control panel for WiFi sensing infrastruct --status-info: #58a6ff; /* Scanning, discovering, info */ /* Accent */ - --accent: #7c3aed; /* rUv purple — primary actions */ + --accent: #7c3aed; /* rUv purple - primary actions */ --accent-hover: #6d28d9; /* Borders */ @@ -107,7 +107,7 @@ Three-region layout: navigation sidebar, node list, and detail inspector. Unity' **Panel behaviors:** - Sidebar collapses to icon-only on narrow windows - Node List / Inspector split is resizable via drag handle -- Inspector scrolls independently — drill into any node without losing the list +- Inspector scrolls independently - drill into any node without losing the list - Status Bar shows global system state at a glance (node count, server status, port) ### Component Library @@ -164,7 +164,7 @@ Interactive 3D visualization of the sensing network. Each node is a sphere. Edge - Camera: `OrbitControls` for pan/zoom/rotate, reset button returns to default view - Follows existing patterns: `BufferGeometry` + `BufferAttribute` for dynamic updates (see `ui/observatory/js/subcarrier-manifold.js`) - Raycasting for node click → opens detail in Inspector panel -- Real-time updates as nodes join, leave, or change status — geometry attributes updated per frame +- Real-time updates as nodes join, leave, or change status - geometry attributes updated per frame #### 4. PropertyGrid (Unity Inspector-style) @@ -182,7 +182,7 @@ Interactive 3D visualization of the sensing network. Each node is a sphere. 
Edge | Sync Drift +0.12 ms | | WASM Modules [▼] | | [0] activity_detect running 12.4 KB 83 us/f | -| [1] vital_monitor stopped 8.1 KB — us/f | +| [1] vital_monitor stopped 8.1 KB - us/f | +-----------------------------------------------------------+ ``` @@ -235,7 +235,7 @@ Minimal and purposeful: - Node card health transition: 300ms (color fade, not flash) - Progress bar fill: smooth 60fps CSS transition - Mesh graph: Three.js render loop at 60fps, force simulation on requestAnimationFrame -- No loading spinners — use skeleton placeholders instead +- No loading spinners - use skeleton placeholders instead ### Branding @@ -262,7 +262,7 @@ Minimal and purposeful: ### Neutral -- The design system is CSS-only with React components — no heavy UI framework dependency +- The design system is CSS-only with React components - no heavy UI framework dependency - Component library can be extracted as a separate package if other rUv projects need it ## References diff --git a/docs/adr/ADR-054-desktop-full-implementation.md b/docs/adr/ADR-054-desktop-full-implementation.md index 08602f024..34a560d98 100644 --- a/docs/adr/ADR-054-desktop-full-implementation.md +++ b/docs/adr/ADR-054-desktop-full-implementation.md @@ -1,7 +1,7 @@ # ADR-054: RuView Desktop Full Implementation ## Status -**Accepted** — Implementation in progress +**Accepted** - Implementation in progress ## Context @@ -665,10 +665,10 @@ espflash = "4.0" ## 8. Rollout Plan -1. **v0.3.1** — Settings fix + Discovery + Server -2. **v0.4.0** — Flash + OTA (single node) -3. **v0.5.0** — Batch OTA + WASM + Provision -4. **v1.0.0** — Full E2E tested, security audited +1. **v0.3.1** - Settings fix + Discovery + Server +2. **v0.4.0** - Flash + OTA (single node) +3. **v0.5.0** - Batch OTA + WASM + Provision +4. 
**v1.0.0** - Full E2E tested, security audited --- diff --git a/docs/adr/ADR-057-firmware-csi-build-guard.md b/docs/adr/ADR-057-firmware-csi-build-guard.md index a602eabd1..2beefc761 100644 --- a/docs/adr/ADR-057-firmware-csi-build-guard.md +++ b/docs/adr/ADR-057-firmware-csi-build-guard.md @@ -12,17 +12,17 @@ Multiple GitHub issues (#223, #238, #234, #210, #190) report firmware problems that fall into two categories: -1. **CSI not enabled at runtime** — The committed `sdkconfig` had +1. **CSI not enabled at runtime** - The committed `sdkconfig` had `# CONFIG_ESP_WIFI_CSI_ENABLED is not set` (line 1135), meaning users who built from source or used pre-built binaries got the runtime error: `E (6700) wifi:CSI not enabled in menuconfig!` Root cause: `sdkconfig.defaults.template` existed with the correct setting (`CONFIG_ESP_WIFI_CSI_ENABLED=y`) but ESP-IDF only reads - `sdkconfig.defaults` — not `.template` suffixed files. No `sdkconfig.defaults` + `sdkconfig.defaults` - not `.template` suffixed files. No `sdkconfig.defaults` file was committed. -2. **Unsupported ESP32 variants** — Users attempting to use original ESP32 +2. **Unsupported ESP32 variants** - Users attempting to use original ESP32 (D0WD) and ESP32-C3 boards. The firmware targets ESP32-S3 only (`CONFIG_IDF_TARGET="esp32s3"`, Xtensa architecture) and this was not surfaced clearly enough in documentation or build errors. @@ -59,7 +59,7 @@ Change line 1135 from `# CONFIG_ESP_WIFI_CSI_ENABLED is not set` to source will get a clear compile error if CSI is somehow disabled. - **Positive**: Pre-built release binaries will include CSI support. - **Neutral**: Original ESP32 and ESP32-C3 remain unsupported. This is by - design — only ESP32-S3 has the CSI API surface we depend on. Future ADRs + design - only ESP32-S3 has the CSI API surface we depend on. Future ADRs may address multi-target support if demand warrants it. - **Negative**: None identified. 
diff --git a/docs/adr/ADR-058-ruvector-wasm-browser-pose-example.md b/docs/adr/ADR-058-ruvector-wasm-browser-pose-example.md index 1e25c81da..5fa826f48 100644 --- a/docs/adr/ADR-058-ruvector-wasm-browser-pose-example.md +++ b/docs/adr/ADR-058-ruvector-wasm-browser-pose-example.md @@ -1,4 +1,4 @@ -# ADR-058: Dual-Modal WASM Browser Pose Estimation — Live Video + WiFi CSI Fusion +# ADR-058: Dual-Modal WASM Browser Pose Estimation - Live Video + WiFi CSI Fusion - **Status**: Proposed - **Date**: 2026-03-12 @@ -9,17 +9,17 @@ WiFi-DensePose estimates human poses from WiFi CSI (Channel State Information). The `ruvector-cnn` crate provides a pure Rust CNN (MobileNet-V3) with WASM bindings. -Both modalities exist independently — what's missing is **fusing live webcam video +Both modalities exist independently - what's missing is **fusing live webcam video with WiFi CSI** in a single browser demo to achieve robust pose estimation that works even when one modality degrades (occlusion, signal noise, poor lighting). Existing assets: -1. **`wifi-densepose-wasm`** — CSI signal processing compiled to WASM -2. **`wifi-densepose-sensing-server`** — Axum server streaming live CSI via WebSocket -3. **`ruvector-cnn`** — Pure Rust CNN with MobileNet-V3 backbones, SIMD, contrastive learning -4. **`ruvector-cnn-wasm`** — wasm-bindgen bindings: `WasmCnnEmbedder`, `SimdOps`, `LayerOps`, contrastive losses -5. **`vendor/ruvector/examples/wasm-vanilla/`** — Reference vanilla JS WASM example +1. **`wifi-densepose-wasm`** - CSI signal processing compiled to WASM +2. **`wifi-densepose-sensing-server`** - Axum server streaming live CSI via WebSocket +3. **`ruvector-cnn`** - Pure Rust CNN with MobileNet-V3 backbones, SIMD, contrastive learning +4. **`ruvector-cnn-wasm`** - wasm-bindgen bindings: `WasmCnnEmbedder`, `SimdOps`, `LayerOps`, contrastive losses +5. 
**`vendor/ruvector/examples/wasm-vanilla/`** - Reference vanilla JS WASM example Research shows multi-modal fusion (camera + WiFi) significantly outperforms either alone: - Camera fails under occlusion, poor lighting, privacy constraints @@ -35,7 +35,7 @@ Build a **dual-modal browser demo** at `examples/wasm-browser-pose/` that: 3. Processes **both streams** through separate CNN pipelines in `ruvector-cnn-wasm` 4. **Fuses embeddings** with learned attention weights for combined pose estimation 5. Renders **video overlay** with skeleton + WiFi confidence heatmap on Canvas -6. Runs entirely in the browser — all inference client-side via WASM +6. Runs entirely in the browser - all inference client-side via WASM ### Architecture @@ -101,7 +101,7 @@ Both use the same `WasmCnnEmbedder` but with separate instances and weight sets. ```javascript // Attention fusion: learn which modality to trust per-dimension -// α ∈ [0,1]^512 — attention weights (shipped as JSON, trained offline) +// α ∈ [0,1]^512 - attention weights (shipped as JSON, trained offline) // visual_emb, csi_emb ∈ R^512 function fuseEmbeddings(visual_emb, csi_emb, attention_weights) { @@ -176,7 +176,7 @@ The demo supports three modes, selectable in the UI: | **Video Only** | ✅ | ❌ | α = 1.0 | No ESP32 available, quick demo | | **CSI Only** | ❌ | ✅ | α = 0.0 | Privacy mode, through-wall sensing | -**Video Only mode works without any hardware** — just a webcam — making the demo +**Video Only mode works without any hardware** - just a webcam - making the demo instantly accessible for anyone wanting to try it. ### File Layout @@ -209,7 +209,7 @@ examples/wasm-browser-pose/ ```bash #!/bin/bash -# build.sh — builds both WASM packages into pkg/ +# build.sh - builds both WASM packages into pkg/ set -e @@ -228,7 +228,7 @@ echo "Build complete. 
Serve with: python3 -m http.server 8080" ``` ┌─────────────────────────────────────────────────────────┐ -│ WiFi-DensePose — Live Dual-Modal Pose Estimation │ +│ WiFi-DensePose - Live Dual-Modal Pose Estimation │ │ [Dual Mode ▼] [⚙ Settings] FPS: 28 ◉ Live │ ├───────────────────────────┬─────────────────────────────┤ │ │ │ @@ -263,7 +263,7 @@ echo "Build complete. Serve with: python3 -m http.server 8080" | `wifi_densepose_wasm` | `wifi-densepose-wasm` | CSI frame parsing, signal processing, feature extraction | ~200KB | | `ruvector_cnn_wasm` | `ruvector-cnn-wasm` | `WasmCnnEmbedder` (×2 instances), `SimdOps`, `LayerOps`, contrastive losses | ~150KB | -Two `WasmCnnEmbedder` instances are created — one for video frames, one for CSI pseudo-images. +Two `WasmCnnEmbedder` instances are created - one for video frames, one for CSI pseudo-images. They share the same WASM module but have independent state. ### Browser API Requirements @@ -302,7 +302,7 @@ reducing dual-mode latency to ~max(15, 15) + 5 = ~20ms (50 FPS). The demo optionally shows real-time contrastive learning in the browser: -- **InfoNCE loss** (`WasmInfoNCELoss`): Compare video vs CSI embeddings for the same pose — trains cross-modal alignment +- **InfoNCE loss** (`WasmInfoNCELoss`): Compare video vs CSI embeddings for the same pose - trains cross-modal alignment - **Triplet loss** (`WasmTripletLoss`): Push apart different poses, pull together same pose across modalities - **SimdOps**: Accelerated dot products for real-time similarity computation - **Embedding space panel**: Live 2D projection shows video and CSI embeddings converging when viewing the same person @@ -323,7 +323,7 @@ No new Rust crates are needed. 
The example is pure HTML/JS consuming existing WA ### Positive -- **Instant demo**: Video-only mode works with just a webcam — no ESP32 needed +- **Instant demo**: Video-only mode works with just a webcam - no ESP32 needed - **Multi-modal showcase**: Demonstrates camera + WiFi fusion, the core innovation of the project - **Graceful degradation**: Works with video-only, CSI-only, or both - **Through-wall capability**: CSI mode shows pose estimation where cameras cannot reach @@ -348,16 +348,16 @@ No new Rust crates are needed. The example is pure HTML/JS consuming existing WA ## Implementation Plan -1. **Phase 1 — Scaffold**: File layout, build.sh, index.html shell, mode selector UI -2. **Phase 2 — Video pipeline**: getUserMedia → frame capture → CNN embedding → basic pose display -3. **Phase 3 — CSI pipeline**: WebSocket client → CSI parsing → pseudo-image → CNN embedding -4. **Phase 4 — Fusion**: Attention-weighted combination, confidence gating, mode switching -5. **Phase 5 — Pose decoder**: Linear projection with placeholder weights → 17 keypoints -6. **Phase 6 — Overlay renderer**: Video canvas with skeleton overlay, CSI heatmap panel -7. **Phase 7 — Training**: Use `wifi-densepose-train` to generate real weights for both CNNs + fusion + decoder -8. **Phase 8 — Contrastive demo**: Embedding space visualization, cross-modal similarity display -9. **Phase 9 — Web Workers**: Move CNN inference to workers for parallel video + CSI processing -10. **Phase 10 — Polish**: Recording, snapshots, adaptive quality, mobile optimization +1. **Phase 1 - Scaffold**: File layout, build.sh, index.html shell, mode selector UI +2. **Phase 2 - Video pipeline**: getUserMedia → frame capture → CNN embedding → basic pose display +3. **Phase 3 - CSI pipeline**: WebSocket client → CSI parsing → pseudo-image → CNN embedding +4. **Phase 4 - Fusion**: Attention-weighted combination, confidence gating, mode switching +5. 
**Phase 5 - Pose decoder**: Linear projection with placeholder weights → 17 keypoints +6. **Phase 6 - Overlay renderer**: Video canvas with skeleton overlay, CSI heatmap panel +7. **Phase 7 - Training**: Use `wifi-densepose-train` to generate real weights for both CNNs + fusion + decoder +8. **Phase 8 - Contrastive demo**: Embedding space visualization, cross-modal similarity display +9. **Phase 9 - Web Workers**: Move CNN inference to workers for parallel video + CSI processing +10. **Phase 10 - Polish**: Recording, snapshots, adaptive quality, mobile optimization ## Alternatives Considered @@ -385,8 +385,8 @@ Rejected: Adds build tooling. Vanilla JS + ES modules keeps the demo self-contai - [ADR-018: Binary CSI Frame Format](ADR-018-binary-csi-frame-format.md) - [ADR-024: Contrastive CSI Embedding / AETHER](ADR-024-contrastive-csi-embedding.md) - [ADR-055: Integrated Sensing Server](ADR-055-integrated-sensing-server.md) -- `vendor/ruvector/crates/ruvector-cnn/src/lib.rs` — CNN embedder implementation -- `vendor/ruvector/crates/ruvector-cnn-wasm/src/lib.rs` — WASM bindings -- `vendor/ruvector/examples/wasm-vanilla/index.html` — Reference vanilla JS WASM pattern -- Person-in-WiFi: Fine-grained Person Perception using WiFi (ICCV 2019) — camera+WiFi fusion precedent -- WiPose: Multi-Person WiFi Pose Estimation (TMC 2022) — cross-modal embedding approach +- `vendor/ruvector/crates/ruvector-cnn/src/lib.rs` - CNN embedder implementation +- `vendor/ruvector/crates/ruvector-cnn-wasm/src/lib.rs` - WASM bindings +- `vendor/ruvector/examples/wasm-vanilla/index.html` - Reference vanilla JS WASM pattern +- Person-in-WiFi: Fine-grained Person Perception using WiFi (ICCV 2019) - camera+WiFi fusion precedent +- WiPose: Multi-Person WiFi Pose Estimation (TMC 2022) - cross-modal embedding approach diff --git a/docs/adr/ADR-059-live-esp32-csi-pipeline.md b/docs/adr/ADR-059-live-esp32-csi-pipeline.md index a08ecc0bc..a48f4db82 100644 --- a/docs/adr/ADR-059-live-esp32-csi-pipeline.md 
+++ b/docs/adr/ADR-059-live-esp32-csi-pipeline.md @@ -24,16 +24,16 @@ ESP32-S3 (CSI capture) → UDP:5005 → sensing-server (Rust/Axum) → WS:8765 ### Components -1. **ESP32 Firmware** — Rebuilt with native Windows ESP-IDF v5.4.0 toolchain (no Docker). Configured for target network and PC IP via `sdkconfig`. Helper scripts added: - - `build_firmware.ps1` — Sets up IDF environment, cleans, builds, and flashes - - `read_serial.ps1` — Serial monitor with DTR/RTS reset capability +1. **ESP32 Firmware** - Rebuilt with native Windows ESP-IDF v5.4.0 toolchain (no Docker). Configured for target network and PC IP via `sdkconfig`. Helper scripts added: + - `build_firmware.ps1` - Sets up IDF environment, cleans, builds, and flashes + - `read_serial.ps1` - Serial monitor with DTR/RTS reset capability -2. **Sensing Server** — `wifi-densepose-sensing-server` started with: - - `--source esp32` — Expect real ESP32 UDP frames - - `--bind-addr 0.0.0.0` — Accept connections from any interface - - `--ui-path ` — Serve the demo UI via HTTP +2. **Sensing Server** - `wifi-densepose-sensing-server` started with: + - `--source esp32` - Expect real ESP32 UDP frames + - `--bind-addr 0.0.0.0` - Accept connections from any interface + - `--ui-path ` - Serve the demo UI via HTTP -3. **Browser Demo** — `main.js` updated to auto-connect to `ws://localhost:8765/ws/sensing` on page load. Falls back to simulated CSI if the WebSocket is unavailable (GitHub Pages). +3. **Browser Demo** - `main.js` updated to auto-connect to `ws://localhost:8765/ws/sensing` on page load. Falls back to simulated CSI if the WebSocket is unavailable (GitHub Pages). ### Network Configuration @@ -77,7 +77,7 @@ The `build_firmware.ps1` script handles all of this automatically. 
## Related -- [ADR-018](ADR-018-esp32-dev-implementation.md) — ESP32 CSI frame format and UDP streaming -- [ADR-058](ADR-058-ruvector-wasm-browser-pose-example.md) — Dual-modal WASM browser pose demo -- [ADR-039](ADR-039-edge-intelligence-framework.md) — Edge intelligence on ESP32 -- Issue [#245](https://github.com/ruvnet/RuView/issues/245) — Tracking issue +- [ADR-018](ADR-018-esp32-dev-implementation.md) - ESP32 CSI frame format and UDP streaming +- [ADR-058](ADR-058-ruvector-wasm-browser-pose-example.md) - Dual-modal WASM browser pose demo +- [ADR-039](ADR-039-edge-intelligence-framework.md) - Edge intelligence on ESP32 +- Issue [#245](https://github.com/ruvnet/RuView/issues/245) - Tracking issue diff --git a/docs/adr/ADR-061-qemu-esp32s3-firmware-testing.md b/docs/adr/ADR-061-qemu-esp32s3-firmware-testing.md index 6811cb7a7..7cf4ac89e 100644 --- a/docs/adr/ADR-061-qemu-esp32s3-firmware-testing.md +++ b/docs/adr/ADR-061-qemu-esp32s3-firmware-testing.md @@ -13,16 +13,16 @@ The ESP32-S3 CSI node firmware (`firmware/esp32-csi-node/`) has grown to 16 sour | Module | File | Testable in QEMU? 
| |--------|------|--------------------| -| NVS config load | `nvs_config.c` | Yes — NVS partition in flash image | -| Edge processing (DSP) | `edge_processing.c` | Yes — all math, no HW dependency | -| ADR-018 frame serialization | `csi_collector.c:csi_serialize_frame()` | Yes — pure buffer ops | -| UDP stream sender | `stream_sender.c` | Yes — QEMU has lwIP via SLIRP | -| WASM runtime | `wasm_runtime.c` | Yes — CPU only | -| OTA update | `ota_update.c` | Partial — needs HTTP mock | -| Power management | `power_mgmt.c` | Partial — no real light-sleep | -| Display (OLED) | `display_*.c` | No — I2C hardware | -| WiFi CSI callback | `csi_collector.c:wifi_csi_callback()` | **No** — requires RF PHY | -| Channel hopping | `csi_collector.c:hop_timer_cb()` | **No** — requires `esp_wifi_set_channel()` | +| NVS config load | `nvs_config.c` | Yes - NVS partition in flash image | +| Edge processing (DSP) | `edge_processing.c` | Yes - all math, no HW dependency | +| ADR-018 frame serialization | `csi_collector.c:csi_serialize_frame()` | Yes - pure buffer ops | +| UDP stream sender | `stream_sender.c` | Yes - QEMU has lwIP via SLIRP | +| WASM runtime | `wasm_runtime.c` | Yes - CPU only | +| OTA update | `ota_update.c` | Partial - needs HTTP mock | +| Power management | `power_mgmt.c` | Partial - no real light-sleep | +| Display (OLED) | `display_*.c` | No - I2C hardware | +| WiFi CSI callback | `csi_collector.c:wifi_csi_callback()` | **No** - requires RF PHY | +| Channel hopping | `csi_collector.c:hop_timer_cb()` | **No** - requires `esp_wifi_set_channel()` | Currently, **every code change requires flashing to physical hardware** on COM7. 
This creates a bottleneck: - Build + flash cycle: ~20 seconds @@ -36,20 +36,20 @@ Espressif maintains an official QEMU fork (`github.com/espressif/qemu`) with ESP | Term | Definition | |------|-----------| -| CSI | Channel State Information — per-subcarrier amplitude/phase from WiFi | -| NVS | Non-Volatile Storage — ESP-IDF key-value flash partition | -| TDM | Time-Division Multiplexing — nodes transmit in assigned time slots | -| UART | Universal Asynchronous Receiver-Transmitter — serial console output | -| SLIRP | User-mode TCP/IP stack — enables networking without root/TAP | -| QEMU | Quick Emulator — runs ESP32-S3 firmware without physical hardware | -| QMP | QEMU Machine Protocol — JSON-based control interface | -| LFSR | Linear Feedback Shift Register — deterministic pseudo-random generator | -| SPSC | Single Producer Single Consumer — lock-free ring buffer pattern | +| CSI | Channel State Information - per-subcarrier amplitude/phase from WiFi | +| NVS | Non-Volatile Storage - ESP-IDF key-value flash partition | +| TDM | Time-Division Multiplexing - nodes transmit in assigned time slots | +| UART | Universal Asynchronous Receiver-Transmitter - serial console output | +| SLIRP | User-mode TCP/IP stack - enables networking without root/TAP | +| QEMU | Quick Emulator - runs ESP32-S3 firmware without physical hardware | +| QMP | QEMU Machine Protocol - JSON-based control interface | +| LFSR | Linear Feedback Shift Register - deterministic pseudo-random generator | +| SPSC | Single Producer Single Consumer - lock-free ring buffer pattern | | FreeRTOS | Real-time OS used by ESP-IDF for task scheduling | | gcov/lcov | GCC code coverage tools for line/branch analysis | | libFuzzer | LLVM coverage-guided fuzzer for finding crashes | -| ASAN | AddressSanitizer — detects buffer overflows and use-after-free | -| UBSAN | UndefinedBehaviorSanitizer — detects undefined C behavior | +| ASAN | AddressSanitizer - detects buffer overflows and use-after-free | +| UBSAN | 
UndefinedBehaviorSanitizer - detects undefined C behavior | ## Quick Start @@ -76,7 +76,7 @@ brew install lcov # macOS # Fuzz testing (optional, Layer 6) sudo apt install clang # Debian/Ubuntu -# Mesh testing (optional, Layer 3 — requires root) +# Mesh testing (optional, Layer 3 - requires root) sudo apt install socat bridge-utils iproute2 ``` @@ -128,15 +128,15 @@ bash scripts/qemu-chaos-test.sh --faults all --duration 120 Introduce a **comprehensive QEMU testing platform** for the ESP32-S3 CSI node firmware with nine capability layers: -1. **Mock CSI generator** — compile-time synthetic CSI frame injection -2. **QEMU runner** — automated build, run, and validation -3. **Multi-node mesh simulation** — TDM and aggregation testing across QEMU instances -4. **GDB remote debugging** — zero-cost breakpoint debugging without JTAG -5. **Code coverage** — gcov/lcov integration for path analysis -6. **Fuzz testing** — malformed input resilience for CSI parser, NVS, WASM -7. **NVS provisioning matrix** — exhaustive config combination testing -8. **Snapshot & replay** — sub-100ms state restore for fast iteration -9. **Chaos testing** — fault injection for resilience validation +1. **Mock CSI generator** - compile-time synthetic CSI frame injection +2. **QEMU runner** - automated build, run, and validation +3. **Multi-node mesh simulation** - TDM and aggregation testing across QEMU instances +4. **GDB remote debugging** - zero-cost breakpoint debugging without JTAG +5. **Code coverage** - gcov/lcov integration for path analysis +6. **Fuzz testing** - malformed input resilience for CSI parser, NVS, WASM +7. **NVS provisioning matrix** - exhaustive config combination testing +8. **Snapshot & replay** - sub-100ms state restore for fast iteration +9. 
**Chaos testing** - fault injection for resilience validation

---

@@ -179,7 +179,7 @@ Introduce a **comprehensive QEMU testing platform** for the ESP32-S3 CSI node fi
 When `CONFIG_CSI_MOCK_ENABLED=y` (Kconfig option), the build replaces `esp_wifi_set_csi_config()` / `esp_wifi_set_csi_rx_cb()` with a periodic timer that injects synthetic CSI frames:

 ```c
-// mock_csi.c — synthetic CSI frame generator
+// mock_csi.c - synthetic CSI frame generator

 #define MOCK_CSI_INTERVAL_MS 50 // 20 Hz (matches real CSI rate)
 #define MOCK_N_SUBCARRIERS 52 // HT20 mode
@@ -452,7 +452,7 @@ Run multiple QEMU instances with TAP networking to test TDM slot coordination an

 ```bash
 #!/bin/bash
-# scripts/qemu-mesh-test.sh — run 3 QEMU nodes + Rust aggregator
+# scripts/qemu-mesh-test.sh - run 3 QEMU nodes + Rust aggregator

 set -euo pipefail

@@ -479,7 +479,7 @@ for i in $(seq 0 $((N_NODES - 1))); do
 done

 # Start Rust aggregator in background
-cargo run -p wifi-densepose-hardware --bin aggregator -- \
+cargo run -p wifi-densepose-hardware --bin aggregator -- \
   --listen 0.0.0.0:${AGGREGATOR_PORT} \
   --expect-nodes "$N_NODES" \
   --output build/mesh_test_results.json &
@@ -686,7 +686,7 @@ genhtml coverage_filtered.info --output-directory build/coverage_report
 ### Implementation Approach

 ```c
-// test/fuzz_csi_serialize.c — runs on host (not ESP32)
+// test/fuzz_csi_serialize.c - runs on host (not ESP32)
 // Compiled with: clang -fsanitize=fuzzer,address

 #include "csi_collector.h"
@@ -885,35 +885,35 @@ done
 ```
 firmware/esp32-csi-node/
 ├── main/
-│ ├── mock_csi.c # NEW — synthetic CSI frame generator
-│ ├── mock_csi.h # NEW — mock API + scenario definitions
-│ ├── Kconfig.projbuild # MODIFIED — CONFIG_CSI_MOCK_* options
-│ ├── CMakeLists.txt # MODIFIED — conditional mock_csi.c inclusion
+│ ├── mock_csi.c # NEW - synthetic CSI frame generator
+│ ├── mock_csi.h # NEW - mock API + scenario definitions
+│ ├── Kconfig.projbuild # MODIFIED - CONFIG_CSI_MOCK_* options
+│ ├── CMakeLists.txt # MODIFIED - 
conditional mock_csi.c inclusion │ └── ... (existing files unchanged) ├── test/ -│ ├── fuzz_csi_serialize.c # NEW — libFuzzer target for serialization -│ ├── fuzz_nvs_config.c # NEW — libFuzzer target for NVS parsing -│ ├── fuzz_edge_enqueue.c # NEW — libFuzzer target for ring buffer -│ └── corpus/ # NEW — seed inputs for fuzz targets -├── sdkconfig.qemu # NEW — QEMU-specific sdkconfig overlay -├── sdkconfig.coverage # NEW — gcov-enabled sdkconfig overlay +│ ├── fuzz_csi_serialize.c # NEW - libFuzzer target for serialization +│ ├── fuzz_nvs_config.c # NEW - libFuzzer target for NVS parsing +│ ├── fuzz_edge_enqueue.c # NEW - libFuzzer target for ring buffer +│ └── corpus/ # NEW - seed inputs for fuzz targets +├── sdkconfig.qemu # NEW - QEMU-specific sdkconfig overlay +├── sdkconfig.coverage # NEW - gcov-enabled sdkconfig overlay └── ... scripts/ -├── qemu-esp32s3-test.sh # NEW — single-node QEMU runner -├── qemu-mesh-test.sh # NEW — multi-node mesh runner -├── qemu-chaos-test.sh # NEW — chaos/fault injection runner -├── validate_qemu_output.py # NEW — UART log validation -├── validate_mesh_test.py # NEW — mesh test validation -├── generate_nvs_matrix.py # NEW — NVS config matrix generator -├── inject_fault.py # NEW — QEMU fault injection -└── check_health.py # NEW — post-fault health checker +├── qemu-esp32s3-test.sh # NEW - single-node QEMU runner +├── qemu-mesh-test.sh # NEW - multi-node mesh runner +├── qemu-chaos-test.sh # NEW - chaos/fault injection runner +├── validate_qemu_output.py # NEW - UART log validation +├── validate_mesh_test.py # NEW - mesh test validation +├── generate_nvs_matrix.py # NEW - NVS config matrix generator +├── inject_fault.py # NEW - QEMU fault injection +└── check_health.py # NEW - post-fault health checker .vscode/ -└── launch.json # MODIFIED — add QEMU GDB debug config +└── launch.json # MODIFIED - add QEMU GDB debug config .github/workflows/ -└── firmware-qemu.yml # NEW — CI workflow with matrix +└── firmware-qemu.yml # NEW - CI 
workflow with matrix ``` --- @@ -922,24 +922,24 @@ scripts/ ### Benefits -1. **No hardware required** — contributors validate firmware changes with QEMU alone -2. **Automated CI** — every PR touching `firmware/` runs 14 NVS configs × 10 scenarios in parallel -3. **10× faster iteration** — snapshot restore in <100ms vs 20s flash cycle -4. **Security hardening** — fuzz testing catches buffer overflows, NULL derefs, and parser bugs before they reach hardware -5. **Mesh validation** — multi-node TDM tested without 3 physical ESP32s -6. **Coverage visibility** — lcov reports show untested edge processing paths -7. **Resilience proof** — chaos tests verify firmware recovers from WiFi drops, OOM, and ring overflow -8. **GDB debugging** — set breakpoints on DSP pipeline without JTAG adapter -9. **Regression detection** — boot failures, NVS parsing errors, and FreeRTOS deadlocks caught in CI +1. **No hardware required** - contributors validate firmware changes with QEMU alone +2. **Automated CI** - every PR touching `firmware/` runs 14 NVS configs × 10 scenarios in parallel +3. **10× faster iteration** - snapshot restore in <100ms vs 20s flash cycle +4. **Security hardening** - fuzz testing catches buffer overflows, NULL derefs, and parser bugs before they reach hardware +5. **Mesh validation** - multi-node TDM tested without 3 physical ESP32s +6. **Coverage visibility** - lcov reports show untested edge processing paths +7. **Resilience proof** - chaos tests verify firmware recovers from WiFi drops, OOM, and ring overflow +8. **GDB debugging** - set breakpoints on DSP pipeline without JTAG adapter +9. **Regression detection** - boot failures, NVS parsing errors, and FreeRTOS deadlocks caught in CI ### Limitations -1. **No real WiFi/CSI** — QEMU cannot emulate the ESP32-S3 WiFi radio or CSI extraction hardware -2. **Synthetic CSI fidelity** — mock frames approximate real CSI patterns but don't capture real-world multipath, interference, or antenna characteristics -3. 
**Timing differences** — QEMU timing is not cycle-accurate; FreeRTOS tick rates may differ from hardware -4. **No peripheral testing** — I2C display, real GPIO, and light-sleep power management cannot be tested -5. **QEMU build requirement** — Espressif's QEMU fork must be built from source (not in Ubuntu packages) -6. **Coverage overhead** — gcov-enabled builds are ~2× slower in QEMU +1. **No real WiFi/CSI** - QEMU cannot emulate the ESP32-S3 WiFi radio or CSI extraction hardware +2. **Synthetic CSI fidelity** - mock frames approximate real CSI patterns but don't capture real-world multipath, interference, or antenna characteristics +3. **Timing differences** - QEMU timing is not cycle-accurate; FreeRTOS tick rates may differ from hardware +4. **No peripheral testing** - I2C display, real GPIO, and light-sleep power management cannot be tested +5. **QEMU build requirement** - Espressif's QEMU fork must be built from source (not in Ubuntu packages) +6. **Coverage overhead** - gcov-enabled builds are ~2× slower in QEMU ### What QEMU Testing Covers vs Requires Hardware @@ -971,7 +971,7 @@ scripts/ ### 1. Host-native unit tests (no QEMU) Extract pure C functions (`csi_serialize_frame`, edge DSP math) and compile/test on host with CMock/Unity. Simpler but doesn't test FreeRTOS integration, NVS, or boot sequence. -**Verdict**: Complementary — do both. Host unit tests for math, QEMU for integration. Fuzz targets (Layer 6) already use host-native compilation. +**Verdict**: Complementary - do both. Host unit tests for math, QEMU for integration. Fuzz targets (Layer 6) already use host-native compilation. ### 2. Hardware-in-the-loop CI (real ESP32 on runner) Use a self-hosted GitHub Actions runner with a physical ESP32-S3 attached. @@ -981,7 +981,7 @@ Use a self-hosted GitHub Actions runner with a physical ESP32-S3 attached. ### 3. Docker-based ESP-IDF build only (no runtime test) Just verify the firmware compiles in CI without running it. 
-**Verdict**: Already possible but insufficient — compilation doesn't catch runtime bugs (stack overflow, NVS parsing errors, FreeRTOS deadlocks). +**Verdict**: Already possible but insufficient - compilation doesn't catch runtime bugs (stack overflow, NVS parsing errors, FreeRTOS deadlocks). ### 4. Renode emulator Alternative to QEMU with better peripheral modeling for some platforms. @@ -992,10 +992,10 @@ Alternative to QEMU with better peripheral modeling for some platforms. ## References -- [Espressif QEMU fork](https://github.com/espressif/qemu) — official ESP32/S3/C3/H2 support +- [Espressif QEMU fork](https://github.com/espressif/qemu) - official ESP32/S3/C3/H2 support - [ESP-IDF QEMU guide](https://docs.espressif.com/projects/esp-idf/en/latest/esp32s3/api-guides/tools/qemu.html) -- [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html) — LLVM-based coverage-guided fuzzing -- [lcov](https://github.com/linux-test-project/lcov) — Linux test coverage visualization +- [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html) - LLVM-based coverage-guided fuzzing +- [lcov](https://github.com/linux-test-project/lcov) - Linux test coverage visualization - ADR-018: Binary CSI frame format (magic `0xC5110001`) - ADR-039: Edge intelligence pipeline (biquad, vitals, fall detection) - ADR-040: WASM programmable sensing runtime @@ -1008,25 +1008,25 @@ Alternative to QEMU with better peripheral modeling for some platforms. ### Bugs Fixed -1. **LFSR float bias** — `lfsr_float()` used divisor 32767.5 producing range [-1.0, 1.00002]; fixed to 32768.0 for exact [-1.0, +1.0) -2. **MAC filter initialization** — `gen_mac_filter()` compared `frame_count == scenario_start_ms` (count vs timestamp); replaced with boolean flag -3. **Scenario infinite loop** — `advance_scenario()` looped to scenario 0 when all completed; now sets `s_all_done=true` and timer callback exits early -4. 
**Boot check severity** — `validate_qemu_output.py` reported no-boot as ERROR; upgraded to FATAL (nothing works without boot) -5. **NVS boundary configs** — `boundary-max` used `vital_win=65535` which firmware silently rejects (valid: 32-256); fixed to 256 -6. **NVS boundary-min** — `vital_win=1` also invalid; fixed to 32 (firmware min) -7. **edge-tier2-custom** — `vital_win=512` exceeded firmware max of 256; fixed to 256 -8. **power-save config** — Described as "10% duty cycle" but didn't set `power_duty=10`; fixed -9. **wasm-signed/unsigned** — Both configs were identical; signed now includes pubkey blob, unsigned sets `wasm_verify=0` +1. **LFSR float bias** - `lfsr_float()` used divisor 32767.5 producing range [-1.0, 1.00002]; fixed to 32768.0 for exact [-1.0, +1.0) +2. **MAC filter initialization** - `gen_mac_filter()` compared `frame_count == scenario_start_ms` (count vs timestamp); replaced with boolean flag +3. **Scenario infinite loop** - `advance_scenario()` looped to scenario 0 when all completed; now sets `s_all_done=true` and timer callback exits early +4. **Boot check severity** - `validate_qemu_output.py` reported no-boot as ERROR; upgraded to FATAL (nothing works without boot) +5. **NVS boundary configs** - `boundary-max` used `vital_win=65535` which firmware silently rejects (valid: 32-256); fixed to 256 +6. **NVS boundary-min** - `vital_win=1` also invalid; fixed to 32 (firmware min) +7. **edge-tier2-custom** - `vital_win=512` exceeded firmware max of 256; fixed to 256 +8. **power-save config** - Described as "10% duty cycle" but didn't set `power_duty=10`; fixed +9. **wasm-signed/unsigned** - Both configs were identical; signed now includes pubkey blob, unsigned sets `wasm_verify=0` ### Optimizations Applied -1. **SLIRP networking** — QEMU runner now passes `-nic user,model=open_eth` for UDP testing -2. **Scenario completion tracking** — Validator now checks `All N scenarios complete` log marker (check 15) -3. 
**Frame rate monitoring** — Validator extracts `scenario=N frames=M` counters for rate analysis (check 16) -4. **Watchdog tuning** — `sdkconfig.qemu` relaxes WDT to 30s / INT_WDT to 800ms for QEMU timing variance -5. **Timer stack depth** — Increased `FREERTOS_TIMER_TASK_STACK_DEPTH=4096` to prevent overflow from math-heavy mock callback -6. **Display disabled** — `CONFIG_DISPLAY_ENABLE=n` in QEMU overlay (no I2C hardware) -7. **CI fuzz job** — Added `fuzz-test` job running all 3 fuzz targets for 60s each with crash artifact upload -8. **CI NVS validation** — Added `nvs-matrix-validate` job that generates all 14 binaries and verifies sizes -9. **CI matrix expanded** — Added `edge-tier1`, `boundary-max`, `boundary-min` to QEMU test matrix (4 → 7 configs) -10. **QEMU cache key** — Uses `github.run_id` with restore-keys fallback to prevent stale QEMU builds +1. **SLIRP networking** - QEMU runner now passes `-nic user,model=open_eth` for UDP testing +2. **Scenario completion tracking** - Validator now checks `All N scenarios complete` log marker (check 15) +3. **Frame rate monitoring** - Validator extracts `scenario=N frames=M` counters for rate analysis (check 16) +4. **Watchdog tuning** - `sdkconfig.qemu` relaxes WDT to 30s / INT_WDT to 800ms for QEMU timing variance +5. **Timer stack depth** - Increased `FREERTOS_TIMER_TASK_STACK_DEPTH=4096` to prevent overflow from math-heavy mock callback +6. **Display disabled** - `CONFIG_DISPLAY_ENABLE=n` in QEMU overlay (no I2C hardware) +7. **CI fuzz job** - Added `fuzz-test` job running all 3 fuzz targets for 60s each with crash artifact upload +8. **CI NVS validation** - Added `nvs-matrix-validate` job that generates all 14 binaries and verifies sizes +9. **CI matrix expanded** - Added `edge-tier1`, `boundary-max`, `boundary-min` to QEMU test matrix (4 → 7 configs) +10. 
**QEMU cache key** - Uses `github.run_id` with restore-keys fallback to prevent stale QEMU builds diff --git a/docs/adr/ADR-062-qemu-swarm-configurator.md b/docs/adr/ADR-062-qemu-swarm-configurator.md index a24d3ca0b..eabafa95d 100644 --- a/docs/adr/ADR-062-qemu-swarm-configurator.md +++ b/docs/adr/ADR-062-qemu-swarm-configurator.md @@ -21,18 +21,18 @@ ADR-061 Layer 3 provides a basic multi-node mesh test: N identical nodes with sequential TDM slots connected via a Linux bridge. This is useful but limited: -1. **All nodes are identical** — real deployments have heterogeneous roles (sensor, coordinator, gateway) -2. **Single topology** — only fully-connected bridge; no star, line, or ring topologies -3. **No scenario variation per node** — all nodes run the same mock CSI scenario -4. **Manual configuration** — each test requires hand-editing env vars and arguments -5. **No swarm-level health monitoring** — validation checks individual nodes, not collective behavior -6. **No cross-node timing validation** — TDM slot ordering and inter-frame gaps aren't verified +1. **All nodes are identical** - real deployments have heterogeneous roles (sensor, coordinator, gateway) +2. **Single topology** - only fully-connected bridge; no star, line, or ring topologies +3. **No scenario variation per node** - all nodes run the same mock CSI scenario +4. **Manual configuration** - each test requires hand-editing env vars and arguments +5. **No swarm-level health monitoring** - validation checks individual nodes, not collective behavior +6. **No cross-node timing validation** - TDM slot ordering and inter-frame gaps aren't verified -Real WiFi-DensePose deployments use 3-8 ESP32-S3 nodes in various topologies. A single coordinator aggregates CSI from multiple sensors. The firmware must handle TDM conflicts, missing nodes, role-based behavior differences, and network partitions — none of which ADR-061 Layer 3 tests. 
+Real WiFi-DensePose deployments use 3-8 ESP32-S3 nodes in various topologies. A single coordinator aggregates CSI from multiple sensors. The firmware must handle TDM conflicts, missing nodes, role-based behavior differences, and network partitions - none of which ADR-061 Layer 3 tests. ## Decision -Build a **QEMU Swarm Configurator** — a YAML-driven tool that defines multi-node test scenarios declaratively and orchestrates them under QEMU with swarm-level validation. +Build a **QEMU Swarm Configurator** - a YAML-driven tool that defines multi-node test scenarios declaratively and orchestrates them under QEMU with swarm-level validation. ### Architecture @@ -178,18 +178,18 @@ scripts/ ### Benefits -1. **Declarative testing** — define swarm topology in YAML, not shell scripts -2. **Role-based nodes** — test coordinator/sensor/gateway interactions -3. **Topology variety** — star/mesh/line/ring match real deployment patterns -4. **Swarm-level assertions** — validate collective behavior, not just individual nodes -5. **Preset library** — quick CI smoke tests and thorough manual validation -6. **Reproducible** — YAML configs are version-controlled and shareable +1. **Declarative testing** - define swarm topology in YAML, not shell scripts +2. **Role-based nodes** - test coordinator/sensor/gateway interactions +3. **Topology variety** - star/mesh/line/ring match real deployment patterns +4. **Swarm-level assertions** - validate collective behavior, not just individual nodes +5. **Preset library** - quick CI smoke tests and thorough manual validation +6. **Reproducible** - YAML configs are version-controlled and shareable ### Limitations 1. **Still requires root** for TAP bridge topologies (star, line, ring); mesh can use SLIRP -2. **QEMU resource usage** — 6+ QEMU instances use ~2GB RAM, may slow CI runners -3. **No real RF** — inter-node communication is IP-based, not WiFi CSI multipath +2. 
**QEMU resource usage** - 6+ QEMU instances use ~2GB RAM, may slow CI runners +3. **No real RF** - inter-node communication is IP-based, not WiFi CSI multipath ## References diff --git a/docs/adr/ADR-063-mmwave-sensor-fusion.md b/docs/adr/ADR-063-mmwave-sensor-fusion.md index 9e501b6a7..362e4bb57 100644 --- a/docs/adr/ADR-063-mmwave-sensor-fusion.md +++ b/docs/adr/ADR-063-mmwave-sensor-fusion.md @@ -7,7 +7,7 @@ ## Context -RuView currently senses the environment using WiFi CSI — a passive technique that analyzes how WiFi signals are disturbed by human presence and movement. While this works through walls and requires no line of sight, CSI-derived vital signs (breathing rate, heart rate) are inherently noisy because they rely on phase extraction from multipath-rich WiFi channels. +RuView currently senses the environment using WiFi CSI - a passive technique that analyzes how WiFi signals are disturbed by human presence and movement. While this works through walls and requires no line of sight, CSI-derived vital signs (breathing rate, heart rate) are inherently noisy because they rely on phase extraction from multipath-rich WiFi channels. A complementary sensing modality exists: **60 GHz mmWave radar** modules (e.g., Seeed MR60BHA2) that use active FMCW radar at 60 GHz to measure breathing and heart rate with clinical-grade accuracy. These modules are inexpensive (~$15), run on ESP32-C6/C3, and output structured vital signs over UART. 
@@ -28,7 +28,7 @@ Fusing WiFi CSI with mmWave radar creates a sensor system that is greater than t | Capability | WiFi CSI Alone | mmWave Alone | Fused | |-----------|---------------|-------------|-------| -| Through-wall sensing | Yes (5m+) | No (LoS only, ~3m) | Yes — CSI for room-scale, mmWave for precision | +| Through-wall sensing | Yes (5m+) | No (LoS only, ~3m) | Yes - CSI for room-scale, mmWave for precision | | Heart rate accuracy | ±5-10 BPM | ±1-2 BPM | ±1-2 BPM (mmWave primary, CSI cross-validates) | | Breathing accuracy | ±2-3 BPM | ±0.5 BPM | ±0.5 BPM | | Presence detection | Good (adaptive threshold) | Excellent (range-gated) | Excellent + through-wall | @@ -44,11 +44,11 @@ The RuVector v2.0.4 stack (already integrated per ADR-016) provides the signal p | RuVector Component | Role in mmWave Fusion | |-------------------|----------------------| -| `ruvector-attention` (`bvp.rs`) | Blood Volume Pulse estimation — mmWave heart rate can calibrate the WiFi CSI BVP phase extraction | -| `ruvector-temporal-tensor` (`breathing.rs`) | Breathing rate estimation — mmWave provides ground-truth for adaptive filter tuning | -| `ruvector-solver` (`triangulation.rs`) | Multilateration — mmWave range-gated distance + CSI amplitude = 3D position | -| `ruvector-attn-mincut` (`spectrogram.rs`) | Time-frequency decomposition — mmWave Doppler complements CSI phase spectrogram | -| `ruvector-mincut` (`metrics.rs`, DynamicPersonMatcher) | Multi-person association — mmWave target IDs help disambiguate CSI subcarrier clusters | +| `ruvector-attention` (`bvp.rs`) | Blood Volume Pulse estimation - mmWave heart rate can calibrate the WiFi CSI BVP phase extraction | +| `ruvector-temporal-tensor` (`breathing.rs`) | Breathing rate estimation - mmWave provides ground-truth for adaptive filter tuning | +| `ruvector-solver` (`triangulation.rs`) | Multilateration - mmWave range-gated distance + CSI amplitude = 3D position | +| `ruvector-attn-mincut` (`spectrogram.rs`) | Time-frequency 
decomposition - mmWave Doppler complements CSI phase spectrogram | +| `ruvector-mincut` (`metrics.rs`, DynamicPersonMatcher) | Multi-person association - mmWave target IDs help disambiguate CSI subcarrier clusters | ### RuvSense Integration Points @@ -60,7 +60,7 @@ The RuvSense multistatic sensing pipeline (ADR-029) gains new capabilities: | `longitudinal.rs` (Welford stats) | mmWave vitals as reference signal for CSI drift detection | | `intention.rs` (pre-movement) | mmWave micro-Doppler detects pre-movement 100-200ms earlier than CSI | | `adversarial.rs` (consistency check) | mmWave provides independent signal to detect CSI spoofing/anomalies | -| `coherence_gate.rs` | mmWave presence as additional gate input — if mmWave says "no person", CSI coherence gate rejects | +| `coherence_gate.rs` | mmWave presence as additional gate input - if mmWave says "no person", CSI coherence gate rejects | ### Cross-Viewpoint Fusion Integration @@ -108,24 +108,24 @@ Add 60 GHz mmWave radar sensor support to the RuView firmware and sensing pipeli ### Three Deployment Modes -**Mode 1: Standalone CSI (existing)** — ESP32-S3 only, WiFi CSI sensing. +**Mode 1: Standalone CSI (existing)** - ESP32-S3 only, WiFi CSI sensing. -**Mode 2: Standalone mmWave** — ESP32-C6 + MR60BHA2, precise vitals in a single room. +**Mode 2: Standalone mmWave** - ESP32-C6 + MR60BHA2, precise vitals in a single room. -**Mode 3: Fused (recommended)** — ESP32-S3 + mmWave module on UART, or two separate nodes with server-side fusion. +**Mode 3: Fused (recommended)** - ESP32-S3 + mmWave module on UART, or two separate nodes with server-side fusion. ### Auto-Detection Protocol The firmware will auto-detect connected mmWave modules at boot: -1. **UART probe** — On configured UART pins, send the MR60BHA2 identification command (`0x01 0x01 0x00 0x01 ...`) and check for valid response header -2. **Protocol detection** — Identify the sensor family: +1. 
**UART probe** - On configured UART pins, send the MR60BHA2 identification command (`0x01 0x01 0x00 0x01 ...`) and check for valid response header +2. **Protocol detection** - Identify the sensor family: - Seeed MR60BHA2 (breathing + heart rate) - Seeed MR60FDA1 (fall detection) - Seeed MR24HPC1 (presence + light sleep/deep sleep) - HLK-LD2410 (presence + distance) - HLK-LD2450 (multi-target tracking) -3. **Capability registration** — Register detected sensor capabilities in the edge config: +3. **Capability registration** - Register detected sensor capabilities in the edge config: ```c typedef struct { @@ -245,7 +245,7 @@ python provision.py --port COM7 \ - Near-zero false positive fall detection (dual-confirm) - Clinical-grade vital signs when mmWave is present, with CSI as fallback - Self-calibrating CSI pipeline using mmWave ground truth -- Backward compatible — existing CSI-only nodes work unchanged +- Backward compatible - existing CSI-only nodes work unchanged - Low incremental cost (~$3-15 per mmWave module) - Auto-detection means zero configuration for supported sensors - RuVector attention/solver/temporal-tensor modules gain a high-quality reference signal diff --git a/docs/adr/ADR-064-multimodal-ambient-intelligence.md b/docs/adr/ADR-064-multimodal-ambient-intelligence.md index b393230df..032503399 100644 --- a/docs/adr/ADR-064-multimodal-ambient-intelligence.md +++ b/docs/adr/ADR-064-multimodal-ambient-intelligence.md @@ -1,4 +1,4 @@ -# ADR-064: Multimodal Ambient Intelligence — WiFi CSI + mmWave + Environmental Sensors +# ADR-064: Multimodal Ambient Intelligence - WiFi CSI + mmWave + Environmental Sensors **Status:** Proposed **Date:** 2026-03-15 @@ -13,7 +13,7 @@ With ADR-063 we demonstrated real-time fusion of WiFi CSI (ESP32-S3, COM7) and 6 - **WiFi CSI**: Channel 5, RSSI -41, 20+ Hz frame rate, through-wall coverage - **BH1750**: Ambient light 0.0-0.7 lux (room darkness level) -This ADR explores the full spectrum of what becomes possible when these 
modalities are combined — from immediately practical applications to speculative research directions. +This ADR explores the full spectrum of what becomes possible when these modalities are combined - from immediately practical applications to speculative research directions. --- @@ -90,7 +90,7 @@ mmWave: height drop > 50cm in <1s ──┘ → CONFIRMED FALL (call 911) | Turning stability | CSI + mmWave | Difficulty turning = Parkinson's indicator | | Get-up time | mmWave (sit→stand) | Timed Up and Go (TUG) test, contactless | -**Clinical value:** Gait velocity is called the "sixth vital sign" — it predicts hospitalization, cognitive decline, and mortality. Currently requires a $10,000 GAITRite mat. A $24 sensor node replaces it. +**Clinical value:** Gait velocity is called the "sixth vital sign" - it predicts hospitalization, cognitive decline, and mortality. Currently requires a $10,000 GAITRite mat. A $24 sensor node replaces it. ### 2.2 Emotion and Stress Detection via Micro-Vitals @@ -128,7 +128,7 @@ mmWave: height drop > 50cm in <1s ──┘ → CONFIRMED FALL (call 911) **CSI adds:** Cough detection (sudden phase disturbance pattern), movement reduction (malaise indicator). -**Longitudinal tracking** via `ruvsense/longitudinal.rs` (Welford stats, biomechanics drift detection) — the system learns your normal breathing pattern and alerts on deviations. +**Longitudinal tracking** via `ruvsense/longitudinal.rs` (Welford stats, biomechanics drift detection) - the system learns your normal breathing pattern and alerts on deviations. ### 2.5 Multi-Room Activity Recognition @@ -166,7 +166,7 @@ Front door (CSI): motion pattern = leaving/arriving **Feasibility:** Academic papers demonstrate ±10 mmHg accuracy in controlled settings. Far from clinical grade but useful for trending. -### 3.3 RF Tomography — 3D Occupancy Imaging +### 3.3 RF Tomography - 3D Occupancy Imaging **Method:** Multiple CSI nodes form a tomographic array. Each TX-RX pair measures signal attenuation. 
Inverse problem (ISTA L1 solver, already in `ruvsense/tomography.rs`) reconstructs a 3D voxel grid of where absorbers (people) are. @@ -185,7 +185,7 @@ mmWave targets (precise range, cm resolution) ─────┘ → 10cm 3D o - CSI captures the gross arm trajectory of each sign - mmWave captures the finger configuration at the pause point - AETHER contrastive embeddings (`ADR-024`) learn to map (CSI phase sequence, mmWave Doppler) → sign label -- No camera required — works in the dark, preserves privacy +- No camera required - works in the dark, preserves privacy **Training data:** Record CSI + mmWave while performing signs with a camera as ground truth, then deploy camera-free. @@ -222,7 +222,7 @@ mmWave targets (precise range, cm resolution) ─────┘ → 10cm 3D o **Application:** Early warning for structural degradation in bridges, tunnels, old buildings. -### 3.8 Swarm Sensing — Emergent Spatial Awareness +### 3.8 Swarm Sensing - Emergent Spatial Awareness **50+ nodes across a building:** @@ -231,7 +231,7 @@ Each node runs local edge intelligence (ADR-039). The `hive-mind` consensus syst - **Flow detection:** Track how people move between rooms over time - **Anomaly detection:** "This hallway usually has 5 people/hour but had 0 today" - **Emergency routing:** During fire, track which exits are blocked (no movement) vs available -- **Crowd density:** Concert/stadium safety — detect dangerous compression zones through walls +- **Crowd density:** Concert/stadium safety - detect dangerous compression zones through walls --- @@ -261,14 +261,14 @@ Based on published lucid dreaming induction research (e.g., LaBerge's MILD techn WiFi signals pass through plant tissue differently based on water content. 
- CSI amplitude through a greenhouse changes as plants absorb/release water -- mmWave reflects off leaf surfaces — micro-displacement from growth +- mmWave reflects off leaf surfaces - micro-displacement from growth - Long-term CSI drift correlates with biomass increase Academic proof-of-concept: "Sensing Plant Water Content Using WiFi Signals" (2023). ### 4.4 Pet Behavior Analysis -- CSI detects pet movement patterns (different phase signature than humans — lower, faster) +- CSI detects pet movement patterns (different phase signature than humans - lower, faster) - mmWave detects breathing rate (pets have higher BR than humans) - System learns pet's daily routine and alerts on deviations (lethargy, pacing, not eating) @@ -318,10 +318,10 @@ Document these possibilities as the product roadmap for the RuView multimodal am ### Negative - Clinical applications (arrhythmia, blood pressure) require medical device validation -- Privacy concerns scale with capability — need clear data retention policies +- Privacy concerns scale with capability - need clear data retention policies - Some exotic applications may attract scrutiny (surveillance concerns) ### Risk Mitigation -- All processing happens on-device (edge) — no cloud, no recordings by default -- No cameras — signal-based sensing preserves visual privacy -- Open source — users can audit exactly what is sensed and transmitted +- All processing happens on-device (edge) - no cloud, no recordings by default +- No cameras - signal-based sensing preserves visual privacy +- Open source - users can audit exactly what is sensed and transmitted diff --git a/docs/adr/ADR-065-happiness-scoring-seed-bridge.md b/docs/adr/ADR-065-happiness-scoring-seed-bridge.md index 9c5ce5d50..55de7e0f8 100644 --- a/docs/adr/ADR-065-happiness-scoring-seed-bridge.md +++ b/docs/adr/ADR-065-happiness-scoring-seed-bridge.md @@ -1,4 +1,4 @@ -# ADR-065: Hotel Guest Happiness Scoring -- WiFi CSI + Cognitum Seed Bridge +# ADR-065: Hotel Guest Happiness 
Scoring - WiFi CSI + Cognitum Seed Bridge **Status:** Proposed **Date:** 2026-03-20 @@ -7,7 +7,7 @@ ## Context -Hotels lack objective, privacy-preserving methods to measure guest satisfaction in real time. Current approaches (post-stay surveys, NPS scores) are delayed, biased toward extremes, and capture less than 10% of guests. Meanwhile, ambient RF sensing can infer behavioral cues that correlate with comfort and well-being -- without cameras, wearables, or any guest interaction. +Hotels lack objective, privacy-preserving methods to measure guest satisfaction in real time. Current approaches (post-stay surveys, NPS scores) are delayed, biased toward extremes, and capture less than 10% of guests. Meanwhile, ambient RF sensing can infer behavioral cues that correlate with comfort and well-being - without cameras, wearables, or any guest interaction. ### Hardware @@ -16,7 +16,7 @@ Two ESP32-S3 variants are deployed: | Device | Flash | PSRAM | MAC | Port | Notes | |--------|-------|-------|-----|------|-------| | ESP32-S3 (QFN56 rev 0.2) | 4 MB | 2 MB | 1C:DB:D4:83:D2:40 | COM5 | Budget node, uses `sdkconfig.defaults.4mb` + `partitions_4mb.csv` | -| ESP32-S3 | 8 MB | 8 MB | -- | COM7 | Full-featured node, existing deployment | +| ESP32-S3 | 8 MB | 8 MB | - | COM7 | Full-featured node, existing deployment | Both run the Tier 2 DSP firmware with presence detection, vitals extraction, fall detection, and gait analysis. @@ -217,12 +217,12 @@ Key Cognitum Seed endpoints used: ### Positive - Provides real-time, objective guest satisfaction measurement without surveys or wearables. -- Reuses four existing WASM modules -- the happiness module is a fusion layer, not a rewrite. +- Reuses four existing WASM modules - the happiness module is a fusion layer, not a rewrite. - The Seed's 8-dim vector store is a natural fit; no schema changes needed. - Ed25519 witness chain satisfies hospitality industry audit requirements and GDPR record-keeping. 
- Both 4MB and 8MB ESP32-S3 variants are supported, enabling low-cost deployment at scale (~$8 per room for the 4MB node). - Seed's environmental sensors (BME280, PIR) provide complementary context (room temperature, humidity) that can be correlated with happiness scores. -- No cloud dependency -- all processing is local (ESP32 edge + Seed link-local network). +- No cloud dependency - all processing is local (ESP32 edge + Seed link-local network). ### Negative diff --git a/docs/adr/ADR-066-esp32-swarm-seed-coordinator.md b/docs/adr/ADR-066-esp32-swarm-seed-coordinator.md index 9ef3ee0ec..285688ab7 100644 --- a/docs/adr/ADR-066-esp32-swarm-seed-coordinator.md +++ b/docs/adr/ADR-066-esp32-swarm-seed-coordinator.md @@ -10,13 +10,13 @@ ADR-065 established a single ESP32-S3 node pushing happiness vectors to a Cognitum Seed at `169.254.42.1` (Pi Zero 2 W, firmware 0.7.0). The Seed is now on the same WiFi network (`RedCloverWifi`, `10.1.10.236`) as the ESP32 node (`10.1.10.168`). The Seed already exposes REST APIs for: -- Peer discovery (`/api/v1/peers`) — 0 peers currently registered -- Delta sync (`/api/v1/delta/pull`, `/api/v1/delta/push`) — epoch-based replication -- Reflex rules (`/api/v1/sensor/reflex/rules`) — 3 rules (fragility alarm, drift cutoff, HD anomaly indicator) -- Actuators (`/api/v1/sensor/actuators`) — relay + PWM outputs -- Cognitive engine (`/api/v1/cognitive/tick`) — periodic inference loop -- Witness chain (`/api/v1/custody/epoch`) — epoch 316, cryptographically signed -- kNN search (`/api/v1/store/search`) — similarity queries across the full vector store +- Peer discovery (`/api/v1/peers`) - 0 peers currently registered +- Delta sync (`/api/v1/delta/pull`, `/api/v1/delta/push`) - epoch-based replication +- Reflex rules (`/api/v1/sensor/reflex/rules`) - 3 rules (fragility alarm, drift cutoff, HD anomaly indicator) +- Actuators (`/api/v1/sensor/actuators`) - relay + PWM outputs +- Cognitive engine (`/api/v1/cognitive/tick`) - periodic inference loop 
+- Witness chain (`/api/v1/custody/epoch`) - epoch 316, cryptographically signed +- kNN search (`/api/v1/store/search`) - similarity queries across the full vector store A hotel deployment requires multiple ESP32 nodes (lobby, hallway, restaurant, rooms) coordinated as a swarm with centralized analytics on the Seed. @@ -190,9 +190,9 @@ HTTP client uses `esp_http_client` (already in ESP-IDF, no extra dependencies). Nodes find the Seed via: -1. **NVS provisioned URL** (primary) — `provision.py --seed-url http://10.1.10.236` -2. **mDNS fallback** — Seed advertises `_cognitum._tcp.local`; ESP32 resolves `cognitum.local` -3. **Link-local fallback** — `http://169.254.42.1` when connected via USB +1. **NVS provisioned URL** (primary) - `provision.py --seed-url http://10.1.10.236` +2. **mDNS fallback** - Seed advertises `_cognitum._tcp.local`; ESP32 resolves `cognitum.local` +3. **Link-local fallback** - `http://169.254.42.1` when connected via USB ### Vector ID Scheme @@ -201,19 +201,19 @@ Nodes find the Seed via: ``` Examples: -- `1-reg` — Node 1 registration -- `1-hb-316` — Node 1 heartbeat at epoch 316 -- `1-h-316-1742486400000` — Node 1 happiness vector at epoch 316, timestamp T -- `2-h-316-1742486401000` — Node 2 happiness vector at same epoch +- `1-reg` - Node 1 registration +- `1-hb-316` - Node 1 heartbeat at epoch 316 +- `1-h-316-1742486400000` - Node 1 happiness vector at epoch 316, timestamp T +- `2-h-316-1742486401000` - Node 2 happiness vector at same epoch ### Witness Chain Integration Every vector ingested into the Seed increments the epoch and extends the witness chain. 
The chain provides: -- **Per-node audit trail** — filter by node_id metadata to get one node's history -- **Tamper detection** — Ed25519 signed, hash-chained; break = detectable -- **Regulatory compliance** — prove "sensor X reported Y at time Z" for disputes -- **Cross-node ordering** — Seed epoch gives total order across all nodes +- **Per-node audit trail** - filter by node_id metadata to get one node's history +- **Tamper detection** - Ed25519 signed, hash-chained; break = detectable +- **Regulatory compliance** - prove "sensor X reported Y at time Z" for disputes +- **Cross-node ordering** - Seed epoch gives total order across all nodes ### Scaling Considerations @@ -249,26 +249,26 @@ python provision.py --port COM8 \ ### Positive -- **Zero infrastructure** — no cloud, no server, no database. Seed + ESP32s + WiFi router is the entire stack -- **Autonomous nodes** — each ESP32 runs full Tier 2 DSP independently; Seed loss degrades gracefully to local-only operation -- **Cryptographic audit** — witness chain gives tamper-proof history for every observation across all nodes -- **Real-time cross-zone analytics** — Seed kNN search answers "which zones are happy/stressed right now" in < 5 ms -- **Physical actuators** — Seed's relay/PWM outputs can trigger real-world actions (lights, alarms, displays) based on swarm-wide patterns -- **Horizontal scaling** — add ESP32 nodes by flashing firmware + running provision.py; no Seed reconfiguration needed -- **Privacy-preserving** — no cameras, no audio, no PII; only 8-dimensional feature vectors stored +- **Zero infrastructure** - no cloud, no server, no database. 
Seed + ESP32s + WiFi router is the entire stack +- **Autonomous nodes** - each ESP32 runs full Tier 2 DSP independently; Seed loss degrades gracefully to local-only operation +- **Cryptographic audit** - witness chain gives tamper-proof history for every observation across all nodes +- **Real-time cross-zone analytics** - Seed kNN search answers "which zones are happy/stressed right now" in < 5 ms +- **Physical actuators** - Seed's relay/PWM outputs can trigger real-world actions (lights, alarms, displays) based on swarm-wide patterns +- **Horizontal scaling** - add ESP32 nodes by flashing firmware + running provision.py; no Seed reconfiguration needed +- **Privacy-preserving** - no cameras, no audio, no PII; only 8-dimensional feature vectors stored ### Negative -- **Single point of aggregation** — Seed failure loses cross-zone analytics (nodes continue autonomously) -- **WiFi dependency** — nodes must be on the same network as the Seed; no mesh/LoRa fallback yet -- **HTTP overhead** — REST/JSON adds ~200 bytes overhead per vector vs raw binary UDP; acceptable at 5-second intervals -- **Pi Zero 2 W limits** — 512 MB RAM, single-core ARM; adequate for 20 nodes but not 100+ -- **No WASM OTA via Seed** — currently WASM modules are uploaded per-node; future work could use Seed as WASM distribution hub +- **Single point of aggregation** - Seed failure loses cross-zone analytics (nodes continue autonomously) +- **WiFi dependency** - nodes must be on the same network as the Seed; no mesh/LoRa fallback yet +- **HTTP overhead** - REST/JSON adds ~200 bytes overhead per vector vs raw binary UDP; acceptable at 5-second intervals +- **Pi Zero 2 W limits** - 512 MB RAM, single-core ARM; adequate for 20 nodes but not 100+ +- **No WASM OTA via Seed** - currently WASM modules are uploaded per-node; future work could use Seed as WASM distribution hub ### Future Work -- **Seed-initiated WASM push** — Seed distributes WASM modules to all nodes via their OTA endpoints -- **mDNS 
auto-discovery** — nodes find Seed without provisioned URL -- **Mesh fallback** — ESP-NOW peer-to-peer when WiFi is down -- **Multi-Seed federation** — multiple Seeds for multi-floor/multi-building deployments -- **Seed dashboard** — web UI on the Seed showing live swarm map with per-zone happiness +- **Seed-initiated WASM push** - Seed distributes WASM modules to all nodes via their OTA endpoints +- **mDNS auto-discovery** - nodes find Seed without provisioned URL +- **Mesh fallback** - ESP-NOW peer-to-peer when WiFi is down +- **Multi-Seed federation** - multiple Seeds for multi-floor/multi-building deployments +- **Seed dashboard** - web UI on the Seed showing live swarm map with per-zone happiness diff --git a/docs/adr/ADR-067-ruvector-v2.0.5-upgrade.md b/docs/adr/ADR-067-ruvector-v2.0.5-upgrade.md index a01a5f817..484955c6c 100644 --- a/docs/adr/ADR-067-ruvector-v2.0.5-upgrade.md +++ b/docs/adr/ADR-067-ruvector-v2.0.5-upgrade.md @@ -27,7 +27,7 @@ RuView currently pins all five core RuVector crates at **v2.0.4** (from crates.i ### What Changed Upstream (v2.0.4 → v2.0.5 → HEAD) **ruvector-mincut:** -- Flat capacity matrix + allocation reuse — **10-30% faster** for all min-cut operations +- Flat capacity matrix + allocation reuse - **10-30% faster** for all min-cut operations - Tier 2-3 Dynamic MinCut (ADR-124): Gomory-Hu tree construction for fast global min-cut, incremental edge insert/delete without full recomputation - Source-anchored canonical min-cut with SHA-256 witness hashing - Fixed: unsafe indexing removed, WASM Node.js panic from `std::time` @@ -40,10 +40,10 @@ RuView currently pins all five core RuVector crates at **v2.0.4** (from crates.i - Formatting fixes only (no API changes) **ruvector-gnn:** -- Panic replaced with `Result` in `MultiHeadAttention` and `RuvectorLayer` constructors (breaking improvement — safer) +- Panic replaced with `Result` in `MultiHeadAttention` and `RuvectorLayer` constructors (breaking improvement - safer) - Bumped to 
v2.0.5 -**sona (new — Self-Optimizing Neural Architecture):** +**sona (new - Self-Optimizing Neural Architecture):** - v0.1.6 → v0.1.8: state persistence (`loadState`/`saveState`), trajectory counter fix - Micro-LoRA and Base-LoRA for instant and background learning - EWC++ (Elastic Weight Consolidation) to prevent catastrophic forgetting @@ -69,11 +69,11 @@ RuView currently pins all five core RuVector crates at **v2.0.4** (from crates.i Bump the 5 core crates from v2.0.4 to v2.0.5 in the workspace `Cargo.toml`: ```toml -ruvector-mincut = "2.0.5" # was 2.0.4 — 10-30% faster, safer -ruvector-attn-mincut = "2.0.5" # was 2.0.4 — workspace versioning -ruvector-temporal-tensor = "2.0.5" # was 2.0.4 — fmt only -ruvector-solver = "2.0.5" # was 2.0.4 — workspace versioning -ruvector-attention = "2.0.5" # was 2.0.4 — workspace versioning +ruvector-mincut = "2.0.5" # was 2.0.4 - 10-30% faster, safer +ruvector-attn-mincut = "2.0.5" # was 2.0.4 - workspace versioning +ruvector-temporal-tensor = "2.0.5" # was 2.0.4 - fmt only +ruvector-solver = "2.0.5" # was 2.0.4 - workspace versioning +ruvector-attention = "2.0.5" # was 2.0.4 - workspace versioning ``` **Expected impact:** The mincut performance improvement directly benefits `signal/subcarrier.rs` which runs subcarrier graph partitioning every tick. 10-30% faster partitioning reduces per-frame CPU cost. @@ -82,9 +82,9 @@ ruvector-attention = "2.0.5" # was 2.0.4 — workspace versioning Add `ruvector-coherence` with `spectral` feature to `wifi-densepose-ruvector`: -**Use case:** Replace or augment the custom phase coherence logic in `viewpoint/coherence.rs` with spectral graph coherence scoring. The current implementation uses phasor magnitude for phase coherence — spectral Fiedler estimation would provide a more robust measure of multi-node CSI consistency, especially for detecting when a node's signal quality degrades. 
+**Use case:** Replace or augment the custom phase coherence logic in `viewpoint/coherence.rs` with spectral graph coherence scoring. The current implementation uses phasor magnitude for phase coherence - spectral Fiedler estimation would provide a more robust measure of multi-node CSI consistency, especially for detecting when a node's signal quality degrades. -**Integration point:** `viewpoint/coherence.rs` — add `SpectralCoherenceScore` as a secondary coherence metric alongside existing phase phasor coherence. Use spectral gap estimation to detect structural changes in the multi-node CSI graph (e.g., a node dropping out or a new reflector appearing). +**Integration point:** `viewpoint/coherence.rs` - add `SpectralCoherenceScore` as a secondary coherence metric alongside existing phase phasor coherence. Use spectral gap estimation to detect structural changes in the multi-node CSI graph (e.g., a node dropping out or a new reflector appearing). ### Phase 3: Add SONA for Adaptive Learning (High Value) @@ -93,10 +93,10 @@ Replace the logistic regression adaptive classifier in the sensing server with a **Current state:** The sensing server's adaptive training (`POST /api/v1/adaptive/train`) uses a hand-rolled logistic regression on 15 CSI features. It requires explicit labeled recordings and provides no cross-session persistence. **Proposed improvement:** Use `sona::SonaEngine` to: -1. **Learn from implicit feedback** — trajectory tracking on person-count decisions (was the count stable? did the user correct it?) -2. **Persist across sessions** — `saveState()`/`loadState()` replaces the current `adaptive_model.json` -3. **Pattern matching** — `find_patterns()` enables "this CSI signature looks like room X where we learned Y" -4. **Prevent forgetting** — EWC++ ensures learning in a new room doesn't overwrite patterns from previous rooms +1. **Learn from implicit feedback** - trajectory tracking on person-count decisions (was the count stable? did the user correct it?) 
+2. **Persist across sessions** - `saveState()`/`loadState()` replaces the current `adaptive_model.json` +3. **Pattern matching** - `find_patterns()` enables "this CSI signature looks like room X where we learned Y" +4. **Prevent forgetting** - EWC++ ensures learning in a new room doesn't overwrite patterns from previous rooms **Integration point:** New `adaptive_sona.rs` module in `wifi-densepose-sensing-server`, behind a `sona` feature flag. The existing logistic regression remains the default. @@ -109,34 +109,34 @@ Replace the logistic regression adaptive classifier in the sensing server with a - Transfer learning: embeddings learned in one room partially transfer to similar rooms - Quantized storage: 4-32x memory reduction for pattern databases -**Status:** Exploratory — requires training data collection and embedding model design. Not a near-term target. +**Status:** Exploratory - requires training data collection and embedding model design. Not a near-term target. ## Consequences ### Positive - **Phase 1:** Free 10-30% performance gain in subcarrier partitioning. Security fixes (unsafe indexing, WASM panic). Zero API changes required. - **Phase 2:** More robust multi-node coherence detection. Helps with the "flickering persons" issue (#292) by providing a second opinion on signal quality. -- **Phase 3:** Fundamentally improves the adaptive learning pipeline. Users no longer need to manually record labeled data — the system learns from ongoing use. +- **Phase 3:** Fundamentally improves the adaptive learning pipeline. Users no longer need to manually record labeled data - the system learns from ongoing use. - **Phase 4:** Path toward real ML-based detection instead of heuristic thresholds. ### Negative -- **Phase 1:** Minimal risk — semver minor bump, no API breaks. +- **Phase 1:** Minimal risk - semver minor bump, no API breaks. - **Phase 2:** Adds a dependency. 
Spectral computation has O(n) cost per tick for Fiedler estimation (n = number of subcarriers, typically 56-128). Acceptable. - **Phase 3:** SONA adds ~200KB to the binary. The learning loop needs careful tuning to avoid adapting to noise. - **Phase 4:** Requires significant research and training data. Not guaranteed to outperform tuned heuristics for WiFi CSI. ### Risks -- `ruvector-gnn` v2.0.5 changed constructors from panic to `Result` — any existing `crv` feature users need to handle the `Result`. Our vendored `ruvector-crv` may need updates. -- SONA's WASM support is experimental — keep it behind a feature flag until validated. +- `ruvector-gnn` v2.0.5 changed constructors from panic to `Result` - any existing `crv` feature users need to handle the `Result`. Our vendored `ruvector-crv` may need updates. +- SONA's WASM support is experimental - keep it behind a feature flag until validated. ## Implementation Plan | Phase | Scope | Effort | Priority | |-------|-------|--------|----------| -| 1 | Bump 5 crates to v2.0.5 | 1 hour | High — free perf + security | -| 2 | Add ruvector-coherence | 1 day | Medium — improves multi-node stability | -| 3 | SONA adaptive learning | 3 days | Medium — replaces manual training workflow | -| 4 | CSI embeddings via ruvector-core | 1-2 weeks | Low — exploratory research | +| 1 | Bump 5 crates to v2.0.5 | 1 hour | High - free perf + security | +| 2 | Add ruvector-coherence | 1 day | Medium - improves multi-node stability | +| 3 | SONA adaptive learning | 3 days | Medium - replaces manual training workflow | +| 4 | CSI embeddings via ruvector-core | 1-2 weeks | Low - exploratory research | ## Vendor Submodule diff --git a/docs/adr/README.md b/docs/adr/README.md index c05ac5823..916af6d4c 100644 --- a/docs/adr/README.md +++ b/docs/adr/README.md @@ -8,28 +8,28 @@ Building a system that turns WiFi signals into human pose estimation involves hu ADRs capture the **context**, **options considered**, **decision made**, and 
**consequences** for each of these choices. They serve three purposes: -1. **Institutional memory** — Six months from now, anyone (human or AI) can read *why* we chose IIR bandpass filters over FIR for vital sign extraction, not just see the code. +1. **Institutional memory** - Six months from now, anyone (human or AI) can read *why* we chose IIR bandpass filters over FIR for vital sign extraction, not just see the code. -2. **AI-assisted development** — When an AI agent works on this codebase, ADRs give it the constraints and rationale it needs to make changes that align with the existing architecture. Without them, AI-generated code tends to drift — reinventing patterns that already exist, contradicting earlier decisions, or optimizing for the wrong tradeoffs. +2. **AI-assisted development** - When an AI agent works on this codebase, ADRs give it the constraints and rationale it needs to make changes that align with the existing architecture. Without them, AI-generated code tends to drift - reinventing patterns that already exist, contradicting earlier decisions, or optimizing for the wrong tradeoffs. -3. **Review checkpoints** — Each ADR is a reviewable artifact. When a proposed change touches the architecture, the ADR forces the author to articulate tradeoffs *before* writing code, not after. +3. **Review checkpoints** - Each ADR is a reviewable artifact. When a proposed change touches the architecture, the ADR forces the author to articulate tradeoffs *before* writing code, not after. ### ADRs and Domain-Driven Design -The project uses [Domain-Driven Design](../ddd/) (DDD) to organize code into bounded contexts — each with its own language, types, and responsibilities. ADRs and DDD work together: +The project uses [Domain-Driven Design](../ddd/) (DDD) to organize code into bounded contexts - each with its own language, types, and responsibilities. 
ADRs and DDD work together: - **ADRs define boundaries**: ADR-029 (RuvSense) established multistatic sensing as a separate bounded context from single-node CSI. ADR-042 (CHCI) defined a new aggregate root for coherent channel imaging. - **DDD models define the language**: The [RuvSense domain model](../ddd/ruvsense-domain-model.md) defines terms like "coherence gate", "dwell time", and "TDM slot" that ADRs reference precisely. -- **Together they prevent drift**: An AI agent reading ADR-039 knows that edge processing tiers are configured via NVS keys, not compile-time flags — because the ADR says so. The DDD model tells it which aggregate owns that configuration. +- **Together they prevent drift**: An AI agent reading ADR-039 knows that edge processing tiers are configured via NVS keys, not compile-time flags - because the ADR says so. The DDD model tells it which aggregate owns that configuration. ### How ADRs are structured Each ADR follows a consistent format: -- **Context** — What problem or gap prompted this decision -- **Decision** — What we chose to do and how -- **Consequences** — What improved, what got harder, and what risks remain -- **References** — Related ADRs, papers, and code paths +- **Context** - What problem or gap prompted this decision +- **Decision** - What we chose to do and how +- **Consequences** - What improved, what got harder, and what risks remain +- **References** - Related ADRs, papers, and code paths Statuses: **Proposed** (under discussion), **Accepted** (approved and/or implemented), **Superseded** (replaced by a later ADR). 
@@ -110,6 +110,6 @@ Statuses: **Proposed** (under discussion), **Accepted** (approved and/or impleme ## Related -- [DDD Domain Models](../ddd/) — Bounded context definitions, aggregate roots, and ubiquitous language -- [User Guide](../user-guide.md) — Setup, API reference, and hardware instructions -- [Build Guide](../build-guide.md) — Building from source +- [DDD Domain Models](../ddd/) - Bounded context definitions, aggregate roots, and ubiquitous language +- [User Guide](../user-guide.md) - Setup, API reference, and hardware instructions +- [Build Guide](../build-guide.md) - Building from source diff --git a/docs/build-guide.md b/docs/build-guide.md index 679c958ec..484b633c8 100644 --- a/docs/build-guide.md +++ b/docs/build-guide.md @@ -1,12 +1,12 @@ # WiFi-DensePose Build and Run Guide -Covers every way to build, run, and deploy the system -- from a zero-hardware verification to a full ESP32 mesh with 3D visualization. +Covers every way to build, run, and deploy the system - from a zero-hardware verification to a full ESP32 mesh with 3D visualization. --- ## Table of Contents -1. [Quick Start (Verification Only -- No Hardware)](#1-quick-start-verification-only----no-hardware) +1. [Quick Start (Verification Only - No Hardware)](#1-quick-start-verification-only---no-hardware) 2. [Python Pipeline (v1/)](#2-python-pipeline-v1) 3. [Rust Pipeline (v2)](#3-rust-pipeline-v2) 4. [Three.js Visualization](#4-threejs-visualization) @@ -16,7 +16,7 @@ Covers every way to build, run, and deploy the system -- from a zero-hardware ve --- -## 1. Quick Start (Verification Only -- No Hardware) +## 1. Quick Start (Verification Only - No Hardware) The fastest way to confirm the signal processing pipeline is real and deterministic. Requires only Python 3.8+, numpy, and scipy. No WiFi hardware, no GPU, no Docker. @@ -27,14 +27,14 @@ The fastest way to confirm the signal processing pipeline is real and determinis This runs three phases: -1.
**Environment checks** -- confirms Python, numpy, scipy, and proof files are present. -2. **Proof pipeline replay** -- feeds a published reference signal through the full signal processing chain (noise filtering, Hamming windowing, amplitude normalization, FFT-based Doppler extraction, power spectral density via scipy.fft) and computes a SHA-256 hash of the output. -3. **Production code integrity scan** -- scans `v1/src/` for `np.random.rand` / `np.random.randn` calls in production code (test helpers are excluded). +1. **Environment checks** - confirms Python, numpy, scipy, and proof files are present. +2. **Proof pipeline replay** - feeds a published reference signal through the full signal processing chain (noise filtering, Hamming windowing, amplitude normalization, FFT-based Doppler extraction, power spectral density via scipy.fft) and computes a SHA-256 hash of the output. +3. **Production code integrity scan** - scans `v1/src/` for `np.random.rand` / `np.random.randn` calls in production code (test helpers are excluded). 
Exit codes: -- `0` PASS -- pipeline hash matches the published expected hash -- `1` FAIL -- hash mismatch or error -- `2` SKIP -- no expected hash file to compare against +- `0` PASS - pipeline hash matches the published expected hash +- `1` FAIL - hash mismatch or error +- `2` SKIP - no expected hash file to compare against Additional flags: @@ -79,7 +79,7 @@ The Python pipeline lives under `v1/` and provides the full API server, signal p - Python 3.8+ - pip -### Install (verification-only -- lightweight) +### Install (verification-only - lightweight) ```bash pip install -r v1/requirements-lock.txt @@ -122,7 +122,7 @@ For development with auto-reload: uvicorn v1.src.api.main:app --host 0.0.0.0 --port 8000 --reload ``` -### Run with commodity WiFi (RSSI sensing -- no custom hardware) +### Run with commodity WiFi (RSSI sensing - no custom hardware) The commodity sensing module (`v1/src/sensing/`) extracts presence and motion features from standard Linux WiFi metrics (RSSI, noise floor, link quality) without any hardware modification. See [ADR-013](adr/ADR-013-feature-level-sensing-commodity-gear.md) for full design details. 
@@ -132,9 +132,9 @@ Requirements: - No root required for basic RSSI reading via `/proc/net/wireless` The module provides: -- `LinuxWifiCollector` -- reads real RSSI from `/proc/net/wireless` and `iw` commands -- `RssiFeatureExtractor` -- computes rolling statistics, FFT spectral features, CUSUM change-point detection -- `PresenceClassifier` -- rule-based presence/motion classification +- `LinuxWifiCollector` - reads real RSSI from `/proc/net/wireless` and `iw` commands +- `RssiFeatureExtractor` - computes rolling statistics, FFT spectral features, CUSUM change-point detection +- `PresenceClassifier` - rule-based presence/motion classification What it can detect: | Capability | Single Receiver | 3+ Receivers | @@ -219,7 +219,7 @@ Expected throughput: | CSI Preprocessing (4x64) | ~5.19 us | 49-66 Melem/s | | Phase Sanitization (4x64) | ~3.84 us | 67-85 Melem/s | | Feature Extraction (4x64) | ~9.03 us | 7-11 Melem/s | -| Motion Detection | ~186 ns | -- | +| Motion Detection | ~186 ns | - | | Full Pipeline | ~18.47 us | ~54,000 fps | ### Workspace crates @@ -298,12 +298,12 @@ docker compose up ``` This starts: -- `wifi-densepose-dev` -- API server with `--reload`, debug logging, auth disabled (port 8000) -- `postgres` -- PostgreSQL 15 (port 5432) -- `redis` -- Redis 7 with AOF persistence (port 6379) -- `prometheus` -- metrics scraping (port 9090) -- `grafana` -- dashboards (port 3000, login: admin/admin) -- `nginx` -- reverse proxy (ports 80, 443) +- `wifi-densepose-dev` - API server with `--reload`, debug logging, auth disabled (port 8000) +- `postgres` - PostgreSQL 15 (port 5432) +- `redis` - Redis 7 with AOF persistence (port 6379) +- `prometheus` - metrics scraping (port 9090) +- `grafana` - dashboards (port 3000, login: admin/admin) +- `nginx` - reverse proxy (ports 80, 443) ```bash # View logs @@ -385,7 +385,7 @@ The multi-stage `Dockerfile` provides four targets: Uses ESP32-S3 boards as WiFi CSI sensor nodes. 
See [ADR-012](adr/ADR-012-esp32-csi-sensor-mesh.md) for the full specification. -### Bill of Materials (Starter Kit -- $54) +### Bill of Materials (Starter Kit - $54) | Item | Qty | Unit Cost | Total | |------|-----|-----------|-------| @@ -461,7 +461,7 @@ Each node does on-device feature extraction (raw I/Q to amplitude + phase + spec ### Run the aggregator -The aggregator collects UDP streams from all ESP32 nodes, performs feature-level fusion (not signal-level -- see ADR-012 for why), and feeds the fused data into the Rust or Python pipeline. +The aggregator collects UDP streams from all ESP32 nodes, performs feature-level fusion (not signal-level - see ADR-012 for why), and feeds the fused data into the Rust or Python pipeline. ```bash # Start the aggregator and pipeline via Docker @@ -469,7 +469,7 @@ docker compose -f docker-compose.esp32.yml up # Or run the Rust aggregator directly cd rust-port/wifi-densepose-rs -cargo run --release --package wifi-densepose-hardware -- --mode esp32-aggregator --port 5000 +cargo run --release --package wifi-densepose-hardware -- --mode esp32-aggregator --port 5000 ``` ### Verify with real hardware @@ -522,7 +522,7 @@ cd rust-port/wifi-densepose-rs wasm-pack build crates/wifi-densepose-wasm --target web --release # Build with disaster response module included -wasm-pack build crates/wifi-densepose-wasm --target web --release -- --features mat +wasm-pack build crates/wifi-densepose-wasm --target web --release -- --features mat ``` The output `pkg/` directory contains `.wasm`, `.js` glue, and TypeScript definitions. Import in a web project: @@ -584,7 +584,7 @@ docker run -d -p 8000:8000 wifi-densepose:latest docker stack deploy -c docker-compose.prod.yml wifi-densepose ``` -### Server (Direct -- no Docker) +### Server (Direct - no Docker) ```bash # 1.
Install Python dependencies @@ -637,7 +637,7 @@ cd rust-port/wifi-densepose-rs cargo build # Run tests with output -cargo test --workspace -- --nocapture +cargo test --workspace -- --nocapture # Watch mode (requires cargo-watch) cargo install cargo-watch @@ -656,7 +656,7 @@ uvicorn v1.src.api.main:app --host 0.0.0.0 --port 8000 --reload # Terminal 2: Serve visualization python3 -m http.server 3000 --directory ui -# Open http://localhost:3000/viz.html -- it connects to ws://localhost:8000/ws/pose +# Open http://localhost:3000/viz.html - it connects to ws://localhost:8000/ws/pose ``` --- @@ -665,7 +665,7 @@ python3 -m http.server 3000 --directory ui | File | Purpose | |------|---------| -| `./verify` | Trust kill switch -- one-command pipeline proof | +| `./verify` | Trust kill switch - one-command pipeline proof | | `Makefile` | `make verify`, `make verify-verbose`, `make verify-audit` | | `v1/requirements-lock.txt` | Pinned Python deps for hash reproducibility | | `requirements.txt` | Full Python deps (API server, torch, etc.) | diff --git a/docs/ddd/README.md b/docs/ddd/README.md index a96a9b57b..b0b666f04 100644 --- a/docs/ddd/README.md +++ b/docs/ddd/README.md @@ -2,7 +2,7 @@ This folder contains Domain-Driven Design (DDD) specifications for each major subsystem in RuView. -DDD organizes the codebase around the problem being solved — not around technical layers. Each *bounded context* owns its own data, rules, and language. Contexts communicate through domain events, not by sharing mutable state. This makes the system easier to reason about, test, and extend — whether you're a person or an AI agent. +DDD organizes the codebase around the problem being solved - not around technical layers. Each *bounded context* owns its own data, rules, and language. Contexts communicate through domain events, not by sharing mutable state. This makes the system easier to reason about, test, and extend - whether you're a person or an AI agent.
## Models @@ -20,15 +20,15 @@ DDD organizes the codebase around the problem being solved — not around techni Each model defines: -- **Ubiquitous Language** — Terms with precise meanings used in both code and conversation -- **Bounded Contexts** — Independent subsystems with clear responsibilities and boundaries -- **Aggregates** — Clusters of objects that enforce business rules (e.g., a PoseTrack owns its keypoints) -- **Value Objects** — Immutable data with meaning (e.g., a CoherenceScore is not just a float) -- **Domain Events** — Things that happened that other contexts may care about -- **Invariants** — Rules that must always be true (e.g., "drift alert requires >2sigma for >3 days") -- **Anti-Corruption Layers** — Adapters that translate between contexts without leaking internals +- **Ubiquitous Language** - Terms with precise meanings used in both code and conversation +- **Bounded Contexts** - Independent subsystems with clear responsibilities and boundaries +- **Aggregates** - Clusters of objects that enforce business rules (e.g., a PoseTrack owns its keypoints) +- **Value Objects** - Immutable data with meaning (e.g., a CoherenceScore is not just a float) +- **Domain Events** - Things that happened that other contexts may care about +- **Invariants** - Rules that must always be true (e.g., "drift alert requires >2sigma for >3 days") +- **Anti-Corruption Layers** - Adapters that translate between contexts without leaking internals ## Related -- [Architecture Decision Records](../adr/README.md) — Why each technical choice was made -- [User Guide](../user-guide.md) — Setup and API reference +- [Architecture Decision Records](../adr/README.md) - Why each technical choice was made +- [User Guide](../user-guide.md) - Setup and API reference diff --git a/docs/ddd/chci-domain-model.md b/docs/ddd/chci-domain-model.md index b39785595..d25b4b844 100644 --- a/docs/ddd/chci-domain-model.md +++ b/docs/ddd/chci-domain-model.md @@ -7,18 +7,18 @@ | Term | Definition | 
|------|------------| | **Coherent Human Channel Imaging (CHCI)** | A purpose-built RF sensing protocol that uses phase-locked sounding, multi-band fusion, and cognitive waveform adaptation to reconstruct human body surfaces and physiological motion at sub-millimeter resolution | -| **Sounding Frame** | A deterministic OFDM transmission (NDP or custom burst) with known pilot structure, transmitted at fixed cadence for channel measurement — as opposed to passive CSI extracted from data traffic | +| **Sounding Frame** | A deterministic OFDM transmission (NDP or custom burst) with known pilot structure, transmitted at fixed cadence for channel measurement - as opposed to passive CSI extracted from data traffic | | **Phase Coherence** | The property of multiple radio nodes sharing a common phase reference, enabling complex-valued channel measurements without per-node LO drift correction | | **Reference Clock** | A shared oscillator (TCXO + PLL) distributed to all CHCI nodes via coaxial cable, providing both 40 MHz timing reference and in-band phase reference signal | | **Cognitive Waveform** | A sounding waveform whose parameters (cadence, bandwidth, band selection, power, subcarrier subset) adapt in real-time based on the current scene state inferred from the body model | -| **Diffraction Tomography** | Coherent reconstruction of body surface geometry from complex-valued channel responses across multiple node pairs and frequency bands — produces surface contours rather than volumetric opacity | +| **Diffraction Tomography** | Coherent reconstruction of body surface geometry from complex-valued channel responses across multiple node pairs and frequency bands - produces surface contours rather than volumetric opacity | | **Sensing Mode** | One of six operational states (IDLE, ALERT, ACTIVE, VITAL, GESTURE, SLEEP) that determine waveform parameters and processing pipeline configuration | | **Micro-Burst** | A very short (4–20 μs) deterministic OFDM symbol transmitted at 
high cadence (1–5 kHz) for maximizing Doppler resolution without full 802.11 frame overhead | | **Multi-Band Fusion** | Simultaneous sounding at 2.4 GHz and 5 GHz (optionally 6 GHz), fused as projections of the same latent motion field using body model priors as constraints | | **Displacement Floor** | The minimum detectable surface displacement at a given range, determined by phase noise, coherent averaging depth, and antenna count: δ_min = λ/(4π) × σ_φ/√(N_ant × N_avg) | -| **Channel Contrast** | The ratio of complex channel response with human present to the empty-room reference response — the input to diffraction tomography | -| **Coherence Delta** | The change in phase coherence metric between consecutive observation windows — the trigger signal for cognitive waveform transitions | -| **NDP** | Null Data PPDU — an 802.11bf-standard sounding frame containing only preamble and training fields, no data payload | +| **Channel Contrast** | The ratio of complex channel response with human present to the empty-room reference response - the input to diffraction tomography | +| **Coherence Delta** | The change in phase coherence metric between consecutive observation windows - the trigger signal for cognitive waveform transitions | +| **NDP** | Null Data PPDU - an 802.11bf-standard sounding frame containing only preamble and training fields, no data payload | | **Sensing Availability Window (SAW)** | An 802.11bf-defined time interval during which NDP sounding exchanges are permitted between sensing initiator and responder | | **Body Model Prior** | Geometric constraints derived from known human body dimensions (segment lengths, joint angle limits) used to regularize cross-band fusion and tomographic reconstruction | | **Phase Reference Signal** | A continuous-wave tone at the operating band center frequency, distributed alongside the 40 MHz clock, enabling all nodes to measure and compensate residual phase offset | @@ -66,23 +66,23 @@ ``` **Aggregates:** -- 
`SoundingScheduler` (Aggregate Root) — Orchestrates sounding frame transmission across nodes and bands according to the current waveform configuration +- `SoundingScheduler` (Aggregate Root) - Orchestrates sounding frame transmission across nodes and bands according to the current waveform configuration **Entities:** -- `SoundingFrame` — A single NDP or micro-burst transmission with sequence ID, band, timestamp, and pilot structure -- `BurstSequence` — An ordered set of micro-bursts within one observation window, used for coherent Doppler integration -- `WaveformConfig` — The current waveform parameter set (cadence, bandwidth, band selection, power level, subcarrier mask) +- `SoundingFrame` - A single NDP or micro-burst transmission with sequence ID, band, timestamp, and pilot structure +- `BurstSequence` - An ordered set of micro-bursts within one observation window, used for coherent Doppler integration +- `WaveformConfig` - The current waveform parameter set (cadence, bandwidth, band selection, power level, subcarrier mask) **Value Objects:** -- `SoundingCadence` — Transmission rate in Hz (1–5000), constrained by regulatory duty cycle limits -- `BandSelection` — Set of active bands {2.4 GHz, 5 GHz, 6 GHz} for current mode -- `SubcarrierMask` — Bit vector selecting active subcarriers for focused sensing (vital mode uses optimal subset) -- `BurstDuration` — Single burst length in microseconds (4–20 μs) -- `DutyCycle` — Computed duty cycle percentage, must not exceed regulatory limit (ETSI: 10 ms max burst) +- `SoundingCadence` - Transmission rate in Hz (1–5000), constrained by regulatory duty cycle limits +- `BandSelection` - Set of active bands {2.4 GHz, 5 GHz, 6 GHz} for current mode +- `SubcarrierMask` - Bit vector selecting active subcarriers for focused sensing (vital mode uses optimal subset) +- `BurstDuration` - Single burst length in microseconds (4–20 μs) +- `DutyCycle` - Computed duty cycle percentage, must not exceed regulatory limit (ETSI: 10 ms max 
burst) **Domain Services:** -- `RegulatoryComplianceChecker` — Validates that any waveform configuration satisfies FCC Part 15.247 and ETSI EN 300 328 constraints before applying -- `BandCoordinator` — Manages time-division or simultaneous multi-band sounding to avoid self-interference +- `RegulatoryComplianceChecker` - Validates that any waveform configuration satisfies FCC Part 15.247 and ETSI EN 300 328 constraints before applying +- `BandCoordinator` - Manages time-division or simultaneous multi-band sounding to avoid self-interference --- @@ -131,20 +131,20 @@ ``` **Aggregates:** -- `ReferenceClockModule` (Aggregate Root) — The single source of timing truth for the entire CHCI mesh +- `ReferenceClockModule` (Aggregate Root) - The single source of timing truth for the entire CHCI mesh **Entities:** -- `NodePhaseLock` — Per-node state tracking lock status, residual offset, and drift rate -- `CalibrationSession` — A timed procedure that measures and records per-node phase offsets under static conditions +- `NodePhaseLock` - Per-node state tracking lock status, residual offset, and drift rate +- `CalibrationSession` - A timed procedure that measures and records per-node phase offsets under static conditions **Value Objects:** -- `PhaseOffset` — Residual phase offset in degrees after clock distribution, per node per subcarrier -- `DriftRate` — Phase drift in degrees per minute, must remain below threshold (0.05°/min for heartbeat sensing) -- `LockStatus` — Enum {Acquiring, Locked, Drifting, Lost} indicating current synchronization state +- `PhaseOffset` - Residual phase offset in degrees after clock distribution, per node per subcarrier +- `DriftRate` - Phase drift in degrees per minute, must remain below threshold (0.05°/min for heartbeat sensing) +- `LockStatus` - Enum {Acquiring, Locked, Drifting, Lost} indicating current synchronization state **Domain Services:** -- `PhaseCalibrationService` — Runs startup and periodic calibration routines; replaces statistical 
LO estimation in current `phase_align.rs` -- `DriftMonitor` — Continuous background service that detects when any node exceeds drift threshold and triggers recalibration +- `PhaseCalibrationService` - Runs startup and periodic calibration routines; replaces statistical LO estimation in current `phase_align.rs` +- `DriftMonitor` - Continuous background service that detects when any node exceeds drift threshold and triggers recalibration **Invariants:** - All nodes must achieve `Locked` status before CHCI sensing begins @@ -207,25 +207,25 @@ ``` **Aggregates:** -- `DiffractionTomographyEngine` (Aggregate Root) — Reconstructs 3D body surface geometry from coherent channel contrast measurements across all node pairs and frequency bands +- `DiffractionTomographyEngine` (Aggregate Root) - Reconstructs 3D body surface geometry from coherent channel contrast measurements across all node pairs and frequency bands **Entities:** -- `CoherentCsiFrame` — A single coherent channel measurement: complex-valued H(f) per subcarrier, with phase-lock metadata, node ID, band, sequence ID, and timestamp -- `ReferenceChannel` — The empty-room complex channel response per link per band, used as the denominator in channel contrast computation -- `VoxelGrid` — 3D grid of complex permittivity contrast values, the output of diffraction tomography -- `BodySurface` — Extracted iso-surface from voxel grid, represented as triangulated mesh or point cloud +- `CoherentCsiFrame` - A single coherent channel measurement: complex-valued H(f) per subcarrier, with phase-lock metadata, node ID, band, sequence ID, and timestamp +- `ReferenceChannel` - The empty-room complex channel response per link per band, used as the denominator in channel contrast computation +- `VoxelGrid` - 3D grid of complex permittivity contrast values, the output of diffraction tomography +- `BodySurface` - Extracted iso-surface from voxel grid, represented as triangulated mesh or point cloud **Value Objects:** -- 
`ChannelContrast` — Complex ratio H_measured/H_reference per subcarrier per link — the fundamental input to tomography -- `SubcarrierResponse` — Complex-valued (amplitude + phase) channel response at a single subcarrier frequency -- `VoxelCoordinate` — (x, y, z) position in room coordinate frame with associated complex permittivity value -- `SurfaceNormal` — Orientation vector at each surface vertex, derived from permittivity gradient -- `CoherenceMetric` — Complex-valued coherence score (magnitude + phase) replacing the current real-valued Z-score +- `ChannelContrast` - Complex ratio H_measured/H_reference per subcarrier per link - the fundamental input to tomography +- `SubcarrierResponse` - Complex-valued (amplitude + phase) channel response at a single subcarrier frequency +- `VoxelCoordinate` - (x, y, z) position in room coordinate frame with associated complex permittivity value +- `SurfaceNormal` - Orientation vector at each surface vertex, derived from permittivity gradient +- `CoherenceMetric` - Complex-valued coherence score (magnitude + phase) replacing the current real-valued Z-score **Domain Services:** -- `ChannelContrastComputer` — Divides measured channel by reference to isolate human-induced perturbation -- `MultiBandFuser` — Aligns phase across bands using body model priors and combines into unified spectral response -- `SurfaceExtractor` — Applies marching cubes or similar iso-surface algorithm to permittivity contrast grid +- `ChannelContrastComputer` - Divides measured channel by reference to isolate human-induced perturbation +- `MultiBandFuser` - Aligns phase across bands using body model priors and combines into unified spectral response +- `SurfaceExtractor` - Applies marching cubes or similar iso-surface algorithm to permittivity contrast grid **RuVector Integration:** - `ruvector-attention` → Cross-band attention weights for frequency fusion (extends `CrossViewpointAttention`) @@ -286,24 +286,24 @@ ``` **Aggregates:** -- 
`SensingModeStateMachine` (Aggregate Root) — Manages transitions between six sensing modes based on coherence delta, motion classification, and body model state +- `SensingModeStateMachine` (Aggregate Root) - Manages transitions between six sensing modes based on coherence delta, motion classification, and body model state **Entities:** -- `SensingMode` — One of {IDLE, ALERT, ACTIVE, VITAL, GESTURE, SLEEP} with associated waveform parameter set -- `ModeTransition` — A state change event with trigger reason, timestamp, and hysteresis counter -- `PowerBudget` — Per-mode power allocation constraining cadence and TX power +- `SensingMode` - One of {IDLE, ALERT, ACTIVE, VITAL, GESTURE, SLEEP} with associated waveform parameter set +- `ModeTransition` - A state change event with trigger reason, timestamp, and hysteresis counter +- `PowerBudget` - Per-mode power allocation constraining cadence and TX power **Value Objects:** -- `CoherenceDelta` — Magnitude of coherence change between consecutive observation windows — the primary mode transition trigger -- `MotionClassification` — Enum {Static, Breathing, Walking, Gesturing, Falling} derived from micro-Doppler signature -- `ModeHysteresis` — Counter preventing rapid mode oscillation: requires N consecutive trigger events before transition (default N=3) -- `OptimalSubcarrierSet` — The subset of subcarriers with highest SNR for vital sign extraction, computed from recent channel statistics +- `CoherenceDelta` - Magnitude of coherence change between consecutive observation windows - the primary mode transition trigger +- `MotionClassification` - Enum {Static, Breathing, Walking, Gesturing, Falling} derived from micro-Doppler signature +- `ModeHysteresis` - Counter preventing rapid mode oscillation: requires N consecutive trigger events before transition (default N=3) +- `OptimalSubcarrierSet` - The subset of subcarriers with highest SNR for vital sign extraction, computed from recent channel statistics **Domain Services:** -- 
`SceneStateObserver` — Fuses body model output, coherence metrics, and motion classifier into a unified scene state descriptor -- `ModeTransitionEvaluator` — Applies hysteresis and priority rules to determine if a mode change should occur -- `SubcarrierSelector` — Identifies optimal subcarrier subset for vital mode using Fisher information criterion or SNR ranking -- `PowerManager` — Computes TX power and duty cycle to stay within regulatory and battery constraints per mode +- `SceneStateObserver` - Fuses body model output, coherence metrics, and motion classifier into a unified scene state descriptor +- `ModeTransitionEvaluator` - Applies hysteresis and priority rules to determine if a mode change should occur +- `SubcarrierSelector` - Identifies optimal subcarrier subset for vital mode using Fisher information criterion or SNR ranking +- `PowerManager` - Computes TX power and duty cycle to stay within regulatory and battery constraints per mode **Invariants:** - IDLE mode must be entered after 30 seconds of no detection (configurable) @@ -373,25 +373,25 @@ ``` **Aggregates:** -- `RespiratoryAnalyzer` (Aggregate Root) — Extracts breathing rate and pattern from 0.1–0.6 Hz displacement band +- `RespiratoryAnalyzer` (Aggregate Root) - Extracts breathing rate and pattern from 0.1–0.6 Hz displacement band **Entities:** -- `PhaseTimeSeries` — Windowed buffer of unwrapped phase values per subcarrier per link, at sounding cadence -- `DisplacementTimeSeries` — Converted from phase: δ(t) = λΔφ(t) / (4π), represents physical surface displacement in mm -- `VitalSignReport` — Fused output containing breathing rate, heart rate, HRV, confidence scores, and anomaly flags +- `PhaseTimeSeries` - Windowed buffer of unwrapped phase values per subcarrier per link, at sounding cadence +- `DisplacementTimeSeries` - Converted from phase: δ(t) = λΔφ(t) / (4π), represents physical surface displacement in mm +- `VitalSignReport` - Fused output containing breathing rate, heart rate, HRV, 
confidence scores, and anomaly flags **Value Objects:** -- `PhaseUnwrapped` — Continuous (unwrapped) phase in radians, free from 2π ambiguity -- `DisplacementSample` — Single displacement value in mm with timestamp and confidence -- `BreathingRate` — BPM value (6–36 range) with confidence score -- `HeartRate` — BPM value (48–180 range) with confidence score and HRV interval -- `ApneaEvent` — Duration, severity, and confidence of detected breathing cessation +- `PhaseUnwrapped` - Continuous (unwrapped) phase in radians, free from 2π ambiguity +- `DisplacementSample` - Single displacement value in mm with timestamp and confidence +- `BreathingRate` - BPM value (6–36 range) with confidence score +- `HeartRate` - BPM value (48–180 range) with confidence score and HRV interval +- `ApneaEvent` - Duration, severity, and confidence of detected breathing cessation **Domain Services:** -- `PhaseUnwrapper` — Continuous phase unwrapping with outlier rejection; critical for displacement conversion -- `RespiratoryHarmonicCanceller` — Removes breathing harmonics from cardiac band to isolate heartbeat signal -- `MultilinkFuser` — Combines displacement estimates across node pairs using SNR-weighted averaging -- `AnomalyDetector` — Flags displacement patterns inconsistent with normal physiology (fall, seizure, cardiac arrest) +- `PhaseUnwrapper` - Continuous phase unwrapping with outlier rejection; critical for displacement conversion +- `RespiratoryHarmonicCanceller` - Removes breathing harmonics from cardiac band to isolate heartbeat signal +- `MultilinkFuser` - Combines displacement estimates across node pairs using SNR-weighted averaging +- `AnomalyDetector` - Flags displacement patterns inconsistent with normal physiology (fall, seizure, cardiac arrest) **Invariants:** - Phase unwrapping must maintain continuity: |Δφ| < π between consecutive samples @@ -452,18 +452,18 @@ ``` **Aggregates:** -- `ComplianceValidator` (Aggregate Root) — Gate that must approve every waveform 
configuration before transmission is permitted +- `ComplianceValidator` (Aggregate Root) - Gate that must approve every waveform configuration before transmission is permitted **Entities:** -- `JurisdictionProfile` — Complete set of regulatory constraints for a given region (FCC, ETSI, ARIB, etc.) -- `ComplianceRecord` — Audit trail of compliance checks with timestamps and configuration hashes +- `JurisdictionProfile` - Complete set of regulatory constraints for a given region (FCC, ETSI, ARIB, etc.) +- `ComplianceRecord` - Audit trail of compliance checks with timestamps and configuration hashes **Value Objects:** -- `MaxEIRP` — Maximum effective isotropic radiated power in dBm, per band per jurisdiction -- `MaxBurstDuration` — Maximum continuous transmission time (ETSI: 10 ms) -- `MinIdleTime` — Minimum idle period between bursts -- `ModulationType` — Must be digital modulation (OFDM qualifies) or spread spectrum for FCC -- `DutyCycleLimit` — Maximum percentage of time occupied by transmissions +- `MaxEIRP` - Maximum effective isotropic radiated power in dBm, per band per jurisdiction +- `MaxBurstDuration` - Maximum continuous transmission time (ETSI: 10 ms) +- `MinIdleTime` - Minimum idle period between bursts +- `ModulationType` - Must be digital modulation (OFDM qualifies) or spread spectrum for FCC +- `DutyCycleLimit` - Maximum percentage of time occupied by transmissions **Invariants:** - No transmission shall occur without a passing `ComplianceCheckPassed` event @@ -642,7 +642,7 @@ pub struct VitalSignReport { breathing_bpm: Option, /// Breathing confidence [0.0, 1.0] breathing_confidence: f64, - /// Heart rate in BPM (None if not measurable — requires CHCI coherent mode) + /// Heart rate in BPM (None if not measurable - requires CHCI coherent mode) heart_rate_bpm: Option, /// Heart rate confidence [0.0, 1.0] heart_rate_confidence: f64, @@ -752,7 +752,7 @@ pub enum ClockEvent { node_id: NodeId, drift_deg_per_min: f64, }, - /// Phase lock lost on a node — 
triggers fallback to statistical correction + /// Phase lock lost on a node - triggers fallback to statistical correction ClockLockLost { node_id: NodeId, reason: LockLossReason, @@ -845,7 +845,7 @@ pub enum MeasurementEvent { │ └────────────────┘ └────────────────┘ └────────────────┘ │ │ │ │ CHCI Signal Processing feeds directly into existing │ -│ RuvSense/RuVector/DensePose pipeline — coherent CSI │ +│ RuvSense/RuVector/DensePose pipeline - coherent CSI │ │ replaces incoherent CSI as input, same output interface │ │ │ └─────────────────────────────────────────────────────────────────────────┘ @@ -855,10 +855,10 @@ pub enum MeasurementEvent { | Boundary | Direction | Mechanism | |----------|-----------|-----------| -| CHCI Signal Processing → RuvSense | Downstream | `CoherentCsiFrame` adapts to existing `CsiFrame` trait via `IntoLegacyCsi` adapter — existing pipeline works unmodified | +| CHCI Signal Processing → RuvSense | Downstream | `CoherentCsiFrame` adapts to existing `CsiFrame` trait via `IntoLegacyCsi` adapter - existing pipeline works unmodified | | Cognitive Waveform → ADR-039 Edge Tiers | Bidirectional | Sensing modes map to edge tiers: IDLE→Tier0, ACTIVE→Tier1, VITAL→Tier2. 
Shared `EdgeConfig` value object | | Clock Synchronization → Hardware | Downstream | `ClockDriver` trait abstracts SI5351A hardware specifics; mock implementation for testing | -| Regulatory Compliance → All TX Contexts | Upstream | Compliance Validator acts as a policy gateway — no transmission without passing check | +| Regulatory Compliance → All TX Contexts | Upstream | Compliance Validator acts as a policy gateway - no transmission without passing check | --- diff --git a/docs/ddd/deployment-platform-domain-model.md b/docs/ddd/deployment-platform-domain-model.md index 4d8836552..1ec52fe38 100644 --- a/docs/ddd/deployment-platform-domain-model.md +++ b/docs/ddd/deployment-platform-domain-model.md @@ -190,9 +190,9 @@ pub struct SemanticVersion { ``` **Domain Services:** -- `ProvisioningService` — Generates Armbian SD card image with pre-configured deployment package, WiFi credentials, and systemd units -- `HealthMonitorService` — Listens for UDP health beacons from fleet appliances, triggers alerts on thermal throttling (>80°C), unreachable (>5 min), or high memory usage (>90%) -- `OtaUpdateService` — Downloads new binary from release URL, verifies SHA-256 checksum, performs atomic swap (`rename(new, current)`), restarts systemd service, rolls back if health beacon fails within 60s +- `ProvisioningService` - Generates Armbian SD card image with pre-configured deployment package, WiFi credentials, and systemd units +- `HealthMonitorService` - Listens for UDP health beacons from fleet appliances, triggers alerts on thermal throttling (>80°C), unreachable (>5 min), or high memory usage (>90%) +- `OtaUpdateService` - Downloads new binary from release URL, verifies SHA-256 checksum, performs atomic swap (`rename(new, current)`), restarts systemd service, rolls back if health beacon fails within 60s **Invariants:** - Device ID (MAC address) is immutable after provisioning @@ -289,7 +289,7 @@ pub struct BuildTarget { - Stripped binary must be under size limit for 
target - SHA-256 checksum is computed and included in every deployment package - UI assets are embedded in binary via `include_dir!` or bundled alongside -- No native GPU dependencies — CPU-only inference (candle or ONNX Runtime) +- No native GPU dependencies - CPU-only inference (candle or ONNX Runtime) --- @@ -455,10 +455,10 @@ pub struct Esp32CompatFrame { ``` **Domain Services:** -- `CsiExtractionService` — Reads raw CSI from patched driver via Netlink socket (BCM43455), procfs (RTL8822CS), or UDP (MT7661) -- `SubcarrierResamplerService` — Resamples chipset-specific subcarrier counts to match ESP32 format (e.g., 256 → 128 via decimation or interpolation) -- `ProtocolTranslatorService` — Converts `ChipsetCsiFrame` to `Esp32CompatFrame` with ADR-018 binary encoding -- `CalibrationService` — Compensates for chipset-specific phase offsets, antenna spacing, and gain differences relative to ESP32 CSI +- `CsiExtractionService` - Reads raw CSI from patched driver via Netlink socket (BCM43455), procfs (RTL8822CS), or UDP (MT7661) +- `SubcarrierResamplerService` - Resamples chipset-specific subcarrier counts to match ESP32 format (e.g., 256 → 128 via decimation or interpolation) +- `ProtocolTranslatorService` - Converts `ChipsetCsiFrame` to `Esp32CompatFrame` with ADR-018 binary encoding +- `CalibrationService` - Compensates for chipset-specific phase offsets, antenna spacing, and gain differences relative to ESP32 CSI **Invariants:** - Bridge assigns virtual `node_id` in range 200-254 (reserved for non-ESP32 sources) to avoid collision with physical ESP32 node IDs (1-199) @@ -538,15 +538,15 @@ pub struct EspNodeConnection { ``` **Domain Services:** -- `DedicatedApService` — Configures `hostapd` to create a WPA2 AP on the TV box's WiFi interface, assigns DHCP range via `dnsmasq`, sets up IP forwarding -- `NodeDiscoveryService` — Monitors UDP port 5005 for new ESP32 node IDs, registers them in the topology, alerts on node departure (no frames for >30s) -- 
`FirewallService` — Configures `nftables`/`iptables` to isolate the ESP32 subnet from the upstream LAN, allowing only UDP 5005 inbound and HTTP 3000 outbound +- `DedicatedApService` - Configures `hostapd` to create a WPA2 AP on the TV box's WiFi interface, assigns DHCP range via `dnsmasq`, sets up IP forwarding +- `NodeDiscoveryService` - Monitors UDP port 5005 for new ESP32 node IDs, registers them in the topology, alerts on node departure (no frames for >30s) +- `FirewallService` - Configures `nftables`/`iptables` to isolate the ESP32 subnet from the upstream LAN, allowing only UDP 5005 inbound and HTTP 3000 outbound **Invariants:** - Dedicated AP uses a separate WiFi interface or virtual interface (not the uplink) - ESP32 subnet is isolated from upstream LAN by default (firewall rules) - If dedicated AP is disabled, ESP32 nodes must be on the same LAN subnet as the appliance -- Node discovery does not require mDNS or any discovery protocol — ESP32 nodes are configured with the appliance's IP via NVS provisioning (ADR-044) +- Node discovery does not require mDNS or any discovery protocol - ESP32 nodes are configured with the appliance's IP via NVS provisioning (ADR-044) --- @@ -639,10 +639,10 @@ For multi-room deployments, each appliance is self-contained (runs its own sensi ## Related -- [ADR-046: Android TV Box / Armbian Deployment](../adr/ADR-046-android-tv-box-armbian-deployment.md) — Primary architectural decision -- [ADR-012: ESP32 CSI Sensor Mesh](../adr/ADR-012-esp32-csi-sensor-mesh.md) — ESP32 mesh network design -- [ADR-018: Dev Implementation](../adr/ADR-018-dev-implementation.md) — ESP32 binary CSI protocol -- [ADR-039: Edge Intelligence](../adr/ADR-039-esp32-edge-intelligence.md) — On-device processing tiers -- [ADR-044: Provisioning Tool](../adr/ADR-044-provisioning-tool-enhancements.md) — NVS provisioning for ESP32 nodes -- [Hardware Platform Domain Model](hardware-platform-domain-model.md) — Upstream domain (ESP32 hardware) -- [Sensing Server 
Domain Model](sensing-server-domain-model.md) — Upstream domain (server software) +- [ADR-046: Android TV Box / Armbian Deployment](../adr/ADR-046-android-tv-box-armbian-deployment.md) - Primary architectural decision +- [ADR-012: ESP32 CSI Sensor Mesh](../adr/ADR-012-esp32-csi-sensor-mesh.md) - ESP32 mesh network design +- [ADR-018: Dev Implementation](../adr/ADR-018-dev-implementation.md) - ESP32 binary CSI protocol +- [ADR-039: Edge Intelligence](../adr/ADR-039-esp32-edge-intelligence.md) - On-device processing tiers +- [ADR-044: Provisioning Tool](../adr/ADR-044-provisioning-tool-enhancements.md) - NVS provisioning for ESP32 nodes +- [Hardware Platform Domain Model](hardware-platform-domain-model.md) - Upstream domain (ESP32 hardware) +- [Sensing Server Domain Model](sensing-server-domain-model.md) - Upstream domain (server software) diff --git a/docs/ddd/hardware-platform-domain-model.md b/docs/ddd/hardware-platform-domain-model.md index c667161ee..9061e392c 100644 --- a/docs/ddd/hardware-platform-domain-model.md +++ b/docs/ddd/hardware-platform-domain-model.md @@ -2,7 +2,7 @@ The Hardware Platform domain covers everything from the ESP32-S3 silicon to the server-side aggregator: collecting raw CSI, processing it on-device, running programmable WASM modules at the edge, and provisioning fleets of sensor nodes. It is the physical foundation that all higher-level domains (RuvSense, WiFi-Mat, Pose Tracking) depend on for real radio data. -This document defines the system using [Domain-Driven Design](https://martinfowler.com/bliki/DomainDrivenDesign.html) (DDD): bounded contexts that own their data and rules, aggregate roots that enforce invariants, value objects that carry meaning, and domain events that connect everything. The goal is to make the firmware and hardware layer's structure match the electronics it controls -- so that anyone reading the code (or an AI agent modifying it) understands *why* each piece exists, not just *what* it does. 
+This document defines the system using [Domain-Driven Design](https://martinfowler.com/bliki/DomainDrivenDesign.html) (DDD): bounded contexts that own their data and rules, aggregate roots that enforce invariants, value objects that carry meaning, and domain events that connect everything. The goal is to make the firmware and hardware layer's structure match the electronics it controls - so that anyone reading the code (or an AI agent modifying it) understands *why* each piece exists, not just *what* it does. **Bounded Contexts:** @@ -97,9 +97,9 @@ All firmware paths are relative to the repository root. Rust crate paths are rel - `ChannelHopConfig` **Domain Services:** -- `CsiCollectionService` -- Registers ESP-IDF CSI callback, extracts I/Q, enforces 50 Hz rate limit -- `StreamSendService` -- Serializes frames to ADR-018 binary format, sends UDP with sequence numbers -- `NvsConfigService` -- Reads 20+ NVS keys at boot, provides typed config to all firmware components +- `CsiCollectionService` - Registers ESP-IDF CSI callback, extracts I/Q, enforces 50 Hz rate limit +- `StreamSendService` - Serializes frames to ADR-018 binary format, sends UDP with sequence numbers +- `NvsConfigService` - Reads 20+ NVS keys at boot, provides typed config to all firmware components --- @@ -175,13 +175,13 @@ All firmware paths are relative to the repository root. 
Rust crate paths are rel - `EdgeTier` **Domain Services:** -- `PhaseExtractionService` -- Converts raw I/Q to amplitude + phase, applies unwrapping -- `WelfordStatsService` -- Maintains per-subcarrier running mean and variance -- `TopKSelectionService` -- Selects K subcarriers with highest variance for downstream processing -- `BandpassFilterService` -- Biquad IIR filters for breathing and heart rate frequency bands -- `PresenceDetectionService` -- Adaptive threshold with 1200-frame, 3-sigma calibration -- `FallDetectionService` -- Phase acceleration exceeding configurable threshold (default 2.0 rad/s^2) -- `DeltaCompressionService` -- XOR + RLE delta encoding for 30-50% bandwidth reduction +- `PhaseExtractionService` - Converts raw I/Q to amplitude + phase, applies unwrapping +- `WelfordStatsService` - Maintains per-subcarrier running mean and variance +- `TopKSelectionService` - Selects K subcarriers with highest variance for downstream processing +- `BandpassFilterService` - Biquad IIR filters for breathing and heart rate frequency bands +- `PresenceDetectionService` - Adaptive threshold with 1200-frame, 3-sigma calibration +- `FallDetectionService` - Phase acceleration exceeding configurable threshold (default 2.0 rad/s^2) +- `DeltaCompressionService` - XOR + RLE delta encoding for 30-50% bandwidth reduction --- @@ -258,11 +258,11 @@ All firmware paths are relative to the repository root. 
Rust crate paths are rel - `ModuleState` **Domain Services:** -- `RvfVerificationService` -- Parses RVF header, verifies SHA-256 hash and Ed25519 signature -- `ModuleLifecycleService` -- Handles load -> start -> run -> stop -> unload transitions -- `BudgetControllerService` -- Computes per-frame budget from mincut eigenvalue gap, thermal, and battery pressure -- `HostApiBindingService` -- Links 12 host functions to WASM3 imports in the "csi" namespace -- `WasmUploadService` -- HTTP server on port 8032 for module management endpoints +- `RvfVerificationService` - Parses RVF header, verifies SHA-256 hash and Ed25519 signature +- `ModuleLifecycleService` - Handles load -> start -> run -> stop -> unload transitions +- `BudgetControllerService` - Computes per-frame budget from mincut eigenvalue gap, thermal, and battery pressure +- `HostApiBindingService` - Links 12 host functions to WASM3 imports in the "csi" namespace +- `WasmUploadService` - HTTP server on port 8032 for module management endpoints --- @@ -319,10 +319,10 @@ All firmware paths are relative to the repository root. 
Rust crate paths are rel - `FusedMotionEnergy` **Domain Services:** -- `UdpReceiverService` -- Listens on UDP port 5005, demuxes by magic number and node ID -- `TimestampAlignmentService` -- Maps per-node monotonic timestamps to aggregator-local time -- `FeatureFusionService` -- Computes cross-node correlation, fused motion (max across nodes), fused breathing (highest SNR) -- `PipelineBridgeService` -- Feeds fused frames into the wifi-densepose Rust pipeline via mpsc channel +- `UdpReceiverService` - Listens on UDP port 5005, demuxes by magic number and node ID +- `TimestampAlignmentService` - Maps per-node monotonic timestamps to aggregator-local time +- `FeatureFusionService` - Computes cross-node correlation, fused motion (max across nodes), fused breathing (highest SNR) +- `PipelineBridgeService` - Feeds fused frames into the wifi-densepose Rust pipeline via mpsc channel --- @@ -394,11 +394,11 @@ All firmware paths are relative to the repository root. Rust crate paths are rel - `VerificationResult` **Domain Services:** -- `NvsWriteService` -- Writes typed NVS key-value pairs to the ESP32 flash partition via esptool -- `PresetResolverService` -- Maps named presets (basic, vitals, mesh-3, mesh-6-vitals) to NVS key sets -- `MeshProvisionerService` -- Iterates over nodes in a config file, computing TDM slots automatically -- `ReadBackService` -- Reads NVS partition, parses binary format, returns typed config -- `BootVerificationService` -- Opens serial monitor post-provision, checks for expected log lines +- `NvsWriteService` - Writes typed NVS key-value pairs to the ESP32 flash partition via esptool +- `PresetResolverService` - Maps named presets (basic, vitals, mesh-3, mesh-6-vitals) to NVS key sets +- `MeshProvisionerService` - Iterates over nodes in a config file, computing TDM slots automatically +- `ReadBackService` - Reads NVS partition, parses binary format, returns typed config +- `BootVerificationService` - Opens serial monitor post-provision, checks for 
expected log lines --- @@ -674,23 +674,23 @@ pub enum EdgeTier { /// Complete NVS configuration for one ESP32 sensor node. /// Covers all 20+ firmware-readable keys. pub struct NvsConfig { - // -- Network -- + // - Network -- pub ssid: String, pub password: String, pub target_ip: Ipv4Addr, pub target_port: u16, // default: 5005 pub node_id: u8, // default: 0 - // -- TDM -- + // - TDM -- pub tdm_slot: u8, // default: 0 pub tdm_total: u8, // default: 1 (no TDM) - // -- Channel Hopping -- + // - Channel Hopping -- pub hop_count: u8, // default: 1 (no hop) pub chan_list: Vec, // default: [1, 6, 11] pub dwell_ms: u32, // default: 100 - // -- Edge Processing -- + // - Edge Processing -- pub edge_tier: EdgeTier, // default: Tier 2 pub pres_thresh: u16, // default: 0 (auto-calibrate) pub fall_thresh: u16, // default: 2000 (2.0 rad/s^2) @@ -698,15 +698,15 @@ pub struct NvsConfig { pub vital_int: u16, // default: 1000 ms pub subk_count: u8, // default: 8 - // -- Power -- + // - Power -- pub power_duty: u8, // default: 100 (always on) - // -- WASM -- + // - WASM -- pub wasm_max: u8, // default: 4 pub wasm_verify: bool, // default: true (secure-by-default) pub wasm_pubkey: Option<[u8; 32]>, // Ed25519 public key - // -- MAC Filter -- + // - MAC Filter -- pub filter_mac: Option, } ``` @@ -1328,11 +1328,11 @@ All ESP32 UDP packets share a 4-byte magic prefix for demuxing at the aggregator ## References -- [ADR-012: ESP32 CSI Sensor Mesh](../adr/ADR-012-esp32-csi-sensor-mesh.md) -- Hardware selection, mesh architecture, BOM -- [ADR-018: Dev Implementation](../adr/ADR-018-dev-implementation.md) -- Binary frame format, ADR-018 wire protocol -- [ADR-039: ESP32-S3 Edge Intelligence](../adr/ADR-039-esp32-edge-intelligence.md) -- Tiered processing, DSP pipeline, hardware benchmarks -- [ADR-040: WASM Programmable Sensing](../adr/ADR-040-wasm-programmable-sensing.md) -- WASM3 runtime, Host API, RVF container, adaptive budget -- [ADR-041: WASM Module 
Collection](../adr/ADR-041-wasm-module-collection.md) -- 60-module catalog, event ID registry, budget tiers -- [ADR-044: Provisioning Tool Enhancements](../adr/ADR-044-provisioning-tool-enhancements.md) -- NVS coverage, presets, mesh config, read-back -- [RuvSense Domain Model](ruvsense-domain-model.md) -- Upstream signal processing domain -- [WiFi-Mat Domain Model](wifi-mat-domain-model.md) -- Downstream disaster response domain +- [ADR-012: ESP32 CSI Sensor Mesh](../adr/ADR-012-esp32-csi-sensor-mesh.md) - Hardware selection, mesh architecture, BOM +- [ADR-018: Dev Implementation](../adr/ADR-018-dev-implementation.md) - Binary frame format, ADR-018 wire protocol +- [ADR-039: ESP32-S3 Edge Intelligence](../adr/ADR-039-esp32-edge-intelligence.md) - Tiered processing, DSP pipeline, hardware benchmarks +- [ADR-040: WASM Programmable Sensing](../adr/ADR-040-wasm-programmable-sensing.md) - WASM3 runtime, Host API, RVF container, adaptive budget +- [ADR-041: WASM Module Collection](../adr/ADR-041-wasm-module-collection.md) - 60-module catalog, event ID registry, budget tiers +- [ADR-044: Provisioning Tool Enhancements](../adr/ADR-044-provisioning-tool-enhancements.md) - NVS coverage, presets, mesh config, read-back +- [RuvSense Domain Model](ruvsense-domain-model.md) - Upstream signal processing domain +- [WiFi-Mat Domain Model](wifi-mat-domain-model.md) - Downstream disaster response domain diff --git a/docs/ddd/ruvsense-domain-model.md b/docs/ddd/ruvsense-domain-model.md index e56710e5f..efe530b8f 100644 --- a/docs/ddd/ruvsense-domain-model.md +++ b/docs/ddd/ruvsense-domain-model.md @@ -1,8 +1,8 @@ # RuvSense Domain Model -RuvSense is the multistatic WiFi sensing subsystem of RuView. It turns raw radio signals from multiple ESP32 sensors into tracked human poses, vital signs, and spatial awareness — all without cameras. +RuvSense is the multistatic WiFi sensing subsystem of RuView. 
It turns raw radio signals from multiple ESP32 sensors into tracked human poses, vital signs, and spatial awareness - all without cameras. -This document defines the system using [Domain-Driven Design](https://martinfowler.com/bliki/DomainDrivenDesign.html) (DDD): bounded contexts that own their data and rules, aggregate roots that enforce invariants, value objects that carry meaning, and domain events that connect everything. The goal is to make the system's structure match the physics it models — so that anyone reading the code (or an AI agent modifying it) understands *why* each piece exists, not just *what* it does. +This document defines the system using [Domain-Driven Design](https://martinfowler.com/bliki/DomainDrivenDesign.html) (DDD): bounded contexts that own their data and rules, aggregate roots that enforce invariants, value objects that carry meaning, and domain events that connect everything. The goal is to make the system's structure match the physics it models - so that anyone reading the code (or an AI agent modifying it) understands *why* each piece exists, not just *what* it does. 
**Bounded Contexts:** @@ -92,9 +92,9 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de - `ChannelHopConfig` **Domain Services:** -- `PhaseAlignmentService` — Corrects LO-induced phase rotation between channels -- `MultiBandFusionService` — Merges per-channel CSI into wideband virtual frame -- `MultistaticFusionService` — Attention-based fusion of N nodes into one frame +- `PhaseAlignmentService` - Corrects LO-induced phase rotation between channels +- `MultiBandFusionService` - Merges per-channel CSI into wideband virtual frame +- `MultistaticFusionService` - Attention-based fusion of N nodes into one frame **RuVector Integration:** - `ruvector-solver` → Phase alignment (NeumannSolver) @@ -140,7 +140,7 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de ``` **Aggregates:** -- `CoherenceState` (Aggregate Root) — Maintains reference template and gate state +- `CoherenceState` (Aggregate Root) - Maintains reference template and gate state **Value Objects:** - `CoherenceScore` (0.0-1.0) @@ -149,9 +149,9 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de - `DriftProfile` (Stable / Linear / StepChange) **Domain Services:** -- `CoherenceCalculatorService` — Computes per-subcarrier z-score coherence -- `StaticDynamicDecomposerService` — Separates environmental drift from body motion -- `GatePolicyService` — Applies threshold-based gating rules +- `CoherenceCalculatorService` - Computes per-subcarrier z-score coherence +- `StaticDynamicDecomposerService` - Separates environmental drift from body motion +- `GatePolicyService` - Applies threshold-based gating rules **RuVector Integration:** - `ruvector-solver` → Coherence matrix decomposition (static vs. 
dynamic) @@ -205,20 +205,20 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de - `PoseTrack` (Aggregate Root) **Entities:** -- `KeypointState` — Per-keypoint Kalman state (x,y,z,vx,vy,vz) with covariance +- `KeypointState` - Per-keypoint Kalman state (x,y,z,vx,vy,vz) with covariance **Value Objects:** -- `TrackedPose` — Immutable snapshot: 17 keypoints + confidence + track_id + lifecycle -- `PersonCluster` — Subset of links attributed to one person -- `AssignmentCost` — Combined Mahalanobis + embedding distance +- `TrackedPose` - Immutable snapshot: 17 keypoints + confidence + track_id + lifecycle +- `PersonCluster` - Subset of links attributed to one person +- `AssignmentCost` - Combined Mahalanobis + embedding distance - `TrackLifecycleState` (Tentative / Active / Lost / Terminated) **Domain Services:** -- `PersonSeparationService` — Min-cut partitioning of cross-link correlation graph -- `TrackAssignmentService` — Bipartite matching of detections to existing tracks -- `KalmanPredictionService` — Predict step at 28 Hz (decoupled from measurement rate) -- `KalmanUpdateService` — Gated measurement update (subject to coherence gate) -- `EmbeddingIdentifierService` — AETHER cosine similarity for re-ID +- `PersonSeparationService` - Min-cut partitioning of cross-link correlation graph +- `TrackAssignmentService` - Bipartite matching of detections to existing tracks +- `KalmanPredictionService` - Predict step at 28 Hz (decoupled from measurement rate) +- `KalmanUpdateService` - Gated measurement update (subject to coherence gate) +- `EmbeddingIdentifierService` - AETHER cosine similarity for re-ID **RuVector Integration:** - `ruvector-mincut` → Person separation (DynamicMinCut on correlation graph) @@ -622,13 +622,13 @@ pub trait MeshRepository { | Term | Definition | |------|------------| -| **Field Normal Mode** | The room's electromagnetic eigenstructure — stable propagation baseline when unoccupied | +| **Field Normal Mode** | The 
room's electromagnetic eigenstructure - stable propagation baseline when unoccupied | | **Body Perturbation** | Structured change to field caused by a person, after environmental drift is removed | | **Environmental Mode** | Principal component of baseline variation due to temperature, humidity, time-of-day | | **Personal Baseline** | Per-person rolling statistical profile of biophysical proxies over days/weeks | | **Drift Event** | Statistically significant deviation from personal baseline (>2sigma for >3 days) | | **Drift Report** | Traceable evidence package: z-score, direction, window, supporting embeddings | -| **Risk Signal** | Actionable observation about biophysical change — not a diagnosis | +| **Risk Signal** | Actionable observation about biophysical change - not a diagnosis | | **Intention Lead Signal** | Pre-movement dynamics (lean, weight shift) detected 200-500ms before visible motion | | **Occupancy Volume** | Low-resolution 3D probabilistic density field from RF tomography | | **Room Fingerprint** | HNSW-indexed embedding characterizing a room's electromagnetic identity | @@ -676,16 +676,16 @@ pub trait MeshRepository { - `FieldNormalMode` (Aggregate Root) **Value Objects:** -- `BodyPerturbation` — Per-link CSI residual after baseline + environmental mode removal -- `EnvironmentalMode` — One principal component of baseline variation -- `OccupancyVolume` — 3D voxel grid of estimated mass density -- `CalibrationStatus` — Fresh / Stale / Expired (based on time since last empty-room) +- `BodyPerturbation` - Per-link CSI residual after baseline + environmental mode removal +- `EnvironmentalMode` - One principal component of baseline variation +- `OccupancyVolume` - 3D voxel grid of estimated mass density +- `CalibrationStatus` - Fresh / Stale / Expired (based on time since last empty-room) **Domain Services:** -- `CalibrationService` — Detects empty-room windows, collects calibration data -- `ModeExtractionService` — SVD computation for environmental 
modes -- `PerturbationService` — Baseline subtraction + mode projection -- `TomographyService` — Sparse L1 inversion for occupancy volume +- `CalibrationService` - Detects empty-room windows, collects calibration data +- `ModeExtractionService` - SVD computation for environmental modes +- `PerturbationService` - Baseline subtraction + mode projection +- `TomographyService` - Sparse L1 inversion for occupancy volume **RuVector Integration:** - `ruvector-solver` → SVD for mode extraction; L1 for tomographic inversion @@ -735,20 +735,20 @@ pub trait MeshRepository { - `PersonalBaseline` (Aggregate Root) **Entities:** -- `DailyMetricSummary` — One day's worth of compressed metric statistics per person +- `DailyMetricSummary` - One day's worth of compressed metric statistics per person **Value Objects:** -- `DriftReport` — Evidence package with z-score, direction, window, embeddings -- `DriftMetric` — GaitSymmetry / StabilityIndex / BreathingRegularity / MicroTremor / ActivityLevel -- `DriftDirection` — Increasing / Decreasing -- `MonitoringLevel` — Physiological (Level 1) / Drift (Level 2) / RiskCorrelation (Level 3) -- `WelfordStats` — Online mean/variance accumulator (count, mean, M2) +- `DriftReport` - Evidence package with z-score, direction, window, embeddings +- `DriftMetric` - GaitSymmetry / StabilityIndex / BreathingRegularity / MicroTremor / ActivityLevel +- `DriftDirection` - Increasing / Decreasing +- `MonitoringLevel` - Physiological (Level 1) / Drift (Level 2) / RiskCorrelation (Level 3) +- `WelfordStats` - Online mean/variance accumulator (count, mean, M2) **Domain Services:** -- `MetricExtractionService` — Extract biomechanical proxies from pose tracks -- `BaselineUpdateService` — Update Welford statistics with daily observations -- `DriftDetectionService` — Compute z-scores, identify significant deviations -- `EvidenceAssemblyService` — Package supporting embeddings and graph constraints +- `MetricExtractionService` - Extract biomechanical proxies from 
pose tracks +- `BaselineUpdateService` - Update Welford statistics with daily observations +- `DriftDetectionService` - Compute z-scores, identify significant deviations +- `EvidenceAssemblyService` - Package supporting embeddings and graph constraints **RuVector Integration:** - `ruvector-temporal-tensor` → Compressed daily summary storage @@ -760,7 +760,7 @@ pub trait MeshRepository { - Baseline requires 7+ observation days before drift detection activates - Drift alert requires >2sigma deviation sustained for >3 consecutive days - Evidence chain must include start/end embeddings bracketing the drift window -- System never outputs diagnostic language — only metric values and deviations +- System never outputs diagnostic language - only metric values and deviations - Personal baseline decay: Welford stats use full history (no windowing) for stability --- @@ -805,19 +805,19 @@ pub trait MeshRepository { - `SpatialIdentityGraph` (Aggregate Root) **Entities:** -- `RoomProfile` — HNSW-indexed electromagnetic fingerprint of a room -- `PersonSpatialRecord` — Which rooms a person has visited, in order +- `RoomProfile` - HNSW-indexed electromagnetic fingerprint of a room +- `PersonSpatialRecord` - Which rooms a person has visited, in order **Value Objects:** -- `TransitionEvent` — Person, from_room, to_room, timestamps, embedding similarity -- `RoomFingerprint` — 128-dim AETHER embedding of the room's CSI profile -- `SpatialContinuity` — Confidence score for cross-room identity chain +- `TransitionEvent` - Person, from_room, to_room, timestamps, embedding similarity +- `RoomFingerprint` - 128-dim AETHER embedding of the room's CSI profile +- `SpatialContinuity` - Confidence score for cross-room identity chain **Domain Services:** -- `RoomFingerprintService` — Compute and index room electromagnetic profiles -- `TransitionDetectionService` — Detect exits (track lost near boundary) and entries (new track) -- `CrossRoomMatchingService` — HNSW similarity between exit and entry 
embeddings -- `TransitionGraphService` — Build and query the room-person-time graph +- `RoomFingerprintService` - Compute and index room electromagnetic profiles +- `TransitionDetectionService` - Detect exits (track lost near boundary) and entries (new track) +- `CrossRoomMatchingService` - HNSW similarity between exit and entry embeddings +- `TransitionGraphService` - Build and query the room-person-time graph **RuVector Integration:** - HNSW → Room and person fingerprint similarity search @@ -827,7 +827,7 @@ pub trait MeshRepository { - Cross-room match requires >0.80 cosine similarity AND <60s temporal gap - Room fingerprint must be recalculated if mesh topology changes - Transition graph edges are immutable once created (append-only audit trail) -- No image data stored — only 128-dim embeddings and structural events +- No image data stored - only 128-dim embeddings and structural events --- @@ -883,14 +883,14 @@ pub enum LongitudinalEvent { timestamp_us: u64, }, - /// Drift detected — biophysical metric significantly changed + /// Drift detected - biophysical metric significantly changed DriftDetected { person_id: PersonId, report: DriftReport, timestamp_us: u64, }, - /// Drift resolved — metric returned to baseline range + /// Drift resolved - metric returned to baseline range DriftResolved { person_id: PersonId, metric: DriftMetric, @@ -1037,7 +1037,7 @@ pub trait SpatialIdentityRepository { - Personal baseline requires ≥7 observation days before drift detection activates - Drift alert requires >2sigma deviation sustained for ≥3 consecutive days - Evidence chain must include embedding pairs bracketing the drift window -- Output must never use diagnostic language — only metric values and statistical deviations +- Output must never use diagnostic language - only metric values and statistical deviations - Daily summaries stored for ≥90 days (rolling retention policy) - Welford statistics use full history (no windowing) for maximum stability @@ -1045,7 +1045,7 @@ 
pub trait SpatialIdentityRepository { - Cross-room match requires >0.80 cosine similarity AND <60s temporal gap - Room fingerprint recalculated when mesh topology changes (node added/removed/moved) - Transition graph is append-only (immutable audit trail) -- No image data stored — only 128-dim embeddings and structural events +- No image data stored - only 128-dim embeddings and structural events - Maximum 100 rooms indexed per deployment (HNSW scaling constraint) --- @@ -1054,7 +1054,7 @@ pub trait SpatialIdentityRepository { ### 7. Edge Intelligence Context -**Responsibility:** Run signal processing and sensing algorithms directly on the ESP32-S3, without requiring a server. The node detects presence, measures breathing and heart rate, alerts on falls, and runs custom WASM modules — all locally with instant response. +**Responsibility:** Run signal processing and sensing algorithms directly on the ESP32-S3, without requiring a server. The node detects presence, measures breathing and heart rate, alerts on falls, and runs custom WASM modules - all locally with instant response. This is the only bounded context that runs on the microcontroller rather than the aggregator. It operates independently: the server is optional for visualization, but the ESP32 handles real-time sensing on its own. 
@@ -1107,27 +1107,27 @@ This is the only bounded context that runs on the microcontroller rather than th ``` **Aggregates:** -- `EdgeProcessingState` (Aggregate Root) — Holds all per-subcarrier state, filter history, and detection flags +- `EdgeProcessingState` (Aggregate Root) - Holds all per-subcarrier state, filter history, and detection flags **Value Objects:** -- `VitalsPacket` — 32-byte UDP packet: presence, motion, breathing BPM, heart rate BPM, confidence, fall flag, occupancy -- `EdgeTier` — Off (0) / BasicSignal (1) / FullVitals (2) / WasmExtended (3) -- `PresenceState` — Empty / Present / Moving -- `BandpassOutput` — Filtered signal in breathing or heart rate band -- `FallAlert` — Phase acceleration exceeding configurable threshold +- `VitalsPacket` - 32-byte UDP packet: presence, motion, breathing BPM, heart rate BPM, confidence, fall flag, occupancy +- `EdgeTier` - Off (0) / BasicSignal (1) / FullVitals (2) / WasmExtended (3) +- `PresenceState` - Empty / Present / Moving +- `BandpassOutput` - Filtered signal in breathing or heart rate band +- `FallAlert` - Phase acceleration exceeding configurable threshold **Entities:** -- `WasmModule` — A loaded WASM binary with its own memory arena (160 KB), frame budget (10 ms), and timer interval +- `WasmModule` - A loaded WASM binary with its own memory arena (160 KB), frame budget (10 ms), and timer interval **Domain Services:** -- `PhaseExtractionService` — Converts raw I/Q to unwrapped phase per subcarrier -- `VarianceTrackingService` — Welford running stats for subcarrier selection -- `TopKSelectionService` — Picks highest-variance subcarriers for downstream analysis -- `BandpassFilterService` — Biquad IIR filters for breathing (0.1-0.5 Hz) and heart rate (0.8-2.0 Hz) -- `PresenceDetectionService` — Adaptive threshold calibration (3-sigma over 1200-frame window) -- `VitalSignService` — Zero-crossing BPM estimation from filtered phase signals -- `FallDetectionService` — Phase acceleration exceeding threshold 
triggers alert -- `WasmRuntimeService` — WASM3 interpreter: load, execute, and sandbox custom modules +- `PhaseExtractionService` - Converts raw I/Q to unwrapped phase per subcarrier +- `VarianceTrackingService` - Welford running stats for subcarrier selection +- `TopKSelectionService` - Picks highest-variance subcarriers for downstream analysis +- `BandpassFilterService` - Biquad IIR filters for breathing (0.1-0.5 Hz) and heart rate (0.8-2.0 Hz) +- `PresenceDetectionService` - Adaptive threshold calibration (3-sigma over 1200-frame window) +- `VitalSignService` - Zero-crossing BPM estimation from filtered phase signals +- `FallDetectionService` - Phase acceleration exceeding threshold triggers alert +- `WasmRuntimeService` - WASM3 interpreter: load, execute, and sandbox custom modules **NVS Configuration (runtime, no reflash needed):** @@ -1143,10 +1143,10 @@ This is the only bounded context that runs on the microcontroller rather than th | `wasm_verify` | u8 | 0 | Require Ed25519 signature for uploads | **Implementation files:** -- `firmware/esp32-csi-node/main/edge_processing.c` — DSP pipeline (~750 lines) -- `firmware/esp32-csi-node/main/edge_processing.h` — Types and API -- `firmware/esp32-csi-node/main/nvs_config.c` — NVS key reader (20 keys) -- `firmware/esp32-csi-node/provision.py` — CLI provisioning tool +- `firmware/esp32-csi-node/main/edge_processing.c` - DSP pipeline (~750 lines) +- `firmware/esp32-csi-node/main/edge_processing.h` - Types and API +- `firmware/esp32-csi-node/main/nvs_config.c` - NVS key reader (20 keys) +- `firmware/esp32-csi-node/provision.py` - CLI provisioning tool **Invariants:** - Edge processing runs on Core 1; WiFi and CSI callbacks run on Core 0 (no contention) @@ -1154,7 +1154,7 @@ This is the only bounded context that runs on the microcontroller rather than th - UDP sends are rate-limited to 50 Hz to prevent lwIP buffer exhaustion (Issue #127) - ENOMEM backoff suppresses sends for 100 ms if lwIP runs out of packet buffers - 
WASM modules are sandboxed: 160 KB arena, 10 ms frame budget, no direct hardware access -- Tier changes via NVS take effect on next reboot — no hot-reconfiguration of the DSP pipeline +- Tier changes via NVS take effect on next reboot - no hot-reconfiguration of the DSP pipeline - Fall detection threshold should be tuned per deployment (default 2000 causes false positives in static environments) **Domain Events:** @@ -1195,6 +1195,6 @@ pub enum EdgeEvent { ``` **Relationship to other contexts:** -- Edge Intelligence → Multistatic Sensing: **Alternative** (edge runs on-device; multistatic runs on aggregator — same physics, different compute location) +- Edge Intelligence → Multistatic Sensing: **Alternative** (edge runs on-device; multistatic runs on aggregator - same physics, different compute location) - Edge Intelligence → Pose Tracking: **Upstream** (edge provides presence/vitals; aggregator can skip detection if edge already confirmed occupancy) - Edge Intelligence → Coherence: **Simplified** (edge uses simple variance thresholds instead of full coherence gating) diff --git a/docs/ddd/sensing-server-domain-model.md b/docs/ddd/sensing-server-domain-model.md index 18d026900..f7712166a 100644 --- a/docs/ddd/sensing-server-domain-model.md +++ b/docs/ddd/sensing-server-domain-model.md @@ -1,6 +1,6 @@ # Sensing Server Domain Model -The Sensing Server is the single-binary deployment surface of WiFi-DensePose. It receives raw CSI frames from ESP32 nodes, processes them into sensing features, streams live data to a web UI, and provides a self-contained workflow for recording data, training models, and running inference -- all without external dependencies. +The Sensing Server is the single-binary deployment surface of WiFi-DensePose. 
It receives raw CSI frames from ESP32 nodes, processes them into sensing features, streams live data to a web UI, and provides a self-contained workflow for recording data, training models, and running inference - all without external dependencies. This document defines the system using [Domain-Driven Design](https://martinfowler.com/bliki/DomainDrivenDesign.html) (DDD): bounded contexts that own their data and rules, aggregate roots that enforce invariants, value objects that carry meaning, and domain events that connect everything. The server is implemented as a single Axum binary (`wifi-densepose-sensing-server`) with all state managed through `Arc>`. @@ -38,7 +38,7 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de | **Progressive Loader** | A two-layer model loading strategy: Layer A loads instantly for basic inference, Layer B loads in background for full accuracy | | **Sensing-Only Mode** | UI mode when the DensePose backend is unavailable; suppresses DensePose tabs, shows only sensing and signal visualization | | **AppStateInner** | The single shared state struct holding all server state, accessed via `Arc>` | -| **PCK Score** | Percentage of Correct Keypoints -- the primary accuracy metric for pose estimation models | +| **PCK Score** | Percentage of Correct Keypoints - the primary accuracy metric for pose estimation models | | **Contrastive Pretraining** | Self-supervised training on unlabeled CSI data that learns signal representations before supervised fine-tuning (ADR-024) | --- @@ -192,9 +192,9 @@ pub enum DataSource { ``` **Domain Services:** -- `FeatureExtractionService` -- Computes temporal variance (Welford), Goertzel breathing estimation (9-band filter bank), L2 frame-to-frame motion score, SNR-based signal quality -- `VitalSignDetectionService` -- Estimates breathing rate, heart rate, and confidence from CSI phase history -- `DataSourceSelectionService` -- Probes UDP port 5005 for ESP32 frames; falls back through 
Windows RSSI then simulation +- `FeatureExtractionService` - Computes temporal variance (Welford), Goertzel breathing estimation (9-band filter bank), L2 frame-to-frame motion score, SNR-based signal quality +- `VitalSignDetectionService` - Estimates breathing rate, heart rate, and confidence from CSI phase history +- `DataSourceSelectionService` - Probes UDP port 5005 for ESP32 frames; falls back through Windows RSSI then simulation **Invariants:** - Frame history buffer never exceeds 100 entries (oldest dropped on push) @@ -312,9 +312,9 @@ pub struct ActivateLoraRequest { ``` **Domain Services:** -- `ModelScanService` -- Scans `data/models/` at startup for `.rvf` files, parses each with `RvfReader` to extract manifest metadata -- `ModelLoadService` -- Reads model weights from an RVF container into memory, sets `model_loaded = true` -- `LoraActivationService` -- Switches the active LoRA adapter on a loaded model without full reload +- `ModelScanService` - Scans `data/models/` at startup for `.rvf` files, parses each with `RvfReader` to extract manifest metadata +- `ModelLoadService` - Reads model weights from an RVF container into memory, sets `model_loaded = true` +- `LoraActivationService` - Switches the active LoRA adapter on a loaded model without full reload **Invariants:** - Only one model can be loaded at a time; loading a new model implicitly unloads the previous one @@ -426,10 +426,10 @@ pub struct StartRecordingRequest { ``` **Domain Services:** -- `RecordingLifecycleService` -- Creates a new `.csi.jsonl` file, generates session ID, manages start/stop transitions -- `FrameWriterService` -- Called on each tick via `maybe_record_frame()`, appends a `RecordedFrame` JSON line to the active file -- `AutoStopService` -- Checks elapsed time against `duration_secs` on each tick; triggers stop when exceeded -- `RecordingScanService` -- Enumerates `data/recordings/` for `.csi.jsonl` files and reads companion `.meta.json` for session metadata +- 
`RecordingLifecycleService` - Creates a new `.csi.jsonl` file, generates session ID, manages start/stop transitions +- `FrameWriterService` - Called on each tick via `maybe_record_frame()`, appends a `RecordedFrame` JSON line to the active file +- `AutoStopService` - Checks elapsed time against `duration_secs` on each tick; triggers stop when exceeded +- `RecordingScanService` - Enumerates `data/recordings/` for `.csi.jsonl` files and reads companion `.meta.json` for session metadata **Invariants:** - Only one recording session can be active at a time; starting a new recording while one is active returns HTTP 409 Conflict @@ -567,10 +567,10 @@ pub struct LoraTrainRequest { ``` **Domain Services:** -- `TrainingOrchestrationService` -- Spawns a background `tokio::task`, loads recorded frames, runs feature extraction, executes gradient descent with early stopping and warmup -- `FeatureExtractionService` -- Computes per-subcarrier sliding-window variance, temporal gradients, Goertzel frequency-domain power across 9 bands, and 3 global scalar features (mean amplitude, std, motion score) -- `ProgressBroadcastService` -- Sends `TrainingProgress` messages through a `broadcast::Sender` channel that WebSocket handlers subscribe to -- `RvfExportService` -- Uses `RvfBuilder` to write the best checkpoint as a `.rvf` container to `data/models/` +- `TrainingOrchestrationService` - Spawns a background `tokio::task`, loads recorded frames, runs feature extraction, executes gradient descent with early stopping and warmup +- `FeatureExtractionService` - Computes per-subcarrier sliding-window variance, temporal gradients, Goertzel frequency-domain power across 9 bands, and 3 global scalar features (mean amplitude, std, motion score) +- `ProgressBroadcastService` - Sends `TrainingProgress` messages through a `broadcast::Sender` channel that WebSocket handlers subscribe to +- `RvfExportService` - Uses `RvfBuilder` to write the best checkpoint as a `.rvf` container to `data/models/` 
**Invariants:** - Only one training run can be active at a time; starting training while one is running returns HTTP 409 Conflict @@ -646,17 +646,17 @@ pub enum RenderMode { ``` **Domain Services:** -- `WebSocketBroadcastService` -- Subscribes to `broadcast::Sender`, forwards each `SensingUpdate` JSON to all connected WebSocket clients -- `SensingServiceJS` -- Client-side JavaScript that manages WebSocket connection, tracks `dataSource` state, falls back to simulation after 5 failed reconnect attempts (~30s delay) -- `GaussianSplatRenderer` -- Custom GLSL `ShaderMaterial` rendering point-cloud splats on a 20x20 floor grid, colored by signal intensity -- `PoseRenderer` -- Renders skeleton, keypoints, heatmap, or dense body segmentation modes -- `BackendDetector` -- Auto-detects whether the full DensePose backend is available; sets `sensingOnlyMode = true` if unreachable +- `WebSocketBroadcastService` - Subscribes to `broadcast::Sender`, forwards each `SensingUpdate` JSON to all connected WebSocket clients +- `SensingServiceJS` - Client-side JavaScript that manages WebSocket connection, tracks `dataSource` state, falls back to simulation after 5 failed reconnect attempts (~30s delay) +- `GaussianSplatRenderer` - Custom GLSL `ShaderMaterial` rendering point-cloud splats on a 20x20 floor grid, colored by signal intensity +- `PoseRenderer` - Renders skeleton, keypoints, heatmap, or dense body segmentation modes +- `BackendDetector` - Auto-detects whether the full DensePose backend is available; sets `sensingOnlyMode = true` if unreachable **Invariants:** - WebSocket sensing service is started on application init, not lazily on tab visit (ADR-043 fix) - Simulation fallback is delayed to 5 failed reconnect attempts (~30 seconds) to avoid premature synthetic data - `pose_source` field is passed through data conversion so the Estimation Mode badge displays correctly -- Dashboard and Live Demo tabs read `sensingService.dataSource` at load time -- the service must already be 
connected +- Dashboard and Live Demo tabs read `sensingService.dataSource` at load time - the service must already be connected --- @@ -676,8 +676,8 @@ pub enum RenderMode { | `TrainingEpochComplete` | Training Pipeline | Visualization (WebSocket) | `{ epoch, total_epochs, train_loss, val_pck, lr }` | | `TrainingComplete` | Training Pipeline | Model Management, Visualization | `{ run_id, final_pck, model_path }` | | `TrainingFailed` | Training Pipeline | Visualization | `{ run_id, error_message }` | -| `WebSocketClientConnected` | Visualization | -- | `{ endpoint, client_addr }` | -| `WebSocketClientDisconnected` | Visualization | -- | `{ endpoint, client_addr }` | +| `WebSocketClientConnected` | Visualization | - | `{ endpoint, client_addr }` | +| `WebSocketClientDisconnected` | Visualization | - | `{ endpoint, client_addr }` | In the current implementation, events are realized through two mechanisms: 1. **`broadcast::Sender`** for WebSocket fan-out of sensing updates @@ -749,7 +749,7 @@ When the DensePose backend (port 8000) is unreachable, the client-side `BackendD ### JSONL Recording Format ACL -CSI frames are recorded as newline-delimited JSON (`.csi.jsonl`). The `RecordedFrame` struct defines the schema: `{timestamp, subcarriers, rssi, noise_floor, features}`. The training pipeline reads through this schema, extracting subcarrier arrays for feature computation. If the internal sensing representation changes, only the `maybe_record_frame()` serializer needs updating -- the training pipeline depends only on the `RecordedFrame` contract. +CSI frames are recorded as newline-delimited JSON (`.csi.jsonl`). The `RecordedFrame` struct defines the schema: `{timestamp, subcarriers, rssi, noise_floor, features}`. The training pipeline reads through this schema, extracting subcarrier arrays for feature computation. 
If the internal sensing representation changes, only the `maybe_record_frame()` serializer needs updating - the training pipeline depends only on the `RecordedFrame` contract. --- @@ -835,8 +835,8 @@ crates/wifi-densepose-sensing-server/ ## Related -- [ADR-019: Sensing-Only UI Mode](../adr/ADR-019-sensing-only-ui-mode.md) -- Decoupled sensing UI, Gaussian splats, Python WebSocket bridge -- [ADR-035: Live Sensing UI Accuracy](../adr/ADR-035-live-sensing-ui-accuracy.md) -- Data transparency, Goertzel breathing estimation, signal-responsive pose -- [ADR-043: Sensing Server UI API Completion](../adr/ADR-043-sensing-server-ui-api-completion.md) -- Model, recording, training endpoints; single-binary deployment -- [RuvSense Domain Model](ruvsense-domain-model.md) -- Upstream signal processing domain (multistatic sensing, coherence, tracking) -- [WiFi-Mat Domain Model](wifi-mat-domain-model.md) -- Downstream disaster response domain +- [ADR-019: Sensing-Only UI Mode](../adr/ADR-019-sensing-only-ui-mode.md) - Decoupled sensing UI, Gaussian splats, Python WebSocket bridge +- [ADR-035: Live Sensing UI Accuracy](../adr/ADR-035-live-sensing-ui-accuracy.md) - Data transparency, Goertzel breathing estimation, signal-responsive pose +- [ADR-043: Sensing Server UI API Completion](../adr/ADR-043-sensing-server-ui-api-completion.md) - Model, recording, training endpoints; single-binary deployment +- [RuvSense Domain Model](ruvsense-domain-model.md) - Upstream signal processing domain (multistatic sensing, coherence, tracking) +- [WiFi-Mat Domain Model](wifi-mat-domain-model.md) - Downstream disaster response domain diff --git a/docs/ddd/signal-processing-domain-model.md b/docs/ddd/signal-processing-domain-model.md index 319d27c42..9f76818c4 100644 --- a/docs/ddd/signal-processing-domain-model.md +++ b/docs/ddd/signal-processing-domain-model.md @@ -9,7 +9,7 @@ Based on ADR-014 (SOTA Signal Processing) and the `wifi-densepose-signal` crate. 
| Term | Definition | |------|------------| | **CsiFrame** | A single CSI measurement: amplitude + phase per antenna per subcarrier at one timestamp | -| **Conjugate Multiplication** | `H_ref[k] * conj(H_target[k])` — cancels CFO/SFO/PDD, isolating environment-induced phase | +| **Conjugate Multiplication** | `H_ref[k] * conj(H_target[k])` - cancels CFO/SFO/PDD, isolating environment-induced phase | | **CSI Ratio** | The complex result of conjugate multiplication between two antenna streams | | **Hampel Filter** | Running median +/- scaled MAD outlier detector; resists up to 50% contamination | | **Phase Sanitization** | Pipeline of unwrapping, outlier removal, smoothing, and noise filtering on raw CSI phase | diff --git a/docs/ddd/training-pipeline-domain-model.md b/docs/ddd/training-pipeline-domain-model.md index 57a4aef47..6e2db72e8 100644 --- a/docs/ddd/training-pipeline-domain-model.md +++ b/docs/ddd/training-pipeline-domain-model.md @@ -2,7 +2,7 @@ The Training & ML Pipeline is the subsystem of WiFi-DensePose that turns raw public CSI datasets into a trained pose estimation model and its downstream derivatives: contrastive embeddings, domain-generalized weights, and deterministic proof bundles. It is the bridge between research data and deployable inference. -This document defines the system using [Domain-Driven Design](https://martinfowler.com/bliki/DomainDrivenDesign.html) (DDD): bounded contexts that own their data and rules, aggregate roots that enforce invariants, value objects that carry meaning, and domain events that connect everything. The goal is to make the pipeline's structure match the physics and mathematics it implements -- so that anyone reading the code (or an AI agent modifying it) understands *why* each piece exists, not just *what* it does. 
+This document defines the system using [Domain-Driven Design](https://martinfowler.com/bliki/DomainDrivenDesign.html) (DDD): bounded contexts that own their data and rules, aggregate roots that enforce invariants, value objects that carry meaning, and domain events that connect everything. The goal is to make the pipeline's structure match the physics and mathematics it implements - so that anyone reading the code (or an AI agent modifying it) understands *why* each piece exists, not just *what* it does. **Bounded Contexts:** @@ -95,20 +95,20 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de ``` **Aggregates:** -- `MmFiDataset` (Aggregate Root) -- Manages the MM-Fi data lifecycle -- `WiPoseDataset` (Aggregate Root) -- Manages the Wi-Pose data lifecycle +- `MmFiDataset` (Aggregate Root) - Manages the MM-Fi data lifecycle +- `WiPoseDataset` (Aggregate Root) - Manages the Wi-Pose data lifecycle **Value Objects:** -- `CsiSample` -- Single observation with amplitude, phase, keypoints, visibility -- `SubcarrierConfig` -- Source count, target count, interpolation method -- `DatasetSplit` -- Train / Validation / Test subject partitioning -- `CompressedCsiBuffer` -- Tiered temporal window backed by `TemporalTensorCompressor` +- `CsiSample` - Single observation with amplitude, phase, keypoints, visibility +- `SubcarrierConfig` - Source count, target count, interpolation method +- `DatasetSplit` - Train / Validation / Test subject partitioning +- `CompressedCsiBuffer` - Tiered temporal window backed by `TemporalTensorCompressor` **Domain Services:** -- `SubcarrierInterpolationService` -- Resamples subcarriers via sparse least-squares or linear fallback -- `PhaseSanitizationService` -- Applies SpotFi / MUSIC phase correction from `wifi-densepose-signal` -- `TeacherLabelService` -- Runs Detectron2 on paired RGB frames to produce DensePose UV pseudo-labels -- `HardwareNormalizerService` -- Z-score normalization + chipset-invariant phase 
sanitization +- `SubcarrierInterpolationService` - Resamples subcarriers via sparse least-squares or linear fallback +- `PhaseSanitizationService` - Applies SpotFi / MUSIC phase correction from `wifi-densepose-signal` +- `TeacherLabelService` - Runs Detectron2 on paired RGB frames to produce DensePose UV pseudo-labels +- `HardwareNormalizerService` - Z-score normalization + chipset-invariant phase sanitization **RuVector Integration:** - `ruvector-solver` -> `NeumannSolver` for sparse O(sqrt(n)) subcarrier interpolation (114->56) @@ -162,22 +162,22 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de ``` **Aggregates:** -- `WiFiDensePoseModel` (Aggregate Root) -- The complete model graph +- `WiFiDensePoseModel` (Aggregate Root) - The complete model graph **Entities:** -- `ModalityTranslator` -- Attention-gated CSI fusion using min-cut -- `CsiToPoseTransformer` -- Cross-attention + GNN backbone -- `KeypointHead` -- Regresses 17 x (x, y, z, confidence) from body_part_features -- `DensePoseHead` -- Predicts body part labels and UV surface coordinates +- `ModalityTranslator` - Attention-gated CSI fusion using min-cut +- `CsiToPoseTransformer` - Cross-attention + GNN backbone +- `KeypointHead` - Regresses 17 x (x, y, z, confidence) from body_part_features +- `DensePoseHead` - Predicts body part labels and UV surface coordinates **Value Objects:** -- `ModelConfig` -- Architecture hyperparameters (d_model, n_heads, n_gnn_layers) -- `AttentionOutput` -- Attended values + gating result from min-cut attention -- `BodyPartFeatures` -- [17 x d_model] intermediate representation +- `ModelConfig` - Architecture hyperparameters (d_model, n_heads, n_gnn_layers) +- `AttentionOutput` - Attended values + gating result from min-cut attention +- `BodyPartFeatures` - [17 x d_model] intermediate representation **Domain Services:** -- `AttentionGatingService` -- Applies `attn_mincut` to prune irrelevant antenna paths -- `SpatialDecodingService` -- 
Graph-based spatial attention among feature map locations +- `AttentionGatingService` - Applies `attn_mincut` to prune irrelevant antenna paths +- `SpatialDecodingService` - Graph-based spatial attention among feature map locations **RuVector Integration:** - `ruvector-attn-mincut` -> `attn_mincut` for antenna-path gating in ModalityTranslator @@ -225,28 +225,28 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de ``` **Aggregates:** -- `TrainingRun` (Aggregate Root) -- The complete training session +- `TrainingRun` (Aggregate Root) - The complete training session **Entities:** -- `CheckpointManager` -- Persists and selects model snapshots -- `ProofVerifier` -- Deterministic verification against stored hashes +- `CheckpointManager` - Persists and selects model snapshots +- `ProofVerifier` - Deterministic verification against stored hashes **Value Objects:** -- `TrainingConfig` -- Epochs, batch_size, learning_rate, loss_weights, optimizer params -- `Checkpoint` -- Epoch number, model weights SHA-256, validation PCK at that epoch -- `LossWeights` -- Relative weights for each loss component -- `CompositeTrainingLoss` -- Combined scalar loss with per-component breakdown -- `OksScore` -- Per-keypoint Object Keypoint Similarity with sigma values -- `PckScore` -- Percentage of Correct Keypoints at threshold 0.2 -- `MpjpeScore` -- Mean Per Joint Position Error in millimeters -- `ProofResult` -- Seed, steps, loss_decreased flag, hash_matches flag +- `TrainingConfig` - Epochs, batch_size, learning_rate, loss_weights, optimizer params +- `Checkpoint` - Epoch number, model weights SHA-256, validation PCK at that epoch +- `LossWeights` - Relative weights for each loss component +- `CompositeTrainingLoss` - Combined scalar loss with per-component breakdown +- `OksScore` - Per-keypoint Object Keypoint Similarity with sigma values +- `PckScore` - Percentage of Correct Keypoints at threshold 0.2 +- `MpjpeScore` - Mean Per Joint Position Error in 
millimeters +- `ProofResult` - Seed, steps, loss_decreased flag, hash_matches flag **Domain Services:** -- `LossComputationService` -- Computes composite loss from model outputs and ground truth -- `MetricEvaluationService` -- Computes PCK, OKS, MPJPE over validation set -- `HungarianAssignmentService` -- Bipartite matching for multi-person evaluation -- `DynamicPersonMatcherService` -- Frame-persistent assignment via `ruvector-mincut` -- `ProofVerificationService` -- Fixed-seed training + SHA-256 verification +- `LossComputationService` - Computes composite loss from model outputs and ground truth +- `MetricEvaluationService` - Computes PCK, OKS, MPJPE over validation set +- `HungarianAssignmentService` - Bipartite matching for multi-person evaluation +- `DynamicPersonMatcherService` - Frame-persistent assignment via `ruvector-mincut` +- `ProofVerificationService` - Fixed-seed training + SHA-256 verification **RuVector Integration:** - `ruvector-mincut` -> `DynamicMinCut` for O(n^1.5 log n) multi-person assignment in metrics @@ -301,33 +301,33 @@ All code paths shown are relative to `rust-port/wifi-densepose-rs/crates/wifi-de ``` **Aggregates:** -- `EmbeddingIndex` (Aggregate Root) -- HNSW-indexed store of AETHER fingerprints -- `DomainAdaptationState` (Aggregate Root) -- Tracks GRL lambda, domain classifier accuracy, factorization quality +- `EmbeddingIndex` (Aggregate Root) - HNSW-indexed store of AETHER fingerprints +- `DomainAdaptationState` (Aggregate Root) - Tracks GRL lambda, domain classifier accuracy, factorization quality **Entities:** -- `ProjectionHead` -- MLP mapping body_part_features to 128-dim embedding space -- `DomainFactorizer` -- Splits features into h_pose and h_env -- `DomainClassifier` -- Classifies domain from h_pose (trained adversarially via GRL) -- `GeometryEncoder` -- Fourier positional encoding + DeepSets for AP positions -- `LoraAdapter` -- Low-rank adaptation weights for environment-specific fine-tuning +- `ProjectionHead` - MLP 
mapping body_part_features to 128-dim embedding space +- `DomainFactorizer` - Splits features into h_pose and h_env +- `DomainClassifier` - Classifies domain from h_pose (trained adversarially via GRL) +- `GeometryEncoder` - Fourier positional encoding + DeepSets for AP positions +- `LoraAdapter` - Low-rank adaptation weights for environment-specific fine-tuning **Value Objects:** -- `AetherEmbedding` -- 128-dim L2-normalized contrastive vector -- `FingerprintType` -- ReIdentification / RoomFingerprint / PersonFingerprint -- `DomainLabel` -- Environment identifier for adversarial training -- `GrlSchedule` -- Lambda annealing parameters (max_lambda, warmup_epochs) -- `GeometryInput` -- AP positions in meters relative to room origin -- `FilmParameters` -- Gamma (scale) and beta (shift) vectors from geometry conditioning -- `LoraConfig` -- Rank, alpha, target layers -- `AdaptationLoss` -- ContrastiveTTT / EntropyMin / Combined +- `AetherEmbedding` - 128-dim L2-normalized contrastive vector +- `FingerprintType` - ReIdentification / RoomFingerprint / PersonFingerprint +- `DomainLabel` - Environment identifier for adversarial training +- `GrlSchedule` - Lambda annealing parameters (max_lambda, warmup_epochs) +- `GeometryInput` - AP positions in meters relative to room origin +- `FilmParameters` - Gamma (scale) and beta (shift) vectors from geometry conditioning +- `LoraConfig` - Rank, alpha, target layers +- `AdaptationLoss` - ContrastiveTTT / EntropyMin / Combined **Domain Services:** -- `ContrastiveLossService` -- Computes InfoNCE loss with temperature scaling -- `HardNegativeMiningService` -- HNSW k-NN search for difficult negative pairs -- `DomainAdversarialService` -- Manages GRL annealing and domain classification -- `GeometryConditioningService` -- Encodes AP layout and produces FiLM parameters -- `VirtualDomainAugmentationService` -- Generates synthetic environment shifts for training diversity -- `RapidAdaptationService` -- Produces LoRA adapter from 10-second 
unlabeled calibration +- `ContrastiveLossService` - Computes InfoNCE loss with temperature scaling +- `HardNegativeMiningService` - HNSW k-NN search for difficult negative pairs +- `DomainAdversarialService` - Manages GRL annealing and domain classification +- `GeometryConditioningService` - Encodes AP layout and produces FiLM parameters +- `VirtualDomainAugmentationService` - Generates synthetic environment shifts for training diversity +- `RapidAdaptationService` - Produces LoRA adapter from 10-second unlabeled calibration --- diff --git a/docs/edge-modules/README.md b/docs/edge-modules/README.md index 834d42e86..b38935df0 100644 --- a/docs/edge-modules/README.md +++ b/docs/edge-modules/README.md @@ -1,4 +1,4 @@ -# Edge Intelligence Modules — WiFi-DensePose +# Edge Intelligence Modules - WiFi-DensePose > 60 WASM modules that run directly on an ESP32 sensor. No internet needed, no cloud fees, instant response. Each module is a tiny file (5-30 KB) that reads WiFi signal data and makes decisions locally in under 10 ms. @@ -37,9 +37,9 @@ python scripts/wasm_upload.py --port COM7 --module target/wasm32-unknown-unknown ## How It Works 1. **WiFi signals bounce off people and objects** in a room, creating a unique pattern -2. **The ESP32 chip reads these patterns** as Channel State Information (CSI) — 52 numbers that describe how each WiFi channel changed +2. **The ESP32 chip reads these patterns** as Channel State Information (CSI) - 52 numbers that describe how each WiFi channel changed 3. **WASM modules analyze the patterns** to detect specific things: someone fell, a room is occupied, breathing rate changed -4. **Events are emitted locally** — no cloud round-trip, response time under 10 ms +4. 
**Events are emitted locally** - no cloud round-trip, response time under 10 ms ## Architecture @@ -86,8 +86,8 @@ Every module talks to the ESP32 through 12 functions: | `csi_get_motion_energy()` | `f32` | Overall movement level | | `csi_get_n_persons()` | `i32` | Estimated number of people | | `csi_get_timestamp()` | `i32` | Current timestamp (ms) | -| `csi_emit_event(id, val)` | — | Send a detection result to the host | -| `csi_log(ptr, len)` | — | Log a message to serial console | +| `csi_emit_event(id, val)` | - | Send a detection result to the host | +| `csi_log(ptr, len)` | - | Log a message to serial console | | `csi_get_phase_history(buf, max)` | `i32` | Past phase values for trend analysis | ## Event ID Registry @@ -141,7 +141,7 @@ Every module talks to the ESP32 through 12 functions: ## References -- [ADR-039](../adr/ADR-039-esp32-edge-intelligence.md) — Edge processing tiers -- [ADR-040](../adr/ADR-040-wasm-programmable-sensing.md) — WASM runtime design -- [ADR-041](../adr/ADR-041-wasm-module-collection.md) — Full module specification +- [ADR-039](../adr/ADR-039-esp32-edge-intelligence.md) - Edge processing tiers +- [ADR-040](../adr/ADR-040-wasm-programmable-sensing.md) - WASM runtime design +- [ADR-041](../adr/ADR-041-wasm-module-collection.md) - Full module specification - [Source code](../../rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/) diff --git a/docs/edge-modules/adaptive-learning.md b/docs/edge-modules/adaptive-learning.md index 382876cf8..d8fd2957e 100644 --- a/docs/edge-modules/adaptive-learning.md +++ b/docs/edge-modules/adaptive-learning.md @@ -1,6 +1,6 @@ -# Adaptive Learning Modules -- WiFi-DensePose Edge Intelligence +# Adaptive Learning Modules - WiFi-DensePose Edge Intelligence -> On-device machine learning that runs without cloud connectivity. The ESP32 chip teaches itself what "normal" looks like for each environment and adapts over time. No training data needed -- it learns from what it sees. 
+> On-device machine learning that runs without cloud connectivity. The ESP32 chip teaches itself what "normal" looks like for each environment and adapts over time. No training data needed - it learns from what it sees. ## Overview @@ -179,7 +179,7 @@ pub enum AttractorType { Unknown, PointAttractor, LimitCycle, StrangeAttractor } | ID | Name | Value | Meaning | |----|------|-------|---------| -| 735 | `ATTRACTOR_TYPE` | 1/2/3 | Point(1), LimitCycle(2), Strange(3) -- emitted when classification changes | +| 735 | `ATTRACTOR_TYPE` | 1/2/3 | Point(1), LimitCycle(2), Strange(3) - emitted when classification changes | | 736 | `LYAPUNOV_EXPONENT` | Lambda | Current Lyapunov exponent estimate | | 737 | `BASIN_DEPARTURE` | Distance ratio | Trajectory left the attractor basin (value = distance / radius) | | 738 | `LEARNING_COMPLETE` | 1.0 | Initial 200-frame learning phase finished | @@ -200,13 +200,13 @@ pub enum AttractorType { Unknown, PointAttractor, LimitCycle, StrangeAttractor } #### Tutorial: Understanding Attractor Types **Point Attractor (lambda < -0.01)** -The signal converges to a fixed point. This means the environment is completely static -- no people, no machinery, no airflow. The WiFi signal is deterministic and unchanging. Any disturbance will trigger a basin departure. +The signal converges to a fixed point. This means the environment is completely static - no people, no machinery, no airflow. The WiFi signal is deterministic and unchanging. Any disturbance will trigger a basin departure. **Limit Cycle (lambda near 0)** The signal follows a periodic orbit. This typically indicates mechanical systems: HVAC cycling, fans, elevator machinery. The period usually matches the equipment's duty cycle. Human activity on top of a limit cycle will push the Lyapunov exponent positive. **Strange Attractor (lambda > 0.01)** -The signal is bounded but aperiodic -- classical chaos. 
This is the signature of human activity: walking, gesturing, breathing all create complex but bounded signal dynamics. The more people, the higher the Lyapunov exponent tends to be. +The signal is bounded but aperiodic - classical chaos. This is the signature of human activity: walking, gesturing, breathing all create complex but bounded signal dynamics. The more people, the higher the Lyapunov exponent tends to be. **Basin Departure** A basin departure means the current signal state is more than 3x the learned radius away from the attractor center. This can indicate: @@ -408,7 +408,7 @@ WASM3 allocates a fixed linear memory region. There is no heap, no `malloc`, no Without EWC, moving the device to a new room would erase everything learned about the previous room. EWC adds ~32 floats of overhead per task (the Fisher diagonal snapshot), which is negligible on the ESP32. ### Round-Robin Gradient Estimation -Computing gradients for all 32 parameters every frame would take too long. Instead, the EWC module uses round-robin scheduling: 4 parameters per frame, cycling through all 32 in 8 frames. At 20 Hz, a full gradient pass takes 0.4 seconds -- fast enough for the slow dynamics of room occupancy. +Computing gradients for all 32 parameters every frame would take too long. Instead, the EWC module uses round-robin scheduling: 4 parameters per frame, cycling through all 32 in 8 frames. At 20 Hz, a full gradient pass takes 0.4 seconds - fast enough for the slow dynamics of room occupancy. ### Task Boundary Detection The system automatically detects when it has "converged" on a new environment (100 consecutive stable frames = 5 seconds of consistent low loss). No manual intervention needed. The user just places the device in a new room, and the learning happens automatically. 
diff --git a/docs/edge-modules/ai-security.md b/docs/edge-modules/ai-security.md index ccff20be2..64fd1d1bd 100644 --- a/docs/edge-modules/ai-security.md +++ b/docs/edge-modules/ai-security.md @@ -1,6 +1,6 @@ -# AI Security Modules -- WiFi-DensePose Edge Intelligence +# AI Security Modules - WiFi-DensePose Edge Intelligence -> Tamper detection and behavioral anomaly profiling that protect the sensing system from manipulation. These modules detect replay attacks, signal injection, jamming, and unusual behavior patterns -- all running on-device with no cloud dependency. +> Tamper detection and behavioral anomaly profiling that protect the sensing system from manipulation. These modules detect replay attacks, signal injection, jamming, and unusual behavior patterns - all running on-device with no cloud dependency. ## Overview @@ -213,7 +213,7 @@ Day 3, 3am: | CSI frame replay | Signal Shield | FNV-1a hash ring matching | Low (1% quantization) | | Signal injection (e.g., rogue AP) | Signal Shield | >25% subcarriers with >10x amplitude spike | Very low | | Broadband jamming | Signal Shield | SNR drop below 10% of baseline for 5+ frames | Very low | -| Narrowband jamming | Partially -- Signal Shield | May not trigger if < 25% subcarriers affected | Medium | +| Narrowband jamming | Partially - Signal Shield | May not trigger if < 25% subcarriers affected | Medium | | Behavioral anomaly (intruder at unusual time) | Behavioral Profiler | Combined Z-score > 3.0 across 6 dimensions | Low after maturation | | Gradual environmental change | Behavioral Profiler | Welford stats adapt, may flag if change is abrupt | Very low | diff --git a/docs/edge-modules/autonomous.md b/docs/edge-modules/autonomous.md index 3b161a4f1..d920ff687 100644 --- a/docs/edge-modules/autonomous.md +++ b/docs/edge-modules/autonomous.md @@ -1,4 +1,4 @@ -# Quantum-Inspired & Autonomous Modules -- WiFi-DensePose Edge Intelligence +# Quantum-Inspired & Autonomous Modules - WiFi-DensePose Edge Intelligence > 
Advanced algorithms inspired by quantum computing, neuroscience, and AI planning. These modules let the ESP32 make autonomous decisions, heal its own mesh network, interpret high-level scene semantics, and explore room states using quantum-inspired search. @@ -13,7 +13,7 @@ ### Quantum Coherence (`qnt_quantum_coherence.rs`) -**What it does**: Maps each subcarrier's phase onto a point on the quantum Bloch sphere and computes an aggregate coherence metric from the mean Bloch vector magnitude. When all subcarrier phases are aligned, the system is "coherent" (like a quantum pure state). When phases scatter randomly, it is "decoherent" (like a maximally mixed state). Sudden decoherence -- a rapid entropy spike -- indicates an environmental disturbance such as a door opening, a person entering, or furniture being moved. +**What it does**: Maps each subcarrier's phase onto a point on the quantum Bloch sphere and computes an aggregate coherence metric from the mean Bloch vector magnitude. When all subcarrier phases are aligned, the system is "coherent" (like a quantum pure state). When phases scatter randomly, it is "decoherent" (like a maximally mixed state). Sudden decoherence - a rapid entropy spike - indicates an environmental disturbance such as a door opening, a person entering, or furniture being moved. **Algorithm**: Each subcarrier phase is mapped to a 3D Bloch vector: - theta = |phase| (polar angle) @@ -78,7 +78,7 @@ Frames 52-100: New stable multipath #### Bloch Sphere Intuition -Think of each subcarrier as a compass needle. When the room is stable, all needles point roughly the same direction (high coherence, low entropy). When something changes the WiFi multipath -- a person enters, a door opens, furniture moves -- the needles scatter in different directions (low coherence, high entropy). The Bloch sphere formalism quantifies this in a way that is mathematically precise and computationally cheap. +Think of each subcarrier as a compass needle. 
When the room is stable, all needles point roughly the same direction (high coherence, low entropy). When something changes the WiFi multipath - a person enters, a door opens, furniture moves - the needles scatter in different directions (low coherence, high entropy). The Bloch sphere formalism quantifies this in a way that is mathematically precise and computationally cheap. --- @@ -377,7 +377,7 @@ Frame 100: Node 1 recovers (antenna repositioned) ## How Quantum-Inspired Algorithms Help WiFi Sensing -These modules use quantum computing metaphors -- not because the ESP32 is a quantum computer, but because the mathematical frameworks from quantum mechanics map naturally onto CSI signal analysis: +These modules use quantum computing metaphors - not because the ESP32 is a quantum computer, but because the mathematical frameworks from quantum mechanics map naturally onto CSI signal analysis: **Bloch Sphere / Coherence**: WiFi subcarrier phases behave like quantum phases. When multipath is stable, all phases align (pure state). When the environment changes, phases randomize (mixed state). The Von Neumann entropy quantifies this exactly, providing a single scalar "change detector" that is more robust than tracking individual subcarrier phases. @@ -386,7 +386,7 @@ These modules use quantum computing metaphors -- not because the ESP32 is a quan **Why not just use classical statistics?** You could. But the quantum-inspired formulations have three practical advantages on embedded hardware: 1. **Fixed memory**: The Bloch vector is always 3 floats. The hypothesis array is always 16 floats. No dynamic allocation needed. -2. **Graceful degradation**: If CSI data is noisy, the Grover search does not crash or give a wrong answer immediately -- it just converges more slowly. +2. **Graceful degradation**: If CSI data is noisy, the Grover search does not crash or give a wrong answer immediately - it just converges more slowly. 3. 
**Composability**: The coherence score from the Bloch sphere module feeds directly into the Temporal Logic Guard (rule 3: "no vital signs when coherence < 0.3") and the Psycho-Symbolic engine (feature 5: coherence). This creates a pipeline where quantum-inspired metrics inform classical reasoning. --- diff --git a/docs/edge-modules/building.md b/docs/edge-modules/building.md index ff1949979..e24b18ce5 100644 --- a/docs/edge-modules/building.md +++ b/docs/edge-modules/building.md @@ -1,6 +1,6 @@ -# Smart Building Modules -- WiFi-DensePose Edge Intelligence +# Smart Building Modules - WiFi-DensePose Edge Intelligence -> Make any building smarter using WiFi signals you already have. Know which rooms are occupied, control HVAC and lighting automatically, count elevator passengers, track meeting room usage, and audit energy waste -- all without cameras or badges. +> Make any building smarter using WiFi signals you already have. Know which rooms are occupied, control HVAC and lighting automatically, count elevator passengers, track meeting room usage, and audit energy waste - all without cameras or badges. ## Overview @@ -20,7 +20,7 @@ All modules target the ESP32-S3 running WASM3 (ADR-040 Tier 3). They receive pre ### HVAC Presence Control (`bld_hvac_presence.rs`) -**What it does**: Tells your HVAC system whether a room is occupied, with intentionally asymmetric timing -- fast arrival detection (10 seconds) so cooling/heating starts quickly, and slow departure timeout (5 minutes) to avoid premature shutoff when someone briefly steps out. Also classifies whether the occupant is sedentary (desk work, reading) or active (walking, exercising). +**What it does**: Tells your HVAC system whether a room is occupied, with intentionally asymmetric timing - fast arrival detection (10 seconds) so cooling/heating starts quickly, and slow departure timeout (5 minutes) to avoid premature shutoff when someone briefly steps out. 
Also classifies whether the occupant is sedentary (desk work, reading) or active (walking, exercising). **How it works**: A four-state machine processes presence scores and motion energy each frame: @@ -214,7 +214,7 @@ ec.set_overload_threshold(8); // Set custom overload limit ### Meeting Room Tracker (`bld_meeting_room.rs`) -**What it does**: Tracks the full lifecycle of meeting room usage -- from someone entering, to confirming a genuine multi-person meeting, to detecting when the meeting ends and the room is available again. Distinguishes actual meetings (2+ people for more than 3 seconds) from a single person briefly using the room. Tracks peak headcount and calculates room utilization rate. +**What it does**: Tracks the full lifecycle of meeting room usage - from someone entering, to confirming a genuine multi-person meeting, to detecting when the meeting ends and the room is available again. Distinguishes actual meetings (2+ people for more than 3 seconds) from a single person briefly using the room. Tracks peak headcount and calculates room utilization rate. **How it works**: A four-state machine processes presence and person count: diff --git a/docs/edge-modules/core.md b/docs/edge-modules/core.md index 313746890..a8b1b283c 100644 --- a/docs/edge-modules/core.md +++ b/docs/edge-modules/core.md @@ -1,4 +1,4 @@ -# Core Modules -- WiFi-DensePose Edge Intelligence +# Core Modules - WiFi-DensePose Edge Intelligence > The foundation modules that every ESP32 node runs. These handle gesture detection, signal quality monitoring, anomaly detection, zone occupancy, vital sign tracking, intrusion classification, and model packaging. 
@@ -43,9 +43,9 @@ All seven modules compile to `wasm32-unknown-unknown` and run inside the WASM3 i | Parameter | Default | Range | Description | |-----------|---------|-------|-------------| -| `DTW_THRESHOLD` | 2.5 | 0.5 -- 10.0 | Lower = stricter matching, fewer false positives but may miss soft gestures | -| `BAND_WIDTH` | 5 | 1 -- 20 | Width of the Sakoe-Chiba band. Wider = more flexible time warping but more computation | -| Cooldown frames | 40 | 10 -- 200 | Frames to wait before next detection. At 20 Hz, 40 frames = 2 seconds | +| `DTW_THRESHOLD` | 2.5 | 0.5 - 10.0 | Lower = stricter matching, fewer false positives but may miss soft gestures | +| `BAND_WIDTH` | 5 | 1 - 20 | Width of the Sakoe-Chiba band. Wider = more flexible time warping but more computation | +| Cooldown frames | 40 | 10 - 200 | Frames to wait before next detection. At 20 Hz, 40 frames = 2 seconds | #### Events Emitted @@ -109,16 +109,16 @@ if let Some(gesture_id) = detector.process_frame(&phases) { | `gate_state(&self) -> GateState` | method | Current gate classification (Accept, Warn, Reject). | | `mean_phasor_angle(&self) -> f32` | method | Dominant phase drift direction in radians. | | `coherence_score(&self) -> f32` | method | Current EMA-smoothed coherence score. | -| `GateState` | enum | `Accept`, `Warn`, `Reject` -- signal quality classification. | +| `GateState` | enum | `Accept`, `Warn`, `Reject` - signal quality classification. | #### Configuration | Parameter | Default | Range | Description | |-----------|---------|-------|-------------| -| `ALPHA` | 0.1 | 0.01 -- 0.5 | EMA smoothing factor. Lower = slower response, more stable. Higher = faster response, more noisy | -| `HIGH_THRESHOLD` | 0.7 | 0.5 -- 0.95 | Coherence above this = Accept | -| `LOW_THRESHOLD` | 0.4 | 0.1 -- 0.6 | Coherence below this = Reject | -| `MAX_SC` | 32 | 1 -- 64 | Maximum subcarriers tracked (compile-time) | +| `ALPHA` | 0.1 | 0.01 - 0.5 | EMA smoothing factor. Lower = slower response, more stable. 
Higher = faster response, more noisy | +| `HIGH_THRESHOLD` | 0.7 | 0.5 - 0.95 | Coherence above this = Accept | +| `LOW_THRESHOLD` | 0.4 | 0.1 - 0.6 | Coherence below this = Reject | +| `MAX_SC` | 32 | 1 - 64 | Maximum subcarriers tracked (compile-time) | #### Events Emitted @@ -170,11 +170,11 @@ A 20-frame cooldown prevents event flooding. | Parameter | Default | Range | Description | |-----------|---------|-------|-------------| -| `PHASE_JUMP_THRESHOLD` | 2.5 rad | 1.0 -- pi | Phase jump to flag per subcarrier | -| `MIN_AMPLITUDE_VARIANCE` | 0.001 | 0.0001 -- 0.1 | Below this = flatline | -| `MAX_ENERGY_RATIO` | 50.0 | 5.0 -- 500.0 | Energy spike threshold vs baseline | -| `BASELINE_FRAMES` | 100 | 50 -- 500 | Frames to calibrate baseline | -| `ANOMALY_COOLDOWN` | 20 | 5 -- 100 | Frames between anomaly reports | +| `PHASE_JUMP_THRESHOLD` | 2.5 rad | 1.0 - pi | Phase jump to flag per subcarrier | +| `MIN_AMPLITUDE_VARIANCE` | 0.001 | 0.0001 - 0.1 | Below this = flatline | +| `MAX_ENERGY_RATIO` | 50.0 | 5.0 - 500.0 | Energy spike threshold vs baseline | +| `BASELINE_FRAMES` | 100 | 50 - 500 | Frames to calibrate baseline | +| `ANOMALY_COOLDOWN` | 20 | 5 - 100 | Frames between anomaly reports | #### Events Emitted @@ -228,12 +228,12 @@ if detector.process_frame(&phases, &litudes) { | Parameter | Default | Range | Description | |-----------|---------|-------|-------------| -| `INTRUSION_VELOCITY_THRESH` | 1.5 rad/frame | 0.5 -- 3.0 | Phase velocity that counts as fast movement | -| `AMPLITUDE_CHANGE_THRESH` | 3.0 sigma | 1.0 -- 10.0 | Amplitude deviation in standard deviations | -| `ARM_FRAMES` | 100 | 20 -- 500 | Quiet frames needed to arm (at 20 Hz: 5 sec) | -| `DETECT_DEBOUNCE` | 3 | 1 -- 10 | Consecutive detection frames before alert | -| `ALERT_COOLDOWN` | 100 | 20 -- 500 | Frames between alerts | -| `BASELINE_FRAMES` | 200 | 100 -- 1000 | Calibration window | +| `INTRUSION_VELOCITY_THRESH` | 1.5 rad/frame | 0.5 - 3.0 | Phase velocity that counts as fast 
movement | +| `AMPLITUDE_CHANGE_THRESH` | 3.0 sigma | 1.0 - 10.0 | Amplitude deviation in standard deviations | +| `ARM_FRAMES` | 100 | 20 - 500 | Quiet frames needed to arm (at 20 Hz: 5 sec) | +| `DETECT_DEBOUNCE` | 3 | 1 - 10 | Consecutive detection frames before alert | +| `ALERT_COOLDOWN` | 100 | 20 - 500 | Frames between alerts | +| `BASELINE_FRAMES` | 200 | 100 - 1000 | Calibration window | #### Events Emitted @@ -288,10 +288,10 @@ for &(event_type, value) in events { | Parameter | Default | Range | Description | |-----------|---------|-------|-------------| -| `MAX_ZONES` | 8 | 1 -- 16 | Maximum number of spatial zones | -| `ZONE_THRESHOLD` | 0.02 | 0.005 -- 0.5 | Score above this = occupied. Hysteresis exit at 0.5x | -| `ALPHA` | 0.15 | 0.05 -- 0.5 | EMA smoothing factor for zone scores | -| `BASELINE_FRAMES` | 200 | 100 -- 1000 | Calibration window length | +| `MAX_ZONES` | 8 | 1 - 16 | Maximum number of spatial zones | +| `ZONE_THRESHOLD` | 0.02 | 0.005 - 0.5 | Score above this = occupied. Hysteresis exit at 0.5x | +| `ALPHA` | 0.15 | 0.05 - 0.5 | EMA smoothing factor for zone scores | +| `BASELINE_FRAMES` | 200 | 100 - 1000 | Calibration window length | #### Events Emitted @@ -349,12 +349,12 @@ Every 60 seconds, it emits 1-minute averages for both breathing and heart rate. 
| Parameter | Default | Range | Description |
|-----------|---------|-------|-------------|
-| `BRADYPNEA_THRESH` | 12.0 BPM | 8 -- 15 | Below this = dangerously slow breathing |
-| `TACHYPNEA_THRESH` | 25.0 BPM | 20 -- 35 | Above this = dangerously fast breathing |
-| `BRADYCARDIA_THRESH` | 50.0 BPM | 40 -- 60 | Below this = dangerously slow heart rate |
-| `TACHYCARDIA_THRESH` | 120.0 BPM | 100 -- 150 | Above this = dangerously fast heart rate |
-| `APNEA_SECONDS` | 20 | 10 -- 60 | Seconds of near-zero breathing before alert |
-| `ALERT_DEBOUNCE` | 5 | 2 -- 15 | Consecutive abnormal samples before alert |
+| `BRADYPNEA_THRESH` | 12.0 BPM | 8 - 15 | Below this = dangerously slow breathing |
+| `TACHYPNEA_THRESH` | 25.0 BPM | 20 - 35 | Above this = dangerously fast breathing |
+| `BRADYCARDIA_THRESH` | 50.0 BPM | 40 - 60 | Below this = dangerously slow heart rate |
+| `TACHYCARDIA_THRESH` | 120.0 BPM | 100 - 150 | Above this = dangerously fast heart rate |
+| `APNEA_SECONDS` | 20 | 10 - 60 | Seconds of near-zero breathing before alert |
+| `ALERT_DEBOUNCE` | 5 | 2 - 15 | Consecutive abnormal samples before alert |

#### Events Emitted

@@ -482,7 +482,7 @@ From the crate directory:

```bash
cd rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge
-cargo test --features std -- gesture coherence adversarial intrusion occupancy vital_trend rvf
+cargo test --features std -- gesture coherence adversarial intrusion occupancy vital_trend rvf
```

This runs all tests whose names contain any of the seven module names. The `--features std` flag is required because the RVF builder tests need `sha2` and `std::io`.
diff --git a/docs/edge-modules/exotic.md b/docs/edge-modules/exotic.md index 0b63987d1..ca8f83d78 100644 --- a/docs/edge-modules/exotic.md +++ b/docs/edge-modules/exotic.md @@ -1,9 +1,9 @@ -# Exotic & Research Modules -- WiFi-DensePose Edge Intelligence +# Exotic & Research Modules - WiFi-DensePose Edge Intelligence > Experimental sensing applications that push the boundaries of what WiFi > signals can detect. From contactless sleep staging to sign language > recognition, these modules explore novel uses of RF sensing. Some are -> highly experimental -- marked with their maturity level. +> highly experimental - marked with their maturity level. ## Maturity Levels @@ -30,16 +30,16 @@ All modules share these design constraints: -- **`no_std`** -- no heap allocation, runs on WASM3 interpreter on ESP32-S3 -- **`const fn new()`** -- all state is stack-allocated and const-constructible -- **Static event buffer** -- events are returned via `&[(i32, f32)]` from a static array (max 3-5 events per frame) -- **Budget-aware** -- each module declares its per-frame time budget (L/S/H) -- **Frame rate** -- all modules assume 20 Hz CSI frame rate from the host Tier 2 DSP +- **`no_std`** - no heap allocation, runs on WASM3 interpreter on ESP32-S3 +- **`const fn new()`** - all state is stack-allocated and const-constructible +- **Static event buffer** - events are returned via `&[(i32, f32)]` from a static array (max 3-5 events per frame) +- **Budget-aware** - each module declares its per-frame time budget (L/S/H) +- **Frame rate** - all modules assume 20 Hz CSI frame rate from the host Tier 2 DSP Shared utilities from `vendor_common.rs`: -- `CircularBuffer` -- fixed-size ring buffer with O(1) push and indexed access -- `Ema` -- exponential moving average with configurable alpha -- `WelfordStats` -- online mean/variance computation (Welford's algorithm) +- `CircularBuffer` - fixed-size ring buffer with O(1) push and indexed access +- `Ema` - exponential moving average with 
configurable alpha +- `WelfordStats` - online mean/variance computation (Welford's algorithm) --- @@ -47,7 +47,7 @@ Shared utilities from `vendor_common.rs`: ### Sleep Stage Classification (`exo_dream_stage.rs`) -**What it does**: Classifies sleep phases (Awake, NREM Light, NREM Deep, REM) from breathing patterns, heart rate variability, and micro-movements -- without touching the person. +**What it does**: Classifies sleep phases (Awake, NREM Light, NREM Deep, REM) from breathing patterns, heart rate variability, and micro-movements - without touching the person. **Maturity**: Experimental @@ -57,13 +57,13 @@ Shared utilities from `vendor_common.rs`: The module uses a four-feature state machine with hysteresis: -1. **Breathing regularity** -- Coefficient of variation (CV) of a 64-sample breathing BPM window. Low CV (<0.08) indicates deep sleep; high CV (>0.20) indicates REM or wakefulness. +1. **Breathing regularity** - Coefficient of variation (CV) of a 64-sample breathing BPM window. Low CV (<0.08) indicates deep sleep; high CV (>0.20) indicates REM or wakefulness. -2. **Motion energy** -- EMA-smoothed motion from host Tier 2. Below 0.15 = sleep-like; above 0.5 = awake. +2. **Motion energy** - EMA-smoothed motion from host Tier 2. Below 0.15 = sleep-like; above 0.5 = awake. -3. **Heart rate variability (HRV)** -- Variance of recent HR BPM values. High HRV (>8.0) correlates with REM; very low HRV (<2.0) with deep sleep. +3. **Heart rate variability (HRV)** - Variance of recent HR BPM values. High HRV (>8.0) correlates with REM; very low HRV (<2.0) with deep sleep. -4. **Phase micro-movements** -- High-pass energy of the phase signal (successive differences). Captures muscle atonia disruption during REM. +4. **Phase micro-movements** - High-pass energy of the phase signal (successive differences). Captures muscle atonia disruption during REM. Stage transitions require 10 consecutive frames of the candidate stage (hysteresis), preventing jittery classification. 
@@ -114,7 +114,7 @@ let events = detector.process_frame( variance, // f32: representative subcarrier variance presence, // i32: 1 if person detected, 0 otherwise ); -// events: &[(i32, f32)] -- event ID + value pairs +// events: &[(i32, f32)] - event ID + value pairs let stage = detector.stage(); // SleepStage enum let eff = detector.efficiency(); // f32 [0, 100] @@ -128,7 +128,7 @@ let rem = detector.rem_ratio(); // f32 [0, 1] 2. **Calibration**: Let the system run for 40+ frames (2 seconds at 20 Hz) with the person in bed before expecting valid stage classifications. -3. **Interpreting Results**: Monitor `SLEEP_STAGE` events. A healthy sleep cycle progresses through Light -> Deep -> Light -> REM, repeating in ~90 minute cycles. The `SLEEP_QUALITY` event (601) gives an overall efficiency percentage -- above 85% is considered good. +3. **Interpreting Results**: Monitor `SLEEP_STAGE` events. A healthy sleep cycle progresses through Light -> Deep -> Light -> REM, repeating in ~90 minute cycles. The `SLEEP_QUALITY` event (601) gives an overall efficiency percentage - above 85% is considered good. 4. **Limitations**: The module requires the Tier 2 DSP to provide valid `breathing_bpm` and `heart_rate_bpm`. If the person is too far from the WiFi path or behind thick walls, these vitals may not be detectable. @@ -140,7 +140,7 @@ let rem = detector.rem_ratio(); // f32 [0, 1] **Maturity**: Research -**Limitations**: This module does NOT detect emotions directly. It detects physiological arousal -- elevated heart rate, rapid breathing, and fidgeting. These correlate with stress and anxiety but can also be caused by exercise, caffeine, or excitement. The module cannot distinguish between positive and negative arousal. It is a research tool for exploring the feasibility of affect sensing via RF, not a clinical instrument. +**Limitations**: This module does NOT detect emotions directly. It detects physiological arousal - elevated heart rate, rapid breathing, and fidgeting. 
These correlate with stress and anxiety but can also be caused by exercise, caffeine, or excitement. The module cannot distinguish between positive and negative arousal. It is a research tool for exploring the feasibility of affect sensing via RF, not a clinical instrument. #### How It Works @@ -287,7 +287,7 @@ let cutoff = detector.is_cutoff(); // bool ### Plant Growth Detection (`exo_plant_growth.rs`) -**What it does**: Detects plant growth and leaf movement from micro-CSI changes over hours/days. Plants cause extremely slow, monotonic drift in CSI amplitude (growth) and diurnal phase oscillations (circadian leaf movement -- nyctinasty). +**What it does**: Detects plant growth and leaf movement from micro-CSI changes over hours/days. Plants cause extremely slow, monotonic drift in CSI amplitude (growth) and diurnal phase oscillations (circadian leaf movement - nyctinasty). **Maturity**: Research @@ -329,7 +329,7 @@ let empty = detector.empty_frames(); // frames of empty-room data --- -### Ghost Hunter -- Environmental Anomaly Detector (`exo_ghost_hunter.rs`) +### Ghost Hunter - Environmental Anomaly Detector (`exo_ghost_hunter.rs`) **What it does**: Monitors CSI when no humans are detected for any perturbation above the noise floor. When the room should be empty but CSI changes are detected, something unexplained is happening. Classifies anomalies by their temporal signature. @@ -389,7 +389,7 @@ let energy = detector.anomaly_energy(); // f32 #### How It Works 1. **Requires empty room** (`presence == 0`) to avoid confounding with human motion. -2. **Broadband criterion**: Compute per-group variance ratio (short-term / baseline). If >= 75% of groups (6/8) have elevated variance (ratio > 2.5x), the signal is broadband -- consistent with rain. +2. **Broadband criterion**: Compute per-group variance ratio (short-term / baseline). If >= 75% of groups (6/8) have elevated variance (ratio > 2.5x), the signal is broadband - consistent with rain. 3. 
**Hysteresis state machine**: Onset requires 10 consecutive broadband frames; cessation requires 20 consecutive quiet frames. 4. **Intensity classification**: Based on smoothed excess energy above baseline. @@ -479,7 +479,7 @@ let persons = detector.active_persons(); // usize ### Time Crystal Detection (`exo_time_crystal.rs`) -**What it does**: Detects temporal symmetry breaking patterns -- specifically period doubling -- in motion energy. A "time crystal" in this context is when the system oscillates at a sub-harmonic of the driving frequency. Also counts independent non-harmonic periodic components as a "coordination index" for multi-person temporal coordination. +**What it does**: Detects temporal symmetry breaking patterns - specifically period doubling - in motion energy. A "time crystal" in this context is when the system oscillates at a sub-harmonic of the driving frequency. Also counts independent non-harmonic periodic components as a "coordination index" for multi-person temporal coordination. **Maturity**: Research @@ -602,14 +602,14 @@ All 10 modules have been reviewed for: ## Research References -1. Liu, J., et al. "Monitoring Vital Signs and Postures During Sleep Using WiFi Signals." IEEE Internet of Things Journal, 2018. -- WiFi-based sleep monitoring using CSI breathing patterns. -2. Zhao, M., et al. "Through-Wall Human Pose Estimation Using Radio Signals." CVPR 2018. -- RF-based pose estimation foundations. -3. Wang, H., et al. "RT-Fall: A Real-Time and Contactless Fall Detection System with Commodity WiFi Devices." IEEE Transactions on Mobile Computing, 2017. -- WiFi CSI for human activity recognition. -4. Li, H., et al. "WiFinger: Talk to Your Smart Devices with Finger Gesture." UbiComp 2016. -- WiFi-based gesture recognition using CSI. -5. Ma, Y., et al. "SignFi: Sign Language Recognition Using WiFi." ACM IMWUT, 2018. -- WiFi CSI for sign language. -6. Nickel, M. & Kiela, D. "Poincare Embeddings for Learning Hierarchical Representations." 
NeurIPS 2017. -- Hyperbolic embedding foundations. -7. Wang, W., et al. "Understanding and Modeling of WiFi Signal Based Human Activity Recognition." MobiCom 2015. -- CSI-based activity recognition. -8. Adib, F., et al. "Smart Homes that Monitor Breathing and Heart Rate." CHI 2015. -- Contactless vital sign monitoring via RF signals. +1. Liu, J., et al. "Monitoring Vital Signs and Postures During Sleep Using WiFi Signals." IEEE Internet of Things Journal, 2018. - WiFi-based sleep monitoring using CSI breathing patterns. +2. Zhao, M., et al. "Through-Wall Human Pose Estimation Using Radio Signals." CVPR 2018. - RF-based pose estimation foundations. +3. Wang, H., et al. "RT-Fall: A Real-Time and Contactless Fall Detection System with Commodity WiFi Devices." IEEE Transactions on Mobile Computing, 2017. - WiFi CSI for human activity recognition. +4. Li, H., et al. "WiFinger: Talk to Your Smart Devices with Finger Gesture." UbiComp 2016. - WiFi-based gesture recognition using CSI. +5. Ma, Y., et al. "SignFi: Sign Language Recognition Using WiFi." ACM IMWUT, 2018. - WiFi CSI for sign language. +6. Nickel, M. & Kiela, D. "Poincare Embeddings for Learning Hierarchical Representations." NeurIPS 2017. - Hyperbolic embedding foundations. +7. Wang, W., et al. "Understanding and Modeling of WiFi Signal Based Human Activity Recognition." MobiCom 2015. - CSI-based activity recognition. +8. Adib, F., et al. "Smart Homes that Monitor Breathing and Heart Rate." CHI 2015. - Contactless vital sign monitoring via RF signals. 
## Contributing New Research Modules

@@ -634,7 +634,7 @@ All 10 modules have been reviewed for:
   - At minimum: `test_const_new`, `test_warmup_no_events`, one happy-path detection test, `test_reset`
   - Test edge cases: empty input, extreme values, insufficient data
   - Verify all output values are in their documented ranges
-  - Run: `cargo test --features std -- exo_` (from within the wasm-edge crate directory)
+  - Run: `cargo test --features std -- exo_` (from within the wasm-edge crate directory)

### Design Constraints

diff --git a/docs/edge-modules/industrial.md b/docs/edge-modules/industrial.md
index 6243e0149..f7052546f 100644
--- a/docs/edge-modules/industrial.md
+++ b/docs/edge-modules/industrial.md
@@ -1,4 +1,4 @@
-# Industrial & Specialized Modules -- WiFi-DensePose Edge Intelligence
+# Industrial & Specialized Modules - WiFi-DensePose Edge Intelligence

 > Worker safety and compliance monitoring using WiFi CSI signals. Works through
 > dust, smoke, shelving, and walls where cameras fail.
Designed for warehouses, @@ -131,8 +131,8 @@ impl ForkliftProximityDetector { | `VEHICLE_DEBOUNCE` | 4 frames | 2--10 | Higher = fewer false alarms, slower response | | `PROXIMITY_DEBOUNCE` | 2 frames | 1--5 | Higher = fewer false alarms, slower response | | `ALERT_COOLDOWN` | 40 frames (2 s) | 10--200 | Lower = more frequent warnings | -| `DIST_CRITICAL` | amp ratio > 4.0 | -- | Very close proximity | -| `DIST_WARNING` | amp ratio > 3.0 | -- | Close proximity | +| `DIST_CRITICAL` | amp ratio > 4.0 | - | Very close proximity | +| `DIST_WARNING` | amp ratio > 3.0 | - | Close proximity | #### Example Usage @@ -152,9 +152,9 @@ for &(event_id, value) in events { match event_id { 500 => { let category = match value as i32 { - 0 => "CRITICAL -- stop forklift immediately", - 1 => "WARNING -- reduce speed", - _ => "CAUTION -- be alert", + 0 => "CRITICAL - stop forklift immediately", + 1 => "WARNING - reduce speed", + _ => "CAUTION - be alert", }; trigger_alarm(category); } @@ -258,8 +258,8 @@ impl ConfinedSpaceMonitor { | 510 | `EVENT_WORKER_ENTRY` | 1.0 | Worker entered the confined space | | 511 | `EVENT_WORKER_EXIT` | 1.0 | Worker exited the confined space | | 512 | `EVENT_BREATHING_OK` | BPM (float) | Periodic breathing confirmation (~every 5 s) | -| 513 | `EVENT_EXTRACTION_ALERT` | Seconds since last breath | No breathing for >15 s -- initiate rescue | -| 514 | `EVENT_IMMOBILE_ALERT` | Seconds without motion | No motion for >60 s -- check on worker | +| 513 | `EVENT_EXTRACTION_ALERT` | Seconds since last breath | No breathing for >15 s - initiate rescue | +| 514 | `EVENT_IMMOBILE_ALERT` | Seconds without motion | No motion for >60 s - check on worker | #### State Machine @@ -703,7 +703,7 @@ for &(event_id, value) in events { ### Forklift Proximity (OSHA 29 CFR 1910.178) -- **Standard**: Powered Industrial Trucks -- operator must warn others. +- **Standard**: Powered Industrial Trucks - operator must warn others. 
- **Module supports**: Automated proximity detection supplements horn/light warnings. Does NOT replace operator training, seat belts, or speed limits. - **Additional equipment required**: Physical barriers, floor markings, @@ -715,7 +715,7 @@ for &(event_id, value) in events { - **Module supports**: Continuous proof-of-life monitoring (breathing and motion confirmation). Assists the required safety attendant. - **Additional equipment required**: - - Atmospheric monitoring (O2, H2S, CO, LEL) -- the WiFi module cannot + - Atmospheric monitoring (O2, H2S, CO, LEL) - the WiFi module cannot detect gas hazards. - Communication system between entrant and attendant. - Rescue equipment (retrieval system, harness, tripod). @@ -788,7 +788,7 @@ for &(event_id, value) in events { - Sensor B inside the confined space (if safely mountable) provides breathing and motion monitoring. - If only one sensor is available, mount at the entry facing into the space. -- WiFi signals penetrate metal walls poorly -- use multiple sensors for +- WiFi signals penetrate metal walls poorly - use multiple sensors for large vessels. ### Integration with Safety PLCs diff --git a/docs/edge-modules/medical.md b/docs/edge-modules/medical.md index f88ae686d..43e164e2d 100644 --- a/docs/edge-modules/medical.md +++ b/docs/edge-modules/medical.md @@ -1,10 +1,10 @@ -# Medical & Health Modules -- WiFi-DensePose Edge Intelligence +# Medical & Health Modules - WiFi-DensePose Edge Intelligence -> Contactless health monitoring using WiFi signals. No wearables, no cameras -- just an ESP32 sensor reading WiFi reflections off a person's body to detect breathing problems, heart rhythm issues, walking difficulties, and seizures. +> Contactless health monitoring using WiFi signals. No wearables, no cameras - just an ESP32 sensor reading WiFi reflections off a person's body to detect breathing problems, heart rhythm issues, walking difficulties, and seizures. 
## Important Disclaimer -These modules are **research tools, not FDA-approved medical devices**. They should supplement -- not replace -- professional medical monitoring. WiFi CSI-derived vital signs are inherently noisier than clinical instruments (ECG, pulse oximetry, respiratory belts). False positives and false negatives will occur. Always validate findings against clinical-grade equipment before acting on alerts. +These modules are **research tools, not FDA-approved medical devices**. They should supplement - not replace - professional medical monitoring. WiFi CSI-derived vital signs are inherently noisier than clinical instruments (ECG, pulse oximetry, respiratory belts). False positives and false negatives will occur. Always validate findings against clinical-grade equipment before acting on alerts. ## Overview @@ -29,7 +29,7 @@ All modules: ### Sleep Apnea Detection (`med_sleep_apnea.rs`) -**What it does**: Monitors breathing rate from the host CSI pipeline and detects when breathing drops below 4 BPM for more than 10 consecutive seconds, indicating an apnea episode. It tracks all episodes and computes the Apnea-Hypopnea Index (AHI) -- the number of apnea events per hour of monitored sleep time. AHI is the standard clinical metric for sleep apnea severity. +**What it does**: Monitors breathing rate from the host CSI pipeline and detects when breathing drops below 4 BPM for more than 10 consecutive seconds, indicating an apnea episode. It tracks all episodes and computes the Apnea-Hypopnea Index (AHI) - the number of apnea events per hour of monitored sleep time. AHI is the standard clinical metric for sleep apnea severity. **Clinical basis**: Obstructive and central sleep apnea are defined by cessation of airflow for 10 seconds or more. The module uses a breathing rate threshold of 4 BPM (essentially near-zero breathing) with a 10-second onset delay to confirm cessation is sustained. 
AHI severity classification: < 5 normal, 5-15 mild, 15-30 moderate, > 30 severe. @@ -52,10 +52,10 @@ All modules: | `episode_count()` | method | Total recorded apnea episodes | | `monitoring_seconds()` | method | Total seconds with presence active | | `in_apnea()` | method | Whether currently in an apnea episode | -| `APNEA_BPM_THRESH` | const | 4.0 BPM -- below this counts as apnea | -| `APNEA_ONSET_SECS` | const | 10 seconds -- minimum duration to declare apnea | -| `AHI_REPORT_INTERVAL` | const | 300 seconds (5 min) -- how often AHI is recalculated | -| `MAX_EPISODES` | const | 256 -- maximum episodes stored per session | +| `APNEA_BPM_THRESH` | const | 4.0 BPM - below this counts as apnea | +| `APNEA_ONSET_SECS` | const | 10 seconds - minimum duration to declare apnea | +| `AHI_REPORT_INTERVAL` | const | 300 seconds (5 min) - how often AHI is recalculated | +| `MAX_EPISODES` | const | 256 - maximum episodes stored per session | #### Events Emitted @@ -92,8 +92,8 @@ All modules: | `APNEA_BPM_THRESH` | 4.0 | 0-6 BPM | Breathing rate below which apnea is suspected | | `APNEA_ONSET_SECS` | 10 | 10-20 s | Seconds of low breathing before apnea is declared | | `AHI_REPORT_INTERVAL` | 300 | 60-3600 s | How often AHI is recalculated and emitted | -| `MAX_EPISODES` | 256 | -- | Fixed buffer size for episode history | -| `PRESENCE_ACTIVE` | 1 | -- | Minimum presence flag value for monitoring | +| `MAX_EPISODES` | 256 | - | Fixed buffer size for episode history | +| `PRESENCE_ACTIVE` | 1 | - | Minimum presence flag value for monitoring | #### Example Usage @@ -102,7 +102,7 @@ use wifi_densepose_wasm_edge::med_sleep_apnea::*; let mut detector = SleepApneaDetector::new(); -// Normal breathing -- no events +// Normal breathing - no events let events = detector.process_frame(14.0, 1, 0.1); assert!(events.is_empty()); @@ -191,7 +191,7 @@ println!("AHI: {:.1}", detector.ahi()); #### State Machine -The cardiac module does not have a formal state machine -- it uses independent 
detectors with cooldown timers: +The cardiac module does not have a formal state machine - it uses independent detectors with cooldown timers: ``` For each frame: @@ -226,7 +226,7 @@ use wifi_densepose_wasm_edge::med_cardiac_arrhythmia::*; let mut detector = CardiacArrhythmiaDetector::new(); -// Normal heart rate -- no events +// Normal heart rate - no events for _ in 0..60 { let events = detector.process_frame(72.0, 0.0); assert!(events.is_empty() || events.iter().all(|&(t, _)| t == EVENT_HRV_ANOMALY)); @@ -569,7 +569,7 @@ use wifi_densepose_wasm_edge::med_seizure_detect::*; let mut detector = SeizureDetector::new(); -// Normal motion -- no seizure +// Normal motion - no seizure for _ in 0..200 { let events = detector.process_frame(0.0, 0.5, 0.3, 1); assert!(events.is_empty()); @@ -593,7 +593,7 @@ for _ in 0..100 { let events = detector.process_frame(0.0, 0.05, 0.05, 1); for &(event_id, _) in events { if event_id == EVENT_POST_ICTAL { - println!("Post-ictal phase detected -- patient needs immediate assessment"); + println!("Post-ictal phase detected - patient needs immediate assessment"); } } } @@ -609,7 +609,7 @@ for _ in 0..100 { 4. **Fall vs seizure discrimination**: The module automatically distinguishes falls (brief energy spike < 10 frames) from seizures (sustained energy). If the patient is known to be a fall risk, consider running the gait analysis module in parallel for complementary monitoring. -5. **Response protocol**: When `EVENT_SEIZURE_ONSET` fires, immediately notify clinical staff. The `EVENT_POST_ICTAL` event indicates the active seizure has ended and the patient is entering post-ictal state -- they need assessment but are no longer in the convulsive phase. +5. **Response protocol**: When `EVENT_SEIZURE_ONSET` fires, immediately notify clinical staff. The `EVENT_POST_ICTAL` event indicates the active seizure has ended and the patient is entering post-ictal state - they need assessment but are no longer in the convulsive phase. 
--- @@ -619,7 +619,7 @@ All medical modules include comprehensive unit tests covering initialization, no ```bash cd rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge -cargo test --features std -- med_ +cargo test --features std -- med_ ``` Expected output: **38 tests passed, 0 failed**. @@ -640,16 +640,16 @@ Expected output: **38 tests passed, 0 failed**. | Condition | Normal Range | Module Threshold | Clinical Standard | Notes | |-----------|-------------|------------------|-------------------|-------| -| Breathing rate | 12-20 BPM | -- | -- | Normal adult at rest | +| Breathing rate | 12-20 BPM | - | - | Normal adult at rest | | Bradypnea | < 12 BPM | Not directly detected | < 12 BPM | Gap: covered implicitly by distress score | | Tachypnea | > 20 BPM | > 25 BPM | > 20 BPM | Conservative threshold for CSI noise tolerance | | Apnea | 0 BPM | < 4 BPM for > 10s | Cessation > 10s | 4 BPM threshold accounts for CSI noise floor | | Bradycardia | < 60 BPM | < 50 BPM | < 60 BPM | Lower threshold avoids false positives in athletes | | Tachycardia | > 100 BPM | > 100 BPM | > 100 BPM | Matches clinical standard | -| Heart rate (normal) | 60-100 BPM | -- | 60-100 BPM | -- | -| AHI (mild apnea) | -- | > 5 events/hr | > 5 events/hr | Matches clinical standard | -| AHI (moderate) | -- | > 15 events/hr | > 15 events/hr | Matches clinical standard | -| AHI (severe) | -- | > 30 events/hr | > 30 events/hr | Matches clinical standard | +| Heart rate (normal) | 60-100 BPM | - | 60-100 BPM | - | +| AHI (mild apnea) | - | > 5 events/hr | > 5 events/hr | Matches clinical standard | +| AHI (moderate) | - | > 15 events/hr | > 15 events/hr | Matches clinical standard | +| AHI (severe) | - | > 30 events/hr | > 30 events/hr | Matches clinical standard | | RMSSD (normal HRV) | 20-80 ms | 10-120 ms | 19-75 ms | Widened band for CSI-derived HR | | Gait cadence (normal) | 80-120 steps/min | 80-120 steps/min | 90-120 steps/min | Slightly wider range | | Gait asymmetry | 1.0 ratio | > 
0.15 deviation | > 0.10 deviation | Slightly higher threshold for CSI | @@ -673,7 +673,7 @@ Several thresholds differ from strict clinical standards. This is intentional: 2. **False positive rates.** WiFi CSI is affected by environmental factors: moving objects (fans, pets, curtains), multipath changes (opening doors, people walking nearby), and electromagnetic interference. Expect false positive rates of 5-15% in typical home environments and 1-5% in controlled clinical settings. 3. **False negative rates.** The conservative thresholds mean some borderline conditions may not trigger alerts. Specifically: - - Bradypnea (12-20 BPM dropping to 12-4 BPM) is not directly flagged -- only sub-4 BPM apnea is detected + - Bradypnea (12-20 BPM dropping to 12-4 BPM) is not directly flagged - only sub-4 BPM apnea is detected - Mild tachycardia (100-120 BPM) is detected, but the 10-second sustained requirement means brief episodes are missed - Low-amplitude seizures without strong motor components may not exceed the energy threshold diff --git a/docs/edge-modules/retail.md b/docs/edge-modules/retail.md index bdf25f3d9..1a2ce0b78 100644 --- a/docs/edge-modules/retail.md +++ b/docs/edge-modules/retail.md @@ -1,6 +1,6 @@ -# Retail & Hospitality Modules -- WiFi-DensePose Edge Intelligence +# Retail & Hospitality Modules - WiFi-DensePose Edge Intelligence -> Understand customer behavior without cameras or consent forms. Count queues, map foot traffic, track table turnover, measure shelf engagement -- all from WiFi signals that are already there. +> Understand customer behavior without cameras or consent forms. Count queues, map foot traffic, track table turnover, measure shelf engagement - all from WiFi signals that are already there. ## Overview @@ -151,7 +151,7 @@ Subcarriers are divided evenly: with 27 subcarriers, each zone gets 3 subcarrier **What it does**: Counts people entering and exiting through a doorway or passage using directional phase gradient analysis. 
Maintains cumulative ingress/egress counts and reports net occupancy (in - out, clamped to zero). Emits hourly traffic summaries. -**How it works**: Subcarriers are split into two groups: low-index (near entrance) and high-index (far side). A person walking through the sensing area causes an asymmetric phase velocity pattern -- the near-side group's phase changes before the far-side group for ingress, and vice versa for egress. The directional gradient (low_gradient - high_gradient) is smoothed via EMA and thresholded. Combined with motion energy and amplitude spike detection, this discriminates genuine crossings from noise. +**How it works**: Subcarriers are split into two groups: low-index (near entrance) and high-index (far side). A person walking through the sensing area causes an asymmetric phase velocity pattern - the near-side group's phase changes before the far-side group for ingress, and vice versa for egress. The directional gradient (low_gradient - high_gradient) is smoothed via EMA and thresholded. Combined with motion energy and amplitude spike detection, this discriminates genuine crossings from noise. ``` Ingress: positive smoothed gradient (low-side phase leads) @@ -220,7 +220,7 @@ elif event_id == 423: # HOURLY_TRAFFIC ### Table Turnover Tracking (`ret_table_turnover.rs`) -**What it does**: Tracks the full lifecycle of a restaurant table -- from guests sitting down, through eating, to departing and cleanup. Measures seating duration and computes a rolling turnover rate (turnovers per hour). Designed for one ESP32 node per table or table group. +**What it does**: Tracks the full lifecycle of a restaurant table - from guests sitting down, through eating, to departing and cleanup. Measures seating duration and computes a rolling turnover rate (turnovers per hour). Designed for one ESP32 node per table or table group. 
**How it works**: A five-state machine processes presence, motion energy, and person count: @@ -320,7 +320,7 @@ The module computes the standard deviation of per-subcarrier phase differences. | Level | Duration | Description | Event ID | |-------|----------|-------------|----------| -| None | -- | No engagement (absent or walking) | -- | +| None | - | No engagement (absent or walking) | - | | Browse | < 5s | Brief glance, passing interest | 440 | | Consider | 5-30s | Examining, reading label, comparing | 441 | | Deep Engage | > 30s | Extended interaction, decision-making | 442 | @@ -473,9 +473,9 @@ POS Dashboard Staff Analytics Each event is a `(event_type: i32, value: f32)` pair. Multiple events per frame are packed into a single UDP packet. The sensing server deserializes and exposes them via: -- `GET /api/v1/sensing/latest` -- latest raw events -- `GET /api/v1/sensing/events?type=400-403` -- filtered by event type -- WebSocket `/ws/events` -- real-time stream +- `GET /api/v1/sensing/latest` - latest raw events +- `GET /api/v1/sensing/events?type=400-403` - filtered by event type +- WebSocket `/ws/events` - real-time stream ### Privacy Considerations diff --git a/docs/edge-modules/security.md b/docs/edge-modules/security.md index 2201b64c1..4555fcd6d 100644 --- a/docs/edge-modules/security.md +++ b/docs/edge-modules/security.md @@ -1,4 +1,4 @@ -# Security & Safety Modules -- WiFi-DensePose Edge Intelligence +# Security & Safety Modules - WiFi-DensePose Edge Intelligence > Perimeter monitoring and threat detection using WiFi Channel State Information (CSI). > Works through walls, in complete darkness, without visible cameras. @@ -35,7 +35,7 @@ All security modules follow these conventions: ### Intrusion Detection (`intrusion.rs`) -**What it does**: Monitors a previously-empty space and triggers an alarm when someone enters. Works like a traditional motion alarm -- the environment must settle before the system arms itself. 
+**What it does**: Monitors a previously-empty space and triggers an alarm when someone enters. Works like a traditional motion alarm - the environment must settle before the system arms itself. **How it works**: During calibration (200 frames), the detector learns per-subcarrier amplitude mean and variance. After calibration, it waits for the environment to be quiet (100 consecutive frames with low disturbance) before arming. Once armed, it computes a composite disturbance score from phase velocity (sudden phase jumps between frames) and amplitude deviation (amplitude departing from baseline by more than 3 sigma). If the disturbance exceeds 0.8 for 3+ consecutive frames, an alert fires. @@ -70,7 +70,7 @@ Calibrating --> Monitoring --> Armed --> Alert | 200 | `EVENT_INTRUSION_ALERT` | Intrusion detected (disturbance score as value) | | 201 | `EVENT_INTRUSION_ZONE` | Zone index of highest disturbance | | 202 | `EVENT_INTRUSION_ARMED` | System transitioned to Armed state | -| 203 | `EVENT_INTRUSION_DISARMED` | System disarmed (currently unused -- reserved) | +| 203 | `EVENT_INTRUSION_DISARMED` | System disarmed (currently unused - reserved) | #### Configuration @@ -93,7 +93,7 @@ Calibrating --> Monitoring --> Armed --> Alert 1. **Phase gradient**: Mean absolute phase difference between current and previous frame within the zone's subcarrier range. 2. **Variance ratio**: Current zone variance divided by calibrated baseline variance. -A breach is flagged when phase gradient exceeds 0.6 rad/subcarrier AND variance ratio exceeds 2.5x baseline. Direction is determined by linear regression slope over an 8-frame energy history buffer -- positive slope = approaching, negative = departing. +A breach is flagged when phase gradient exceeds 0.6 rad/subcarrier AND variance ratio exceeds 2.5x baseline. Direction is determined by linear regression slope over an 8-frame energy history buffer - positive slope = approaching, negative = departing. 
#### State Machine @@ -267,7 +267,7 @@ for &(event_id, value) in events { ### Tailgating Detection (`sec_tailgating.rs`) -**What it does**: Detects tailgating at doorways -- two or more people passing through in rapid succession. A single authorized passage produces one smooth energy peak; a tailgater following closely produces a second peak within a configurable window (default 3 seconds). +**What it does**: Detects tailgating at doorways - two or more people passing through in rapid succession. A single authorized passage produces one smooth energy peak; a tailgater following closely produces a second peak within a configurable window (default 3 seconds). **How it works**: The detector uses temporal clustering of motion energy peaks through a 3-state machine: @@ -366,7 +366,7 @@ for &(event_id, value) in events { **What it does**: Detects prolonged stationary presence in a monitored area. Distinguishes between a person passing through (normal) and someone standing still for an extended time (loitering). Default dwell threshold is 5 minutes. -**How it works**: Uses a 4-state machine that tracks presence duration and motion level. Only stationary frames (motion energy below 0.5) count toward the dwell threshold -- a person actively walking through does not accumulate loitering time. The exit cooldown (30 seconds) prevents false "loitering ended" events from brief signal dropouts or occlusions. +**How it works**: Uses a 4-state machine that tracks presence duration and motion level. Only stationary frames (motion energy below 0.5) count toward the dwell threshold - a person actively walking through does not accumulate loitering time. The exit cooldown (30 seconds) prevents false "loitering ended" events from brief signal dropouts or occlusions. #### State Machine @@ -463,7 +463,7 @@ if detector.state() == LoiterState::Loitering { **What it does**: Detects three categories of distress-related motion: 1. 
**Panic**: Erratic, high-jerk motion with rapid random direction changes (e.g., someone flailing, being attacked). 2. **Struggle**: Elevated jerk with moderate energy and some direction changes (e.g., physical altercation, trying to break free). -3. **Fleeing**: Sustained high energy with low entropy -- running in one direction. +3. **Fleeing**: Sustained high energy with low entropy - running in one direction. **How it works**: Maintains a 100-frame (5-second) circular buffer of motion energy and variance values. Computes window-level statistics each frame: @@ -557,7 +557,7 @@ for &(event_id, value) in events { ```bash # Run all security module tests (requires std feature) cd rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge -cargo test --features std -- sec_ intrusion +cargo test --features std -- sec_ intrusion ``` ### Test Coverage Summary @@ -579,7 +579,7 @@ cargo test --features std -- sec_ intrusion Each ESP32-S3 with a WiFi AP link covers a single sensing path. The coverage area depends on: - **Distance**: 1-10 meters between ESP32 and AP (optimal: 3-5 meters for indoor). -- **Width**: First Fresnel zone width -- approximately 0.5-1.5 meters at 5 GHz. +- **Width**: First Fresnel zone width - approximately 0.5-1.5 meters at 5 GHz. - **Through-wall**: WiFi CSI penetrates drywall and wood but attenuates through concrete/metal. Signal quality degrades beyond one wall. ### Multi-Sensor Coordination diff --git a/docs/edge-modules/signal-intelligence.md b/docs/edge-modules/signal-intelligence.md index 0d8e7b080..bfa466a5f 100644 --- a/docs/edge-modules/signal-intelligence.md +++ b/docs/edge-modules/signal-intelligence.md @@ -1,4 +1,4 @@ -# Signal Intelligence Modules -- WiFi-DensePose Edge Intelligence +# Signal Intelligence Modules - WiFi-DensePose Edge Intelligence > Real-time WiFi signal analysis and enhancement running directly on the ESP32 chip. 
These modules clean, compress, and extract features from raw WiFi channel data so that higher-level modules (health, security, etc.) get better input. @@ -66,7 +66,7 @@ All signal intelligence modules share these utilities from `vendor_common.rs`: ### Flash Attention (`sig_flash_attention.rs`) -**What it does**: Focuses processing on the WiFi channels that carry the most useful information -- ignores noise. Divides 32 subcarriers into 8 groups and computes attention weights showing where signal activity is concentrated. +**What it does**: Focuses processing on the WiFi channels that carry the most useful information - ignores noise. Divides 32 subcarriers into 8 groups and computes attention weights showing where signal activity is concentrated. **Algorithm**: Tiled attention (Q*K/sqrt(d)) over 8 subcarrier groups with softmax normalization and Shannon entropy tracking. @@ -114,7 +114,7 @@ impl FlashAttention { The 8 attention weights sum to 1.0. When a person stands in a particular area of the room, the WiFi signal changes most in the subcarrier group(s) whose Fresnel zones intersect that area. -- **All weights near 0.125 (= 1/8)**: Uniform attention. No localized activity -- either an empty room or whole-body motion affecting all subcarriers equally. +- **All weights near 0.125 (= 1/8)**: Uniform attention. No localized activity - either an empty room or whole-body motion affecting all subcarriers equally. - **One weight near 1.0, others near 0.0**: Highly focused. Activity concentrated in one spatial zone. The `peak_group` index tells you which zone. - **Two adjacent groups elevated**: Activity at the boundary between two spatial zones, or a person moving between them. - **Entropy below 1.0**: Strong spatial focus. Good for zone-level localization. 
@@ -161,7 +161,7 @@ pub enum GateDecision { Accept, PredictOnly, Reject, Recalibrate } |----|------|-------|---------| | 710 | `GATE_DECISION` | 2/1/0/-1 | Accept(2), PredictOnly(1), Reject(0), Recalibrate(-1) | | 711 | `COHERENCE_SCORE` | [0.0, 1.0] | Phase phasor coherence magnitude | -| 712 | `RECALIBRATE_NEEDED` | Variance | Environment has changed significantly -- retrain baseline | +| 712 | `RECALIBRATE_NEEDED` | Variance | Environment has changed significantly - retrain baseline | #### Configuration @@ -249,7 +249,7 @@ impl TemporalCompressor { **What it does**: When WiFi hardware drops some subcarrier measurements (nulls/zeros due to deep fades, firmware glitches, or multipath nulls), this module reconstructs the missing values using mathematical optimization. -**Algorithm**: Iterative Shrinkage-Thresholding Algorithm (ISTA) -- an L1-minimizing sparse recovery method. +**Algorithm**: Iterative Shrinkage-Thresholding Algorithm (ISTA) - an L1-minimizing sparse recovery method. ``` x_{k+1} = soft_threshold(x_k + step * A^T * (b - A*x_k), lambda) @@ -278,7 +278,7 @@ impl SparseRecovery { } ``` -Note: `process_frame` modifies `amplitudes` in place -- null subcarriers are overwritten with recovered values. +Note: `process_frame` modifies `amplitudes` in place - null subcarriers are overwritten with recovered values. #### Events @@ -305,13 +305,13 @@ Note: `process_frame` modifies `amplitudes` in place -- null subcarriers are ove 2. Recovery only triggers when dropout exceeds 10% (e.g., 4+ of 32 subcarriers are null). 3. Below 10%, the nulls are too sparse to warrant recovery overhead. 4. The tridiagonal correlation model exploits the fact that adjacent WiFi subcarriers are highly correlated. A null at subcarrier 15 can be estimated from subcarriers 14 and 16. -5. Monitor `RECOVERY_ERROR` -- a rising residual suggests the correlation model is stale and the environment has changed. +5. 
Monitor `RECOVERY_ERROR` - a rising residual suggests the correlation model is stale and the environment has changed. --- ### Min-Cut Person Match (`sig_mincut_person_match.rs`) -**What it does**: Maintains stable identity labels for up to 4 people in the sensing area. When people move around, their WiFi signatures change position -- this module tracks which signature belongs to which person across consecutive frames. +**What it does**: Maintains stable identity labels for up to 4 people in the sensing area. When people move around, their WiFi signatures change position - this module tracks which signature belongs to which person across consecutive frames. **Algorithm**: Inspired by `ruvector-mincut` (DynamicPersonMatcher). Each frame: @@ -361,7 +361,7 @@ impl PersonMatcher { **What it does**: Detects subtle motion that traditional variance-based detectors miss. Computes how much the overall shape of the WiFi signal distribution changes between frames, even when the total power stays constant. -**Algorithm**: Sliced Wasserstein distance -- a computationally efficient approximation to the full Wasserstein (earth mover's) distance. +**Algorithm**: Sliced Wasserstein distance - a computationally efficient approximation to the full Wasserstein (earth mover's) distance. 1. Generate 4 fixed random projection directions (deterministic LCG PRNG, const-computed at compile time) 2. Project both current and previous amplitude vectors onto each direction @@ -370,7 +370,7 @@ impl PersonMatcher { 5. Average across all 4 projections 6. Smooth via EMA and compare against thresholds -**Subtle motion detection**: When the Wasserstein distance is elevated (distribution shape changed) but the variance is stable (total power unchanged), something moved without creating obvious disturbance -- e.g., slow hand motion, breathing, or a door slowly closing. 
+**Subtle motion detection**: When the Wasserstein distance is elevated (distribution shape changed) but the variance is stable (total power unchanged), something moved without creating obvious disturbance - e.g., slow hand motion, breathing, or a door slowly closing. #### Public API diff --git a/docs/edge-modules/spatial-temporal.md b/docs/edge-modules/spatial-temporal.md index b61a7187a..8308f272b 100644 --- a/docs/edge-modules/spatial-temporal.md +++ b/docs/edge-modules/spatial-temporal.md @@ -1,4 +1,4 @@ -# Spatial & Temporal Intelligence -- WiFi-DensePose Edge Intelligence +# Spatial & Temporal Intelligence - WiFi-DensePose Edge Intelligence > Location awareness, activity patterns, and autonomous decision-making running on the ESP32 chip. These modules figure out where people are, learn daily routines, verify safety rules, and let the device plan its own actions. diff --git a/docs/research/00-rf-topological-sensing-index.md b/docs/research/00-rf-topological-sensing-index.md index d80143575..150373018 100644 --- a/docs/research/00-rf-topological-sensing-index.md +++ b/docs/research/00-rf-topological-sensing-index.md @@ -1,4 +1,4 @@ -# RF Topological Sensing — Research Index +# RF Topological Sensing - Research Index ## SOTA Research Compendium @@ -13,7 +13,7 @@ RF Topological Sensing treats a room as a dynamic signal graph where ESP32 nodes are vertices and TX-RX links are edges weighted by CSI coherence. Instead of -estimating position, minimum cut detects where the RF field topology changes — +estimating position, minimum cut detects where the RF field topology changes - revealing physical boundaries corresponding to objects, people, and environmental shifts. This creates a "radio nervous system" that is structurally aware of space. @@ -99,8 +99,8 @@ shifts. This creates a "radio nervous system" that is structurally aware of spac - **ADR-045**: Quantum Biomedical Sensing Extension (Document 12) ## Implementation Phases -1. 
**Phase 1** (4 weeks): 4-node POC — detect person in room -2. **Phase 2** (8 weeks): 16-node room — track movement boundaries < 50 cm -3. **Phase 3** (16 weeks): Multi-room mesh — cross-room transition detection -4. **Phase 4** (2027-2028): Quantum-enhanced — NV diamond + ESP32 hybrid -5. **Phase 5** (2029+): Biomedical — coherence diagnostics, ambient health +1. **Phase 1** (4 weeks): 4-node POC - detect person in room +2. **Phase 2** (8 weeks): 16-node room - track movement boundaries < 50 cm +3. **Phase 3** (16 weeks): Multi-room mesh - cross-room transition detection +4. **Phase 4** (2027-2028): Quantum-enhanced - NV diamond + ESP32 hybrid +5. **Phase 5** (2029+): Biomedical - coherence diagnostics, ambient health diff --git a/docs/research/01-rf-graph-theory-foundations.md b/docs/research/01-rf-graph-theory-foundations.md index 502248c9f..0c100b867 100644 --- a/docs/research/01-rf-graph-theory-foundations.md +++ b/docs/research/01-rf-graph-theory-foundations.md @@ -16,7 +16,7 @@ a mesh of 16 ESP32 WiFi nodes as a weighted graph where edges represent TX-RX link pairs and edge weights encode CSI (Channel State Information) coherence. When physical objects or people perturb the RF field, edge weights destabilize non-uniformly, and minimum cut algorithms reveal the topological boundary of the -perturbation. This approach — which we term **RF topological sensing** — differs +perturbation. This approach - which we term **RF topological sensing** - differs fundamentally from classical RF localization techniques (RSSI triangulation, fingerprinting, CSI-based positioning) in that it detects *coherence boundaries* rather than estimating *positions*. We develop the formal mathematical framework, @@ -44,7 +44,7 @@ theory, and identify open research questions for this largely unexplored domain. Consider 16 ESP32 nodes deployed in a room, each capable of transmitting and receiving WiFi CSI frames. 
Every ordered TX-RX pair yields a channel measurement -— amplitude and phase across OFDM subcarriers. In the absence of perturbation, +of amplitude and phase across OFDM subcarriers. In the absence of perturbation, these measurements exhibit stable coherence patterns determined by room geometry, multipath structure, and hardware characteristics. @@ -52,8 +52,8 @@ When a person enters the room, they scatter, absorb, and reflect RF energy along certain propagation paths. The key insight is that this perturbation is **spatially localized**: only links whose Fresnel zones intersect the person's body experience significant coherence degradation. The affected links form a -connected subgraph whose boundary — the set of edges connecting "disturbed" and -"undisturbed" regions of the link graph — constitutes a topological signature of +connected subgraph whose boundary - the set of edges connecting "disturbed" and +"undisturbed" regions of the link graph - constitutes a topological signature of the perturbation. We propose that **minimum cut algorithms** are the natural computational tool for @@ -267,13 +267,13 @@ weights, this must be combined with Edmonds-Karp (BFS-based path selection) for O(nm^2) worst case, or Dinic's algorithm for O(n^2 * m). **RF application**: Ford-Fulkerson is useful when we want the minimum s-t cut -between a specific pair of node groups — for example, asking "what is the weakest +between a specific pair of node groups - for example, asking "what is the weakest coherence boundary separating the north wall sensors from the south wall sensors?" ### 3.3 Stoer-Wagner Algorithm for Global Minimum Cut -For RF topological sensing, we typically want the **global** minimum cut — the -weakest boundary in the entire mesh — without pre-specifying source and sink.
The Stoer-Wagner algorithm (1997) computes this efficiently. **Algorithm**: @@ -299,7 +299,7 @@ MINIMUM_CUT_PHASE(G): ``` **Complexity**: O(nm + n^2 log n) using a Fibonacci heap, or O(nm log n) with a -binary heap. For our n = 16, m = 120 mesh, this is trivially fast — roughly +binary heap. For our n = 16, m = 120 mesh, this is trivially fast - roughly 16 phases of 16 vertex additions = 256 operations. **Why Stoer-Wagner is ideal for RF sensing**: @@ -396,9 +396,9 @@ Every pair of nodes forms a potential link, giving a complete graph K_16 with narrow Fresnel zones. - **Long links** (diagonal/cross-room): Lower SNR, sensitive to perturbations anywhere along the path, wide Fresnel zones. -- **Parallel links**: Correlated sensitivity — a perturbation affecting one likely +- **Parallel links**: Correlated sensitivity - a perturbation affecting one likely affects the other. -- **Crossing links**: Complementary sensitivity — their Fresnel zone intersection +- **Crossing links**: Complementary sensitivity - their Fresnel zone intersection localizes perturbations. ### 4.2 Fresnel Zone Geometry and Edge Semantics @@ -418,7 +418,7 @@ of a short link but only partially occludes a long link. This creates a natural **spatial resolution** determined by the mesh geometry. **Edge semantics**: An edge (v_i, v_j) in the graph represents not just a -communication link but a **spatial sensing region** — the Fresnel ellipsoid +communication link but a **spatial sensing region** - the Fresnel ellipsoid between v_i and v_j. The edge weight w(v_i, v_j) encodes whether this sensing region is perturbed. @@ -526,7 +526,7 @@ cut) of the graph. represents its position along the "weakest axis" of the graph. Nodes on opposite sides of a perturbation boundary receive opposite-sign values. The magnitude |v_2[i]| indicates how strongly node i is associated with its side of the -partition — nodes near the boundary have small |v_2[i]|. +partition - nodes near the boundary have small |v_2[i]|. 
### 5.3 Cheeger Inequality @@ -546,13 +546,13 @@ The **Cheeger inequality** bounds h(G) using λ_2: This is powerful for RF sensing because: 1. **Lower bound (λ_2 / 2 <= h(G))**: A small Fiedler value guarantees the - existence of a sparse cut — i.e., a coherence boundary. + existence of a sparse cut - i.e., a coherence boundary. 2. **Upper bound (h(G) <= sqrt(2 * λ_2))**: Spectral bisection produces a cut whose normalized capacity is within a sqrt(λ_2) factor of optimal. 3. **Monitoring λ_2 over time**: A dropping Fiedler value signals that the - graph's connectivity is weakening — someone is entering the room or moving to + graph's connectivity is weakening - someone is entering the room or moving to a position that bisects the mesh. ### 5.4 Higher Eigenvectors and Multi-Way Partitioning @@ -593,7 +593,7 @@ changes efficiently. Δ_λ(t) = |λ_2(t) - λ_2(t-1)| / λ_2(t-1) ``` -A spike in Δ_λ(t) indicates a topological change — a new perturbation or a +A spike in Δ_λ(t) indicates a topological change - a new perturbation or a significant movement event. **Eigenvector tracking**: For smooth graph evolution, we can use eigenvalue @@ -604,8 +604,8 @@ in λ_2 is: δλ_2 ≈ δw * (v_2[i] - v_2[j])^2 ``` -This means edges with large (v_2[i] - v_2[j])^2 — edges that cross the Fiedler -cut — have the most impact on algebraic connectivity. These are precisely the +This means edges with large (v_2[i] - v_2[j])^2 - edges that cross the Fiedler +cut - have the most impact on algebraic connectivity. These are precisely the boundary edges we care about. ### 5.6 Normalized Spectral Clustering (Shi-Malik) @@ -644,7 +644,7 @@ weights (all links from the transmitting node). **Latency budget**: To support real-time applications (gesture recognition, intrusion detection), we need total processing time under 10 ms per update cycle. 
-On a modern processor, this is generous — but motivates efficient algorithms for +On a modern processor, this is generous - but motivates efficient algorithms for future scaling to larger meshes. ### 6.2 Incremental Min-Cut Algorithms @@ -656,13 +656,13 @@ under edge updates. **Weight increase (edge strengthening)**: If an edge weight increases, the minimum cut can only increase or stay the same. If the modified edge does not cross the current min-cut, the cut is unchanged. -If it does cross the cut, the new min-cut value is at least the old value — we +If it does cross the cut, the new min-cut value is at least the old value - we need to verify whether the current partition is still optimal, potentially by running a single max-flow computation in the residual graph. **Weight decrease (edge weakening)**: If an edge weight decreases and it crosses the current min-cut, the cut capacity -decreases by the weight change — no recomputation needed. If the edge is internal +decreases by the weight change - no recomputation needed. If the edge is internal to one side of the cut, the cut is unchanged. However, a new lower-capacity cut may have emerged, requiring recomputation. @@ -732,7 +732,7 @@ endpoint v_k, constraining where the min-cut can change. **Lemma**: If v_k is entirely on one side of the current min-cut (say v_k ∈ S), then changes to edges (v_k, v_j) where v_j ∈ S cannot affect the cut capacity. -Only edges crossing the cut — (v_k, v_j) where v_j ∈ S̄ — matter. +Only edges crossing the cut - (v_k, v_j) where v_j ∈ S̄ - matter. In a balanced bisection of 16 nodes, at most 8 of the 15 updated edges cross the cut, reducing the effective update size. @@ -853,7 +853,7 @@ min-cut or hierarchical decomposition reveals all boundaries simultaneously. With 16 nodes, the topological resolution is limited to distinguishing regions separated by at least one link. 
Fine-grained positioning (sub-meter accuracy) -is not achievable through topology alone — though it can be augmented with +is not achievable through topology alone - though it can be augmented with classical methods. **2. Ambiguity in cut interpretation** @@ -891,7 +891,7 @@ location (focused attention). ### 8.1 Optimal Node Placement for Topological Resolution **Question**: Given a room geometry and n nodes, what placement maximizes -topological resolution — the ability to distinguish different perturbation +topological resolution - the ability to distinguish different perturbation locations via distinct min-cut partitions? This is related to sensor placement optimization but with a graph-theoretic @@ -935,7 +935,7 @@ correlation structure (nearby edges provide redundant information). An adversary who knows the node positions could potentially create RF perturbations that manipulate the min-cut to produce a desired (false) topology. Understanding the attack surface requires analysis of which edge weight -modifications change the min-cut partition — the "critical edges" of the graph. +modifications change the min-cut partition - the "critical edges" of the graph. Connection to the `adversarial.rs` module in RuvSense: physically impossible signal patterns (e.g., coherence dropping on a link whose Fresnel zone is @@ -956,7 +956,7 @@ the same partition (topological aliasing). ### 8.6 Multi-Resolution Topological Decomposition **Question**: Can hierarchical min-cut decomposition (Gomory-Hu tree) provide -multi-resolution sensing — coarse room segmentation at the top level, fine-grained +multi-resolution sensing - coarse room segmentation at the top level, fine-grained boundary detection at lower levels? The Gomory-Hu tree naturally provides a hierarchy: the minimum weight edge in the @@ -1005,8 +1005,8 @@ selection for detection algorithms. ## 9. 
Conclusion -This document has established that graph-theoretic methods — particularly minimum -cut algorithms and spectral decomposition — provide a rigorous mathematical +This document has established that graph-theoretic methods - particularly minimum +cut algorithms and spectral decomposition - provide a rigorous mathematical foundation for RF topological sensing. The key contributions are: 1. **Formal framework**: Modeling the ESP32 mesh as a weighted graph G = (V, E, w) diff --git a/docs/research/02-csi-edge-weight-computation.md b/docs/research/02-csi-edge-weight-computation.md index 8c2547673..c61b6bdf9 100644 --- a/docs/research/02-csi-edge-weight-computation.md +++ b/docs/research/02-csi-edge-weight-computation.md @@ -46,7 +46,7 @@ $$ $$ A high SAV on subcarrier $k$ indicates that the channel at that frequency is -being perturbed -- typically by motion in a Fresnel zone that subcarrier is +being perturbed - typically by motion in a Fresnel zone that subcarrier is sensitive to. **Amplitude Stability Index (ASI).** The reciprocal of the coefficient of @@ -256,7 +256,7 @@ amplitude features are more reliable because phase noise dominates. At high SNR ### 3.1 Motivation The CSI vector captures the superposition of all multipath components. A stable -CSI does not necessarily mean a stable environment -- it could mean that the +CSI does not necessarily mean a stable environment - it could mean that the dominant path is stable while secondary paths fluctuate. Decomposing the channel into individual multipath components and tracking their stability provides richer information for edge weighting. @@ -294,7 +294,7 @@ is the steering vector. **ESP32 Constraints.** With $K = 56$ subcarriers and $L = 20$, we can resolve up to $P = 5$ multipath components with delay resolution finer than the FFT limit. 
The eigendecomposition of a $20 \times 20$ Hermitian matrix requires -approximately 15,000 floating-point operations -- feasible on the aggregator +approximately 15,000 floating-point operations - feasible on the aggregator node at 20 Hz for 120 edges if batched efficiently, but not on each ESP32 independently. @@ -382,7 +382,7 @@ $O(LP)$ per update rather than $O(L^3)$ for full eigendecomposition. Grassmann manifold, providing guaranteed convergence with $O(LP)$ complexity. For the 20 Hz update rate with $L = 20$ and $P = 5$, subspace tracking costs -approximately 200 multiply-accumulate operations per edge per update -- trivially +approximately 200 multiply-accumulate operations per edge per update - trivially cheap even on the aggregator. --- @@ -1048,7 +1048,7 @@ the specific multipath geometry of each link. 8. Schmidt, R. O. (1986). Multiple emitter location and signal parameter estimation. IEEE Transactions on Antennas and Propagation. -9. Roy, R., & Kailath, T. (1989). ESPRIT -- estimation of signal parameters via +9. Roy, R., & Kailath, T. (1989). ESPRIT - estimation of signal parameters via rotational invariance techniques. IEEE Transactions on ASSP. 10. Welford, B. P. (1962). Note on a method for calculating corrected sums of diff --git a/docs/research/03-attention-mechanisms-rf-sensing.md b/docs/research/03-attention-mechanisms-rf-sensing.md index 95beecff4..2a1b09818 100644 --- a/docs/research/03-attention-mechanisms-rf-sensing.md +++ b/docs/research/03-attention-mechanisms-rf-sensing.md @@ -43,7 +43,7 @@ cut partitioning of this weighted graph identifies the boundary between perturbed and unperturbed subgraphs, localizing the person. ``` - RF Topological Sensing — Conceptual Model + RF Topological Sensing - Conceptual Model ========================================== Physical Space Signal Graph G = (V, E, W) @@ -75,7 +75,7 @@ limitations: ignoring the sequential structure of human motion. 
Attention mechanisms address all three by learning to weight information -sources — subcarriers, time steps, links, and nodes — according to their +sources - subcarriers, time steps, links, and nodes - according to their relevance for the downstream task. ### 1.3 Notation @@ -183,7 +183,7 @@ where e_ij in R^E contains link-level features: - Fresnel zone geometry (distance, angle) ``` - Edge-Featured GAT — RF Sensing + Edge-Featured GAT - RF Sensing ================================ x_i x_j @@ -217,7 +217,7 @@ but before the dot product: This is strictly more expressive and important for RF sensing where the same node should attend differently depending on which neighbor it is -evaluating — a dynamic property essential for tracking moving targets. +evaluating - a dynamic property essential for tracking moving targets. --- @@ -295,7 +295,7 @@ making the model invariant to window start time. ### 3.4 Causal vs. Bidirectional Attention -For real-time sensing, causal (masked) attention is necessary — time step t +For real-time sensing, causal (masked) attention is necessary - time step t can only attend to steps 1..t: ``` @@ -321,7 +321,7 @@ weight for graph construction. Attention-weighted temporal pooling: Here z_t^{ij} is the contextualized CSI representation for link (i,j) at time t, and g maps to a scalar coherence score. The attention weights -alpha_t learn to focus on the most informative moments — for example, +alpha_t learn to focus on the most informative moments - for example, the peak of a Doppler burst during a gesture. --- @@ -400,7 +400,7 @@ link n: - beta, gamma: learnable parameters This is the concept implemented in RuVector's `CrossViewpointAttention` -with `GeometricBias` — the attention mechanism is biased toward +with `GeometricBias` - the attention mechanism is biased toward geometrically meaningful link combinations while still allowing the model to discover non-obvious correlations. 
@@ -455,7 +455,7 @@ graph Laplacian L = D - W (D is the degree matrix): Relaxed: min_y y^T L y / y^T D y, y in R^N ``` -The solution is the Fiedler vector — the eigenvector of the smallest +The solution is the Fiedler vector - the eigenvector of the smallest nonzero eigenvalue of the normalized Laplacian. ### 5.3 Attention as Edge Scoring for MinCut @@ -508,7 +508,7 @@ The training signal for attention comes from two sources: should have low weights (those crossing the person's body). 2. **Self-supervised**: The mincut objective itself provides a training - signal — attention weights that produce cleaner cuts (lower Ncut value + signal - attention weights that produce cleaner cuts (lower Ncut value with balanced partitions) are reinforced. ``` @@ -602,7 +602,7 @@ via the Cramer-Rao bound. The Geometric Diversity Index from RuVector's ### 6.5 Dynamic Node Dropout -Spatial attention naturally enables dynamic node dropout — nodes with +Spatial attention naturally enables dynamic node dropout - nodes with importance below a threshold are excluded from graph construction: ``` @@ -802,7 +802,7 @@ Instead of full T x T attention, use structured sparsity: ``` Complexity: O(T * w) with w << T. For CSI at 100 Hz, w = 32 covers -320 ms — sufficient for most motion events. +320 ms - sufficient for most motion events. **Dilated Attention**: Attend to positions at exponentially increasing gaps: @@ -841,7 +841,7 @@ computing attention only within buckets: With b = sqrt(T): O(T * sqrt(T)) ``` -For RF sensing, LSH naturally groups similar CSI patterns — time steps +For RF sensing, LSH naturally groups similar CSI patterns - time steps with similar signal characteristics attend to each other, which is physically meaningful (similar body poses produce similar CSI). @@ -999,7 +999,7 @@ link-level activity labels. The model learns to identify active links. **Stage 3: End-to-end fine-tuning** with mincut loss (Section 5) and person location supervision. 
All attention mechanisms adapt jointly. -**Stage 4: Distillation for edge deployment** — train efficient variants +**Stage 4: Distillation for edge deployment** - train efficient variants (Section 8) to match the full model's attention patterns using KL divergence between attention distributions. diff --git a/docs/research/04-transformer-architectures-graph-sensing.md b/docs/research/04-transformer-architectures-graph-sensing.md index b46796562..2844c1d39 100644 --- a/docs/research/04-transformer-architectures-graph-sensing.md +++ b/docs/research/04-transformer-architectures-graph-sensing.md @@ -1,13 +1,13 @@ # Transformer Architectures for RF Topological Graph Sensing **Research Document 04** | March 2026 -**Context**: RuView / wifi-densepose — 16-node ESP32 mesh, CSI coherence-weighted graphs, mincut-based boundary detection, real-time inference requirements. +**Context**: RuView / wifi-densepose - 16-node ESP32 mesh, CSI coherence-weighted graphs, mincut-based boundary detection, real-time inference requirements. --- ## Abstract -This document surveys transformer architectures applicable to RF topological graph sensing, where a mesh of 16 ESP32 nodes forms a dynamic graph with edges weighted by Channel State Information (CSI) coherence. The primary inference task is mincut prediction — identifying physical boundaries (walls, doors, human bodies) that partition the radio field. We examine graph transformers, temporal graph networks, vision transformers applied to RF spectrograms, transformer-based mincut prediction, positional encoding strategies for RF graphs, foundation model pre-training, and efficient edge deployment. The goal is to identify architectures that can replace or augment combinatorial mincut solvers with learned models capable of real-time inference on resource-constrained hardware. 
+This document surveys transformer architectures applicable to RF topological graph sensing, where a mesh of 16 ESP32 nodes forms a dynamic graph with edges weighted by Channel State Information (CSI) coherence. The primary inference task is mincut prediction - identifying physical boundaries (walls, doors, human bodies) that partition the radio field. We examine graph transformers, temporal graph networks, vision transformers applied to RF spectrograms, transformer-based mincut prediction, positional encoding strategies for RF graphs, foundation model pre-training, and efficient edge deployment. The goal is to identify architectures that can replace or augment combinatorial mincut solvers with learned models capable of real-time inference on resource-constrained hardware. --- @@ -28,7 +28,7 @@ This document surveys transformer architectures applicable to RF topological gra ### 1.1 The Structural Gap Between Sequences and Graphs -Standard transformers operate on sequences where positional encoding captures order. Graphs have no canonical ordering — nodes are permutation-invariant, and structure is encoded in adjacency rather than position. This creates a fundamental tension: the self-attention mechanism in vanilla transformers treats all token pairs equally, ignoring the graph topology that carries critical information in RF sensing. +Standard transformers operate on sequences where positional encoding captures order. Graphs have no canonical ordering - nodes are permutation-invariant, and structure is encoded in adjacency rather than position. This creates a fundamental tension: the self-attention mechanism in vanilla transformers treats all token pairs equally, ignoring the graph topology that carries critical information in RF sensing. For RF topological sensing, graph structure IS the signal. An edge between ESP32 nodes 3 and 7 weighted by CSI coherence of 0.92 means the radio path between them is unobstructed. A weight of 0.31 suggests an intervening boundary. 
The transformer must respect this structure, not flatten it away. @@ -54,7 +54,7 @@ Where `b_SPD(i,j)` is a learnable scalar indexed by the shortest-path distance. **Edge Encoding.** Edge features along the shortest path between two nodes are aggregated into the attention bias. For RF graphs, edge features include CSI amplitude, phase coherence, signal-to-noise ratio, and temporal stability. This is particularly powerful because the shortest path between two nodes often traverses intermediate links whose coherence values reveal intervening geometry. -**Applicability to RF sensing.** Graphormer's all-pairs attention with structural bias is well-suited to our 16-node mesh because N=16 makes O(N^2) attention tractable (256 pairs). The spatial encoding naturally captures the radio topology — nodes separated by many low-coherence hops are likely in different rooms. +**Applicability to RF sensing.** Graphormer's all-pairs attention with structural bias is well-suited to our 16-node mesh because N=16 makes O(N^2) attention tractable (256 pairs). The spatial encoding naturally captures the radio topology - nodes separated by many low-coherence hops are likely in different rooms. **Limitation.** Graphormer was designed for molecular property prediction with static graphs. RF graphs evolve at 10-100 Hz as people move, doors open, and multipath conditions change. The model needs temporal extension. @@ -68,7 +68,7 @@ For an RF mesh with adjacency matrix W (CSI coherence weights), the normalized L L = I - D^(-1/2) W D^(-1/2) ``` -The eigenvectors of L with the smallest non-zero eigenvalues capture the low-frequency structure of the graph — precisely the large-scale partitions that correspond to room boundaries. The Fiedler vector (eigenvector of the second-smallest eigenvalue) directly encodes the mincut partition. 
+The eigenvectors of L with the smallest non-zero eigenvalues capture the low-frequency structure of the graph - precisely the large-scale partitions that correspond to room boundaries. The Fiedler vector (eigenvector of the second-smallest eigenvalue) directly encodes the mincut partition. SAN computes attention separately over the original graph edges ("sparse attention") and all node pairs ("full attention"), then combines them. This dual mechanism lets the model simultaneously exploit local CSI patterns and global graph structure. @@ -103,7 +103,7 @@ For each node, TokenGT creates a token from the node features concatenated with **Token sequence for a 16-node RF mesh:** - 16 node tokens (each carrying node features: device ID, antenna configuration, noise floor) - Up to 120 edge tokens for a complete graph (each carrying CSI coherence, amplitude, phase, SNR) -- Total: up to 136 tokens — well within standard transformer capacity +- Total: up to 136 tokens - well within standard transformer capacity The advantage is simplicity: no custom attention mechanisms, no graph-specific modules. The disadvantage is that all structural information must be learned from the positional encodings and edge tokens rather than being architecturally enforced. @@ -152,7 +152,7 @@ Where `s_i(t-)` is node i's memory before the event, `delta_t` is the time since s_i(t) = GRU(s_i(t-), m_i(t)) ``` -This persistent memory captures the temporal context of each ESP32 node — its recent coherence history, drift patterns, and interaction frequency. +This persistent memory captures the temporal context of each ESP32 node - its recent coherence history, drift patterns, and interaction frequency. 
**Embedding Module.** To compute the embedding for node i at time t, TGN aggregates information from temporal neighbors using attention: @@ -162,11 +162,11 @@ z_i(t) = sum_j alpha(s_i, s_j, e_ij, delta_t_ij) * W * s_j(t_j) The attention weights depend on both node memories and the time elapsed since each neighbor's last update. -**Link Predictor / Graph Classifier.** The embeddings are used for downstream tasks — in our case, predicting which edges will be cut (mincut prediction) or classifying graph topology (room occupancy). +**Link Predictor / Graph Classifier.** The embeddings are used for downstream tasks - in our case, predicting which edges will be cut (mincut prediction) or classifying graph topology (room occupancy). **RF sensing adaptation.** TGN's event-driven architecture maps naturally to CSI measurements, which arrive as discrete edge events (node i measures coherence to node j). The persistent memory per node captures slow-changing context (room geometry, device calibration drift) while the embedding module captures fast dynamics (person movement). -For 16 nodes with measurements at 100 Hz across all 120 links, TGN processes approximately 12,000 edge events per second — feasible for the architecture but requiring careful batching. +For 16 nodes with measurements at 100 Hz across all 120 links, TGN processes approximately 12,000 edge events per second - feasible for the architecture but requiring careful batching. ### 2.3 Temporal Graph Attention (TGAT) @@ -176,7 +176,7 @@ TGAT (Xu et al., ICLR 2020) introduces time-aware attention using a functional t Phi(t) = sqrt(1/d) * [cos(omega_1 * t), sin(omega_1 * t), ..., cos(omega_d * t), sin(omega_d * t)] ``` -This continuous-time encoding allows TGAT to handle irregular sampling — critical for RF sensing where different links may be measured at different rates due to the TDM (Time-Division Multiplexing) protocol on the ESP32 mesh. 
+This continuous-time encoding allows TGAT to handle irregular sampling - critical for RF sensing where different links may be measured at different rates due to the TDM (Time-Division Multiplexing) protocol on the ESP32 mesh. The attention mechanism incorporates time explicitly: @@ -246,7 +246,7 @@ Channel State Information from a single link is a time series of complex-valued - X-axis: time window center - Y-axis: Doppler frequency - Value: spectral power -- This reveals movement velocities — human walking produces 2-6 Hz Doppler, breathing 0.1-0.5 Hz +- This reveals movement velocities - human walking produces 2-6 Hz Doppler, breathing 0.1-0.5 Hz **Cross-Link Spectrogram.** Stack spectrograms from multiple links: - For all 120 links in a 16-node complete graph: a 120 x 56 x T tensor @@ -278,7 +278,7 @@ A single link's spectrogram provides limited spatial information. To capture the **Approach 1: Channel stacking.** Treat each link's spectrogram as a separate channel of a multi-channel image. With 120 links and 56 subcarriers over 128 timesteps, this creates a 120-channel 56x128 image. Patch extraction operates across all channels simultaneously. -**Approach 2: Token concatenation.** Process each link's spectrogram independently through shared patch extraction and embedding, then concatenate all link tokens into a single sequence. With 112 patches per link and 120 links, this yields 13,440 tokens — too many for standard attention. +**Approach 2: Token concatenation.** Process each link's spectrogram independently through shared patch extraction and embedding, then concatenate all link tokens into a single sequence. With 112 patches per link and 120 links, this yields 13,440 tokens - too many for standard attention. **Approach 3: Hierarchical ViT.** Two-stage processing: 1. 
**Link-level ViT**: Process each link's spectrogram independently (shared weights), producing one embedding per link (120 embeddings) @@ -291,7 +291,7 @@ This hierarchical approach is the most promising because: ### 3.4 ViT Variants for RF -**DeiT (Data-efficient Image Transformers).** Uses knowledge distillation from a CNN teacher, relevant when training data is limited — a common constraint in RF sensing where labeled datasets require manual annotation of room layouts and occupancy. +**DeiT (Data-efficient Image Transformers).** Uses knowledge distillation from a CNN teacher, relevant when training data is limited - a common constraint in RF sensing where labeled datasets require manual annotation of room layouts and occupancy. **Swin Transformer.** Hierarchical ViT with shifted windows, reducing attention complexity from O(N^2) to O(N). For large spectrograms, Swin's local attention windows align with the locality of time-frequency patterns. @@ -321,7 +321,7 @@ Given a weighted graph G = (V, E, w) where V is 16 ESP32 nodes, E is up to 120 e cut(S, V\S) = sum_{(i,j) in E: i in S, j in V\S} w(i,j) ``` -The exact solution requires O(V^3) max-flow computation (e.g., push-relabel) or O(V * E) augmenting paths. For N=16 and E=120, exact computation takes microseconds — so why use a learned model? +The exact solution requires O(V^3) max-flow computation (e.g., push-relabel) or O(V * E) augmenting paths. For N=16 and E=120, exact computation takes microseconds - so why use a learned model? **Reasons for learned mincut prediction:** 1. **Temporal smoothing.** Exact mincut on noisy CSI measurements is unstable. A learned model can produce temporally smooth partitions. 
@@ -370,12 +370,12 @@ We propose a MinCut Transformer architecture for RF topological sensing: **Mincut prediction head.** Two output branches: -Branch 1 — **Partition assignment**: +Branch 1 - **Partition assignment**: ``` S = softmax(MLP(h_nodes)) [16 x K matrix for K-way partition] ``` -Branch 2 — **Cut edge prediction**: +Branch 2 - **Cut edge prediction**: ``` p_cut(i,j) = sigmoid(MLP([h_i || h_j || e_ij])) [probability that edge (i,j) is cut] ``` @@ -501,7 +501,7 @@ Rather than absolute node positions, relative encodings capture pairwise relatio b_ij = mean(w_e : e in shortest_path(i, j)) ``` -For RF graphs, the shortest path in the coherence graph between two distant nodes reveals the "radio corridor" connecting them — the sequence of high-coherence links that radio signals can traverse. +For RF graphs, the shortest path in the coherence graph between two distant nodes reveals the "radio corridor" connecting them - the sequence of high-coherence links that radio signals can traverse. **Rotary Position Embedding (RoPE) for graphs.** Adapt RoPE from language models by using spectral coordinates: ``` diff --git a/docs/research/05-sublinear-mincut-algorithms.md b/docs/research/05-sublinear-mincut-algorithms.md index 6433dd4c5..ddb59ae7f 100644 --- a/docs/research/05-sublinear-mincut-algorithms.md +++ b/docs/research/05-sublinear-mincut-algorithms.md @@ -1,7 +1,7 @@ # Sublinear and Near-Linear Time Minimum Cut Algorithms for Real-Time RF Sensing **Date**: 2026-03-08 -**Context**: RuVector v2.0.4 / RuvSense multistatic mesh — 16 ESP32 nodes, 120 link edges, 20 Hz update rate +**Context**: RuVector v2.0.4 / RuvSense multistatic mesh - 16 ESP32 nodes, 120 link edges, 20 Hz update rate **Scope**: Algorithmic foundations for maintaining minimum cuts on dynamic RF link graphs under real-time constraints --- @@ -13,7 +13,7 @@ C(16,2) = 120 edges, where each edge weight encodes the RF channel state information (CSI) attenuation or coherence between two nodes. 
Human bodies, moving objects, and environmental changes continuously perturb these weights. The minimum cut of this graph partitions the sensing field into regions of -minimal RF coupling — directly useful for person segmentation, occupancy +minimal RF coupling - directly useful for person segmentation, occupancy counting, and anomaly detection. At 20 Hz update rate, each mincut computation has a budget of 50 ms wall-clock @@ -21,7 +21,7 @@ time. On a resource-constrained coordinator (ESP32-S3 at 240 MHz or a modest ARM host), classical algorithms are either too slow or carry too much overhead. This document surveys the algorithmic landscape from classical exact methods through sublinear approximations, dynamic maintenance, streaming, and -sparsification — evaluating each for applicability to the RuVector RF sensing +sparsification - evaluating each for applicability to the RuVector RF sensing pipeline. Throughout, V = 16 and E = 120 (complete graph). While these are small by @@ -43,7 +43,7 @@ the total weight of edges crossing the partition: For RF sensing, w(u,v) typically represents the CSI coherence or signal attenuation between nodes u and v. A minimum cut identifies the partition -where RF coupling is weakest — corresponding to physical obstructions +where RF coupling is weakest - corresponding to physical obstructions (human bodies, walls, large objects) that attenuate the RF field. ### 1.2 Stoer-Wagner Algorithm (1997) @@ -68,7 +68,7 @@ each performed via a maximum adjacency ordering. **Practical assessment:** For V = 16, Stoer-Wagner executes 15 phases, each scanning at most 120 edges. Total work is roughly 1,800 edge scans plus priority queue operations. On modern hardware this completes in microseconds. -On ESP32 at 240 MHz, estimated wall time is 50-200 us — well within budget. +On ESP32 at 240 MHz, estimated wall time is 50-200 us - well within budget. This is the baseline. The algorithm is exact, deterministic, and simple to implement. 
For V = 16, classical complexity is not actually the bottleneck. @@ -100,7 +100,7 @@ then recursing on two independent copies. This reduces the repetition count from O(V^2) to O(V^2 / 2^depth), yielding O(V^2 log V) total time. **For our graph:** -- O(256 * 4) = O(1024) total work — negligible +- O(256 * 4) = O(1024) total work - negligible - Recursion depth: O(log V) = 4 levels **Practical assessment:** At V = 16, the recursion tree has ~4 levels with @@ -168,7 +168,7 @@ This achieves a (1 +/- 0.1)-approximation by reading only 1/3 of the edges. ``` The key insight: Stoer-Wagner on a sparse sample with ~40 edges and 16 -vertices runs in O(16 * 40) = O(640) operations — faster than on the full +vertices runs in O(16 * 40) = O(640) operations - faster than on the full graph, and with provable approximation guarantees. ### 2.3 Cut Sparsifiers @@ -181,9 +181,9 @@ For V = 16, epsilon = 0.1: O(16 * 4 / 0.01) = O(6400) edges. This exceeds our actual edge count of 120, so sparsification provides no benefit at this scale. However, it becomes critical for: -- V = 64: E = 2016, sparsifier needs ~O(2560) edges — marginal savings -- V = 128: E = 8128, sparsifier needs ~O(5120) edges — 37% reduction -- V = 256: E = 32640, sparsifier needs ~O(10240) edges — 69% reduction +- V = 64: E = 2016, sparsifier needs ~O(2560) edges - marginal savings +- V = 128: E = 8128, sparsifier needs ~O(5120) edges - 37% reduction +- V = 256: E = 32640, sparsifier needs ~O(10240) edges - 69% reduction ### 2.4 Spectral Sparsification @@ -195,12 +195,12 @@ Laplacian preserves all cut values. Their algorithm: 3. Reweight sampled edges to preserve expected cut values. Result: O(V log V / epsilon^2) edges suffice, same as combinatorial -sparsification, but the spectral guarantee is stronger — it preserves the +sparsification, but the spectral guarantee is stronger - it preserves the entire spectrum of the Laplacian, not just cut values. 
**For RF sensing:** The graph Laplacian eigenvectors correspond to spatial modes of the RF field. Spectral sparsification preserves these modes, which -is useful beyond mincut — it preserves the spatial structure needed for +is useful beyond mincut - it preserves the spatial structure needed for tomography and field modeling (RuvSense `field_model.rs`). ### 2.5 Query-Based Sublinear Algorithms @@ -208,7 +208,7 @@ tomography and field modeling (RuvSense `field_model.rs`). Recent work by Rubinstein, Schramm, and Weinberg (2018) achieves O(V polylog V)-time algorithms that query the graph adjacency/weight oracle rather than reading all edges. For V = 16, this gives O(16 * 16) = O(256) -queries — a 2x reduction over reading all 120 edges (not useful at this +queries - a 2x reduction over reading all 120 edges (not useful at this scale, but relevant at V = 256 where it reduces from 32640 to ~4000 queries). --- @@ -253,7 +253,7 @@ amortized update time. **For our setting:** - Update time: O(log^3(16) / 0.01) ~ O(6400) per edge update with epsilon = 0.1 -- Batch of 120 updates: O(768,000) — worse than recomputation! +- Batch of 120 updates: O(768,000) - worse than recomputation! This reveals an important practical point: dynamic algorithms have excellent asymptotic behavior but carry large constant factors that dominate at small @@ -263,9 +263,9 @@ known dynamic algorithm. ### 3.4 When Dynamic Algorithms Win Dynamic algorithms become beneficial when: -1. **V > 1000** and E > 100,000 — amortized polylog update beats O(VE). -2. **Sparse updates** — only a few edges change per frame, not all 120. -3. **Incremental weight changes** — weights change by small deltas, +1. **V > 1000** and E > 100,000 - amortized polylog update beats O(VE). +2. **Sparse updates** - only a few edges change per frame, not all 120. +3. **Incremental weight changes** - weights change by small deltas, allowing incremental sparsifier updates. 
For our RF mesh, a practical middle ground is: @@ -296,7 +296,7 @@ Output: Updated mincut ``` In practice, steps 1-4 handle >90% of frames (the minimum cut partition is -spatially stable — people do not teleport), and full recomputation is +spatially stable - people do not teleport), and full recomputation is triggered only when someone crosses the cut boundary. This reduces average per-frame cost to O(E) = O(120) for crossing-weight evaluation plus occasional O(VE) recomputation. @@ -309,7 +309,7 @@ occasional O(VE) recomputation. In the streaming model, edges arrive one at a time (or in a stream from multiple ESP32 nodes), and we must estimate the mincut using limited working -memory — ideally O(V polylog V) space rather than O(V^2). +memory - ideally O(V polylog V) space rather than O(V^2). This is relevant when: - CSI data arrives asynchronously from 16 nodes via TDM (Time Division @@ -350,7 +350,7 @@ Result: (1+epsilon)-approximate mincut from the refined sparsifier. For our TDM protocol, each complete CSI scan across all 16 nodes constitutes one "pass." A two-pass approach means using two consecutive TDM cycles -(100 ms total at 20 Hz) to build and refine the sparsifier — acceptable +(100 ms total at 20 Hz) to build and refine the sparsifier - acceptable if we can tolerate 100 ms latency on the initial estimate. ### 4.4 Turnstile Streaming @@ -362,7 +362,7 @@ Ahn, Guha, and McGregor (2013) extended their sketching approach to the turnstile model. The key: L0-sampling sketches allow recovering edges from the sketch difference, enabling dynamic cut estimation. -**Space complexity:** O(V * polylog(V) / epsilon^2) — same as the +**Space complexity:** O(V * polylog(V) / epsilon^2) - same as the insertion-only case. **For RF sensing:** This means we can maintain a running sketch that @@ -409,7 +409,7 @@ a subgraph H with O(V log V / epsilon^2) edges such that for every cut 3. Reweight sampled edges: w_H(e) = w_G(e) / p_e. 
**Computing strong connectivity:** This requires O(VE) time using max-flow -computations — as expensive as solving mincut directly. However, approximate +computations - as expensive as solving mincut directly. However, approximate strong connectivity can be computed in O(E log^3 V) time using the sparsification itself (bootstrapping). @@ -491,7 +491,7 @@ mincut computation and spatial field modeling. ### 6.1 Motivation -Classical mincut algorithms are global — they examine the entire graph. Local +Classical mincut algorithms are global - they examine the entire graph. Local partitioning algorithms find cuts by exploring only a small region of the graph, running in time proportional to the size of the smaller side of the cut rather than the full graph. @@ -574,7 +574,7 @@ local partitioning can quickly confirm or refine the detection: ``` This creates a feedback loop where the tracker guides the graph algorithm -and the graph algorithm refines the tracker — running in O(1/alpha/epsilon) +and the graph algorithm refines the tracker - running in O(1/alpha/epsilon) time rather than O(VE) for full mincut. ### 6.5 Multi-Seed Local Partitioning @@ -582,7 +582,7 @@ time rather than O(VE) for full mincut. For multiple people, run local partitioning from multiple seeds simultaneously. With k people and V = 16 nodes, each person's local partition explores ~4-6 nodes, totaling ~O(k * 6 * degree) = O(k * 90) -work. For k = 3 people, this is O(270) — less than half the cost of +work. For k = 3 people, this is O(270) - less than half the cost of full Stoer-Wagner. The challenge is handling overlapping partitions. Two approaches: @@ -720,7 +720,7 @@ pub struct RfGraph { ``` For V = 16, this uses 120 * 4 = 480 bytes for weights, plus 8 bytes for -cached values. Total: 488 bytes — fits in a single cache line pair. +cached values. Total: 488 bytes - fits in a single cache line pair. 
**Stoer-Wagner state:** ```rust @@ -795,7 +795,7 @@ impl RfGraph { let j_side = (partition >> j) & 1; if i_side != j_side { - // Edge crosses the cut — must update cut value + // Edge crosses the cut - must update cut value if let Some(ref mut cut_val) = self.cached_mincut { *cut_val += new_weight - old_weight; // Cut value changed but partition might still be optimal @@ -809,7 +809,7 @@ impl RfGraph { return true; } } - // Edge does not cross the cut — partition still valid, + // Edge does not cross the cut - partition still valid, // but cut value might no longer be minimum // Heuristic: if weight decreased significantly, invalidate if new_weight < old_weight * 0.8 { @@ -950,7 +950,7 @@ where } }); - // Find max key (sequential — V is small) + // Find max key (sequential - V is small) let next = (0..V) .filter(|&v| active[v] && !in_set[v]) .max_by(|&a, &b| key[a].partial_cmp(&key[b]).unwrap()) @@ -1027,7 +1027,7 @@ impl DynamicPersonMatcher { let normalized_cut = cut_value / smaller_side as f32; if normalized_cut > self.connectivity_threshold { - // Segment is internally well-connected — one person or empty + // Segment is internally well-connected - one person or empty result.push(segment); } else { // Split into two sub-segments and continue diff --git a/docs/research/06-esp32-mesh-hardware-constraints.md b/docs/research/06-esp32-mesh-hardware-constraints.md index e78a87c42..6ff4151e4 100644 --- a/docs/research/06-esp32-mesh-hardware-constraints.md +++ b/docs/research/06-esp32-mesh-hardware-constraints.md @@ -32,9 +32,9 @@ follows: | Parameter | HT20 (20 MHz) | HT40 (40 MHz) | HE20 (WiFi 6) | |------------------------|-----------------|-----------------|-----------------| | Total OFDM subcarriers | 64 | 128 | 256 | -| Null subcarriers | 12 | 14 | — | -| Pilot subcarriers | 4 | 6 | — | -| Data subcarriers | 48 | 108 | — | +| Null subcarriers | 12 | 14 | - | +| Pilot subcarriers | 4 | 6 | - | +| Data subcarriers | 48 | 108 | - | | CSI reported (ESP32) | 52
(data+pilot) | 114 (data+pilot)| N/A | | CSI reported (ESP32-S3)| 52 | 114 | N/A | | CSI reported (ESP32-C6)| 52 | 114 | 52 (HE mode) | @@ -175,7 +175,7 @@ For a 5m x 5m room, 16 nodes are placed around the perimeter at approximately N14 N7 | | | | - N13 -- N12 -- N11 -- N8 + N13 -- N12 -- N11 -- N8 South Wall Node spacing: ~1.25 m along each 5m wall @@ -557,9 +557,9 @@ complementary information. ``` Channel Hopping Schedule (3-channel rotation): - Sweep 0: Ch 1 -- all 16 TDM slots -- 48 ms - Sweep 1: Ch 6 -- all 16 TDM slots -- 48 ms - Sweep 2: Ch 11 -- all 16 TDM slots -- 48 ms + Sweep 0: Ch 1 - all 16 TDM slots - 48 ms + Sweep 1: Ch 6 - all 16 TDM slots - 48 ms + Sweep 2: Ch 11 - all 16 TDM slots - 48 ms [repeat] Channel switch overhead: ~5 ms (wifi_set_channel) @@ -817,17 +817,17 @@ Node configuration stored in non-volatile storage (NVS): | Key | Type | Default | Description | |----------------------|--------|---------|----------------------------------| -| `node_id` | u8 | — | Unique node ID (1-16) | +| `node_id` | u8 | - | Unique node ID (1-16) | | `mesh_size` | u8 | 16 | Number of nodes in mesh | | `tdm_slot_ms` | u16 | 3 | TDM slot duration (ms) | | `sweep_channels` | u8[] | [1,6,11]| Channel hopping sequence | | `tx_power_dbm` | i8 | 8 | TX power (2-20 dBm) | | `sync_interval_ms` | u32 | 1000 | Sync beacon period | | `report_interval_ms` | u32 | 100 | Result upload period | -| `server_ip` | u32 | — | Backend server IP | +| `server_ip` | u32 | - | Backend server IP | | `server_port` | u16 | 8080 | Backend server port | | `coherence_alpha` | f32 | 0.1 | EMA smoothing factor | -| `ota_url` | string | — | Firmware update endpoint | +| `ota_url` | string | - | Firmware update endpoint | ### 7.7 Error Handling and Watchdog diff --git a/docs/research/07-contrastive-learning-rf-coherence.md b/docs/research/07-contrastive-learning-rf-coherence.md index 5ad1d82c8..69b8cd912 100644 --- a/docs/research/07-contrastive-learning-rf-coherence.md +++
b/docs/research/07-contrastive-learning-rf-coherence.md @@ -27,7 +27,7 @@ within the RuView/wifi-densepose Rust codebase. ### 1.1 Motivation Traditional supervised approaches to WiFi CSI-based sensing require -extensive labeled datasets -- a person walking through a room while +extensive labeled datasets - a person walking through a room while ground-truth positions are recorded via camera or motion capture. This labeling burden is the single largest bottleneck in deploying WiFi sensing systems to new environments. Contrastive self-supervised learning offers @@ -141,7 +141,7 @@ deployment, is often prolonged stillness). BYOL (Grill et al., 2020) eliminates negative pairs entirely, learning by predicting the output of a momentum-updated target network from an online network. This is attractive for RF sensing because defining "true negatives" -in a continuously varying RF field is ambiguous -- when a person moves slowly, +in a continuously varying RF field is ambiguous - when a person moves slowly, CSI frames 1 second apart are neither clearly positive nor clearly negative. **BYOL for CSI:** @@ -227,7 +227,7 @@ link or time), and n is a negative (different person or empty room). AETHER's person re-ID embeddings capture *who* is perturbing the RF field. We propose extending AETHER to additionally capture *where* topological -boundaries form -- the physical surfaces, walls, doors, and moving bodies +boundaries form - the physical surfaces, walls, doors, and moving bodies that partition the RF field into coherent zones. The key insight is that a topological boundary in the RF graph manifests @@ -431,7 +431,7 @@ boundary sharpness suggests the model cannot reliably distinguish zones. The `field_model.rs` module computes room eigenstructure via SVD of the CSI covariance matrix. The leading singular vectors represent the dominant modes of RF field variation. 
Boundaries correspond to regions where the -dominant singular vectors change character -- where the eigenstructure +dominant singular vectors change character - where the eigenstructure of one zone is linearly independent of the neighboring zone's eigenstructure. @@ -716,12 +716,12 @@ Pre-training integrates with the existing training pipeline in wifi-densepose-train/ src/ pretrain/ - contrastive.rs -- SimCLR/MoCo/BYOL implementations - augmentations.rs -- CSI-specific augmentations - curriculum.rs -- Complexity-ordered data staging - cache.rs -- Embedding cache for delta-driven updates - dataset.rs -- CompressedCsiBuffer (ruvector-temporal-tensor) - model.rs -- Encoder architecture with AETHER-Topo heads + contrastive.rs - SimCLR/MoCo/BYOL implementations + augmentations.rs - CSI-specific augmentations + curriculum.rs - Complexity-ordered data staging + cache.rs - Embedding cache for delta-driven updates + dataset.rs - CompressedCsiBuffer (ruvector-temporal-tensor) + model.rs - Encoder architecture with AETHER-Topo heads ``` The pre-trained model is serialized to ONNX format for deployment via @@ -793,8 +793,8 @@ especially with online hard example mining. Standard triplet training with random sampling is inefficient because most triplets satisfy the margin constraint trivially. OHEM selects the -hardest triplets -- those where the positive is far and the negative -is close -- to focus learning on the decision boundary. +hardest triplets - those where the positive is far and the negative +is close - to focus learning on the decision boundary. **OHEM for edge classification:** @@ -802,7 +802,7 @@ For each anchor, we maintain a priority queue of candidates scored by: hardness(a, p, n) = ||f(a) - f(p)||^2 - ||f(a) - f(n)||^2 -The hardest valid triplets (where hardness is negative -- the triangle +The hardest valid triplets (where hardness is negative - the triangle inequality is violated) provide the most gradient signal. 
**Semi-hard mining**: In practice, the hardest triplets can be outliers @@ -1086,7 +1086,7 @@ based version for visualization and debugging. | Implement SimCLR contrastive loss | wifi-densepose-train | pretrain/contrastive.rs | core, nn | | Implement delta change detector | wifi-densepose-signal | ruvsense/delta.rs | coherence.rs | | Add embedding cache | wifi-densepose-signal | ruvsense/embed_cache.rs | coherence_gate.rs | -| Unit tests for augmentations | wifi-densepose-train | tests/ | -- | +| Unit tests for augmentations | wifi-densepose-train | tests/ | - | ### 8.2 Phase 2: AETHER-Topo (Weeks 5-8) @@ -1106,7 +1106,7 @@ based version for visualization and debugging. | Edge state classifier | wifi-densepose-signal | ruvsense/edge_classify.rs | coherence.rs | | Learned min-cut weighting | wifi-densepose-ruvector | src/metrics.rs | edge_classify.rs | | Temporal state transition validator | wifi-densepose-signal | ruvsense/adversarial.rs | edge_classify.rs | -| End-to-end tests: triplet + min-cut | wifi-densepose-ruvector | tests/ | -- | +| End-to-end tests: triplet + min-cut | wifi-densepose-ruvector | tests/ | - | ### 8.4 Phase 4: Cross-Environment Transfer (Weeks 13-16) @@ -1115,7 +1115,7 @@ based version for visualization and debugging. 
| Domain alignment contrastive loss | wifi-densepose-train | pretrain/domain_align.rs | contrastive.rs | | Environment fingerprinting | wifi-densepose-signal | ruvsense/cross_room.rs | ADR-027 | | Few-shot adaptation pipeline | wifi-densepose-train | pretrain/few_shot.rs | domain_align.rs | -| EWC continual learning | wifi-densepose-train | pretrain/ewc.rs | -- | +| EWC continual learning | wifi-densepose-train | pretrain/ewc.rs | - | | Quantized encoder for ESP32-S3 | wifi-densepose-nn | src/quantize.rs | Candle backend | ### 8.5 ADR Dependencies diff --git a/docs/research/08-temporal-graph-evolution-ruvector.md b/docs/research/08-temporal-graph-evolution-ruvector.md index 55792fcd3..7ad1dbd7a 100644 --- a/docs/research/08-temporal-graph-evolution-ruvector.md +++ b/docs/research/08-temporal-graph-evolution-ruvector.md @@ -30,7 +30,7 @@ WiFi-based sensing produces a rich, continuously evolving graph structure. Each ESP32 node is a vertex; each TX-RX link is an edge carrying time-varying Channel State Information (CSI). People, furniture, doors, and environmental conditions perturb this graph in characteristic patterns. Tracking *how* the -graph changes over time -- not just the current snapshot -- unlocks several +graph changes over time - not just the current snapshot - unlocks several capabilities that static analysis cannot provide: - **Trajectory reconstruction** from the movement of minimum-cut boundaries. @@ -45,7 +45,7 @@ evolution tracking into the RuView codebase via RuVector's graph engine. ### 1.1 Scope Boundaries -This research covers the RF sensing graph specifically -- the graph whose +This research covers the RF sensing graph specifically - the graph whose vertices are ESP32 nodes and whose edges are CSI links. It does not address the DensePose skeleton graph (which is a separate, downstream structure). The two graphs interact at the fusion boundary where `MultistaticArray` @@ -173,11 +173,11 @@ attention weight. 
| Framework | Time Model | Memory | Scalability | RuView Fit | |-----------|-----------|--------|-------------|-----------| -| TGN | Continuous | Per-node | O(N) update | High -- maps to CoherenceState | -| JODIE | Continuous | Per-pair | O(E) update | Medium -- TX-RX pairs | -| CT-DGNN | Continuous | Global | O(N^2) attention | Low -- too expensive at 20 Hz | -| DyRep | Continuous | Per-node | O(N*K) | Medium -- temporal attention useful | -| GraphSAGE-T | Discrete | Aggregated | O(N*K*L) | High -- snapshot aggregation | +| TGN | Continuous | Per-node | O(N) update | High - maps to CoherenceState | +| JODIE | Continuous | Per-pair | O(E) update | Medium - TX-RX pairs | +| CT-DGNN | Continuous | Global | O(N^2) attention | Low - too expensive at 20 Hz | +| DyRep | Continuous | Per-node | O(N*K) | Medium - temporal attention useful | +| GraphSAGE-T | Discrete | Aggregated | O(N*K*L) | High - snapshot aggregation | ### 2.4 Recommended Hybrid Approach @@ -322,7 +322,7 @@ With delta compression (Section 7), the per-day cost drops to approximately ### 4.1 Pattern Taxonomy RF field graphs exhibit characteristic evolution patterns during different -physical events. We classify these as **temporal motifs** -- recurring +physical events. We classify these as **temporal motifs** - recurring subgraph evolution signatures. ``` @@ -1198,7 +1198,7 @@ analogous graph health metrics with biomechanical parallels: /// Integrates with existing modules via the integration points /// listed in Section 1.2. pub struct RfTemporalGraph { - // -- Topology (stable) -- + // - Topology (stable) - /// Node identifiers. nodes: Vec, /// Link definitions (directed: tx -> rx). @@ -1206,7 +1206,7 @@ pub struct RfTemporalGraph { /// Node positions in room coordinates. positions: Vec<[f32; 3]>, - // -- Live state (updated at 20 Hz) -- + // - Live state (updated at 20 Hz) - /// Per-link coherence state (from coherence.rs). coherence_states: Vec, /// Per-link gate policy (from coherence_gate.rs).
@@ -1214,13 +1214,13 @@ pub struct RfTemporalGraph { /// Field model for eigenstructure tracking. field_model: FieldModel, - // -- Temporal storage -- + // - Temporal storage - /// Delta-compressed graph history. history: DeltaGraphStore, /// Graph-level Welford baseline. graph_baseline: GraphBaseline, - // -- Analysis -- + // - Analysis - /// Per-link CUSUM detectors for change-point detection. cusum_detectors: Vec, /// Temporal motif classifier. @@ -1228,7 +1228,7 @@ pub struct RfTemporalGraph { /// Cut boundary trackers (one per tracked person). cut_trackers: Vec, - // -- Configuration -- + // - Configuration - config: TemporalGraphConfig, } @@ -1365,12 +1365,12 @@ pub struct CycleResult { **Goal**: Implement `DeltaGraphStore` and basic temporal queries. **Files to create**: -- `signal/src/ruvsense/temporal_graph.rs` -- Core temporal graph types -- `signal/src/ruvsense/temporal_store.rs` -- Delta compression engine +- `signal/src/ruvsense/temporal_graph.rs` - Core temporal graph types +- `signal/src/ruvsense/temporal_store.rs` - Delta compression engine **Files to modify**: -- `signal/src/ruvsense/mod.rs` -- Register new modules -- `signal/src/ruvsense/coherence.rs` -- Add `snapshot()` method to `CoherenceState` +- `signal/src/ruvsense/mod.rs` - Register new modules +- `signal/src/ruvsense/coherence.rs` - Add `snapshot()` method to `CoherenceState` **Dependencies**: None (builds on existing `WelfordStats`, `CoherenceState`). @@ -1384,10 +1384,10 @@ pub struct CycleResult { **Goal**: Implement CUSUM detectors and event classification. **Files to create**: -- `signal/src/ruvsense/change_point.rs` -- CUSUM and spectral detectors +- `signal/src/ruvsense/change_point.rs` - CUSUM and spectral detectors **Files to modify**: -- `signal/src/ruvsense/cross_room.rs` -- Accept events from detector +- `signal/src/ruvsense/cross_room.rs` - Accept events from detector **Dependencies**: Phase 1 (temporal store for history access).
@@ -1401,10 +1401,10 @@ pub struct CycleResult { **Goal**: Implement `CutBoundaryTracker` with Kalman filtering. **Files to create**: -- `signal/src/ruvsense/cut_trajectory.rs` -- Kalman-filtered cut tracking +- `signal/src/ruvsense/cut_trajectory.rs` - Kalman-filtered cut tracking **Files to modify**: -- `signal/src/ruvsense/multistatic.rs` -- Feed `PersonCluster` to tracker +- `signal/src/ruvsense/multistatic.rs` - Feed `PersonCluster` to tracker **Dependencies**: Phase 1, `ruvector-mincut` integration. @@ -1417,7 +1417,7 @@ pub struct CycleResult { **Goal**: Implement `GraphBaseline` with drift detection. **Files to modify**: -- `signal/src/ruvsense/longitudinal.rs` -- Extract `WelfordStats` pattern +- `signal/src/ruvsense/longitudinal.rs` - Extract `WelfordStats` pattern into shared trait, implement for graph metrics. **Dependencies**: Phase 1, Phase 2. @@ -1432,7 +1432,7 @@ pub struct CycleResult { **Goal**: Extend `CrossRoomTracker` with `TemporalTransitionGraph`. **Files to modify**: -- `signal/src/ruvsense/cross_room.rs` -- Add temporal statistics to +- `signal/src/ruvsense/cross_room.rs` - Add temporal statistics to transition log, implement transition prediction. **Dependencies**: Phase 2 (event detection feeds transitions). diff --git a/docs/research/09-resolution-spatial-granularity.md b/docs/research/09-resolution-spatial-granularity.md index 59740c4a3..d520e6516 100644 --- a/docs/research/09-resolution-spatial-granularity.md +++ b/docs/research/09-resolution-spatial-granularity.md @@ -37,7 +37,7 @@ of length `d` at wavelength `lambda` is: r_F = sqrt(lambda * d / 4) ``` -This is the *minimum detectable feature size* for a single link -- an +This is the *minimum detectable feature size* for a single link - an object smaller than `r_F` cannot reliably perturb the link's CSI above noise floor. @@ -70,7 +70,7 @@ r_F(d1) = sqrt(lambda * d1 * d2 / d) This reaches its maximum at the midpoint (`d1 = d2 = d/2`) and tapers to zero at both endpoints. 
The practical implication: objects near a node are harder to detect on that specific link because the Fresnel zone is -narrow there. This is why mesh density matters -- nearby links cover +narrow there. This is why mesh density matters - nearby links cover the "blind cone" of each individual link. ### 1.4 Fresnel Zone as Resolution Kernel @@ -103,8 +103,8 @@ blocks the first Fresnel zone on a 5m link at 2.4 GHz. At 5 GHz the same person extends beyond the Fresnel zone, meaning: - At 2.4 GHz: person width approximately equals Fresnel radius on - medium links -- moderate SNR perturbation. -- At 5 GHz: person width exceeds Fresnel radius -- stronger relative + medium links - moderate SNR perturbation. +- At 5 GHz: person width exceeds Fresnel radius - stronger relative perturbation, better localization along perpendicular axis. The mincut algorithm partitions the graph at edges where coherence drops. @@ -412,7 +412,7 @@ C(S, T) = sum_{(i,j) : i in S, j in T} w_ij When a person or object bisects the sensing region, links crossing the boundary experience coherence drops, reducing their weights. The mincut naturally identifies this boundary because it finds the cheapest way to -separate the graph -- and disrupted links are cheap. +separate the graph - and disrupted links are cheap. ### 4.2 Boundary Localization from Cut Edges @@ -1330,7 +1330,7 @@ cross-room tracker) to weight their inputs by spatial confidence. Magazine, 22(4), 54-69. 8. Shen, Y. and Win, M. Z. (2010). "Fundamental Limits of Wideband - Localization — Part I: A General Framework." IEEE Trans. Information + Localization - Part I: A General Framework." IEEE Trans. Information Theory, 56(10), 4956-4980. 
### Graph Cuts and Spectral Methods diff --git a/docs/research/10-system-architecture-prototype.md b/docs/research/10-system-architecture-prototype.md index 02196f56c..9e27b4ead 100644 --- a/docs/research/10-system-architecture-prototype.md +++ b/docs/research/10-system-architecture-prototype.md @@ -1,4 +1,4 @@ -# Research Document 10: RF Topological Sensing — System Architecture and Prototype +# Research Document 10: RF Topological Sensing - System Architecture and Prototype **Date**: 2026-03-08 **Status**: Draft @@ -27,7 +27,7 @@ RF topological sensing treats a mesh of ESP32 nodes as a "radio nervous system." Every transmitter-receiver pair defines a graph edge. The Channel State Information (CSI) measured on each edge encodes how the radio environment between those two -nodes has been perturbed — by walls, furniture, and most importantly, by human +nodes has been perturbed - by walls, furniture, and most importantly, by human bodies. When a person stands between two nodes, the CSI coherence on that link drops. The collection of all such drops defines a cut in the graph that traces the physical boundary of the person. @@ -163,7 +163,7 @@ Time --> The signal crate contains the RuvSense modules that provide the mathematical foundation for edge weight computation. -**coherence.rs** — Z-score coherence scoring with DriftProfile. This module +**coherence.rs** - Z-score coherence scoring with DriftProfile. This module already computes a coherence metric between CSI frames. For RF topology, we use coherence as the primary edge weight: high coherence means the link is unobstructed, low coherence means something (a person) is in the path. @@ -175,7 +175,7 @@ Usage in rf_topology: - coherence_gate::CoherenceGate decides if a measurement is reliable ``` -**phase_align.rs** — Iterative LO phase offset estimation using circular mean. +**phase_align.rs** - Iterative LO phase offset estimation using circular mean. 
ESP32 local oscillators drift, which corrupts phase measurements. Phase alignment is a prerequisite for meaningful coherence computation. @@ -186,7 +186,7 @@ Usage in rf_topology: - Runs per-edge, per-frame ``` -**multiband.rs** — Multi-band CSI frame fusion. When nodes operate on multiple +**multiband.rs** - Multi-band CSI frame fusion. When nodes operate on multiple WiFi channels (via channel hopping), this module fuses the measurements into a single coherent view. @@ -197,7 +197,7 @@ Usage in rf_topology: - Optional: single-channel operation is sufficient for prototype ``` -**multistatic.rs** — Attention-weighted fusion with geometric diversity. This +**multistatic.rs** - Attention-weighted fusion with geometric diversity. This module already performs multi-link fusion, which is conceptually close to what rf_topology needs. The key difference is that multistatic.rs fuses for pose estimation, while rf_topology fuses for boundary detection. @@ -208,7 +208,7 @@ Usage in rf_topology: - Reuse attention weights for graph edge confidence scoring ``` -**adversarial.rs** — Physically impossible signal detection. This module +**adversarial.rs** - Physically impossible signal detection. This module detects when CSI measurements violate physical constraints (e.g., signal strength increases when a person is blocking the path). Essential for filtering bad edges in the graph. @@ -224,7 +224,7 @@ Usage in rf_topology: The ruvector crate provides graph-based data structures and attention mechanisms that can be repurposed for RF topology. -**viewpoint/attention.rs** — CrossViewpointAttention with GeometricBias and +**viewpoint/attention.rs** - CrossViewpointAttention with GeometricBias and softmax. The attention mechanism computes importance weights across multiple viewpoints. In RF topology, each TX-RX pair is a "viewpoint" and the attention mechanism can prioritize the most informative edges. 
@@ -236,7 +236,7 @@ Usage in rf_topology: - Softmax normalization produces valid probability distribution over edges ``` -**viewpoint/geometry.rs** — GeometricDiversityIndex and Cramer-Rao bounds. +**viewpoint/geometry.rs** - GeometricDiversityIndex and Cramer-Rao bounds. This module quantifies how much geometric information a set of links provides. RF topology uses this to determine if the current node placement can resolve a boundary at a given location. @@ -248,7 +248,7 @@ Usage in rf_topology: - Fisher Information matrix guides optimal node placement ``` -**viewpoint/coherence.rs** — Phase phasor coherence with hysteresis gate. +**viewpoint/coherence.rs** - Phase phasor coherence with hysteresis gate. Already provides a gating mechanism for coherence measurements. RF topology reuses this to prevent boundary flicker from noisy measurements. @@ -258,7 +258,7 @@ Usage in rf_topology: - Smooths boundary detection over time ``` -**viewpoint/fusion.rs** — MultistaticArray aggregate root with domain events. +**viewpoint/fusion.rs** - MultistaticArray aggregate root with domain events. This is a DDD aggregate root that manages a collection of multistatic links. RF topology can extend this pattern for graph-level aggregate management. @@ -272,7 +272,7 @@ Usage in rf_topology: The hardware crate manages ESP32 devices and the TDM protocol. -**esp32/tdm.rs** — Time Division Multiplexing scheduler. Assigns transmit +**esp32/tdm.rs** - Time Division Multiplexing scheduler. Assigns transmit slots to nodes, ensures collision-free CSI extraction. ``` @@ -282,7 +282,7 @@ Usage in rf_topology: - Cycle period = N_nodes * slot_duration ``` -**esp32/channel_hop.rs** — Channel hopping firmware control. Allows nodes to +**esp32/channel_hop.rs** - Channel hopping firmware control. Allows nodes to measure CSI on multiple WiFi channels for improved spatial resolution. 
``` @@ -291,7 +291,7 @@ Usage in rf_topology: - Feeds into multiband.rs fusion ``` -**esp32/csi_extract.rs** — Raw CSI extraction from ESP32 hardware registers. +**esp32/csi_extract.rs** - Raw CSI extraction from ESP32 hardware registers. Produces CsiFrame structs that are the input to the entire pipeline. ``` @@ -369,7 +369,7 @@ shared types. ### 3.2 Key Types -#### RfGraph — Aggregate Root +#### RfGraph - Aggregate Root RfGraph is the central aggregate root. It owns the complete graph state: nodes, edges, weights, and metadata. All mutations go through RfGraph methods, which @@ -393,7 +393,7 @@ Invariants enforced by RfGraph: - Stale edges (no update in N cycles) are pruned - Graph is always connected (disconnected subgraphs trigger alert) -#### EdgeWeight — Value Object +#### EdgeWeight - Value Object ``` EdgeWeight { @@ -411,7 +411,7 @@ EdgeWeight { EdgeWeight is a value object: immutable after creation. Each TDM cycle produces a new EdgeWeight for each edge, which replaces the previous one in RfGraph. -#### CutBoundary — Value Object +#### CutBoundary - Value Object ``` CutBoundary { @@ -428,7 +428,7 @@ CutBoundary { CutBoundary represents the output of the mincut solver. Multiple CutBoundaries can exist simultaneously when multiple people are detected. -#### TopologyEvent — Domain Event +#### TopologyEvent - Domain Event ``` TopologyEvent { @@ -607,7 +607,7 @@ Input: RfGraph adjacency matrix with weights Output: CutBoundary (minimum cut edges + partitions) Cost: 4-node: ~0.1ms 16-node: ~2ms - 64-node: ~15ms (exceeds budget -- use incremental solver) + 64-node: ~15ms (exceeds budget - use incremental solver) ``` For graphs larger than ~40 nodes, use incremental mincut: only recompute @@ -630,9 +630,9 @@ Cost: Convex hull + smoothing. <3ms for typical boundaries. Serialize boundary polygon to JSON, send over WebSocket, render in browser. 
``` -Serialization: serde_json::to_string(&boundary) -- <1ms -WebSocket TX: axum tungstenite broadcast -- <2ms local -Browser render: Canvas 2D path drawing -- 10-16ms at 60fps +Serialization: serde_json::to_string(&boundary) - <1ms +WebSocket TX: axum tungstenite broadcast - <2ms local +Browser render: Canvas 2D path drawing - 10-16ms at 60fps ``` ### 4.3 Timing Diagram @@ -953,7 +953,7 @@ fusion. A fundamentally different approach is possible: treat the entire ESP32 mesh as a graph where TX-RX pairs are edges and CSI coherence determines edge -weights. A minimum cut of this graph reveals physical boundaries — the +weights. A minimum cut of this graph reveals physical boundaries - the locations where radio propagation is disrupted by human bodies. This is "RF topological sensing." @@ -1002,7 +1002,7 @@ The implementation will proceed in three phases: **Negative**: - Requires minimum 4 ESP32 nodes (higher hardware cost than single-link) -- Mincut provides boundaries, not poses — pose still requires neural inference +- Mincut provides boundaries, not poses - pose still requires neural inference or additional geometric reasoning - Stoer-Wagner complexity O(V*E + V^2 log V) limits scalability beyond ~40 nodes without incremental solver @@ -1573,14 +1573,14 @@ impl AdjacencyMatrix { | Term | Definition | |-----------------------|-------------------------------------------------------------------| -| CSI | Channel State Information -- per-subcarrier complex amplitude | -| TDM | Time Division Multiplexing -- collision-free TX scheduling | -| Mincut | Minimum cut -- partition of graph that minimizes total edge weight | +| CSI | Channel State Information - per-subcarrier complex amplitude | +| TDM | Time Division Multiplexing - collision-free TX scheduling | +| Mincut | Minimum cut - partition of graph that minimizes total edge weight | | Stoer-Wagner | Deterministic O(VE + V^2 log V) mincut algorithm | | Edge weight | Coherence metric on a TX-RX link; low = obstructed | | 
Boundary | Spatial region where mincut edges intersect physical space | -| Aggregate root | DDD pattern -- single entry point for a consistency boundary | -| EMA | Exponential Moving Average -- temporal smoothing filter | +| Aggregate root | DDD pattern - single entry point for a consistency boundary | +| EMA | Exponential Moving Average - temporal smoothing filter | ## Appendix B: Related ADRs diff --git a/docs/research/11-quantum-level-sensors.md b/docs/research/11-quantum-level-sensors.md index 7fe342b1e..5b1f52b81 100644 --- a/docs/research/11-quantum-level-sensors.md +++ b/docs/research/11-quantum-level-sensors.md @@ -1,6 +1,6 @@ # Quantum-Level Sensors for RF Topological Sensing -## SOTA Research Document — RF Topological Sensing Series (11/12) +## SOTA Research Document - RF Topological Sensing Series (11/12) **Date**: 2026-03-08 **Domain**: Quantum Sensing × RF Topology × Graph-Based Detection @@ -58,7 +58,7 @@ ODMR Protocol: ### 2.2 Superconducting Quantum Interference Devices (SQUIDs) -- **Sensitivity**: ~1 fT/√Hz (femtotesla — 1000× better than NV) +- **Sensitivity**: ~1 fT/√Hz (femtotesla - 1000× better than NV) - **Operating temperature**: 4 K (liquid helium) or 77 K (high-Tc) - **Frequency range**: DC to ~1 GHz - **Detection mechanism**: Josephson junction flux quantization @@ -188,17 +188,17 @@ For r = 2 (17.4 dB squeezing): --- -## 4. Rydberg Atom RF Sensors — Deep Dive +## 4. Rydberg Atom RF Sensors - Deep Dive ### 4.1 Broadband RF Detection via EIT Rydberg atoms provide the most promising near-term quantum RF sensor for topological sensing because: -1. **Room temperature operation** — no cryogenics -2. **Broadband** — single vapor cell covers MHz to THz by tuning laser wavelength -3. **Self-calibrated** — response depends only on atomic constants -4. **Compact** — vapor cell can be cm-scale +1. **Room temperature operation** - no cryogenics +2. **Broadband** - single vapor cell covers MHz to THz by tuning laser wavelength +3. 
**Self-calibrated** - response depends only on atomic constants +4. **Compact** - vapor cell can be cm-scale ``` Rydberg Sensor Architecture: @@ -931,4 +931,4 @@ value for graph cut optimization at scale. The long-term vision is a quantum-native sensing mesh where every node performs quantum measurements, edge weights encode quantum coherence between nodes, and graph algorithms -run on quantum hardware — a true quantum radio nervous system. +run on quantum hardware - a true quantum radio nervous system. diff --git a/docs/research/12-quantum-biomedical-sensing.md b/docs/research/12-quantum-biomedical-sensing.md index 7f7285288..46d8ee938 100644 --- a/docs/research/12-quantum-biomedical-sensing.md +++ b/docs/research/12-quantum-biomedical-sensing.md @@ -1,6 +1,6 @@ -# Quantum Biomedical Sensing — From Anatomy to Field Dynamics +# Quantum Biomedical Sensing - From Anatomy to Field Dynamics -## SOTA Research Document — RF Topological Sensing Series (12/12) +## SOTA Research Document - RF Topological Sensing Series (12/12) **Date**: 2026-03-08 **Domain**: Quantum Biomedical Sensing × Graph Diagnostics × Ambient Health Monitoring @@ -17,7 +17,7 @@ The heart's electrical cycle produces magnetic fields detectable meters away. Ne in femtotesla-scale magnetic fluctuations. Blood flow carries ionic currents that create measurable magnetic disturbances. -Quantum sensors — operating at picotesla and femtotesla sensitivity — can observe these +Quantum sensors - operating at picotesla and femtotesla sensitivity - can observe these fields directly. 
Combined with graph-based topological analysis (minimum cut, coherence detection, RuVector temporal tracking), this creates a fundamentally new diagnostic paradigm: @@ -76,7 +76,7 @@ Dense Biomagnetic Array (conceptual): │ Q Q Q Q Q Q Q Q │ - Continuously at 1 kHz └────────────────────────────────────┘ - Output: B(x, y, z, t) — 4D biomagnetic field map + Output: B(x, y, z, t) - 4D biomagnetic field map ``` ### 2.3 Graph-Based Biomagnetic Analysis @@ -137,10 +137,10 @@ Neural Field Generation: B ≈ 100 fT Required sensitivity: < 10 fT/√Hz - NV diamond (current): ~1 pT/√Hz — not yet sufficient - NV diamond (projected 2028): ~10 fT/√Hz — approaching - SERF magnetometer: ~0.16 fT/√Hz — sufficient now - OPM (optically pumped): ~5 fT/√Hz — sufficient now + NV diamond (current): ~1 pT/√Hz - not yet sufficient + NV diamond (projected 2028): ~10 fT/√Hz - approaching + SERF magnetometer: ~0.16 fT/√Hz - sufficient now + OPM (optically pumped): ~5 fT/√Hz - sufficient now ``` ### 3.2 Wearable MEG with Quantum Sensors @@ -371,9 +371,9 @@ Non-Contact Detection Ranges: ──────────────────────────────────────────────────────────── Heart (magnetic) | 100 pT | 1 pT | 0.01 pT | NV (1m), SERF (3m) Heart (electric) | 1 mV/m | 10 µV/m | 1 µV/m | Rydberg (all) - Breathing (motion)| — via RF disturbance — | ESP32 mesh - Muscle tremor | 10 pT | 0.1 pT | — | NV (1m) - Neural (MEG) | 1 pT | 0.01 pT| — | SERF (1m only) + Breathing (motion)| - via RF disturbance - | ESP32 mesh + Muscle tremor | 10 pT | 0.1 pT | - | NV (1m) + Neural (MEG) | 1 pT | 0.01 pT| - | SERF (1m only) Practical non-contact vital signs at 1-3m: ✅ Heart rate (magnetic + RF) @@ -816,7 +816,7 @@ Hospital Patient Monitoring Without Wires: ✅ Movement/activity (RF: excellent) ✅ Fall detection (RF: <2s) ⚠️ Heart rhythm detail (quantum: approaching clinical) - ❌ SpO2 (requires optical — not yet ambient) + ❌ SpO2 (requires optical - not yet ambient) ❌ Blood pressure (requires contact measurement) ``` @@ -1139,9 +1139,9 @@ Timeline: 
Quantum biomedical sensing represents the convergence of three advancing frontiers: -1. **Quantum sensor technology** — Room-temperature sensors approaching fT sensitivity -2. **Graph-based analysis** — Minimum cut and coherence topology for health monitoring -3. **Ambient computing** — Non-contact, privacy-preserving, continuous measurement +1. **Quantum sensor technology** - Room-temperature sensors approaching fT sensitivity +2. **Graph-based analysis** - Minimum cut and coherence topology for health monitoring +3. **Ambient computing** - Non-contact, privacy-preserving, continuous measurement The key insight is that **disease is a topological change in the body's electromagnetic coherence graph**. The same minimum cut algorithms that detect a person walking through diff --git a/docs/research/13-nv-diamond-neural-magnetometry.md b/docs/research/13-nv-diamond-neural-magnetometry.md index f559b99d8..df529267a 100644 --- a/docs/research/13-nv-diamond-neural-magnetometry.md +++ b/docs/research/13-nv-diamond-neural-magnetometry.md @@ -1,6 +1,6 @@ # NV Diamond Magnetometers for Neural Current Detection -## SOTA Research Document — RF Topological Sensing Series (13/22) +## SOTA Research Document - RF Topological Sensing Series (13/22) **Date**: 2026-03-09 **Domain**: Nitrogen-Vacancy Quantum Sensing × Neural Magnetometry × Graph Topology @@ -10,7 +10,7 @@ ## 1. Introduction -Neurons communicate through ionic currents. Those currents generate magnetic fields — tiny +Neurons communicate through ionic currents. Those currents generate magnetic fields - tiny ones, measured in femtotesla (10⁻¹⁵ T). For context, Earth's magnetic field is approximately 50 μT, roughly 10¹⁰ times stronger than the magnetic signature of a single cortical column. 
@@ -67,7 +67,7 @@ Where: - γₑ = 28 GHz/T (electron gyromagnetic ratio) - B = external magnetic field component along NV axis -For a 1 fT field: Δf = 28 × 10⁻¹⁵ GHz = 28 μHz — extraordinarily small, requiring +For a 1 fT field: Δf = 28 × 10⁻¹⁵ GHz = 28 μHz - extraordinarily small, requiring long integration times or ensemble measurements. ### 2.3 Sensitivity Fundamentals @@ -79,7 +79,7 @@ long integration times or ensemble measurements. Where C is ODMR contrast (~0.03), R is photon count rate (~10⁵/s), T₂* is inhomogeneous dephasing time (~1 μs in bulk diamond). -Typical single NV sensitivity: ~1 μT/√Hz — insufficient for neural signals. +Typical single NV sensitivity: ~1 μT/√Hz - insufficient for neural signals. **NV ensemble**: N centers improve sensitivity by √N ``` @@ -502,18 +502,18 @@ NV's combination of high temporal resolution AND dense spatial sampling is uniqu ### 9.1 Leading Research Groups -**MIT/Harvard**: Walsworth group — pioneered NV magnetometry, demonstrated cellular-scale +**MIT/Harvard**: Walsworth group - pioneered NV magnetometry, demonstrated cellular-scale magnetic imaging, working on macroscale neural sensing arrays. -**University of Stuttgart**: Wrachtrup group — single NV defect spectroscopy, advanced +**University of Stuttgart**: Wrachtrup group - single NV defect spectroscopy, advanced dynamical decoupling protocols for NV magnetometry. -**University of Melbourne**: Hollenberg group — NV-based quantum sensing for biological +**University of Melbourne**: Hollenberg group - NV-based quantum sensing for biological applications, diamond fabrication optimization. **NIST Boulder**: NV ensemble magnetometry with optimized readout, approaching fT sensitivity. -**UC Berkeley**: Budker group — NV magnetometry for fundamental physics and biomedical +**UC Berkeley**: Budker group - NV magnetometry for fundamental physics and biomedical applications. 
### 9.2 Commercial NV Sensor Companies @@ -779,7 +779,7 @@ For the RuVector + dynamic mincut architecture, NV sensors offer: 5. **Temporal resolution** sufficient for real-time topology tracking The combination of NV sensor arrays with RuVector graph memory and dynamic mincut analysis -could create the first portable brain network topology observatory — measuring how cognition +could create the first portable brain network topology observatory - measuring how cognition organizes itself in real time, without requiring the $3M SQUID MEG systems that currently dominate neuroimaging. diff --git a/docs/research/21-sota-neural-decoding-landscape.md b/docs/research/21-sota-neural-decoding-landscape.md index 56cb4bc18..313b1a667 100644 --- a/docs/research/21-sota-neural-decoding-landscape.md +++ b/docs/research/21-sota-neural-decoding-landscape.md @@ -1,6 +1,6 @@ # State-of-the-Art Neural Decoding Landscape (2023–2026) -## SOTA Research Document — RF Topological Sensing Series (21/22) +## SOTA Research Document - RF Topological Sensing Series (21/22) **Date**: 2026-03-09 **Domain**: Neural Decoding × Generative AI × Brain-Computer Interfaces × Quantum Sensing @@ -11,7 +11,7 @@ ## 1. Introduction The field of neural decoding has undergone a phase transition between 2023 and 2026. Three -technologies stacked together — sensors, decoders, and visualization/reconstruction systems — +technologies stacked together - sensors, decoders, and visualization/reconstruction systems - have collectively moved "brain reading" from science fiction to engineering challenge. Yet the popular narrative obscures a critical distinction: current systems decode *perceived* and *intended* content from neural activity, not arbitrary private thoughts. @@ -22,7 +22,7 @@ territory where topological brain modeling could open an entirely new research d --- -## 2. Layer 1: Neural Sensors — The Fidelity Floor +## 2. 
Layer 1: Neural Sensors - The Fidelity Floor Everything in neural decoding is bounded by sensor fidelity. No algorithm can extract information that the sensor never captured. @@ -54,7 +54,7 @@ information that the sensor never captured. **Fundamental Limitation**: Requires brain surgery. Coverage area is tiny relative to the whole brain (~0.001% of cortical surface per array). Each implant covers one small patch. -Network-level topology analysis requires coverage of many regions simultaneously — the exact +Network-level topology analysis requires coverage of many regions simultaneously - the exact opposite of what implants provide. **Why This Matters for Mincut Architecture**: Implants give depth but not breadth. Dynamic @@ -208,7 +208,7 @@ analysis. NV diamond arrays represent the medium-term upgrade path. --- -## 3. Layer 2: Neural Decoders — AI Meets Neuroscience +## 3. Layer 2: Neural Decoders - AI Meets Neuroscience ### 3.1 The Translation Paradigm @@ -391,7 +391,7 @@ This means: **Implication for Topology Analysis**: The RuVector/mincut approach sidesteps the hallucination problem entirely. It measures *structural properties* of brain activity (network topology, coherence boundaries) rather than trying to generate *content* (images, text). There is no -generative prior to hallucinate — the topology either changes or it doesn't. +generative prior to hallucinate - the topology either changes or it doesn't. --- @@ -478,7 +478,7 @@ LOW FIDELITY EEG motor imagery EEG connectivity (basic) ``` The RuVector + mincut architecture occupies the **medium-fidelity, structure-focused** quadrant -— a space that is largely unexplored in current research. + - a space that is largely unexplored in current research. ### 6.3 What This Architecture Uniquely Enables @@ -489,7 +489,7 @@ The RuVector + mincut architecture occupies the **medium-fidelity, structure-foc which correlates with cognitive state changes. 3. 
**Longitudinal tracking**: RuVector memory enables tracking of topology evolution over - days, weeks, months — detecting gradual changes like neurodegeneration. + days, weeks, months - detecting gradual changes like neurodegeneration. 4. **Content-agnostic monitoring**: The system does not need to decode what is being thought. It detects how the brain organizes its processing, which is clinically and scientifically @@ -533,7 +533,7 @@ Foundation models could learn brain topology patterns from large datasets: - Enable cross-subject topology comparison in a shared embedding space This is where RuVector's contrastive learning (AETHER) and geometric embedding become -particularly valuable — they provide the representational framework for topology foundation +particularly valuable - they provide the representational framework for topology foundation models. --- @@ -559,7 +559,7 @@ Used GPT-based language model to map fMRI activity to word sequences. **Result**: Recovered semantic meaning of stories (not verbatim words). **Significance**: First open-vocabulary language decoder from non-invasive imaging. Crucially, -decoding failed when subjects were not cooperating — they could defeat the decoder by +decoding failed when subjects were not cooperating - they could defeat the decoder by thinking about other things. ### 8.3 Takagi & Nishimoto Image Reconstruction (2023) @@ -601,9 +601,9 @@ speed. MEG's temporal resolution enabled tracking of dynamic visual processing. 1. **Content decoding is advancing rapidly** but remains subject-specific and perception-bound. 2. **Non-invasive sensors are reaching sufficient fidelity** for network-level analysis. 3. **Generative AI amplifies decoding** but introduces hallucination risks. -4. **Topology analysis is the unexplored dimension** — no major group is doing real-time +4. **Topology analysis is the unexplored dimension** - no major group is doing real-time mincut-based brain network analysis. -5. 
**OPM-MEG is the enabling technology** — wearable, high-fidelity, affordable trajectory. +5. **OPM-MEG is the enabling technology** - wearable, high-fidelity, affordable trajectory. ### 9.2 Recommended Architecture Priorities @@ -627,7 +627,7 @@ speed. MEG's temporal resolution enabled tracking of dynamic visual processing. but these are static (one scan per subject). - **Dynamic functional connectivity (dFC)**: fMRI-based studies examine time-varying - connectivity, but at ~0.5 Hz temporal resolution — too slow for real-time cognitive + connectivity, but at ~0.5 Hz temporal resolution - too slow for real-time cognitive tracking. - **No one is doing real-time mincut on brain networks from MEG/OPM data.** This is @@ -673,7 +673,7 @@ better algorithms (transformers, diffusion models), and better training data. Ye progress has not addressed the fundamental question of how cognition organizes itself topologically. -The RuVector + dynamic mincut architecture positions itself in this gap — not competing with +The RuVector + dynamic mincut architecture positions itself in this gap - not competing with content decoders but opening an entirely new dimension of brain observation. Combined with OPM quantum sensors, this becomes a "topological brain observatory" that measures the architecture of thought rather than its content. 
diff --git a/docs/research/22-brain-observatory-application-domains.md b/docs/research/22-brain-observatory-application-domains.md index 994eacc89..0a693959e 100644 --- a/docs/research/22-brain-observatory-application-domains.md +++ b/docs/research/22-brain-observatory-application-domains.md @@ -1,6 +1,6 @@ -# Brain State Observatory — Ten Application Domains +# Brain State Observatory - Ten Application Domains -## SOTA Research Document — RF Topological Sensing Series (22/22) +## SOTA Research Document - RF Topological Sensing Series (22/22) **Date**: 2026-03-09 **Domain**: Clinical Diagnostics × BCI × Cognitive Science × Commercial Applications @@ -8,14 +8,14 @@ --- -## 1. Introduction — Not Mind Reading, Something Better +## 1. Introduction - Not Mind Reading, Something Better If you build a system that combines high-sensitivity neural sensing, RuVector-style geometric memory, and dynamic mincut topology analysis, you are not building a mind reader. You are building a **brain state observatory**. The most valuable applications are not "reading thoughts." They are systems that measure how -cognition organizes itself over time — and detect when that organization goes wrong. +cognition organizes itself over time - and detect when that organization goes wrong. This document maps ten application domains where the RuVector + dynamic mincut architecture becomes unusually powerful, with honest assessment of feasibility, market reality, and @@ -451,7 +451,7 @@ enables entirely new categories of scientific questions. ### 8.4 This Is Network Science of Cognition The field has studied individual brain regions and pairwise connections. Topology analysis -studies the emergent organizational principles — how the whole network self-organizes to +studies the emergent organizational principles - how the whole network self-organizes to produce cognition. This is analogous to studying traffic patterns in a city rather than individual cars. 
@@ -682,7 +682,7 @@ neural network classifiers which can fail unpredictably. --- -## 13. The Most Powerful Future Use — Google Maps for Cognition +## 13. The Most Powerful Future Use - Google Maps for Cognition ### 13.1 The Vision @@ -711,7 +711,7 @@ A real-time display showing: ### 13.3 How This Changes Neuroscience -Current neuroscience is like having satellite photos of a city — you see the buildings but +Current neuroscience is like having satellite photos of a city - you see the buildings but not the traffic. This observatory adds the traffic layer: real-time flow, congestion, routing, and reorganization. @@ -805,9 +805,9 @@ routing, and reorganization. 2. **Professional monitoring**: simplified system for aviation/military **Commercialization priorities**: -- Cognitive workload monitoring (defense/aviation contracts) — fastest revenue -- Epilepsy topology monitoring (clinical need, clear regulatory path) — largest impact -- Brain health assessment (wellness market) — largest eventual market +- Cognitive workload monitoring (defense/aviation contracts) - fastest revenue +- Epilepsy topology monitoring (clinical need, clear regulatory path) - largest impact +- Brain health assessment (wellness market) - largest eventual market ### Phase 4: Platform Expansion (Year 5–10) @@ -839,7 +839,7 @@ The RuVector + mincut core engine is the reusable technology. It should be: **Why non-invasive is the right starting point**: 1. Mincut topology analysis needs *breadth* of coverage (many regions), which non-invasive excels at -2. Implants provide *depth* (single neuron) but only from tiny patches — the opposite of +2. Implants provide *depth* (single neuron) but only from tiny patches - the opposite of what topology analysis needs 3. OPM-MEG fidelity is sufficient for network-level topology analysis 4. 
Regulatory pathway is simpler for non-invasive devices @@ -857,8 +857,8 @@ Once the topology framework is validated non-invasively, combine with implant da The ten application domains for a brain state observatory are not speculative science fiction. They are engineering challenges with clear technical requirements, identifiable markets, and -realistic development timelines. The enabling technologies — OPM sensors, graph algorithms, -RuVector memory, dynamic mincut — exist today or are within reach. +realistic development timelines. The enabling technologies - OPM sensors, graph algorithms, +RuVector memory, dynamic mincut - exist today or are within reach. The strategic insight is this: while the rest of the field races to decode brain *content* (what people think, see, imagine), there is an entirely unexplored dimension of brain diff --git a/docs/research/remote-vital-sign-sensing-modalities.md b/docs/research/remote-vital-sign-sensing-modalities.md index 6e4d661d4..815aaa5bd 100644 --- a/docs/research/remote-vital-sign-sensing-modalities.md +++ b/docs/research/remote-vital-sign-sensing-modalities.md @@ -57,10 +57,10 @@ This is already used in some wellness and telemedicine systems. Quantum radar (based on entanglement/correlations or quantum illumination) is under research: -- **Quantum radar** aims to use quantum correlations to outperform classical radar in target detection at short ranges. Early designs have demonstrated proof of concept but remain limited to near-field/short distances — potential for biomedical scanning is discussed. +- **Quantum radar** aims to use quantum correlations to outperform classical radar in target detection at short ranges. Early designs have demonstrated proof of concept but remain limited to near-field/short distances - potential for biomedical scanning is discussed. - **Quantum-inspired computational imaging** and quantum sensors promise enhanced sensitivity, including in foggy, low visibility or internal sensing contexts. 
-While full quantum remote vital sign sensing (like single-photon quantum radar scanning people's heartbeat) isn't yet operational, quantum sensors — especially atomic magnetometers and NV-centre devices — offer a path toward ultrasensitive biomedical field detection. +While full quantum remote vital sign sensing (like single-photon quantum radar scanning people's heartbeat) isn't yet operational, quantum sensors - especially atomic magnetometers and NV-centre devices - offer a path toward ultrasensitive biomedical field detection. ### 6. Quantum Biomedical Instrumentation @@ -89,7 +89,7 @@ These are quantum-sensor-enabled biomedical detection advances rather than direc ## Key Insights & State-of-Research - **RF and radar sensing** are the dominant SOTA methods for non-contact vital sign detection outside optical imaging. These use advanced signal processing and ML to extract micro-movement signatures. -- **Quantum sensors** are showing promise for enhanced biomedical detection at finer scales — especially magnetic and other field sensing — but practical remote vital sign sensing (people at distance) is still largely research. +- **Quantum sensors** are showing promise for enhanced biomedical detection at finer scales - especially magnetic and other field sensing - but practical remote vital sign sensing (people at distance) is still largely research. - **Hybrid approaches** (RF + ML, quantum-inspired imaging) represent emerging research frontiers with potential breakthroughs in sensitivity and privacy. --- @@ -107,4 +107,4 @@ This project's signal processing pipeline (ADR-014) implements several of the co | Subcarrier Selection | Channel/frequency selection in OFDM and FMCW systems | | Body Velocity Profile | Doppler-velocity mapping used in mmWave and through-wall radar | -The algorithmic foundations are shared across modalities — what differs is the carrier frequency, bandwidth, and hardware interface. 
+The algorithmic foundations are shared across modalities - what differs is the carrier frequency, bandwidth, and hardware interface. diff --git a/docs/research/ruview-multistatic-fidelity-sota-2026.md b/docs/research/ruview-multistatic-fidelity-sota-2026.md index 638a6bcb0..52dc2b375 100644 --- a/docs/research/ruview-multistatic-fidelity-sota-2026.md +++ b/docs/research/ruview-multistatic-fidelity-sota-2026.md @@ -17,9 +17,9 @@ WiFi-based dense human pose estimation faces three persistent fidelity bottlenec The core insight behind RuView is that **upgrading observability beats inventing new WiFi standards**. Rather than waiting for wider bandwidth hardware or higher carrier frequencies, RuView exploits the one fidelity lever that scales with commodity equipment deployed today: geometric viewpoint diversity. -RuView -- RuVector Viewpoint-Integrated Enhancement -- is a sensing-first RF mode that rides on existing silicon (ESP32-S3), existing bands (2.4/5 GHz), and existing regulations (Part 15 unlicensed). Its principal contribution is **cross-viewpoint embedding fusion via ruvector-attention**, where per-viewpoint AETHER embeddings (ADR-024) are fused through a geometric-bias attention mechanism that learns which viewpoint combinations are informative for each body region. +RuView - RuVector Viewpoint-Integrated Enhancement - is a sensing-first RF mode that rides on existing silicon (ESP32-S3), existing bands (2.4/5 GHz), and existing regulations (Part 15 unlicensed). Its principal contribution is **cross-viewpoint embedding fusion via ruvector-attention**, where per-viewpoint AETHER embeddings (ADR-024) are fused through a geometric-bias attention mechanism that learns which viewpoint combinations are informative for each body region. -Three fidelity levers govern WiFi sensing resolution: bandwidth, carrier frequency, and viewpoints. RuView focuses on the third -- the only lever that improves all three bottlenecks simultaneously without hardware upgrades. 
+Three fidelity levers govern WiFi sensing resolution: bandwidth, carrier frequency, and viewpoints. RuView focuses on the third - the only lever that improves all three bottlenecks simultaneously without hardware upgrades. --- @@ -62,7 +62,7 @@ Fresnel zone radius at each band governs the sensing-sensitive region: r_n = sqrt(n * lambda * d1 * d2 / (d1 + d2)) -At 2.4 GHz with 3m link distance, the first Fresnel zone radius is 0.61m -- a broad sensitivity region suitable for macro-motion detection but poor for localizing specific body parts. At 5 GHz the radius shrinks to 0.42m, improving localization at the cost of coverage. +At 2.4 GHz with 3m link distance, the first Fresnel zone radius is 0.61m - a broad sensitivity region suitable for macro-motion detection but poor for localizing specific body parts. At 5 GHz the radius shrinks to 0.42m, improving localization at the cost of coverage. RuView currently targets 2.4 GHz (ESP32-S3) and 5 GHz (Cognitum path), compensating for coarse per-link localization with viewpoint diversity. @@ -90,7 +90,7 @@ and rho_bar is the mean pairwise correlation between viewpoint CSI streams. Maxi Effective_resolution ~ BW * N_viewpoints * sin(angular_spread) -This means even at 20 MHz bandwidth, six well-placed viewpoints with 60-degree angular spread provide effective resolution comparable to a single 120 MHz viewpoint -- at a fraction of the hardware cost. +This means even at 20 MHz bandwidth, six well-placed viewpoints with 60-degree angular spread provide effective resolution comparable to a single 120 MHz viewpoint - at a fraction of the hardware cost. **References:** Person-in-WiFi 3D (Yan et al., CVPR 2024); bistatic MIMO radar theory (Li and Stoica, 2007); DGSense (Zhou et al., 2025). 
@@ -198,12 +198,12 @@ Per-viewpoint AETHER embeddings are produced by the CsiToPoseTransformer backbon - Input: sanitized CSI frame (56 subcarriers x 2 antennas x 2 components) - Backbone: cross-attention transformer producing [17 x d_model] body part features - Projection: linear head maps pooled features to 128-d normalized embedding -- Training: VICReg-style contrastive loss with three terms -- invariance (same pose from different viewpoints maps nearby), variance (embeddings use full capacity), covariance (embedding dimensions are decorrelated) +- Training: VICReg-style contrastive loss with three terms - invariance (same pose from different viewpoints maps nearby), variance (embeddings use full capacity), covariance (embedding dimensions are decorrelated) - Augmentation: subcarrier dropout (p=0.1), phase noise injection (sigma=0.05 rad), temporal jitter (+-2 frames) ### 5.3 RuVector Graph Memory -The HNSW index (ADR-004) stores environment fingerprints as AETHER embeddings. Graph edges encode temporal adjacency (consecutive frames from the same track) and spatial adjacency (observations from the same room region). Query protocol: given a new CSI frame, compute its AETHER embedding, retrieve k nearest HNSW neighbors, and return associated pose, identity, and room region. Updates are incremental -- new observations insert into the graph without full reindexing. +The HNSW index (ADR-004) stores environment fingerprints as AETHER embeddings. Graph edges encode temporal adjacency (consecutive frames from the same track) and spatial adjacency (observations from the same room region). Query protocol: given a new CSI frame, compute its AETHER embedding, retrieve k nearest HNSW neighbors, and return associated pose, identity, and room region. Updates are incremental - new observations insert into the graph without full reindexing. 
### 5.4 Coherence-Gated Updates @@ -355,29 +355,29 @@ The 6-node ESP32 + RuView configuration achieves 70-80% of camera DensePose accu ## 10. References ### WiFi Sensing and Pose Estimation -- [DensePose From WiFi](https://arxiv.org/abs/2301.00250) -- Geng, Huang, De la Torre (CMU, 2023) -- [Person-in-WiFi 3D](https://openaccess.thecvf.com/content/CVPR2024/papers/Yan_Person-in-WiFi_3D_End-to-End_Multi-Person_3D_Pose_Estimation_with_Wi-Fi_CVPR_2024_paper.pdf) -- Yan et al. (CVPR 2024) -- [AdaPose: Cross-Site WiFi Pose Estimation](https://ieeexplore.ieee.org/document/10584280) -- Zhou et al. (IEEE IoT Journal, 2024) -- [HPE-Li: Lightweight WiFi Pose Estimation](https://link.springer.com/chapter/10.1007/978-3-031-72904-1_6) -- ECCV 2024 -- [DGSense: Domain-Generalized Sensing](https://arxiv.org/abs/2501.12345) -- Zhou et al. (2025) -- [X-Fi: Modality-Invariant Foundation Model](https://openreview.net/forum?id=xfi2025) -- Chen and Yang (ICLR 2025) -- [AM-FM: First WiFi Foundation Model](https://arxiv.org/abs/2602.00001) -- (2026) -- [PerceptAlign: Cross-Layout Pose Estimation](https://arxiv.org/abs/2603.00001) -- Chen et al. (2026) -- [CAPC: Context-Aware Predictive Coding](https://ieeexplore.ieee.org/document/10600001) -- IEEE OJCOMS, 2024 +- [DensePose From WiFi](https://arxiv.org/abs/2301.00250) - Geng, Huang, De la Torre (CMU, 2023) +- [Person-in-WiFi 3D](https://openaccess.thecvf.com/content/CVPR2024/papers/Yan_Person-in-WiFi_3D_End-to-End_Multi-Person_3D_Pose_Estimation_with_Wi-Fi_CVPR_2024_paper.pdf) - Yan et al. (CVPR 2024) +- [AdaPose: Cross-Site WiFi Pose Estimation](https://ieeexplore.ieee.org/document/10584280) - Zhou et al. (IEEE IoT Journal, 2024) +- [HPE-Li: Lightweight WiFi Pose Estimation](https://link.springer.com/chapter/10.1007/978-3-031-72904-1_6) - ECCV 2024 +- [DGSense: Domain-Generalized Sensing](https://arxiv.org/abs/2501.12345) - Zhou et al. 
(2025) +- [X-Fi: Modality-Invariant Foundation Model](https://openreview.net/forum?id=xfi2025) - Chen and Yang (ICLR 2025) +- [AM-FM: First WiFi Foundation Model](https://arxiv.org/abs/2602.00001) - (2026) +- [PerceptAlign: Cross-Layout Pose Estimation](https://arxiv.org/abs/2603.00001) - Chen et al. (2026) +- [CAPC: Context-Aware Predictive Coding](https://ieeexplore.ieee.org/document/10600001) - IEEE OJCOMS, 2024 ### Signal Processing and Localization -- [SpotFi: Decimeter-Level Localization](https://dl.acm.org/doi/10.1145/2785956.2787487) -- Kotaru et al. (SIGCOMM 2015) -- [FarSense: Pushing WiFi Sensing Range](https://dl.acm.org/doi/10.1145/3300061.3345433) -- Zeng et al. (MobiCom 2019) -- [Widar 3.0: Cross-Domain Gesture Recognition](https://dl.acm.org/doi/10.1145/3300061.3345436) -- Zheng et al. (MobiCom 2019) -- [WiGest: WiFi-Based Gesture Recognition](https://ieeexplore.ieee.org/document/7127672) -- Abdelnasser et al. (2015) -- [CSI-Channel Spatial Decomposition](https://www.mdpi.com/2079-9292/14/4/756) -- Electronics, Feb 2025 +- [SpotFi: Decimeter-Level Localization](https://dl.acm.org/doi/10.1145/2785956.2787487) - Kotaru et al. (SIGCOMM 2015) +- [FarSense: Pushing WiFi Sensing Range](https://dl.acm.org/doi/10.1145/3300061.3345433) - Zeng et al. (MobiCom 2019) +- [Widar 3.0: Cross-Domain Gesture Recognition](https://dl.acm.org/doi/10.1145/3300061.3345436) - Zheng et al. (MobiCom 2019) +- [WiGest: WiFi-Based Gesture Recognition](https://ieeexplore.ieee.org/document/7127672) - Abdelnasser et al. 
(2015) +- [CSI-Channel Spatial Decomposition](https://www.mdpi.com/2079-9292/14/4/756) - Electronics, Feb 2025 ### MIMO Radar and Array Theory -- [MIMO Radar with Widely Separated Antennas](https://ieeexplore.ieee.org/document/4350230) -- Li and Stoica (IEEE SPM, 2007) +- [MIMO Radar with Widely Separated Antennas](https://ieeexplore.ieee.org/document/4350230) - Li and Stoica (IEEE SPM, 2007) ### Standards and Hardware -- [IEEE 802.11bf: WLAN Sensing](https://www.ieee802.org/11/Reports/tgbf_update.htm) -- Published 2024 -- [Espressif ESP-CSI](https://github.com/espressif/esp-csi) -- Official CSI collection tools +- [IEEE 802.11bf: WLAN Sensing](https://www.ieee802.org/11/Reports/tgbf_update.htm) - Published 2024 +- [Espressif ESP-CSI](https://github.com/espressif/esp-csi) - Official CSI collection tools - [ESP32-S3 Technical Reference](https://www.espressif.com/sites/default/files/documentation/esp32-s3_technical_reference_manual_en.pdf) ### Project ADRs @@ -386,4 +386,4 @@ The 6-node ESP32 + RuView configuration achieves 70-80% of camera DensePose accu - ADR-014: SOTA Signal Processing Algorithms for WiFi Sensing - ADR-016: RuVector Training Pipeline Integration - ADR-017: RuVector Signal and MAT Integration -- ADR-024: Project AETHER -- Contrastive CSI Embedding Model +- ADR-024: Project AETHER - Contrastive CSI Embedding Model diff --git a/docs/research/ruvsense-multistatic-fidelity-architecture.md b/docs/research/ruvsense-multistatic-fidelity-architecture.md index 34b559481..71e0e908c 100644 --- a/docs/research/ruvsense-multistatic-fidelity-architecture.md +++ b/docs/research/ruvsense-multistatic-fidelity-architecture.md @@ -2,7 +2,7 @@ **Date:** 2026-03-02 **Author:** ruv -**Codename:** **RuvSense** — RuVector-Enhanced Sensing for Multistatic Fidelity +**Codename:** **RuvSense** - RuVector-Enhanced Sensing for Multistatic Fidelity **Scope:** Sensing-first RF mode design, multistatic ESP32 mesh, coherence-gated tracking, and complete RuVector integration for 
achieving sub-centimeter pose jitter, robust multi-person separation, and small-motion sensitivity on existing silicon. --- @@ -21,7 +21,7 @@ WiFi-based DensePose estimation suffers from three fidelity bottlenecks that pre **Acceptance test:** Two people in a room, 20 Hz, stable tracks for 10 minutes with no identity swaps and low jitter in the torso keypoints. -The fundamental insight: **you do not need to invent a new WiFi standard. You need a sensing-first RF mode that rides on existing silicon, bands, and regulations.** The improvement comes from better observability — more viewpoints, smarter bandwidth use, and coherent fusion — not from new spectrum. +The fundamental insight: **you do not need to invent a new WiFi standard. You need a sensing-first RF mode that rides on existing silicon, bands, and regulations.** The improvement comes from better observability - more viewpoints, smarter bandwidth use, and coherent fusion - not from new spectrum. --- @@ -45,7 +45,7 @@ More bandwidth separates multipath components better, making pose estimation les **RuvSense approach:** Use HT40 on ESP32-S3 (supported in ESP-IDF v5.2) to double subcarrier count from 56 to 114. Then apply `ruvector-solver` sparse interpolation (already integrated per ADR-016) to reconstruct virtual subcarriers between measured ones, achieving effective HT80-like resolution from HT40 hardware. -The key algorithmic insight: the body reflection is spatially sparse — only a few multipath components carry pose information. `ruvector-solver`'s `NeumannSolver` exploits this sparsity via compressed sensing reconstruction: +The key algorithmic insight: the body reflection is spatially sparse - only a few multipath components carry pose information. `ruvector-solver`'s `NeumannSolver` exploits this sparsity via compressed sensing reconstruction: ``` ||y - Φx||₂ + λ||x||₁ → min @@ -73,7 +73,7 @@ Shorter wavelength gives more phase sensitivity to tiny motion. The phase shift 1. 
**Coarse-to-fine resolution**: 2.4 GHz for robust detection (better wall penetration, wider coverage), 5 GHz for fine-grained pose (2x phase sensitivity) 2. **Phase ambiguity resolution**: Different wavelengths resolve 2π phase wrapping ambiguities, similar to dual-frequency radar -3. **Frequency diversity**: Body part reflections at different frequencies have different magnitudes — arms that are invisible at λ/4 = 3.1cm (2.4 GHz half-wavelength null) are visible at λ/4 = 1.45cm (5 GHz) +3. **Frequency diversity**: Body part reflections at different frequencies have different magnitudes - arms that are invisible at λ/4 = 3.1cm (2.4 GHz half-wavelength null) are visible at λ/4 = 1.45cm (5 GHz) `ruvector-attention`'s `ScaledDotProductAttention` fuses dual-band CSI with learned frequency-dependent weights, automatically emphasizing the band that carries more information for each body region. @@ -83,7 +83,7 @@ DensePose accuracy improves fundamentally with multiple viewpoints. A single TX- **The geometry argument:** -A single link measures the body's effect on one ellipsoidal Fresnel zone (defined by TX and RX positions). The zone's intersection with the body produces a 1D integral of body conductivity along the ellipsoid. N links with different geometries provide N such integrals. With sufficient angular diversity, these can be inverted to recover the 3D body conductivity distribution — which is exactly what DensePose estimates. +A single link measures the body's effect on one ellipsoidal Fresnel zone (defined by TX and RX positions). The zone's intersection with the body produces a 1D integral of body conductivity along the ellipsoid. N links with different geometries provide N such integrals. With sufficient angular diversity, these can be inverted to recover the 3D body conductivity distribution - which is exactly what DensePose estimates. 
**Required diversity:** For 17-keypoint pose estimation, theoretical minimum is ~6 independent viewpoints (each resolving 2-3 DOF). Practical minimum with noise: 8-12 links with >30° angular separation. @@ -119,7 +119,7 @@ Each ESP32-S3 acts as both transmitter and receiver in time-division mode. With | 5-9 | Node B | A | C | D | 4ms | | 10-14 | Node C | A | B | D | 4ms | | 15-19 | Node D | A | B | C | 4ms | -| 20-49 | — | Processing + fusion | | | 30ms | +| 20-49 | - | Processing + fusion | | | 30ms | **Total cycle: 50ms = 20 Hz update rate.** @@ -171,7 +171,7 @@ NDP frames are already used by 802.11bf for sensing. They contain only preamble RuvSense defines a lightweight time-division protocol for coordinating multistatic sensing: ```rust -/// Sensing Schedule Protocol — coordinates multistatic ESP32 mesh +/// Sensing Schedule Protocol - coordinates multistatic ESP32 mesh pub struct SensingSchedule { /// Nodes in the mesh, ordered by slot assignment nodes: Vec, @@ -511,7 +511,7 @@ This ensures the system remains stable over days even as the environment slowly The existing ESP32 firmware (ADR-018, 606 lines C) requires these additions: ```c -// sensing_schedule.h — TDMA slot management +// sensing_schedule.h - TDMA slot management typedef struct { uint8_t node_id; // 0-3 for 4-node mesh uint8_t n_nodes; // total nodes in mesh @@ -552,7 +552,7 @@ The aggregator runs on the 5th ESP32 (or an x86/RPi host) and: 4. Outputs fused pose estimates at 20 Hz ```rust -/// RuvSense aggregator — collects and fuses multistatic CSI +/// RuvSense aggregator - collects and fuses multistatic CSI pub struct RuvSenseAggregator { /// Per-link compressed ring buffers link_buffers: Vec, // ruvector-temporal-tensor @@ -622,7 +622,7 @@ Pair Cognitum v1 hardware with the RuvSense software stack: | Cost per node | $10 | $200-500 (estimated) | | Deployment | DIY mesh | Integrated unit | -The same RuvSense software stack runs on both — the only difference is the CSI input quality. 
+The same RuvSense software stack runs on both - the only difference is the CSI input quality. --- @@ -777,7 +777,7 @@ Measured over 10-minute windows with subject standing still. | Standard | Status | Relevance | |----------|--------|-----------| -| IEEE 802.11bf | Published 2024 | WLAN Sensing — defines sensing frames, roles, measurements | +| IEEE 802.11bf | Published 2024 | WLAN Sensing - defines sensing frames, roles, measurements | | IEEE 802.11be (WiFi 7) | Finalized 2025 | 320 MHz channels, 3,984 subcarriers | | IEEE 802.11bn (WiFi 8) | Draft | Sub-7 GHz + 45/60 GHz, native sensing | @@ -870,7 +870,7 @@ Multistatic mesh with 4 nodes should exceed these single-node results by providi ## 15. Conclusion -RuvSense achieves high-fidelity WiFi DensePose by exploiting three physical levers — bandwidth, frequency, and viewpoints — through a multistatic ESP32 mesh that implements a sensing-first RF mode on existing commodity silicon. The complete RuVector integration provides the algorithmic foundation for sparse CIR reconstruction (solver), multi-link attention fusion (attention), person separation (mincut), temporal compression (temporal-tensor), and coherence gating (attn-mincut). +RuvSense achieves high-fidelity WiFi DensePose by exploiting three physical levers - bandwidth, frequency, and viewpoints - through a multistatic ESP32 mesh that implements a sensing-first RF mode on existing commodity silicon. The complete RuVector integration provides the algorithmic foundation for sparse CIR reconstruction (solver), multi-link attention fusion (attention), person separation (mincut), temporal compression (temporal-tensor), and coherence gating (attn-mincut). The architecture is incrementally deployable: start with 2 nodes for basic improvement, scale to 4+ for full multistatic sensing. The same software stack runs on ESP32 mesh or Cognitum hardware, with only the CSI input interface changing. 
@@ -886,14 +886,14 @@ The architecture is incrementally deployable: start with 2 nodes for basic impro ## 16. Beyond Pose: RF as Spatial Intelligence -Sections 1-15 treat WiFi as a pose estimator. That is the floor. The ceiling is treating the electromagnetic field as a **persistent, self-updating model of the physical world** — a model that remembers, predicts, and explains. +Sections 1-15 treat WiFi as a pose estimator. That is the floor. The ceiling is treating the electromagnetic field as a **persistent, self-updating model of the physical world** - a model that remembers, predicts, and explains. The shift: instead of asking "where are the keypoints right now?", ask "how has this room changed since yesterday, and what does that change mean?" This requires three architectural upgrades: 1. **Field normal modes**: Model the room itself, not just the people in it 2. **Longitudinal memory**: Store structured embeddings over days/weeks via RuVector -3. **Coherence as reasoning**: Use coherence gating not just for quality control, but as a semantic signal — when coherence breaks, something meaningful happened +3. **Coherence as reasoning**: Use coherence gating not just for quality control, but as a semantic signal - when coherence breaks, something meaningful happened --- @@ -901,12 +901,12 @@ This requires three architectural upgrades: ### Tier 1: Field Normal Modes -The room becomes the thing you model. You learn the stable electromagnetic baseline — the set of propagation paths, reflection coefficients, and interference patterns that exist when nobody is present. This is the **field normal mode**: the eigenstructure of the empty room's channel transfer function. +The room becomes the thing you model. You learn the stable electromagnetic baseline - the set of propagation paths, reflection coefficients, and interference patterns that exist when nobody is present. This is the **field normal mode**: the eigenstructure of the empty room's channel transfer function. 
-People and objects become **structured perturbations** to this baseline. A person entering the room does not create a new signal — they perturb existing modes. The perturbation has structure: it is spatially localized (the person is somewhere), spectrally colored (different body parts affect different subcarriers), and temporally smooth (people move continuously). +People and objects become **structured perturbations** to this baseline. A person entering the room does not create a new signal - they perturb existing modes. The perturbation has structure: it is spatially localized (the person is somewhere), spectrally colored (different body parts affect different subcarriers), and temporally smooth (people move continuously). ```rust -/// Field Normal Mode — the room's electromagnetic eigenstructure +/// Field Normal Mode - the room's electromagnetic eigenstructure pub struct FieldNormalMode { /// Baseline CSI per link (measured during empty-room calibration) pub baseline: Vec>>, // [n_links × n_subcarriers] @@ -947,7 +947,7 @@ impl FieldNormalMode { } ``` -**Why this matters:** The field normal mode enables a building that **senses itself**. Changes are explained as deltas from baseline. A new chair is a permanent mode shift. A person walking is a transient perturbation. A door opening changes specific path coefficients. The system does not need to be told what changed — it can decompose the change into structural categories. +**Why this matters:** The field normal mode enables a building that **senses itself**. Changes are explained as deltas from baseline. A new chair is a permanent mode shift. A person walking is a transient perturbation. A door opening changes specific path coefficients. The system does not need to be told what changed - it can decompose the change into structural categories. **RuVector integration:** `ruvector-solver` fits the environmental mode matrix via low-rank SVD. 
`ruvector-temporal-tensor` stores the baseline history with adaptive quantization. @@ -974,7 +974,7 @@ Node B │upa- │ Node C From 12 link attenuations, This is not a camera. It is a **probabilistic density field** that tells you where mass is, not what it looks like. It stays useful in darkness, smoke, occlusion, and clutter. ```rust -/// Coarse RF tomography — 3D occupancy from link attenuations +/// Coarse RF tomography - 3D occupancy from link attenuations pub struct RfTomographer { /// 3D voxel grid dimensions pub grid_dims: [usize; 3], // e.g., [8, 10, 4] for 4m × 5m × 2m at 0.5m resolution @@ -1004,11 +1004,11 @@ impl RfTomographer { } ``` -**Resolution:** With 4 nodes (12 links) and 0.5m voxels, the tomographic grid is 8×10×4 = 320 voxels. 12 measurements for 320 unknowns is severely underdetermined, but L1 regularization exploits sparsity — typically only 5-15 voxels are occupied by a person. At 8+ nodes (56 links), resolution improves to ~0.25m. +**Resolution:** With 4 nodes (12 links) and 0.5m voxels, the tomographic grid is 8×10×4 = 320 voxels. 12 measurements for 320 unknowns is severely underdetermined, but L1 regularization exploits sparsity - typically only 5-15 voxels are occupied by a person. At 8+ nodes (56 links), resolution improves to ~0.25m. ### Tier 3: Intention Lead Signals -Subtle pre-movement dynamics appear **before visible motion**. Lean, weight shift, arm tension, center-of-mass displacement. These are not noise — they are the body's preparatory phase for action. +Subtle pre-movement dynamics appear **before visible motion**. Lean, weight shift, arm tension, center-of-mass displacement. These are not noise - they are the body's preparatory phase for action. With contrastive embeddings plus temporal memory, you can **predict action onset** early enough to drive safety and robotics applications. @@ -1069,7 +1069,7 @@ Not diagnosis. 
**Drift.** You build a personal baseline for gait symmetry, stabi RuVector is the memory and the audit trail. ```rust -/// Personal biomechanics baseline — stores longitudinal embedding statistics +/// Personal biomechanics baseline - stores longitudinal embedding statistics pub struct PersonalBaseline { pub person_id: PersonId, /// Per-metric rolling statistics (Welford online algorithm) @@ -1187,7 +1187,7 @@ impl CrossRoomTracker { A room becomes an interface. Multi-user gesture control that works through clothing, in darkness, with line-of-sight blocked. -The key insight: the same multistatic CSI pipeline that estimates pose can detect **gestural micro-patterns** when the pose is held relatively still. A hand wave, a pointing gesture, a beckoning motion — all produce characteristic CSI perturbation signatures that are person-localized (thanks to the multi-person separator) and geometry-invariant (thanks to MERIDIAN conditioning). +The key insight: the same multistatic CSI pipeline that estimates pose can detect **gestural micro-patterns** when the pose is held relatively still. A hand wave, a pointing gesture, a beckoning motion - all produce characteristic CSI perturbation signatures that are person-localized (thanks to the multi-person separator) and geometry-invariant (thanks to MERIDIAN conditioning). ```rust /// Gesture recognition from multistatic CSI @@ -1219,7 +1219,7 @@ pub enum GestureType { You can detect when the signal looks **physically impossible** given the room model. Coherence gating becomes a **security primitive**, not just a quality check. 
```rust -/// Adversarial signal detector — identifies physically impossible CSI +/// Adversarial signal detector - identifies physically impossible CSI pub struct AdversarialDetector { /// Room field normal modes (baseline) pub field_model: FieldNormalMode, @@ -1264,7 +1264,7 @@ pub enum SecurityVerdict { } ``` -**Why multistatic helps security:** To spoof a single-link system, an attacker injects a signal into one receiver. To spoof a multistatic mesh, the attacker must simultaneously inject consistent signals into all receivers — signals that are geometrically consistent with a fake body position. This is physically difficult because each receiver sees a different projection. +**Why multistatic helps security:** To spoof a single-link system, an attacker injects a signal into one receiver. To spoof a multistatic mesh, the attacker must simultaneously inject consistent signals into all receivers - signals that are geometrically consistent with a fake body position. This is physically difficult because each receiver sees a different projection. --- @@ -1426,7 +1426,7 @@ This appliance was never possible before because we did not have: ## 20. Extended Acceptance Tests -### 20.1 Pose Fidelity (Tier 0 — ADR-029) +### 20.1 Pose Fidelity (Tier 0 - ADR-029) Two people in a room, 20 Hz, stable tracks for 10 minutes with no identity swaps and low jitter in the torso keypoints. @@ -1444,7 +1444,7 @@ Two people in a room, 20 Hz, stable tracks for 10 minutes with no identity swaps 1. Detects meaningful environmental or behavioral drift 2. Less than 5% false alarm rate 3. Provides traceable evidence chain for every alert -4. Operates autonomously — no manual calibration after initial setup +4. Operates autonomously - no manual calibration after initial setup --- @@ -1456,7 +1456,7 @@ Two people in a room, 20 Hz, stable tracks for 10 minutes with no identity swaps Rationale: - It is the foundation for everything else. 
Without a room baseline, you cannot detect drift (Tier 4), cross-room transitions (Tier 5), or adversarial signals (Tier 7). -- It requires no new hardware — just a calibration phase during empty-room periods. +- It requires no new hardware - just a calibration phase during empty-room periods. - It immediately improves pose quality by separating environmental from body-caused CSI variation. - It uses `ruvector-solver` (SVD) and `ruvector-temporal-tensor` (baseline storage), both already integrated. @@ -1488,8 +1488,8 @@ The architecture decomposes into three layers: | **Field** (§16-17) | Room modeling, drift detection, intention signals, tomography | +8 weeks | | **Appliance** (§19) | Product categories: Guardian, Digital Twin, Interaction Surface | +12 weeks | -Each layer builds on the one below. The complete stack — from ESP32 NDP injection to 30-day autonomous drift monitoring — uses no cameras, stores no images, and runs on $73-91 of commodity hardware. +Each layer builds on the one below. The complete stack - from ESP32 NDP injection to 30-day autonomous drift monitoring - uses no cameras, stores no images, and runs on $73-91 of commodity hardware. -RuVector provides the algorithmic spine: solving, attention, graph partitioning, temporal compression, and coherence gating. AETHER provides the embedding space. MERIDIAN provides domain generalization. The result is a system that remembers rooms, recognizes people, detects drift, and explains change — all through WiFi. +RuVector provides the algorithmic spine: solving, attention, graph partitioning, temporal compression, and coherence gating. AETHER provides the embedding space. MERIDIAN provides domain generalization. The result is a system that remembers rooms, recognizes people, detects drift, and explains change - all through WiFi. **You can detect signals, not diagnoses. That distinction matters legally, ethically, and technically. 
But the signals are rich enough to build products that were never possible before.** diff --git a/docs/research/wifi-sensing-ruvector-sota-2026.md b/docs/research/wifi-sensing-ruvector-sota-2026.md index a92201eac..a396c092f 100644 --- a/docs/research/wifi-sensing-ruvector-sota-2026.md +++ b/docs/research/wifi-sensing-ruvector-sota-2026.md @@ -20,7 +20,7 @@ This work established that commodity WiFi routers contain sufficient spatial inf ### 1.2 Multi-Person 3D Pose Estimation (CVPR 2024) -Yan et al. presented **Person-in-WiFi 3D** at CVPR 2024 ([paper](https://openaccess.thecvf.com/content/CVPR2024/papers/Yan_Person-in-WiFi_3D_End-to-End_Multi-Person_3D_Pose_Estimation_with_Wi-Fi_CVPR_2024_paper.pdf)), advancing the field from 2D to end-to-end multi-person 3D pose estimation using WiFi signals. This represents a significant leap — handling multiple subjects simultaneously in three dimensions using only wireless signals. +Yan et al. presented **Person-in-WiFi 3D** at CVPR 2024 ([paper](https://openaccess.thecvf.com/content/CVPR2024/papers/Yan_Person-in-WiFi_3D_End-to-End_Multi-Person_3D_Pose_Estimation_with_Wi-Fi_CVPR_2024_paper.pdf)), advancing the field from 2D to end-to-end multi-person 3D pose estimation using WiFi signals. This represents a significant leap - handling multiple subjects simultaneously in three dimensions using only wireless signals. ### 1.3 Cross-Site Generalization (IEEE IoT Journal, 2024) @@ -28,11 +28,11 @@ Zhou et al. published **AdaPose** (IEEE Internet of Things Journal, 2024, vol. 1 ### 1.4 Lightweight Architectures (ECCV 2024) -**HPE-Li** was presented at ECCV 2024 in Milan, introducing WiFi-enabled lightweight dual selective kernel convolution for human pose estimation. This work targets deployment on resource-constrained edge devices — a critical requirement for practical WiFi sensing systems. 
+**HPE-Li** was presented at ECCV 2024 in Milan, introducing WiFi-enabled lightweight dual selective kernel convolution for human pose estimation. This work targets deployment on resource-constrained edge devices - a critical requirement for practical WiFi sensing systems. ### 1.5 Subcarrier-Level Analysis (2025) -**CSI-Channel Spatial Decomposition** (Electronics, February 2025, [MDPI](https://www.mdpi.com/2079-9292/14/4/756)) decomposes CSI spatial structure into dual-view observations — spatial direction and channel sensitivity — demonstrating that this decomposition is sufficient for unambiguous localization and identification. This work directly informs how subcarrier-level features should be extracted from CSI data. +**CSI-Channel Spatial Decomposition** (Electronics, February 2025, [MDPI](https://www.mdpi.com/2079-9292/14/4/756)) decomposes CSI spatial structure into dual-view observations - spatial direction and channel sensitivity - demonstrating that this decomposition is sufficient for unambiguous localization and identification. This work directly informs how subcarrier-level features should be extracted from CSI data. **Deciphering the Silent Signals** (Springer, 2025) applies explainable AI to understand which WiFi frequency components contribute most to pose estimation, providing critical insight into feature selection for signal processing pipelines. @@ -190,7 +190,7 @@ WiFi-DensePose's early adoption of ML-DSA-65 positions it ahead of the deprecati ### 5.1 WiFi Evolution and Sensing Resolution -#### WiFi 7 (802.11be) — Available Now +#### WiFi 7 (802.11be) - Available Now - **320 MHz channels** with up to 3,984 CSI tones (vs. 56 on ESP32 today) - **16×16 MU-MIMO** spatial streams (vs. 
2×2 on ESP32) - **Sub-nanosecond RTT resolution** for centimeter-level positioning @@ -198,7 +198,7 @@ WiFi-DensePose's early adoption of ML-DSA-65 positions it ahead of the deprecati WiFi 7's 320 MHz bandwidth provides ~71x more CSI tones than current ESP32 implementations. This alone transforms sensing resolution. -#### WiFi 8 (802.11bn) — Expected ~2028 +#### WiFi 8 (802.11bn) - Expected ~2028 - Operations across **sub-7 GHz, 45 GHz, and 60 GHz** bands ([survey](https://www.sciencedirect.com/science/article/abs/pii/S1389128625005572)) - **WLAN sensing as a core PHY/MAC capability** (not an add-on) - Formalized sensing frames and measurement reporting @@ -231,7 +231,7 @@ WiFi 7's 320 MHz bandwidth provides ~71x more CSI tones than current ESP32 imple **Projected deployment:** - **2028**: Major cities deploy WiFi 7/8 infrastructure with integrated sensing. Pedestrian flow monitoring replaces camera-based surveillance in privacy-sensitive zones. - **2032**: Urban-scale mesh sensing networks provide real-time occupancy maps of public spaces, transit systems, and emergency shelters. Disaster response systems (like wifi-densepose-mat) operate as permanent city infrastructure. -- **2038**: Full-city coverage enables ambient intelligence: traffic optimization, crowd management, emergency detection — all without cameras, using only the WiFi infrastructure already deployed for connectivity. +- **2038**: Full-city coverage enables ambient intelligence: traffic optimization, crowd management, emergency detection - all without cameras, using only the WiFi infrastructure already deployed for connectivity. ### 5.4 Vector Intelligence at Scale @@ -239,7 +239,7 @@ WiFi 7's 320 MHz bandwidth provides ~71x more CSI tones than current ESP32 imple - **2028**: HNSW indexes of 10M+ CSI fingerprints per city zone, enabling instant environment recognition and person identification across any WiFi-equipped space. RVF containers store environment-specific models that adapt in <1ms. 
- **2032**: Federated learning across city-scale HNSW indexes. Each building's local index contributes to a global model without sharing raw CSI data. Post-quantum signatures ensure tamper-evident data provenance. - **2038**: Continuous self-learning via SONA at city scale. The system improves autonomously from billions of daily observations. EWC++ prevents catastrophic forgetting across seasonal and environmental changes. -- **2042**: Exascale vector indexes (~1T fingerprints) with sub-microsecond queries via quantum-classical hybrid search. WiFi sensing becomes an ambient utility like electricity — invisible, always-on, universally available. +- **2042**: Exascale vector indexes (~1T fingerprints) with sub-microsecond queries via quantum-classical hybrid search. WiFi sensing becomes an ambient utility like electricity - invisible, always-on, universally available. ### 5.5 Privacy-Preserving Sensing Architecture @@ -260,39 +260,39 @@ The convergence of these technologies creates a clear path for wifi-densepose: 2. **Medium-term (2028–2032)**: WiFi 7/8 CSI (3,984+ tones) transforms sensing from coarse presence to fine-grained pose estimation. SONA adaptation makes the system self-improving. Post-quantum signatures secure the sensor mesh. -3. **Long-term (2032–2046)**: WiFi sensing becomes ambient infrastructure. Medical-grade monitoring replaces wearables. City-scale vector intelligence operates autonomously. The architecture established today — RVF containers, HNSW indexes, witness chains, distributed consensus — scales directly to this future. +3. **Long-term (2032–2046)**: WiFi sensing becomes ambient infrastructure. Medical-grade monitoring replaces wearables. City-scale vector intelligence operates autonomously. The architecture established today - RVF containers, HNSW indexes, witness chains, distributed consensus - scales directly to this future. 
-The fundamental insight: **the software architecture for ambient WiFi sensing at scale is being built now, using technology available today.** The hardware (WiFi 7/8, faster silicon) will arrive to fill the resolution gap. The algorithms (HNSW, SONA, EWC++) are already proven. The cryptography (ML-DSA, SLH-DSA) is standardized. What matters is building the correct abstractions — and that is exactly what the RuVector integration provides. +The fundamental insight: **the software architecture for ambient WiFi sensing at scale is being built now, using technology available today.** The hardware (WiFi 7/8, faster silicon) will arrive to fill the resolution gap. The algorithms (HNSW, SONA, EWC++) are already proven. The cryptography (ML-DSA, SLH-DSA) is standardized. What matters is building the correct abstractions - and that is exactly what the RuVector integration provides. --- ## References ### WiFi Sensing -- [DensePose From WiFi](https://arxiv.org/abs/2301.00250) — Geng, Huang, De la Torre (CMU, 2023) -- [Person-in-WiFi 3D](https://openaccess.thecvf.com/content/CVPR2024/papers/Yan_Person-in-WiFi_3D_End-to-End_Multi-Person_3D_Pose_Estimation_with_Wi-Fi_CVPR_2024_paper.pdf) — Yan et al. 
(CVPR 2024) -- [CSI-Channel Spatial Decomposition](https://www.mdpi.com/2079-9292/14/4/756) — Electronics, Feb 2025 -- [WiFi CSI-Based Through-Wall HAR with ESP32](https://link.springer.com/chapter/10.1007/978-3-031-44137-0_4) — Springer, 2023 -- [Espressif ESP-CSI](https://github.com/espressif/esp-csi) — Official CSI tools -- [WiFi Sensing Survey](https://dl.acm.org/doi/10.1145/3705893) — ACM Computing Surveys, 2025 -- [WiFi-Based Human Identification Survey](https://pmc.ncbi.nlm.nih.gov/articles/PMC11479185/) — PMC, 2024 +- [DensePose From WiFi](https://arxiv.org/abs/2301.00250) - Geng, Huang, De la Torre (CMU, 2023) +- [Person-in-WiFi 3D](https://openaccess.thecvf.com/content/CVPR2024/papers/Yan_Person-in-WiFi_3D_End-to-End_Multi-Person_3D_Pose_Estimation_with_Wi-Fi_CVPR_2024_paper.pdf) - Yan et al. (CVPR 2024) +- [CSI-Channel Spatial Decomposition](https://www.mdpi.com/2079-9292/14/4/756) - Electronics, Feb 2025 +- [WiFi CSI-Based Through-Wall HAR with ESP32](https://link.springer.com/chapter/10.1007/978-3-031-44137-0_4) - Springer, 2023 +- [Espressif ESP-CSI](https://github.com/espressif/esp-csi) - Official CSI tools +- [WiFi Sensing Survey](https://dl.acm.org/doi/10.1145/3705893) - ACM Computing Surveys, 2025 +- [WiFi-Based Human Identification Survey](https://pmc.ncbi.nlm.nih.gov/articles/PMC11479185/) - PMC, 2024 ### Vector Search & Fingerprinting -- [WiFi CSI Fingerprinting with Vector Embedding](https://www.sciencedirect.com/science/article/abs/pii/S0957417424026691) — Rocamora & Ho (Expert Systems with Applications, 2024) -- [HNSW Explained](https://milvus.io/blog/understand-hierarchical-navigable-small-worlds-hnsw-for-vector-search.md) — Milvus Blog -- [WiFi Fingerprinting Survey](https://pmc.ncbi.nlm.nih.gov/articles/PMC12656469/) — PMC, 2024 +- [WiFi CSI Fingerprinting with Vector Embedding](https://www.sciencedirect.com/science/article/abs/pii/S0957417424026691) - Rocamora & Ho (Expert Systems with Applications, 2024) +- [HNSW 
Explained](https://milvus.io/blog/understand-hierarchical-navigable-small-worlds-hnsw-for-vector-search.md) - Milvus Blog +- [WiFi Fingerprinting Survey](https://pmc.ncbi.nlm.nih.gov/articles/PMC12656469/) - PMC, 2024 ### Edge AI & WASM -- [ONNX Runtime Web](https://onnxruntime.ai/docs/tutorials/web/) — Microsoft -- [WONNX: Rust ONNX Runtime](https://github.com/webonnx/wonnx) — WebGPU-accelerated -- [In-Browser Deep Learning on Edge Devices](https://arxiv.org/html/2309.08978v2) — arXiv, 2023 +- [ONNX Runtime Web](https://onnxruntime.ai/docs/tutorials/web/) - Microsoft +- [WONNX: Rust ONNX Runtime](https://github.com/webonnx/wonnx) - WebGPU-accelerated +- [In-Browser Deep Learning on Edge Devices](https://arxiv.org/html/2309.08978v2) - arXiv, 2023 ### Post-Quantum Cryptography -- [NIST PQC Standards](https://www.nist.gov/news-events/news/2024/08/nist-releases-first-3-finalized-post-quantum-encryption-standards) — FIPS 203/204/205 (August 2024) -- [NIST IR 8547: PQC Transition](https://nvlpubs.nist.gov/nistpubs/ir/2024/NIST.IR.8547.ipd.pdf) — Transition timeline -- [State of PQC Internet 2025](https://blog.cloudflare.com/pq-2025/) — Cloudflare +- [NIST PQC Standards](https://www.nist.gov/news-events/news/2024/08/nist-releases-first-3-finalized-post-quantum-encryption-standards) - FIPS 203/204/205 (August 2024) +- [NIST IR 8547: PQC Transition](https://nvlpubs.nist.gov/nistpubs/ir/2024/NIST.IR.8547.ipd.pdf) - Transition timeline +- [State of PQC Internet 2025](https://blog.cloudflare.com/pq-2025/) - Cloudflare ### WiFi Evolution -- [Wi-Fi 7 (802.11be)](https://en.wikipedia.org/wiki/Wi-Fi_7) — Finalized July 2025 -- [From Wi-Fi 7 to Wi-Fi 8 Survey](https://www.sciencedirect.com/science/article/abs/pii/S1389128625005572) — ScienceDirect, 2025 -- [Wi-Fi 7 320MHz Channels](https://www.netgear.com/hub/network/wifi-7-320mhz-channels/) — Netgear +- [Wi-Fi 7 (802.11be)](https://en.wikipedia.org/wiki/Wi-Fi_7) - Finalized July 2025 +- [From Wi-Fi 7 to Wi-Fi 8 
Survey](https://www.sciencedirect.com/science/article/abs/pii/S1389128625005572) - ScienceDirect, 2025 +- [Wi-Fi 7 320MHz Channels](https://www.netgear.com/hub/network/wifi-7-320mhz-channels/) - Netgear diff --git a/docs/security-audit-wasm-edge-vendor.md b/docs/security-audit-wasm-edge-vendor.md index cf9bcac1a..2335d3c7a 100644 --- a/docs/security-audit-wasm-edge-vendor.md +++ b/docs/security-audit-wasm-edge-vendor.md @@ -50,7 +50,7 @@ While this is safe in WASM3's single-threaded execution model, the returned `&[( ### HIGH -#### H-01: `coherence.rs:94-96` -- Division by zero when `n_sc == 0` +#### H-01: `coherence.rs:94-96` - Division by zero when `n_sc == 0` **Severity**: HIGH **File**: `coherence.rs:94` @@ -69,18 +69,18 @@ While the `initialized` check at line 71 catches the first call with an early re **Recommendation**: Add `if n_sc == 0 { return self.smoothed_coherence; }` after the `initialized` check. -#### H-02: `occupancy.rs:92,99,105,112` -- Division by zero when `zone_count == 1` and `n_sc < 4` +#### H-02: `occupancy.rs:92,99,105,112` - Division by zero when `zone_count == 1` and `n_sc < 4` **Severity**: HIGH **File**: `occupancy.rs:92-112` -**Description**: When `n_sc == 2` or `n_sc == 3`, `zone_count = (n_sc / 4).min(MAX_ZONES).max(1) = 1` and `subs_per_zone = n_sc / zone_count = n_sc`. The loop computes `count = (end - start) as f32` which is valid. However, when `n_sc == 1`, the function returns early at line 83-85. The real risk is if `n_sc == 0` somehow passes through -- but the check at line 83 `n_sc < 2` guards this. This is actually safe but fragile. +**Description**: When `n_sc == 2` or `n_sc == 3`, `zone_count = (n_sc / 4).min(MAX_ZONES).max(1) = 1` and `subs_per_zone = n_sc / zone_count = n_sc`. The loop computes `count = (end - start) as f32` which is valid. However, when `n_sc == 1`, the function returns early at line 83-85. The real risk is if `n_sc == 0` somehow passes through - but the check at line 83 `n_sc < 2` guards this. 
This is actually safe but fragile. However, a more serious issue: the `count` variable at line 99 is computed as `(end - start) as f32` and used as a divisor at lines 105 and 112. If `subs_per_zone == 0` (which can happen if `zone_count > n_sc`), `count` would be 0, causing division by zero. Currently `zone_count` is capped by `n_sc / 4` so this cannot happen with `n_sc >= 2`, but the logic is fragile. **Recommendation**: Add a guard `if count < 1.0 { continue; }` before the division at line 105. -#### H-03: `rvf.rs:209-215` -- `patch_signature` has no bounds check on `offset + RVF_SIGNATURE_LEN` +#### H-03: `rvf.rs:209-215` - `patch_signature` has no bounds check on `offset + RVF_SIGNATURE_LEN` **Severity**: HIGH **File**: `rvf.rs:209-215` (std-only builder code) @@ -104,7 +104,7 @@ If called with a truncated or malformed RVF buffer, or if `wasm_len` in the head ### MEDIUM -#### M-01: `lib.rs:391` -- Negative `n_subcarriers` from host silently wraps to large `usize` +#### M-01: `lib.rs:391` - Negative `n_subcarriers` from host silently wraps to large `usize` **Severity**: MEDIUM **File**: `lib.rs:391` @@ -113,7 +113,7 @@ If called with a truncated or malformed RVF buffer, or if `wasm_len` in the head **Recommendation**: Add: `let n_sc = if n_subcarriers < 0 { 0 } else { n_subcarriers as usize };` -#### M-02: `coherence.rs:142-144` -- `mean_phasor_angle()` uses stale `phasor_re/phasor_im` fields +#### M-02: `coherence.rs:142-144` - `mean_phasor_angle()` uses stale `phasor_re/phasor_im` fields **Severity**: MEDIUM **File**: `coherence.rs:142-144` @@ -124,7 +124,7 @@ If called with a truncated or malformed RVF buffer, or if `wasm_len` in the head **Recommendation**: Store the per-frame mean phasor components: `self.phasor_re = mean_re; self.phasor_im = mean_im;` at the end of `process_frame()`. 
-#### M-03: `gesture.rs:200` -- DTW cost matrix uses 9.6 KB stack, no guard for mismatched sizes +#### M-03: `gesture.rs:200` - DTW cost matrix uses 9.6 KB stack, no guard for mismatched sizes **Severity**: MEDIUM **File**: `gesture.rs:200` @@ -148,7 +148,7 @@ The `vendor_common.rs` DTW functions use `[[f32::MAX; 64]; 64]` = 16384 bytes, w **Recommendation**: Use `.wrapping_add(1)` explicitly in all modules for clarity. For modules with threshold comparisons, add a `saturating` flag to prevent re-triggering. -#### M-05: `tmp_pattern_sequence.rs:159` -- potential out-of-bounds write at day boundary +#### M-05: `tmp_pattern_sequence.rs:159` - potential out-of-bounds write at day boundary **Severity**: MEDIUM **File**: `tmp_pattern_sequence.rs:159` @@ -159,7 +159,7 @@ Actually, the issue is that `minute_counter` is `u16` and is compared against `D **Downgrading concern**: This is actually well-handled. Keeping as MEDIUM because the pattern of computing `DAY_LEN + minute_counter` without the guard would be dangerous. -#### M-06: `spt_micro_hnsw.rs:187` -- neighbor index stored as `u8`, silent truncation for `MAX_VECTORS > 255` +#### M-06: `spt_micro_hnsw.rs:187` - neighbor index stored as `u8`, silent truncation for `MAX_VECTORS > 255` **Severity**: MEDIUM **File**: `spt_micro_hnsw.rs:187,197` @@ -172,7 +172,7 @@ Actually, the issue is that `minute_counter` is `u16` and is compared against `D ### LOW -#### L-01: `lib.rs:35` -- `#![allow(clippy::missing_safety_doc)]` suppresses safety documentation +#### L-01: `lib.rs:35` - `#![allow(clippy::missing_safety_doc)]` suppresses safety documentation **Severity**: LOW **File**: `lib.rs:35` @@ -190,7 +190,7 @@ Actually, the issue is that `minute_counter` is `u16` and is compared against `D **Recommendation**: Run tests with `-- --test-threads=1` or add a note in the test configuration. 
-#### L-03: `lrn_dtw_gesture_learn.rs:357` -- `next_id` wraps at 255, potentially colliding with built-in gesture IDs +#### L-03: `lrn_dtw_gesture_learn.rs:357` - `next_id` wraps at 255, potentially colliding with built-in gesture IDs **Severity**: LOW **File**: `lrn_dtw_gesture_learn.rs:357` @@ -199,7 +199,7 @@ Actually, the issue is that `minute_counter` is `u16` and is compared against `D **Recommendation**: Use `wrapping_add(1).max(100)` or saturating_add to stay in the 100-255 range. -#### L-04: `ais_prompt_shield.rs:294` -- FNV-1a hash quantization resolution may cause false replay positives +#### L-04: `ais_prompt_shield.rs:294` - FNV-1a hash quantization resolution may cause false replay positives **Severity**: LOW **File**: `ais_prompt_shield.rs:292-308` @@ -208,12 +208,12 @@ Actually, the issue is that `minute_counter` is `u16` and is compared against `D **Recommendation**: Increase quantization resolution to 0.001 or add a secondary discriminator (e.g., include a frame sequence counter in the hash). -#### L-05: `qnt_quantum_coherence.rs:188` -- `inv_n` computed without zero check +#### L-05: `qnt_quantum_coherence.rs:188` - `inv_n` computed without zero check **Severity**: LOW **File**: `qnt_quantum_coherence.rs:188` -**Description**: `let inv_n = 1.0 / (n_sc as f32);` -- While `n_sc < 2` is checked at line 94, the pattern of dividing without an explicit guard is inconsistent with other modules. +**Description**: `let inv_n = 1.0 / (n_sc as f32);` - While `n_sc < 2` is checked at line 94, the pattern of dividing without an explicit guard is inconsistent with other modules. --- diff --git a/docs/user-guide.md b/docs/user-guide.md index 78b185f3a..6ba844995 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -254,7 +254,7 @@ docker run --network host ruvnet/wifi-densepose:latest --source wifi --tick-ms 5 > **Community verified:** Tested on Windows 10 (10.0.26200) with Intel Wi-Fi 6 AX201 160MHz, Python 3.14, StormFiber 5 GHz network. 
All 7 tutorial steps passed with stable RSSI readings at -48 dBm. See [Tutorial #36](https://github.com/ruvnet/RuView/issues/36) for the full walkthrough and test results. -**Vital signs from RSSI:** The sensing server now supports breathing rate estimation from RSSI variance patterns (requires stationary subject near AP) and motion classification with confidence scoring. RSSI-based vital sign detection has lower fidelity than ESP32 CSI — it is best for presence detection and coarse motion classification. +**Vital signs from RSSI:** The sensing server now supports breathing rate estimation from RSSI variance patterns (requires stationary subject near AP) and motion classification with confidence scoring. RSSI-based vital sign detection has lower fidelity than ESP32 CSI - it is best for presence detection and coarse motion classification. ### macOS WiFi (RSSI Only) @@ -394,7 +394,7 @@ curl -s http://localhost:3000/api/v1/pose/current | python -m json.tool Real-time sensing data is available via WebSocket. -**URL:** `ws://localhost:3000/ws/sensing` (same port as HTTP — recommended) or `ws://localhost:3001/ws/sensing` (dedicated WS port). +**URL:** `ws://localhost:3000/ws/sensing` (same port as HTTP - recommended) or `ws://localhost:3001/ws/sensing` (dedicated WS port). > **Note:** The `/ws/sensing` WebSocket endpoint is available on both the HTTP port (3000) and the dedicated WebSocket port (3001/8765). The web UI uses the HTTP port so only one port needs to be exposed. The dedicated WS port remains available for backward compatibility. 
@@ -662,7 +662,7 @@ Once trained, the adaptive model runs automatically: - Record with clearly distinct activities (actually leave the room for "empty") - Record 30-60 seconds per activity (more data = better model) - Re-record and retrain if you move the ESP32 or rearrange the room -- The model is environment-specific — retrain when the physical setup changes +- The model is environment-specific - retrain when the physical setup changes ### Adaptive Classifier API @@ -819,10 +819,10 @@ Pre-built binaries are available at [Releases](https://github.com/ruvnet/RuView/ | Release | What It Includes | Tag | |---------|-----------------|-----| -| [v0.5.0](https://github.com/ruvnet/RuView/releases/tag/v0.5.0-esp32) | **Stable (recommended)** — mmWave sensor fusion (MR60BHA2/LD2410 auto-detect), 48-byte fused vitals, all v0.4.3.1 fixes | `v0.5.0-esp32` | +| [v0.5.0](https://github.com/ruvnet/RuView/releases/tag/v0.5.0-esp32) | **Stable (recommended)** - mmWave sensor fusion (MR60BHA2/LD2410 auto-detect), 48-byte fused vitals, all v0.4.3.1 fixes | `v0.5.0-esp32` | | [v0.4.3.1](https://github.com/ruvnet/RuView/releases/tag/v0.4.3.1-esp32) | Fall detection fix ([#263](https://github.com/ruvnet/RuView/issues/263)), 4MB flash ([#265](https://github.com/ruvnet/RuView/issues/265)), watchdog fix ([#266](https://github.com/ruvnet/RuView/issues/266)) | `v0.4.3.1-esp32` | | [v0.4.1](https://github.com/ruvnet/RuView/releases/tag/v0.4.1-esp32) | CSI build fix, compile guard, AMOLED display, edge intelligence ([ADR-057](../docs/adr/ADR-057-firmware-csi-build-guard.md)) | `v0.4.1-esp32` | -| [v0.3.0-alpha](https://github.com/ruvnet/RuView/releases/tag/v0.3.0-alpha-esp32) | Alpha — adds on-device edge intelligence (ADR-039) | `v0.3.0-alpha-esp32` | +| [v0.3.0-alpha](https://github.com/ruvnet/RuView/releases/tag/v0.3.0-alpha-esp32) | Alpha - adds on-device edge intelligence (ADR-039) | `v0.3.0-alpha-esp32` | | [v0.2.0](https://github.com/ruvnet/RuView/releases/tag/v0.2.0-esp32) | Raw CSI 
streaming, TDM, channel hopping, QUIC mesh | `v0.2.0-esp32` | > **Important:** Always use **v0.4.3.1 or later**. Earlier versions have false fall detection alerts (v0.4.2 and below) and CSI disabled in the build config (pre-v0.4.1). @@ -870,7 +870,7 @@ All nodes in a mesh must share the same 256-bit mesh key for HMAC-SHA256 beacon Each node in a multistatic mesh needs a unique TDM slot ID (0-based): ```bash -# Node 0 (slot 0) — first transmitter +# Node 0 (slot 0) - first transmitter python firmware/esp32-csi-node/provision.py --port COM7 --tdm-slot 0 --tdm-total 3 # Node 1 (slot 1) @@ -882,11 +882,11 @@ python firmware/esp32-csi-node/provision.py --port COM9 --tdm-slot 2 --tdm-total **Edge Intelligence (v0.3.0-alpha, [ADR-039](../docs/adr/ADR-039-esp32-edge-intelligence.md)):** -The v0.3.0-alpha firmware adds on-device signal processing that runs directly on the ESP32-S3 — no host PC needed for basic presence and vital signs. Edge processing is disabled by default for full backward compatibility. +The v0.3.0-alpha firmware adds on-device signal processing that runs directly on the ESP32-S3 - no host PC needed for basic presence and vital signs. Edge processing is disabled by default for full backward compatibility. | Tier | What It Does | Extra RAM | |------|-------------|-----------| -| **0** | Disabled (default) — streams raw CSI to the aggregator | 0 KB | +| **0** | Disabled (default) - streams raw CSI to the aggregator | 0 KB | | **1** | Phase unwrapping, running statistics, top-K subcarrier selection, delta compression | ~30 KB | | **2** | Everything in Tier 1, plus presence detection, breathing/heart rate, motion scoring, fall detection | ~33 KB | @@ -958,7 +958,7 @@ This starts: ## Testing Firmware Without Hardware (QEMU) -You can test the ESP32-S3 firmware on your computer without any physical hardware. The project uses **QEMU** — an emulator that pretends to be an ESP32-S3 chip, running the real firmware code inside a virtual machine on your PC. 
+You can test the ESP32-S3 firmware on your computer without any physical hardware. The project uses **QEMU** - an emulator that pretends to be an ESP32-S3 chip, running the real firmware code inside a virtual machine on your PC. This is useful when: - You don't have an ESP32-S3 board yet @@ -1004,7 +1004,7 @@ pip install esptool pyyaml esp-idf-nvs-partition-gen **For multi-node testing (optional):** ```bash -# Linux only — needed for virtual network bridges +# Linux only - needed for virtual network bridges sudo apt install socat bridge-utils iproute2 ``` @@ -1041,7 +1041,7 @@ bash scripts/qemu-esp32s3-test.sh ``` **What happens behind the scenes:** -1. The firmware is compiled with a "mock CSI" mode — instead of reading real WiFi signals, it generates synthetic test data that mimics real people walking, falling, or breathing +1. The firmware is compiled with a "mock CSI" mode - instead of reading real WiFi signals, it generates synthetic test data that mimics real people walking, falling, or breathing 2. The compiled firmware is loaded into QEMU, which boots it like a real ESP32-S3 3. The emulator's serial output (what you'd see on a USB cable) is captured 4. A validation script checks the output for expected behavior and errors @@ -1081,18 +1081,18 @@ The test runs 16 checks on the firmware's output. Here's what a successful run l | Code | Meaning | What to do | |------|---------|-----------| -| 0 | **PASS** — everything works | Nothing, you're good! | -| 1 | **WARN** — minor issues | Review the output; usually safe to continue | -| 2 | **FAIL** — something broke | Check the `[FAIL]` lines for what went wrong | -| 3 | **FATAL** — can't even start | Usually a missing tool or build failure; check error messages | +| 0 | **PASS** - everything works | Nothing, you're good! 
| +| 1 | **WARN** - minor issues | Review the output; usually safe to continue | +| 2 | **FAIL** - something broke | Check the `[FAIL]` lines for what went wrong | +| 3 | **FATAL** - can't even start | Usually a missing tool or build failure; check error messages | ### Testing Multiple Nodes at Once (Swarm) Real deployments use 3-8 ESP32 nodes. The **swarm configurator** lets you simulate multiple nodes on your computer, each with a different role: -- **Sensor nodes** — generate WiFi signal data (like ESP32s placed around a room) -- **Coordinator node** — collects data from all sensors and runs analysis -- **Gateway node** — bridges data to your computer +- **Sensor nodes** - generate WiFi signal data (like ESP32s placed around a room) +- **Coordinator node** - collects data from all sensors and runs analysis +- **Gateway node** - bridges data to your computer ```bash # Quick 2-node smoke test (15 seconds) @@ -1220,17 +1220,17 @@ bash scripts/qemu-esp32s3-test.sh # 2. Multi-node swarm test (1 minute) python3 scripts/qemu_swarm.py --preset standard -# 3. Fuzz testing — finds edge-case crashes (1-5 minutes) +# 3. Fuzz testing - finds edge-case crashes (1-5 minutes) cd firmware/esp32-csi-node/test make all CC=clang make run_serialize FUZZ_DURATION=60 make run_edge FUZZ_DURATION=60 make run_nvs FUZZ_DURATION=60 -# 4. NVS configuration matrix — tests 14 config combinations +# 4. NVS configuration matrix - tests 14 config combinations python3 scripts/generate_nvs_matrix.py --output-dir build/nvs_matrix -# 5. Chaos testing — injects faults to test resilience (2 minutes) +# 5. Chaos testing - injects faults to test resilience (2 minutes) bash scripts/qemu-chaos-test.sh ``` @@ -1278,7 +1278,7 @@ Firmware versions prior to v0.4.1 had `CONFIG_ESP_WIFI_CSI_ENABLED` disabled in ### ESP32: No data arriving -1. Verify firmware is v0.4.1+ (older versions had CSI disabled — see above) +1. Verify firmware is v0.4.1+ (older versions had CSI disabled - see above) 2. 
Verify the ESP32 is connected to the same WiFi network 3. Check the target IP matches the sensing server machine: `python firmware/esp32-csi-node/provision.py --port COM7 --target-ip ` 4. Verify UDP port 5005 is not blocked by firewall @@ -1308,18 +1308,18 @@ Run the terminal as Administrator (required for `netsh wlan` access). Verified w The server applies a 3-stage smoothing pipeline (ADR-048). If readings are still unstable: - Ensure the subject is relatively still (large movements mask vital sign oscillations) - Train the adaptive classifier for your specific environment: `curl -X POST http://localhost:3000/api/v1/adaptive/train` -- Check signal quality: `curl http://localhost:3000/api/v1/sensing/latest` — look for `signal_quality > 0.4` +- Check signal quality: `curl http://localhost:3000/api/v1/sensing/latest` - look for `signal_quality > 0.4` ### Observatory shows DEMO instead of LIVE - Verify the sensing server is running: `curl http://localhost:3000/health` - Access Observatory via the server URL: `http://localhost:3000/ui/observatory.html` (not a file:// URL) - Hard refresh with Ctrl+Shift+R to clear cached settings -- The auto-detect probes `/health` on the same origin — cross-origin won't work +- The auto-detect probes `/health` on the same origin - cross-origin won't work ### QEMU: "qemu-system-xtensa: command not found" -QEMU for ESP32-S3 must be built from Espressif's fork — it is not in standard package managers: +QEMU for ESP32-S3 must be built from Espressif's fork - it is not in standard package managers: ```bash git clone https://github.com/espressif/qemu.git diff --git a/examples/happiness-vector/README.md b/examples/happiness-vector/README.md index 61a20bf5a..5a194e9a0 100644 --- a/examples/happiness-vector/README.md +++ b/examples/happiness-vector/README.md @@ -1,4 +1,4 @@ -# Happiness Vector — WiFi CSI Guest Sentiment Sensing +# Happiness Vector - WiFi CSI Guest Sentiment Sensing Contactless hotel guest happiness scoring using WiFi Channel 
State Information (CSI) from ESP32-S3 nodes, coordinated by a Cognitum Seed edge intelligence appliance. @@ -179,14 +179,14 @@ Event IDs emitted by the WASM module: This system is designed to be privacy-preserving by construction: -- **No images** — WiFi CSI captures RF signal patterns, not visual data -- **No audio** — radio waves only -- **No facial recognition** — physically impossible with CSI -- **No individual identity** — cannot distinguish Bob from Alice -- **Aggregate only** — 8 floating-point numbers per observation -- **Works in the dark** — RF sensing needs no lighting -- **Through-wall** — single sensor covers adjacent rooms without line-of-sight -- **GDPR-friendly** — no personal data collected; happiness scores are anonymous statistical aggregates +- **No images** - WiFi CSI captures RF signal patterns, not visual data +- **No audio** - radio waves only +- **No facial recognition** - physically impossible with CSI +- **No individual identity** - cannot distinguish Bob from Alice +- **Aggregate only** - 8 floating-point numbers per observation +- **Works in the dark** - RF sensing needs no lighting +- **Through-wall** - single sensor covers adjacent rooms without line-of-sight +- **GDPR-friendly** - no personal data collected; happiness scores are anonymous statistical aggregates ## Files @@ -199,8 +199,8 @@ This system is designed to be privacy-preserving by construction: ## Related -- [ADR-065](../../docs/adr/ADR-065-happiness-scoring-seed-bridge.md) — Happiness scoring pipeline architecture -- [ADR-066](../../docs/adr/ADR-066-esp32-swarm-seed-coordinator.md) — ESP32 swarm with Seed coordinator -- [exo_happiness_score.rs](../../rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/exo_happiness_score.rs) — WASM edge module (Rust) -- [swarm_bridge.c](../../firmware/esp32-csi-node/main/swarm_bridge.c) — ESP32 firmware swarm bridge -- [ruview_live.py](../ruview_live.py) — RuView Live dashboard with `--mode happiness` +- 
[ADR-065](../../docs/adr/ADR-065-happiness-scoring-seed-bridge.md) - Happiness scoring pipeline architecture +- [ADR-066](../../docs/adr/ADR-066-esp32-swarm-seed-coordinator.md) - ESP32 swarm with Seed coordinator +- [exo_happiness_score.rs](../../rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm-edge/src/exo_happiness_score.rs) - WASM edge module (Rust) +- [swarm_bridge.c](../../firmware/esp32-csi-node/main/swarm_bridge.c) - ESP32 firmware swarm bridge +- [ruview_live.py](../ruview_live.py) - RuView Live dashboard with `--mode happiness` diff --git a/examples/happiness-vector/happiness_vector_schema.json b/examples/happiness-vector/happiness_vector_schema.json index 3afe92a72..59e0c30fb 100644 --- a/examples/happiness-vector/happiness_vector_schema.json +++ b/examples/happiness-vector/happiness_vector_schema.json @@ -78,11 +78,11 @@ "type": "object", "description": "WASM edge module event IDs (690-694)", "properties": { - "690_HAPPINESS_SCORE": "Composite happiness [0, 1] — emitted every frame", - "691_GAIT_ENERGY": "Gait speed + stride regularity composite — emitted every 4th frame", - "692_AFFECT_VALENCE": "Breathing calm + fluidity + posture composite — emitted every 4th frame", - "693_SOCIAL_ENERGY": "Group animation level — emitted every 4th frame", - "694_TRANSIT_DIRECTION": "1.0=entering, 0.0=exiting — emitted every 4th frame" + "690_HAPPINESS_SCORE": "Composite happiness [0, 1] - emitted every frame", + "691_GAIT_ENERGY": "Gait speed + stride regularity composite - emitted every 4th frame", + "692_AFFECT_VALENCE": "Breathing calm + fluidity + posture composite - emitted every 4th frame", + "693_SOCIAL_ENERGY": "Group animation level - emitted every 4th frame", + "694_TRANSIT_DIRECTION": "1.0=entering, 0.0=exiting - emitted every 4th frame" } }, "seed_id_scheme": { diff --git a/examples/happiness-vector/provision_swarm.sh b/examples/happiness-vector/provision_swarm.sh index 9295b2481..0d91786d0 100644 --- a/examples/happiness-vector/provision_swarm.sh 
+++ b/examples/happiness-vector/provision_swarm.sh @@ -1,5 +1,5 @@ #!/bin/bash -# ESP32 Swarm Provisioning — ADR-065/066 +# ESP32 Swarm Provisioning - ADR-065/066 # # Provisions multiple ESP32-S3 nodes for a hotel happiness sensing deployment. # Each node gets WiFi credentials, a unique node_id, zone name, and Seed token. diff --git a/examples/medical/README.md b/examples/medical/README.md index 693b168d8..7704403c4 100644 --- a/examples/medical/README.md +++ b/examples/medical/README.md @@ -1,6 +1,6 @@ # Medical Sensing Examples -Contactless vital sign monitoring using 60 GHz mmWave radar — no wearable, no camera, no physical contact. +Contactless vital sign monitoring using 60 GHz mmWave radar - no wearable, no camera, no physical contact. ## Blood Pressure Estimator @@ -28,7 +28,7 @@ From there, the estimator: | Component | Cost | Role | |-----------|------|------| | ESP32-C6 + Seeed MR60BHA2 | ~$15 | 60 GHz mmWave radar (HR, BR, presence) | -| USB cable | — | Power + serial data | +| USB cable | - | Power + serial data | That's it. Total cost: **~$15**. @@ -37,7 +37,7 @@ That's it. Total cost: **~$15**. 
```bash pip install pyserial numpy -# Basic (uncalibrated — shows trends) +# Basic (uncalibrated - shows trends) python examples/medical/bp_estimator.py --port COM4 # Calibrated (take a real BP reading first, then enter it) @@ -72,7 +72,7 @@ python examples/medical/bp_estimator.py --port COM4 \ |-----------|----------| | Uncalibrated, stationary | ±15-20 mmHg (trend tracking) | | Calibrated, stationary | ±8-12 mmHg | -| Moving subject | Not reliable — wait for subject to be still | +| Moving subject | Not reliable - wait for subject to be still | Accuracy improves with: - Longer recording duration (60s minimum, 120s recommended) diff --git a/firmware/esp32-csi-node/README.md b/firmware/esp32-csi-node/README.md index a3cfe28d7..dc39fe715 100644 --- a/firmware/esp32-csi-node/README.md +++ b/firmware/esp32-csi-node/README.md @@ -2,7 +2,7 @@ **Turn a $7 microcontroller into a privacy-first human sensing node.** -This firmware captures WiFi Channel State Information (CSI) from an ESP32-S3 and transforms it into real-time presence detection, vital sign monitoring, and programmable sensing -- all without cameras or wearables. Part of the [WiFi-DensePose](../../README.md) project. +This firmware captures WiFi Channel State Information (CSI) from an ESP32-S3 and transforms it into real-time presence detection, vital sign monitoring, and programmable sensing - all without cameras or wearables. Part of the [WiFi-DensePose](../../README.md) project. [![ESP-IDF v5.2](https://img.shields.io/badge/ESP--IDF-v5.2-blue.svg)](https://docs.espressif.com/projects/esp-idf/en/v5.2/) [![Target: ESP32-S3](https://img.shields.io/badge/target-ESP32--S3-purple.svg)](https://www.espressif.com/en/products/socs/esp32-s3) @@ -25,7 +25,7 @@ This firmware captures WiFi Channel State Information (CSI) from an ESP32-S3 and For users who want to get running fast. Detailed explanations follow in later sections. -### 1. Build (Docker -- the only reliable method) +### 1. 
Build (Docker - the only reliable method)
 
 ```bash
 # From the repository root:
@@ -55,7 +55,7 @@ python scripts/provision.py --port COM7 \
 
 ### 4. Start the sensing server
 
 ```bash
-cargo run -p wifi-densepose-sensing-server -- --http-port 3000 --source auto
+cargo run -p wifi-densepose-sensing-server -- --http-port 3000 --source auto
 ```
 
 ### 5. Open the UI
@@ -105,7 +105,7 @@ The firmware implements a tiered processing pipeline. Each tier builds on the pr
 +--------------------------------------------------------------------------+
 ```
 
-### Tier 0 -- Raw CSI Passthrough (Stable)
+### Tier 0 - Raw CSI Passthrough (Stable)
 
 The default, production-stable baseline. Captures CSI frames from the WiFi driver and streams them over UDP in the ADR-018 binary format.
 
@@ -114,29 +114,29 @@ The default, production-stable baseline. Captures CSI frames from the WiFi drive
 - **Payload:** 20-byte header + I/Q pairs (2 bytes per subcarrier per antenna)
 - **Bandwidth:** ~5 KB/s per node (64 subcarriers, 1 antenna)
 
-### Tier 1 -- Basic DSP (Stable)
+### Tier 1 - Basic DSP (Stable)
 
 Adds on-device signal conditioning to reduce bandwidth and improve signal quality.
 
-- **Phase unwrapping** -- removes 2-pi discontinuities
-- **Welford running statistics** -- incremental mean and variance per subcarrier
-- **Top-K subcarrier selection** -- tracks only the K highest-variance subcarriers
-- **Delta compression** -- XOR + RLE encoding reduces bandwidth by ~70%
+- **Phase unwrapping** - removes 2-pi discontinuities
+- **Welford running statistics** - incremental mean and variance per subcarrier
+- **Top-K subcarrier selection** - tracks only the K highest-variance subcarriers
+- **Delta compression** - XOR + RLE encoding reduces bandwidth by ~70%
 
-### Tier 2 -- Full Pipeline (Stable)
+### Tier 2 - Full Pipeline (Stable)
 
 Adds real-time health and safety monitoring.
-- **Breathing rate** -- biquad IIR bandpass 0.1-0.5 Hz, zero-crossing BPM (6-30 BPM) -- **Heart rate** -- biquad IIR bandpass 0.8-2.0 Hz, zero-crossing BPM (40-120 BPM) -- **Presence detection** -- adaptive threshold calibration (60 s ambient learning) -- **Fall detection** -- phase acceleration exceeds configurable threshold -- **Multi-person estimation** -- subcarrier group clustering (up to 4 persons) -- **Vitals packet** -- 32-byte UDP packet at 1 Hz (magic `0xC5110002`) +- **Breathing rate** - biquad IIR bandpass 0.1-0.5 Hz, zero-crossing BPM (6-30 BPM) +- **Heart rate** - biquad IIR bandpass 0.8-2.0 Hz, zero-crossing BPM (40-120 BPM) +- **Presence detection** - adaptive threshold calibration (60 s ambient learning) +- **Fall detection** - phase acceleration exceeds configurable threshold +- **Multi-person estimation** - subcarrier group clustering (up to 4 persons) +- **Vitals packet** - 32-byte UDP packet at 1 Hz (magic `0xC5110002`) -### Tier 3 -- WASM Programmable Sensing (Alpha) +### Tier 3 - WASM Programmable Sensing (Alpha) -Turns the ESP32 from a fixed-function sensor into a programmable sensing computer. Instead of reflashing firmware to change algorithms, you upload new sensing logic as small WASM modules -- compiled from Rust, packaged in signed RVF containers. +Turns the ESP32 from a fixed-function sensor into a programmable sensing computer. Instead of reflashing firmware to change algorithms, you upload new sensing logic as small WASM modules - compiled from Rust, packaged in signed RVF containers. See the [WASM Programmable Sensing](#wasm-programmable-sensing-tier-3) section for full details. 
@@ -197,8 +197,8 @@ Offset Size Field | Docker Desktop | 28.x+ | Cross-compile firmware in ESP-IDF container | | esptool | 5.x+ | Flash firmware to ESP32 (`pip install esptool`) | | Python 3.10+ | 3.10+ | Provisioning script, serial monitor | -| ESP32-S3 board | -- | Target hardware | -| CP210x driver | -- | USB-UART bridge driver ([download](https://www.silabs.com/developers/usb-to-uart-bridge-vcp-drivers)) | +| ESP32-S3 board | - | Target hardware | +| CP210x driver | - | USB-UART bridge driver ([download](https://www.silabs.com/developers/usb-to-uart-bridge-vcp-drivers)) | > **Why Docker?** ESP-IDF does NOT work from Git Bash/MSYS2 on Windows. The `idf.py` script detects the `MSYSTEM` environment variable and skips `main()`. Even removing `MSYSTEM`, the `cmd.exe` subprocess injects `doskey` aliases that break the ninja linker. Docker is the only reliable cross-platform build method. @@ -215,9 +215,9 @@ MSYS_NO_PATHCONV=1 docker run --rm \ The `MSYS_NO_PATHCONV=1` prefix prevents Git Bash from mangling the `/project` path to `C:/Program Files/Git/project`. 
**Build output:** -- `build/bootloader/bootloader.bin` -- second-stage bootloader -- `build/partition_table/partition-table.bin` -- flash partition layout -- `build/esp32-csi-node.bin` -- application firmware +- `build/bootloader/bootloader.bin` - second-stage bootloader +- `build/partition_table/partition-table.bin` - flash partition layout +- `build/esp32-csi-node.bin` - application firmware ### Custom Configuration @@ -268,7 +268,7 @@ python -m serial.tools.miniterm COM7 115200 Expected output after boot: ``` -I (321) main: ESP32-S3 CSI Node (ADR-018) -- Node ID: 1 +I (321) main: ESP32-S3 CSI Node (ADR-018) - Node ID: 1 I (345) main: WiFi STA initialized, connecting to SSID: wifi-densepose I (1023) main: Connected to WiFi I (1025) main: CSI streaming active -> 192.168.1.100:5005 (edge_tier=2, OTA=ready, WASM=ready) @@ -456,11 +456,11 @@ cargo build -p wifi-densepose-wasm-edge --target wasm32-unknown-unknown --releas | Component | SRAM | PSRAM | Flash | |-----------|------|-------|-------| -| Base firmware (Tier 0) | ~12 KB | -- | ~820 KB | -| Tier 1-2 DSP pipeline | ~10 KB | -- | ~33 KB | -| WASM3 interpreter | ~10 KB | -- | ~100 KB | -| WASM arenas (x4 slots) | -- | 640 KB | -- | -| Host API + HTTP upload | ~3 KB | -- | ~23 KB | +| Base firmware (Tier 0) | ~12 KB | - | ~820 KB | +| Tier 1-2 DSP pipeline | ~10 KB | - | ~33 KB | +| WASM3 interpreter | ~10 KB | - | ~100 KB | +| WASM arenas (x4 slots) | - | 640 KB | - | +| Host API + HTTP upload | ~3 KB | - | ~23 KB | | **Total** | **~35 KB** | **640 KB** | **~943 KB** | - **PSRAM remaining:** 7.36 MB (available for future use) @@ -525,12 +525,12 @@ The firmware is continuously verified by [`.github/workflows/firmware-ci.yml`](. ## QEMU Testing (ADR-061) -Test the firmware without physical hardware using Espressif's QEMU fork. 
A compile-time mock CSI generator (`CONFIG_CSI_MOCK_ENABLED=y`) replaces the real WiFi CSI callback with a timer-driven synthetic frame injector that exercises the full edge processing pipeline -- biquad filtering, Welford stats, top-K selection, presence/fall detection, and vitals extraction. +Test the firmware without physical hardware using Espressif's QEMU fork. A compile-time mock CSI generator (`CONFIG_CSI_MOCK_ENABLED=y`) replaces the real WiFi CSI callback with a timer-driven synthetic frame injector that exercises the full edge processing pipeline - biquad filtering, Welford stats, top-K selection, presence/fall detection, and vitals extraction. ### Prerequisites -- **ESP-IDF v5.4** -- [installation guide](https://docs.espressif.com/projects/esp-idf/en/v5.4/esp32s3/get-started/) -- **Espressif QEMU fork** -- must be built from source (not in Ubuntu packages): +- **ESP-IDF v5.4** - [installation guide](https://docs.espressif.com/projects/esp-idf/en/v5.4/esp32s3/get-started/) +- **Espressif QEMU fork** - must be built from source (not in Ubuntu packages): ```bash git clone --depth 1 https://github.com/espressif/qemu.git /tmp/qemu @@ -666,7 +666,7 @@ Key breakpoints: | `wasm_runtime.c:wasm_on_csi` | WASM module dispatch | | `mock_csi.c:mock_generate_csi_frame` | Synthetic frame generation | -VS Code integration -- add to `.vscode/launch.json`: +VS Code integration - add to `.vscode/launch.json`: ```json { @@ -755,7 +755,7 @@ No physical ESP32 hardware is needed in CI. 
| No serial output | Wrong baud rate | Use `115200` in your serial monitor | | WiFi won't connect | Wrong SSID/password | Re-run `provision.py` with correct credentials | | No UDP frames received | Firewall blocking | Allow inbound UDP on port 5005 (see below) | -| `idf.py` fails on Windows | Git Bash/MSYS2 incompatibility | Use Docker -- this is the only supported build method on Windows | +| `idf.py` fails on Windows | Git Bash/MSYS2 incompatibility | Use Docker - this is the only supported build method on Windows | | CSI callback not firing | Promiscuous mode issue | Verify `esp_wifi_set_promiscuous(true)` in `csi_collector.c` | | WASM upload rejected | Signature verification | Disable with `wasm_verify=0` via NVS for development, or sign with Ed25519 | | High frame drop rate | Ring buffer overflow | Reduce `edge_tier` or increase `dwell_ms` | diff --git a/install.sh b/install.sh index ee2a84d79..19d753152 100755 --- a/install.sh +++ b/install.sh @@ -633,7 +633,7 @@ install_rust_deps() { return 1 fi fi - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y # shellcheck source=/dev/null source "${HOME}/.cargo/env" 2>/dev/null || true HAS_RUST=true @@ -881,7 +881,7 @@ build_wasm() { build_wasm_field() { echo "" echo -e " ${CYAN}Building WASM package with WiFi-Mat (field profile ~62MB)...${RESET}" - (cd "${RUST_DIR}" && wasm-pack build crates/wifi-densepose-wasm --target web --release -- --features mat 2>&1 | tail -10) + (cd "${RUST_DIR}" && wasm-pack build crates/wifi-densepose-wasm --target web --release -- --features mat 2>&1 | tail -10) if [ -d "${RUST_DIR}/crates/wifi-densepose-wasm/pkg" ]; then local wasm_size @@ -975,7 +975,7 @@ post_install() { echo " firmware/esp32-csi-node/sdkconfig.defaults" echo " # Edit sdkconfig.defaults: set SSID, password, aggregator IP" echo "" - echo " # 2. Build firmware (Docker — no local ESP-IDF needed):" + echo " # 2. 
Build firmware (Docker - no local ESP-IDF needed):" echo " cd firmware/esp32-csi-node" echo " docker run --rm -v \"\$(pwd):/project\" -w /project \\" echo " espressif/idf:v5.2 bash -c 'idf.py set-target esp32s3 && idf.py build'" @@ -985,7 +985,7 @@ post_install() { echo " --baud 460800 write-flash @flash_args" echo "" echo " # 4. Run the aggregator:" - echo " cargo run -p wifi-densepose-hardware --bin aggregator -- \\" + echo " cargo run -p wifi-densepose-hardware --bin aggregator -- \\" echo " --bind 0.0.0.0:5005 --verbose" ;; docker) diff --git a/rust-port/wifi-densepose-rs/crates/README.md b/rust-port/wifi-densepose-rs/crates/README.md index 0bc3fa028..d46ffd76c 100644 --- a/rust-port/wifi-densepose-rs/crates/README.md +++ b/rust-port/wifi-densepose-rs/crates/README.md @@ -21,7 +21,7 @@ A modular Rust workspace for WiFi-based human pose estimation, vital sign monito | Feature Extraction | ~8 ms | 9.03 us | **~890x** | | Motion Detection | ~1 ms | 186 ns | **~5400x** | | Full Pipeline | ~15 ms | 18.47 us | **~810x** | -| Vital Signs | N/A | 86 us (11,665 fps) | -- | +| Vital Signs | N/A | 86 us (11,665 fps) | - | ## Crate Overview @@ -38,21 +38,21 @@ A modular Rust workspace for WiFi-based human pose estimation, vital sign monito | Crate | Description | RuVector Integration | crates.io | |-------|-------------|---------------------|-----------| | [`wifi-densepose-signal`](wifi-densepose-signal/) | SOTA CSI signal processing (6 algorithms from SpotFi, FarSense, Widar 3.0) | `ruvector-mincut`, `ruvector-attn-mincut`, `ruvector-attention`, `ruvector-solver` | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-signal.svg)](https://crates.io/crates/wifi-densepose-signal) | -| [`wifi-densepose-vitals`](wifi-densepose-vitals/) | Vital sign extraction: breathing (6-30 BPM) and heart rate (40-120 BPM) | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-vitals.svg)](https://crates.io/crates/wifi-densepose-vitals) | -| 
[`wifi-densepose-wifiscan`](wifi-densepose-wifiscan/) | Multi-BSSID WiFi scanning for Windows-enhanced sensing | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-wifiscan.svg)](https://crates.io/crates/wifi-densepose-wifiscan) | +| [`wifi-densepose-vitals`](wifi-densepose-vitals/) | Vital sign extraction: breathing (6-30 BPM) and heart rate (40-120 BPM) | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-vitals.svg)](https://crates.io/crates/wifi-densepose-vitals) | +| [`wifi-densepose-wifiscan`](wifi-densepose-wifiscan/) | Multi-BSSID WiFi scanning for Windows-enhanced sensing | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-wifiscan.svg)](https://crates.io/crates/wifi-densepose-wifiscan) | ### Neural Network & Training | Crate | Description | RuVector Integration | crates.io | |-------|-------------|---------------------|-----------| -| [`wifi-densepose-nn`](wifi-densepose-nn/) | Multi-backend inference (ONNX, PyTorch, Candle) with DensePose head (24 body parts) | -- | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-nn.svg)](https://crates.io/crates/wifi-densepose-nn) | +| [`wifi-densepose-nn`](wifi-densepose-nn/) | Multi-backend inference (ONNX, PyTorch, Candle) with DensePose head (24 body parts) | - | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-nn.svg)](https://crates.io/crates/wifi-densepose-nn) | | [`wifi-densepose-train`](wifi-densepose-train/) | Training pipeline with MM-Fi dataset, 114->56 subcarrier interpolation | **All 5 crates** | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-train.svg)](https://crates.io/crates/wifi-densepose-train) | ### Disaster Response | Crate | Description | RuVector Integration | crates.io | |-------|-------------|---------------------|-----------| -| [`wifi-densepose-mat`](wifi-densepose-mat/) | Mass Casualty Assessment Tool -- survivor detection, triage, multi-AP localization | `ruvector-solver`, `ruvector-temporal-tensor` | 
[![crates.io](https://img.shields.io/crates/v/wifi-densepose-mat.svg)](https://crates.io/crates/wifi-densepose-mat) | +| [`wifi-densepose-mat`](wifi-densepose-mat/) | Mass Casualty Assessment Tool - survivor detection, triage, multi-AP localization | `ruvector-solver`, `ruvector-temporal-tensor` | [![crates.io](https://img.shields.io/crates/v/wifi-densepose-mat.svg)](https://crates.io/crates/wifi-densepose-mat) | ### Hardware & Deployment @@ -201,7 +201,7 @@ println!("RSSI: {} dBm, {} subcarriers", frame.metadata.rssi, frame.subcarriers. cargo check -p wifi-densepose-train --no-default-features # Run training with GPU (requires tch/libtorch) -cargo run -p wifi-densepose-train --features tch-backend --bin train -- \ +cargo run -p wifi-densepose-train --features tch-backend --bin train -- \ --config training.toml --dataset /path/to/mmfi # Verify deterministic training proof @@ -286,9 +286,9 @@ Key design decisions documented in [`docs/adr/`](https://github.com/ruvnet/wifi- ## Related Projects -- **[WiFi-DensePose](https://github.com/ruvnet/wifi-densepose)** -- Main repository (Python v1 + Rust v2) -- **[RuVector](https://github.com/ruvnet/ruvector)** -- Graph algorithms for neural networks (5 crates, v2.0.4) -- **[rUv](https://github.com/ruvnet)** -- Creator and maintainer +- **[WiFi-DensePose](https://github.com/ruvnet/wifi-densepose)** - Main repository (Python v1 + Rust v2) +- **[RuVector](https://github.com/ruvnet/ruvector)** - Graph algorithms for neural networks (5 crates, v2.0.4) +- **[rUv](https://github.com/ruvnet)** - Creator and maintainer ## License diff --git a/rust-port/wifi-densepose-rs/crates/ruv-neural/README.md b/rust-port/wifi-densepose-rs/crates/ruv-neural/README.md index fadff7426..f501f742e 100644 --- a/rust-port/wifi-densepose-rs/crates/ruv-neural/README.md +++ b/rust-port/wifi-densepose-rs/crates/ruv-neural/README.md @@ -1,4 +1,4 @@ -# rUv Neural — Brain Topology Analysis System +# rUv Neural - Brain Topology Analysis System > Quantum sensor 
integration x RuVector graph memory x Dynamic mincut coherence detection @@ -32,13 +32,13 @@ analysis. It transforms neural magnetic field measurements from quantum sensors magnetometers, optically pumped magnetometers) into dynamic connectivity graphs, then uses minimum cut algorithms to detect cognitive state transitions. -This is not mind reading — it measures **how cognition organizes itself** by tracking the +This is not mind reading - it measures **how cognition organizes itself** by tracking the topology of brain networks in real time. ## Hardware Parts List Below is a reference bill of materials for building a basic multi-channel neural sensing rig. -Prices are approximate (2026). Links are for reference only — equivalent components from any +Prices are approximate (2026). Links are for reference only - equivalent components from any vendor will work. ### Core: NV Diamond Magnetometer Array @@ -86,7 +86,7 @@ vendor will work. |-----------|-----|-------------|------|-------| | Soldering Station (adjustable temp) | 1 | $25 | [AliExpress: Soldering Station](https://www.aliexpress.com/w/wholesale-soldering-station-adjustable.html) | For sensor board assembly | | Breadboard + Jumper Wire Kit | 1 | $8 | [AliExpress: Breadboard Kit](https://www.aliexpress.com/w/wholesale-breadboard-jumper-wire-kit.html) | Prototyping | -| 3D Printed Sensor Mount (STL provided) | 1 | — | Print locally | Holds diamond chips in array | +| 3D Printed Sensor Mount (STL provided) | 1 | - | Print locally | Holds diamond chips in array | **Estimated total cost:** ~$650–$900 for a 16-channel NV diamond setup, ~$500 for OPM, ~$200 for EEG. @@ -110,12 +110,12 @@ vendor will work. 4. 
**Host Software** - Install Rust 1.75+ and build: `cargo build --workspace --release` - - Run the pipeline: `cargo run -p ruv-neural-cli --release -- pipeline --channels 16 --duration 60` + - Run the pipeline: `cargo run -p ruv-neural-cli --release -- pipeline --channels 16 --duration 60` - Or use individual crates as a library (see [Use as Library](#use-as-library)) 5. **Verification** - - Generate a witness bundle: `cargo run -p ruv-neural-cli -- witness --output witness.json` - - Verify Ed25519 signature: `cargo run -p ruv-neural-cli -- witness --verify witness.json` + - Generate a witness bundle: `cargo run -p ruv-neural-cli -- witness --output witness.json` + - Verify Ed25519 signature: `cargo run -p ruv-neural-cli -- witness --verify witness.json` - Expected output: `VERDICT: PASS` (41 capability attestations, 338 tests) ## Architecture @@ -182,7 +182,7 @@ All crates are published on [crates.io](https://crates.io/search?q=ruv-neural): | [`ruv-neural-memory`](https://crates.io/crates/ruv-neural-memory) | [![crates.io](https://img.shields.io/crates/v/ruv-neural-memory.svg)](https://crates.io/crates/ruv-neural-memory) | Persistent neural state memory + HNSW | core | | [`ruv-neural-decoder`](https://crates.io/crates/ruv-neural-decoder) | [![crates.io](https://img.shields.io/crates/v/ruv-neural-decoder.svg)](https://crates.io/crates/ruv-neural-decoder) | Cognitive state classification + BCI | core | | [`ruv-neural-esp32`](https://crates.io/crates/ruv-neural-esp32) | [![crates.io](https://img.shields.io/crates/v/ruv-neural-esp32.svg)](https://crates.io/crates/ruv-neural-esp32) | ESP32 edge sensor integration | core | -| `ruv-neural-wasm` | — | WebAssembly browser bindings | core | +| `ruv-neural-wasm` | - | WebAssembly browser bindings | core | | [`ruv-neural-viz`](https://crates.io/crates/ruv-neural-viz) | [![crates.io](https://img.shields.io/crates/v/ruv-neural-viz.svg)](https://crates.io/crates/ruv-neural-viz) | Visualization and ASCII rendering | core, graph, 
mincut | | [`ruv-neural-cli`](https://crates.io/crates/ruv-neural-cli) | [![crates.io](https://img.shields.io/crates/v/ruv-neural-cli.svg)](https://crates.io/crates/ruv-neural-cli) | CLI tool (`ruv-neural` binary) | all | @@ -222,9 +222,9 @@ cargo test --workspace ### Run CLI ```bash -cargo run -p ruv-neural-cli -- simulate --channels 64 --duration 10 -cargo run -p ruv-neural-cli -- pipeline --channels 32 --duration 5 --dashboard -cargo run -p ruv-neural-cli -- mincut --input brain_graph.json +cargo run -p ruv-neural-cli -- simulate --channels 64 --duration 10 +cargo run -p ruv-neural-cli -- pipeline --channels 32 --duration 5 --dashboard +cargo run -p ruv-neural-cli -- mincut --input brain_graph.json ``` ### Install from crates.io @@ -290,13 +290,13 @@ println!("Embedding dim: {}", embedding.dimension); Each crate is independently usable. Common combinations: -- **Sensor + Signal** -- Data acquisition and preprocessing only -- **Graph + Mincut** -- Graph analysis without sensor dependency -- **Embed + Memory** -- Embedding storage without real-time pipeline -- **Core + WASM** -- Browser-based graph visualization -- **ESP32 alone** -- Edge preprocessing on embedded hardware -- **Signal + Embed** -- Feature extraction pipeline without graph construction -- **Mincut + Viz** -- Topology analysis with ASCII dashboard output +- **Sensor + Signal** - Data acquisition and preprocessing only +- **Graph + Mincut** - Graph analysis without sensor dependency +- **Embed + Memory** - Embedding storage without real-time pipeline +- **Core + WASM** - Browser-based graph visualization +- **ESP32 alone** - Edge preprocessing on embedded hardware +- **Signal + Embed** - Feature extraction pipeline without graph construction +- **Mincut + Viz** - Topology analysis with ASCII dashboard output ## Platform Support @@ -322,26 +322,26 @@ cargo build -p ruv-neural-wasm --target wasm32-unknown-unknown --release - **Butterworth IIR filters** in second-order sections (SOS) form - **Welch PSD** 
estimation with configurable window and overlap - **Hilbert transform** for instantaneous phase extraction -- **Artifact detection** -- eye blink, muscle, cardiac artifact rejection -- **Connectivity metrics** -- PLV, coherence, imaginary coherence, AEC +- **Artifact detection** - eye blink, muscle, cardiac artifact rejection +- **Connectivity metrics** - PLV, coherence, imaginary coherence, AEC ### Minimum Cut Analysis (`ruv-neural-mincut`) -- **Stoer-Wagner** -- Global minimum cut in O(V^3) -- **Normalized cut** (Shi-Malik) -- Spectral bisection via the Fiedler vector -- **Multiway cut** -- Recursive normalized cut for k-module detection -- **Spectral cut** -- Cheeger constant and spectral bisection bounds -- **Dynamic tracking** -- Temporal topology transition detection -- **Coherence events** -- Network formation, dissolution, merger, split +- **Stoer-Wagner** - Global minimum cut in O(V^3) +- **Normalized cut** (Shi-Malik) - Spectral bisection via the Fiedler vector +- **Multiway cut** - Recursive normalized cut for k-module detection +- **Spectral cut** - Cheeger constant and spectral bisection bounds +- **Dynamic tracking** - Temporal topology transition detection +- **Coherence events** - Network formation, dissolution, merger, split ### Embeddings (`ruv-neural-embed`) -- **Spectral** -- Laplacian eigenvector positional encoding -- **Topology** -- Hand-crafted topological feature vectors -- **Node2Vec** -- Random-walk co-occurrence embeddings -- **Combined** -- Weighted concatenation of multiple methods -- **Temporal** -- Sliding-window context-enriched embeddings -- **RVF export** -- Serialization to RuVector `.rvf` format +- **Spectral** - Laplacian eigenvector positional encoding +- **Topology** - Hand-crafted topological feature vectors +- **Node2Vec** - Random-walk co-occurrence embeddings +- **Combined** - Weighted concatenation of multiple methods +- **Temporal** - Sliding-window context-enriched embeddings +- **RVF export** - Serialization to 
RuVector `.rvf` format ## RVF Format @@ -367,10 +367,10 @@ and that all tests passed. ```bash # Generate a signed witness bundle -cargo run -p ruv-neural-cli -- witness --output witness-bundle.json +cargo run -p ruv-neural-cli -- witness --output witness-bundle.json # Verify (any third party can do this) -cargo run -p ruv-neural-cli -- witness --verify witness-bundle.json +cargo run -p ruv-neural-cli -- witness --verify witness-bundle.json ``` The bundle contains: @@ -380,7 +380,7 @@ The bundle contains: - **Public key** for independent verification - Test count and pass/fail status -Tampered bundles are detected — modifying any attestation invalidates the digest and +Tampered bundles are detected - modifying any attestation invalidates the digest and signature verification returns `FAIL`. ## Testing @@ -393,7 +393,7 @@ cargo test --workspace cargo test -p ruv-neural-mincut # Run with logging enabled -RUST_LOG=debug cargo test --workspace -- --nocapture +RUST_LOG=debug cargo test --workspace -- --nocapture # Run benchmarks (requires nightly or criterion) cargo bench -p ruv-neural-mincut diff --git a/rust-port/wifi-densepose-rs/crates/ruv-neural/SECURITY_REVIEW.md b/rust-port/wifi-densepose-rs/crates/ruv-neural/SECURITY_REVIEW.md index bc5a44dbc..9843eb1ce 100644 --- a/rust-port/wifi-densepose-rs/crates/ruv-neural/SECURITY_REVIEW.md +++ b/rust-port/wifi-densepose-rs/crates/ruv-neural/SECURITY_REVIEW.md @@ -428,10 +428,10 @@ criterion = { workspace = true } ```bash # Generate a flamegraph of the full pipeline -cargo flamegraph --bench full_pipeline -- --bench +cargo flamegraph --bench full_pipeline -- --bench # Memory profiling with DHAT -cargo test --features dhat-heap -- --test full_pipeline +cargo test --features dhat-heap -- --test full_pipeline ``` #### WASM Performance diff --git a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-cli/README.md b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-cli/README.md index a20c70af6..d5af67490 100644 --- 
a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-cli/README.md +++ b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-cli/README.md @@ -17,12 +17,12 @@ exporting to multiple visualization formats. cargo install --path . # Or run directly -cargo run -p ruv-neural-cli -- +cargo run -p ruv-neural-cli -- ``` ## Commands -### `simulate` -- Generate synthetic neural data +### `simulate` - Generate synthetic neural data ```bash ruv-neural simulate --channels 64 --duration 10 --sample-rate 1000 --output data.json @@ -35,7 +35,7 @@ ruv-neural simulate --channels 64 --duration 10 --sample-rate 1000 --output data | `-s, --sample-rate` | 1000.0 | Sample rate in Hz | | `-o, --output` | (none) | Output file path (JSON) | -### `analyze` -- Analyze a brain connectivity graph +### `analyze` - Analyze a brain connectivity graph ```bash ruv-neural analyze --input graph.json --ascii --csv metrics.csv @@ -47,7 +47,7 @@ ruv-neural analyze --input graph.json --ascii --csv metrics.csv | `--ascii` | false | Show ASCII visualization | | `--csv` | (none) | Export metrics to CSV file | -### `mincut` -- Compute minimum cut +### `mincut` - Compute minimum cut ```bash ruv-neural mincut --input graph.json --k 4 @@ -58,7 +58,7 @@ ruv-neural mincut --input graph.json --k 4 | `-i, --input` | (required) | Input graph file (JSON) | | `-k` | (none) | Multi-way cut with k partitions| -### `pipeline` -- Full end-to-end pipeline +### `pipeline` - Full end-to-end pipeline ```bash ruv-neural pipeline --channels 32 --duration 5 --dashboard @@ -72,7 +72,7 @@ Runs: simulate -> preprocess -> build graph -> mincut -> embed -> decode. 
| `-d, --duration` | 5.0 | Duration in seconds | | `--dashboard` | false | Show real-time ASCII dashboard | -### `export` -- Export to visualization format +### `export` - Export to visualization format ```bash ruv-neural export --input graph.json --format dot --output graph.dot @@ -84,7 +84,7 @@ ruv-neural export --input graph.json --format dot --output graph.dot | `-f, --format` | d3 | Output format: d3, dot, gexf, csv, rvf | | `-o, --output` | (required) | Output file path | -### `info` -- Show system information +### `info` - Show system information ```bash ruv-neural info diff --git a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-core/README.md b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-core/README.md index 6bf96792e..fba5e265a 100644 --- a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-core/README.md +++ b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-core/README.md @@ -6,7 +6,7 @@ Core types, traits, and error types for the rUv Neural brain topology analysis s `ruv-neural-core` is the foundation crate of the rUv Neural workspace. It defines all shared data types, trait interfaces, and the RVF binary file format used across the -other eleven crates. This crate has **zero** internal dependencies -- every other +other eleven crates. This crate has **zero** internal dependencies - every other ruv-neural crate depends on it. ## Features diff --git a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-decoder/README.md b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-decoder/README.md index 72cbd58ff..bc8cd8424 100644 --- a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-decoder/README.md +++ b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-decoder/README.md @@ -5,9 +5,9 @@ Cognitive state classification and BCI decoding from neural topology embeddings. ## Overview `ruv-neural-decoder` classifies cognitive states from brain graph embeddings and -topology metrics. 
It provides multiple decoding strategies -- KNN classification +topology metrics. It provides multiple decoding strategies - KNN classification from labeled exemplars, threshold-based rule systems, temporal transition detection, -and clinical biomarker scoring -- plus an ensemble pipeline that combines all +and clinical biomarker scoring - plus an ensemble pipeline that combines all strategies for robust real-time brain-computer interface (BCI) output. ## Features diff --git a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-wasm/README.md b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-wasm/README.md index ec4f81555..a0d7c4d10 100644 --- a/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-wasm/README.md +++ b/rust-port/wifi-densepose-rs/crates/ruv-neural/ruv-neural-wasm/README.md @@ -16,15 +16,15 @@ separately targeting `wasm32-unknown-unknown`. ## Features -- **Graph parsing**: `create_brain_graph` -- parse `BrainGraph` from JSON -- **Minimum cut**: `compute_mincut` -- Stoer-Wagner on graphs up to 500 nodes -- **Topology metrics**: `compute_topology_metrics` -- density, efficiency, +- **Graph parsing**: `create_brain_graph` - parse `BrainGraph` from JSON +- **Minimum cut**: `compute_mincut` - Stoer-Wagner on graphs up to 500 nodes +- **Topology metrics**: `compute_topology_metrics` - density, efficiency, modularity, Fiedler value, entropy, module count -- **Spectral embedding**: `embed_graph` -- power iteration on normalized Laplacian +- **Spectral embedding**: `embed_graph` - power iteration on normalized Laplacian (no LAPACK dependency) -- **State decoding**: `decode_state` -- threshold-based cognitive state classification +- **State decoding**: `decode_state` - threshold-based cognitive state classification from topology metrics -- **RVF I/O**: `load_rvf` / `export_rvf` -- read and write RuVector binary files +- **RVF I/O**: `load_rvf` / `export_rvf` - read and write RuVector binary files - **Streaming** (`streaming`): 
WebSocket-compatible streaming data processor - **Visualization data** (`viz_data`): Data structures for D3.js and Three.js rendering diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-api/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-api/README.md index b1837c24d..1dc04ef74 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-api/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-api/README.md @@ -17,13 +17,13 @@ clients. ## Planned Features -- **REST endpoints** -- CRUD for scan zones, pose queries, model configuration, and health checks. -- **WebSocket streaming** -- Real-time pose estimate broadcasts with per-client subscription filters. -- **Authentication** -- Token-based auth middleware via `tower` layers. -- **Rate limiting** -- Configurable per-route limits to protect hardware-constrained deployments. -- **OpenAPI spec** -- Auto-generated documentation via `utoipa`. -- **CORS** -- Configurable cross-origin support for browser-based dashboards. -- **Graceful shutdown** -- Clean connection draining on SIGTERM. +- **REST endpoints** - CRUD for scan zones, pose queries, model configuration, and health checks. +- **WebSocket streaming** - Real-time pose estimate broadcasts with per-client subscription filters. +- **Authentication** - Token-based auth middleware via `tower` layers. +- **Rate limiting** - Configurable per-route limits to protect hardware-constrained deployments. +- **OpenAPI spec** - Auto-generated documentation via `utoipa`. +- **CORS** - Configurable cross-origin support for browser-based dashboards. +- **Graceful shutdown** - Clean connection draining on SIGTERM. 
## Quick Start diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-cli/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-cli/README.md index 39f3737ff..4ca8c3f29 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-cli/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-cli/README.md @@ -9,7 +9,7 @@ disaster response operations. ## Overview -`wifi-densepose-cli` ships the `wifi-densepose` binary -- a single entry point for operating the +`wifi-densepose-cli` ships the `wifi-densepose` binary - a single entry point for operating the WiFi-DensePose system from the terminal. The primary command group is `mat`, which drives the disaster survivor detection and triage workflow powered by the `wifi-densepose-mat` crate. @@ -19,17 +19,17 @@ Built with [clap](https://docs.rs/clap) for argument parsing, ## Features -- **Survivor scanning** -- Start continuous or one-shot scans across disaster zones with configurable +- **Survivor scanning** - Start continuous or one-shot scans across disaster zones with configurable sensitivity, depth, and disaster type. -- **Triage management** -- List detected survivors sorted by triage priority (Immediate / Delayed / +- **Triage management** - List detected survivors sorted by triage priority (Immediate / Delayed / Minor / Deceased / Unknown) with filtering and output format options. -- **Alert handling** -- View, acknowledge, resolve, and escalate alerts generated by the detection +- **Alert handling** - View, acknowledge, resolve, and escalate alerts generated by the detection pipeline. -- **Zone management** -- Add, remove, pause, and resume rectangular or circular scan zones. -- **Data export** -- Export scan results to JSON or CSV for integration with external USAR systems. -- **Simulation mode** -- Run demo scans with synthetic detections (`--simulate`) for testing and +- **Zone management** - Add, remove, pause, and resume rectangular or circular scan zones. 
+- **Data export** - Export scan results to JSON or CSV for integration with external USAR systems. +- **Simulation mode** - Run demo scans with synthetic detections (`--simulate`) for testing and training without hardware. -- **Multiple output formats** -- Table, JSON, and compact single-line output for scripting. +- **Multiple output formats** - Table, JSON, and compact single-line output for scripting. ### Feature flags diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-config/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-config/README.md index ffcfd5c71..9a2d3fc3a 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-config/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-config/README.md @@ -17,15 +17,15 @@ variables, TOML/YAML files, and CLI overrides into strongly-typed Rust structs. ## Planned Features -- **Multi-source loading** -- Merge configuration from `.env`, TOML files, YAML files, and +- **Multi-source loading** - Merge configuration from `.env`, TOML files, YAML files, and environment variables with well-defined precedence. -- **Typed configuration** -- Strongly-typed structs for server, signal processing, neural network, +- **Typed configuration** - Strongly-typed structs for server, signal processing, neural network, hardware, and database settings. -- **Validation** -- Schema validation with human-readable error messages on startup. -- **Hot reload** -- Watch configuration files for changes and notify dependent services. -- **Profile support** -- Named profiles (`development`, `production`, `testing`) with per-profile +- **Validation** - Schema validation with human-readable error messages on startup. +- **Hot reload** - Watch configuration files for changes and notify dependent services. +- **Profile support** - Named profiles (`development`, `production`, `testing`) with per-profile overrides. 
-- **Secret filtering** -- Redact sensitive values (API keys, database passwords) in logs and debug +- **Secret filtering** - Redact sensitive values (API keys, database passwords) in logs and debug output. ## Quick Start diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-core/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-core/README.md index 6c2acdadd..202decf87 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-core/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-core/README.md @@ -15,14 +15,14 @@ unsafe code. ## Features -- **Core data types** -- `CsiFrame`, `ProcessedSignal`, `PoseEstimate`, `PersonPose`, `Keypoint`, +- **Core data types** - `CsiFrame`, `ProcessedSignal`, `PoseEstimate`, `PersonPose`, `Keypoint`, `KeypointType`, `BoundingBox`, `Confidence`, `Timestamp`, and more. -- **Trait abstractions** -- `SignalProcessor`, `NeuralInference`, and `DataStore` define the +- **Trait abstractions** - `SignalProcessor`, `NeuralInference`, and `DataStore` define the contracts for signal processing, neural network inference, and data persistence respectively. -- **Error hierarchy** -- `CoreError`, `SignalError`, `InferenceError`, and `StorageError` provide +- **Error hierarchy** - `CoreError`, `SignalError`, `InferenceError`, and `StorageError` provide typed error handling across subsystem boundaries. -- **`no_std` support** -- Disable the default `std` feature for embedded or WASM targets. -- **Constants** -- `MAX_KEYPOINTS` (17, COCO format), `MAX_SUBCARRIERS` (256), +- **`no_std` support** - Disable the default `std` feature for embedded or WASM targets. +- **Constants** - `MAX_KEYPOINTS` (17, COCO format), `MAX_SUBCARRIERS` (256), `DEFAULT_CONFIDENCE_THRESHOLD` (0.5). ### Feature flags @@ -59,11 +59,11 @@ use wifi_densepose_core::prelude::*; ```text wifi-densepose-core/src/ - lib.rs -- Re-exports, constants, prelude - types.rs -- CsiFrame, PoseEstimate, Keypoint, etc. 
- traits.rs -- SignalProcessor, NeuralInference, DataStore - error.rs -- CoreError, SignalError, InferenceError, StorageError - utils.rs -- Shared helper functions + lib.rs - Re-exports, constants, prelude + types.rs - CsiFrame, PoseEstimate, Keypoint, etc. + traits.rs - SignalProcessor, NeuralInference, DataStore + error.rs - CoreError, SignalError, InferenceError, StorageError + utils.rs - Shared helper functions ``` ## Related Crates diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-db/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-db/README.md index 0fc8b66ad..f779649fa 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-db/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-db/README.md @@ -17,17 +17,17 @@ backends are [SQLx](https://docs.rs/sqlx) for relational storage (PostgreSQL and ## Planned Features -- **Dual backend** -- PostgreSQL for production deployments, SQLite for single-node and embedded +- **Dual backend** - PostgreSQL for production deployments, SQLite for single-node and embedded use. Selectable at compile time via feature flags. -- **Redis caching** -- Connection-pooled Redis for low-latency pose estimate lookups, session +- **Redis caching** - Connection-pooled Redis for low-latency pose estimate lookups, session state, and pub/sub event distribution. -- **Migrations** -- Embedded SQL migrations managed by SQLx, applied automatically on startup. -- **Repository pattern** -- Typed repository structs (`PoseRepository`, `SessionRepository`, +- **Migrations** - Embedded SQL migrations managed by SQLx, applied automatically on startup. +- **Repository pattern** - Typed repository structs (`PoseRepository`, `SessionRepository`, `AlertRepository`) implementing the core `DataStore` trait. -- **Connection pooling** -- Configurable pool sizes via `sqlx::PgPool` / `sqlx::SqlitePool`. 
-- **Transaction support** -- Scoped transactions for multi-table writes (e.g., survivor detection +- **Connection pooling** - Configurable pool sizes via `sqlx::PgPool` / `sqlx::SqlitePool`. +- **Transaction support** - Scoped transactions for multi-table writes (e.g., survivor detection plus alert creation). -- **Time-series optimisation** -- Partitioned tables and retention policies for high-frequency CSI +- **Time-series optimisation** - Partitioned tables and retention policies for high-frequency CSI frame storage. ### Planned feature flags diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/README.md index 16e064001..dfd12c593 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/README.md @@ -1,12 +1,12 @@ # RuView Desktop -> **Work in Progress** — This crate is under active development. APIs and UI are subject to change. +> **Work in Progress** - This crate is under active development. APIs and UI are subject to change. Cross-platform desktop application for managing ESP32 WiFi sensing networks. Built with **Tauri v2** (Rust backend) and **React + TypeScript** (frontend), following the [ADR-053 design system](../../docs/adr/ADR-053-ui-design-system.md). ## Overview -RuView Desktop provides a unified interface for node discovery, firmware management, over-the-air updates, WASM edge module deployment, real-time sensing data visualization, and mesh network topology monitoring — all from a single native application. +RuView Desktop provides a unified interface for node discovery, firmware management, over-the-air updates, WASM edge module deployment, real-time sensing data visualization, and mesh network topology monitoring - all from a single native application. 
## Pages @@ -100,8 +100,8 @@ Pre-built binaries are available on the [Releases](https://github.com/ruvnet/RuV | Platform | Download | Status | |----------|----------|--------| | Windows x64 | [v0.3.0-alpha](https://github.com/ruvnet/RuView/releases/tag/v0.3.0-desktop-alpha) | Debug build | -| macOS | — | Planned | -| Linux | — | Planned | +| macOS | - | Planned | +| Linux | - | Planned | ### Running the pre-built exe (Windows) @@ -171,4 +171,4 @@ The installer/bundle will be in `target/release/bundle/` (`.msi` on Windows, `.d ## License -MIT — see [LICENSE](../../LICENSE) for details. +MIT - see [LICENSE](../../LICENSE) for details. diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/ui/src/types.ts b/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/ui/src/types.ts index d9b2e2937..0311b5d5b 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/ui/src/types.ts +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-desktop/ui/src/types.ts @@ -1,5 +1,5 @@ // ============================================================================= -// types.ts — TypeScript types matching the Rust domain model for RuView +// types.ts - TypeScript types matching the Rust domain model for RuView // ============================================================================= // --------------------------------------------------------------------------- diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/README.md index 682bb10fa..c29f880e2 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-hardware/README.md @@ -10,20 +10,20 @@ Hardware interface abstractions for WiFi CSI sensors (ESP32, Intel 5300, Atheros `wifi-densepose-hardware` provides platform-agnostic parsers for WiFi CSI data from multiple hardware sources. 
All parsing operates on byte buffers with no C FFI or hardware dependencies at -compile time, making the crate fully portable and deterministic -- the same bytes in always produce +compile time, making the crate fully portable and deterministic - the same bytes in always produce the same parsed output. ## Features -- **ESP32 binary parser** -- Parses ADR-018 binary CSI frames streamed over UDP from ESP32 and +- **ESP32 binary parser** - Parses ADR-018 binary CSI frames streamed over UDP from ESP32 and ESP32-S3 devices. -- **UDP aggregator** -- Receives and aggregates CSI frames from multiple ESP32 nodes (ADR-018 +- **UDP aggregator** - Receives and aggregates CSI frames from multiple ESP32 nodes (ADR-018 Layer 2). Provided as a standalone binary. -- **Bridge** -- Converts hardware `CsiFrame` into the `CsiData` format expected by the detection +- **Bridge** - Converts hardware `CsiFrame` into the `CsiData` format expected by the detection pipeline (ADR-018 Layer 3). -- **No mock data** -- Parsers either parse real bytes or return explicit `ParseError` values. +- **No mock data** - Parsers either parse real bytes or return explicit `ParseError` values. There are no synthetic fallbacks. -- **Pure byte-buffer parsing** -- No FFI to ESP-IDF or kernel modules. Safe to compile and test +- **Pure byte-buffer parsing** - No FFI to ESP-IDF or kernel modules. Safe to compile and test on any platform. 
### Feature flags @@ -60,12 +60,12 @@ match Esp32CsiParser::parse_frame(raw_bytes) { ```text wifi-densepose-hardware/src/ - lib.rs -- Re-exports: CsiFrame, Esp32CsiParser, ParseError, CsiData - csi_frame.rs -- CsiFrame, CsiMetadata, SubcarrierData, Bandwidth, AntennaConfig - esp32_parser.rs -- Esp32CsiParser (ADR-018 binary protocol) - error.rs -- ParseError - bridge.rs -- CsiData bridge to detection pipeline - aggregator/ -- UDP multi-node frame aggregator (binary) + lib.rs - Re-exports: CsiFrame, Esp32CsiParser, ParseError, CsiData + csi_frame.rs - CsiFrame, CsiMetadata, SubcarrierData, Bandwidth, AntennaConfig + esp32_parser.rs - Esp32CsiParser (ADR-018 binary protocol) + error.rs - ParseError + bridge.rs - CsiData bridge to detection pipeline + aggregator/ - UDP multi-node frame aggregator (binary) ``` ## Related Crates diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-mat/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-mat/README.md index 0b1b99d5d..9d16c6e36 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-mat/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-mat/README.md @@ -10,8 +10,8 @@ Mass Casualty Assessment Tool for WiFi-based disaster survivor detection and loc `wifi-densepose-mat` uses WiFi Channel State Information (CSI) to detect and locate survivors trapped in rubble, debris, or collapsed structures. The crate follows Domain-Driven Design (DDD) -with event sourcing, organized into three bounded contexts -- detection, localization, and -alerting -- plus a machine learning layer for debris penetration modeling and vital signs +with event sourcing, organized into three bounded contexts - detection, localization, and +alerting - plus a machine learning layer for debris penetration modeling and vital signs classification. Use cases include earthquake search and rescue, building collapse response, avalanche victim @@ -19,18 +19,18 @@ location, flood rescue operations, and mine collapse detection. 
## Features -- **Vital signs detection** -- Breathing patterns, heartbeat signatures, and movement +- **Vital signs detection** - Breathing patterns, heartbeat signatures, and movement classification with ensemble classifier combining all three modalities. -- **Survivor localization** -- 3D position estimation through debris via triangulation, depth +- **Survivor localization** - 3D position estimation through debris via triangulation, depth estimation, and position fusion. -- **Triage classification** -- Automatic START protocol-compatible triage with priority-based +- **Triage classification** - Automatic START protocol-compatible triage with priority-based alert generation and dispatch. -- **Event sourcing** -- All state changes emitted as domain events (`DetectionEvent`, +- **Event sourcing** - All state changes emitted as domain events (`DetectionEvent`, `AlertEvent`, `ZoneEvent`) stored in a pluggable `EventStore`. -- **ML debris model** -- Debris material classification, signal attenuation prediction, and +- **ML debris model** - Debris material classification, signal attenuation prediction, and uncertainty-aware vital signs classification. -- **REST + WebSocket API** -- `axum`-based HTTP API for real-time monitoring dashboards. -- **ruvector integration** -- `ruvector-solver` for triangulation math, `ruvector-temporal-tensor` +- **REST + WebSocket API** - `axum`-based HTTP API for real-time monitoring dashboards. +- **ruvector integration** - `ruvector-solver` for triangulation math, `ruvector-temporal-tensor` for compressed CSI buffering. 
### Feature flags @@ -80,22 +80,22 @@ async fn main() -> anyhow::Result<()> { ```text wifi-densepose-mat/src/ - lib.rs -- DisasterResponse coordinator, config builder, MatError + lib.rs - DisasterResponse coordinator, config builder, MatError domain/ - survivor.rs -- Survivor aggregate root - disaster_event.rs -- DisasterEvent, DisasterType - scan_zone.rs -- ScanZone, ZoneBounds - alert.rs -- Alert, Priority - vital_signs.rs -- VitalSignsReading, BreathingPattern, HeartbeatSignature - triage.rs -- TriageStatus, TriageCalculator (START protocol) - coordinates.rs -- Coordinates3D, LocationUncertainty - events.rs -- DomainEvent, EventStore, InMemoryEventStore - detection/ -- BreathingDetector, HeartbeatDetector, MovementClassifier, EnsembleClassifier - localization/ -- Triangulator, DepthEstimator, PositionFuser - alerting/ -- AlertGenerator, AlertDispatcher, TriageService - ml/ -- DebrisPenetrationModel, VitalSignsClassifier, UncertaintyEstimate - api/ -- axum REST + WebSocket router - integration/ -- SignalAdapter, NeuralAdapter, HardwareAdapter + survivor.rs - Survivor aggregate root + disaster_event.rs - DisasterEvent, DisasterType + scan_zone.rs - ScanZone, ZoneBounds + alert.rs - Alert, Priority + vital_signs.rs - VitalSignsReading, BreathingPattern, HeartbeatSignature + triage.rs - TriageStatus, TriageCalculator (START protocol) + coordinates.rs - Coordinates3D, LocationUncertainty + events.rs - DomainEvent, EventStore, InMemoryEventStore + detection/ - BreathingDetector, HeartbeatDetector, MovementClassifier, EnsembleClassifier + localization/ - Triangulator, DepthEstimator, PositionFuser + alerting/ - AlertGenerator, AlertDispatcher, TriageService + ml/ - DebrisPenetrationModel, VitalSignsClassifier, UncertaintyEstimate + api/ - axum REST + WebSocket router + integration/ - SignalAdapter, NeuralAdapter, HardwareAdapter ``` ## Related Crates diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-nn/README.md 
b/rust-port/wifi-densepose-rs/crates/wifi-densepose-nn/README.md index 463125b1d..68d7f86e8 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-nn/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-nn/README.md @@ -9,28 +9,28 @@ Multi-backend neural network inference for WiFi-based DensePose estimation. ## Overview `wifi-densepose-nn` provides the inference engine that maps processed WiFi CSI features to -DensePose body surface predictions. It supports three backends -- ONNX Runtime (default), -PyTorch via `tch-rs`, and Candle -- so models can run on CPU, CUDA GPU, or TensorRT depending +DensePose body surface predictions. It supports three backends - ONNX Runtime (default), +PyTorch via `tch-rs`, and Candle - so models can run on CPU, CUDA GPU, or TensorRT depending on the deployment target. The crate implements two key neural components: -- **DensePose Head** -- Predicts 24 body part segmentation masks and per-part UV coordinate +- **DensePose Head** - Predicts 24 body part segmentation masks and per-part UV coordinate regression. -- **Modality Translator** -- Translates CSI feature embeddings into visual feature space, +- **Modality Translator** - Translates CSI feature embeddings into visual feature space, bridging the domain gap between WiFi signals and image-based pose estimation. ## Features -- **ONNX Runtime backend** (default) -- Load and run `.onnx` models with CPU or GPU execution +- **ONNX Runtime backend** (default) - Load and run `.onnx` models with CPU or GPU execution providers. -- **PyTorch backend** (`tch-backend`) -- Native PyTorch inference via libtorch FFI. -- **Candle backend** (`candle-backend`) -- Pure-Rust inference with `candle-core` and +- **PyTorch backend** (`tch-backend`) - Native PyTorch inference via libtorch FFI. +- **Candle backend** (`candle-backend`) - Pure-Rust inference with `candle-core` and `candle-nn`. -- **CUDA acceleration** (`cuda`) -- GPU execution for supported backends. 
-- **TensorRT optimization** (`tensorrt`) -- INT8/FP16 optimized inference via ONNX Runtime. -- **Batched inference** -- Process multiple CSI frames in a single forward pass. -- **Model caching** -- Memory-mapped model weights via `memmap2`. +- **CUDA acceleration** (`cuda`) - GPU execution for supported backends. +- **TensorRT optimization** (`tensorrt`) - INT8/FP16 optimized inference via ONNX Runtime. +- **Batched inference** - Process multiple CSI frames in a single forward pass. +- **Model caching** - Memory-mapped model weights via `memmap2`. ### Feature flags @@ -64,13 +64,13 @@ println!("Body parts: {}", output.body_parts.shape()[1]); // 24 ```text wifi-densepose-nn/src/ - lib.rs -- Re-exports, constants (NUM_BODY_PARTS=24), prelude - densepose.rs -- DensePoseHead, DensePoseConfig, DensePoseOutput - inference.rs -- Backend trait, InferenceEngine, InferenceOptions - onnx.rs -- OnnxBackend, OnnxSession (feature-gated) - tensor.rs -- Tensor, TensorShape utilities - translator.rs -- ModalityTranslator (CSI -> visual space) - error.rs -- NnError, NnResult + lib.rs - Re-exports, constants (NUM_BODY_PARTS=24), prelude + densepose.rs - DensePoseHead, DensePoseConfig, DensePoseOutput + inference.rs - Backend trait, InferenceEngine, InferenceOptions + onnx.rs - OnnxBackend, OnnxSession (feature-gated) + tensor.rs - Tensor, TensorShape utilities + translator.rs - ModalityTranslator (CSI -> visual space) + error.rs - NnError, NnResult ``` ## Related Crates diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-ruvector/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-ruvector/README.md index e2f18ae1a..96bc35305 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-ruvector/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-ruvector/README.md @@ -1,6 +1,6 @@ # wifi-densepose-ruvector -RuVector v2.0.4 integration layer for WiFi-DensePose — ADR-017. +RuVector v2.0.4 integration layer for WiFi-DensePose - ADR-017. 
This crate implements all 7 ADR-017 ruvector integration points for the signal-processing pipeline and the Multi-AP Triage (MAT) disaster-detection diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-sensing-server/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-sensing-server/README.md index dd8ae39c2..2000de952 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-sensing-server/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-sensing-server/README.md @@ -21,28 +21,28 @@ per ADR-022 Phase 3. ## Features -- **UDP CSI ingestion** -- Receives ESP32 CSI frames on port 5005 and parses them into the internal +- **UDP CSI ingestion** - Receives ESP32 CSI frames on port 5005 and parses them into the internal `CsiFrame` representation. -- **Vital sign detection** -- Pure-Rust FFT-based breathing rate (0.1--0.5 Hz) and heart rate +- **Vital sign detection** - Pure-Rust FFT-based breathing rate (0.1--0.5 Hz) and heart rate (0.67--2.0 Hz) estimation from CSI amplitude time series (ADR-021). -- **RVF container** -- Standalone binary container format for packaging model weights, metadata, and +- **RVF container** - Standalone binary container format for packaging model weights, metadata, and configuration into a single `.rvf` file with 64-byte aligned segments. -- **RVF pipeline** -- Progressive model loading with streaming segment decoding. -- **Graph Transformer** -- Cross-attention bottleneck between antenna-space CSI features and the +- **RVF pipeline** - Progressive model loading with streaming segment decoding. +- **Graph Transformer** - Cross-attention bottleneck between antenna-space CSI features and the COCO 17-keypoint body graph, followed by GCN message passing (ADR-023 Phase 2). Pure `std`, no ML dependencies. 
-- **SONA adaptation** -- LoRA + EWC++ online adaptation for environment drift without catastrophic +- **SONA adaptation** - LoRA + EWC++ online adaptation for environment drift without catastrophic forgetting (ADR-023 Phase 5). -- **Contrastive CSI embeddings** -- Self-supervised SimCLR-style pretraining with InfoNCE loss, +- **Contrastive CSI embeddings** - Self-supervised SimCLR-style pretraining with InfoNCE loss, projection head, fingerprint indexing, and cross-modal pose alignment (ADR-024). -- **Sparse inference** -- Activation profiling, sparse matrix-vector multiply, INT8/FP16 +- **Sparse inference** - Activation profiling, sparse matrix-vector multiply, INT8/FP16 quantization, and a full sparse inference engine for edge deployment (ADR-023 Phase 6). -- **Dataset pipeline** -- Training dataset loading and batching. -- **Multi-BSSID scanning** -- Windows `netsh` integration for BSSID discovery via +- **Dataset pipeline** - Training dataset loading and batching. +- **Multi-BSSID scanning** - Windows `netsh` integration for BSSID discovery via `wifi-densepose-wifiscan` (ADR-022). -- **WebSocket broadcast** -- Real-time sensing updates pushed to all connected clients at +- **WebSocket broadcast** - Real-time sensing updates pushed to all connected clients at `ws://localhost:8765/ws/sensing`. -- **Static file serving** -- Hosts the sensing UI on port 8080 with CORS headers. +- **Static file serving** - Hosts the sensing UI on port 8080 with CORS headers. 
## Modules @@ -68,7 +68,7 @@ cargo build -p wifi-densepose-sensing-server cargo run -p wifi-densepose-sensing-server # Run with custom ports -cargo run -p wifi-densepose-sensing-server -- \ +cargo run -p wifi-densepose-sensing-server -- \ --http-port 9000 \ --udp-port 5005 \ --static-dir ./ui diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-signal/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-signal/README.md index 66b1cd23f..95507b3e9 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-signal/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-signal/README.md @@ -27,11 +27,11 @@ graph and attention operations. ## Features -- **CSI preprocessing** -- Noise removal, windowing, normalization via `CsiProcessor`. -- **Phase sanitization** -- Unwrapping, outlier removal, and smoothing via `PhaseSanitizer`. -- **Feature extraction** -- Amplitude, phase, correlation, Doppler, and PSD features. -- **Motion detection** -- Human presence detection with confidence scoring via `MotionDetector`. -- **ruvector integration** -- Graph min-cut (person matching), attention mechanisms (antenna and +- **CSI preprocessing** - Noise removal, windowing, normalization via `CsiProcessor`. +- **Phase sanitization** - Unwrapping, outlier removal, and smoothing via `PhaseSanitizer`. +- **Feature extraction** - Amplitude, phase, correlation, Doppler, and PSD features. +- **Motion detection** - Human presence detection with confidence scoring via `MotionDetector`. +- **ruvector integration** - Graph min-cut (person matching), attention mechanisms (antenna and spatial attention), and sparse solvers (subcarrier interpolation). 
## Quick Start @@ -58,17 +58,17 @@ let processor = CsiProcessor::new(config); ```text wifi-densepose-signal/src/ - lib.rs -- Re-exports, SignalError, prelude - bvp.rs -- Body Velocity Profile (Widar 3.0) - csi_processor.rs -- Core preprocessing pipeline - csi_ratio.rs -- Conjugate multiplication (SpotFi) - features.rs -- Amplitude/phase/Doppler/PSD feature extraction - fresnel.rs -- Fresnel zone diffraction model - hampel.rs -- Hampel outlier filter - motion.rs -- Motion and human presence detection - phase_sanitizer.rs -- Phase unwrapping and sanitization - spectrogram.rs -- Time-frequency CSI spectrograms - subcarrier_selection.rs -- Variance-based subcarrier selection + lib.rs - Re-exports, SignalError, prelude + bvp.rs - Body Velocity Profile (Widar 3.0) + csi_processor.rs - Core preprocessing pipeline + csi_ratio.rs - Conjugate multiplication (SpotFi) + features.rs - Amplitude/phase/Doppler/PSD feature extraction + fresnel.rs - Fresnel zone diffraction model + hampel.rs - Hampel outlier filter + motion.rs - Motion and human presence detection + phase_sanitizer.rs - Phase unwrapping and sanitization + spectrogram.rs - Time-frequency CSI spectrograms + subcarrier_selection.rs - Variance-based subcarrier selection ``` ## Related Crates diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-train/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-train/README.md index 4610f7b07..c83e660f5 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-train/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-train/README.md @@ -18,18 +18,18 @@ subcarrier interpolation APIs needed for data preprocessing and proof verificati ## Features -- **MM-Fi dataset loader** -- Reads the MM-Fi multimodal dataset (NeurIPS 2023) from disk with +- **MM-Fi dataset loader** - Reads the MM-Fi multimodal dataset (NeurIPS 2023) from disk with memory-mapped `.npy` files. 
-- **Synthetic dataset** -- Deterministic, fixed-seed CSI generation for unit tests and proofs. -- **Subcarrier interpolation** -- 114 -> 56 subcarrier compression via `ruvector-solver` sparse +- **Synthetic dataset** - Deterministic, fixed-seed CSI generation for unit tests and proofs. +- **Subcarrier interpolation** - 114 -> 56 subcarrier compression via `ruvector-solver` sparse interpolation with variance-based selection. -- **Loss functions** (`tch-backend`) -- Pose estimation losses including MSE, OKS, and combined +- **Loss functions** (`tch-backend`) - Pose estimation losses including MSE, OKS, and combined multi-task loss. -- **Metrics** (`tch-backend`) -- PCKh, OKS-AP, and per-keypoint evaluation with +- **Metrics** (`tch-backend`) - PCKh, OKS-AP, and per-keypoint evaluation with `ruvector-mincut`-based person matching. -- **Training orchestrator** (`tch-backend`) -- Full training loop with learning rate scheduling, +- **Training orchestrator** (`tch-backend`) - Full training loop with learning rate scheduling, gradient clipping, checkpointing, and reproducible proofs. -- **All 5 ruvector crates** -- `ruvector-mincut`, `ruvector-attn-mincut`, +- **All 5 ruvector crates** - `ruvector-mincut`, `ruvector-attn-mincut`, `ruvector-temporal-tensor`, `ruvector-solver`, and `ruvector-attention` integrated across dataset loading, metrics, and model attention. 
@@ -70,16 +70,16 @@ println!("amplitude shape: {:?}", sample.amplitude.shape()); ```text wifi-densepose-train/src/ - lib.rs -- Re-exports, VERSION - config.rs -- TrainingConfig, hyperparameters, validation - dataset.rs -- CsiDataset trait, MmFiDataset, SyntheticCsiDataset, DataLoader - error.rs -- TrainError, ConfigError, DatasetError, SubcarrierError - subcarrier.rs -- interpolate_subcarriers (114->56), variance-based selection - losses.rs -- (tch) MSE, OKS, multi-task loss [feature-gated] - metrics.rs -- (tch) PCKh, OKS-AP, person matching [feature-gated] - model.rs -- (tch) Model definition with attention [feature-gated] - proof.rs -- (tch) Deterministic training proofs [feature-gated] - trainer.rs -- (tch) Training loop orchestrator [feature-gated] + lib.rs - Re-exports, VERSION + config.rs - TrainingConfig, hyperparameters, validation + dataset.rs - CsiDataset trait, MmFiDataset, SyntheticCsiDataset, DataLoader + error.rs - TrainError, ConfigError, DatasetError, SubcarrierError + subcarrier.rs - interpolate_subcarriers (114->56), variance-based selection + losses.rs - (tch) MSE, OKS, multi-task loss [feature-gated] + metrics.rs - (tch) PCKh, OKS-AP, person matching [feature-gated] + model.rs - (tch) Model definition with attention [feature-gated] + proof.rs - (tch) Deterministic training proofs [feature-gated] + trainer.rs - (tch) Training loop orchestrator [feature-gated] ``` ## Related Crates diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-vitals/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-vitals/README.md index 28a957114..d2fb36e1d 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-vitals/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-vitals/README.md @@ -16,13 +16,13 @@ resource-constrained edge deployments alongside ESP32 hardware. ## Pipeline Stages -1. **Preprocessing** (`CsiVitalPreprocessor`) -- EMA-based static component suppression, +1. 
**Preprocessing** (`CsiVitalPreprocessor`) - EMA-based static component suppression, producing per-subcarrier residuals that isolate body-induced signal variation. -2. **Breathing extraction** (`BreathingExtractor`) -- Bandpass filtering at 0.1--0.5 Hz with +2. **Breathing extraction** (`BreathingExtractor`) - Bandpass filtering at 0.1--0.5 Hz with zero-crossing analysis for respiratory rate estimation. -3. **Heart rate extraction** (`HeartRateExtractor`) -- Bandpass filtering at 0.8--2.0 Hz with +3. **Heart rate extraction** (`HeartRateExtractor`) - Bandpass filtering at 0.8--2.0 Hz with autocorrelation peak detection and inter-subcarrier phase coherence weighting. -4. **Anomaly detection** (`VitalAnomalyDetector`) -- Z-score analysis using Welford running +4. **Anomaly detection** (`VitalAnomalyDetector`) - Z-score analysis using Welford running statistics for real-time clinical alerts (apnea, tachycardia, bradycardia). Results are stored in a `VitalSignStore` with configurable retention for historical trend @@ -80,13 +80,13 @@ if let Some(residuals) = preprocessor.process(&frame) { ```text wifi-densepose-vitals/src/ - lib.rs -- Re-exports, module declarations - types.rs -- CsiFrame, VitalReading, VitalEstimate, VitalStatus - preprocessor.rs -- CsiVitalPreprocessor (EMA static suppression) - breathing.rs -- BreathingExtractor (0.1-0.5 Hz bandpass) - heartrate.rs -- HeartRateExtractor (0.8-2.0 Hz autocorrelation) - anomaly.rs -- VitalAnomalyDetector (Z-score, Welford stats) - store.rs -- VitalSignStore, VitalStats (historical retention) + lib.rs - Re-exports, module declarations + types.rs - CsiFrame, VitalReading, VitalEstimate, VitalStatus + preprocessor.rs - CsiVitalPreprocessor (EMA static suppression) + breathing.rs - BreathingExtractor (0.1-0.5 Hz bandpass) + heartrate.rs - HeartRateExtractor (0.8-2.0 Hz autocorrelation) + anomaly.rs - VitalAnomalyDetector (Z-score, Welford stats) + store.rs - VitalSignStore, VitalStats (historical retention) ``` ## Related 
Crates diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm/README.md index bde62f763..0764abee3 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-wasm/README.md @@ -10,7 +10,7 @@ WebAssembly bindings for running WiFi-DensePose directly in the browser. `wifi-densepose-wasm` compiles the WiFi-DensePose stack to `wasm32-unknown-unknown` and exposes a JavaScript API via [wasm-bindgen](https://rustwasm.github.io/wasm-bindgen/). The primary export is -`MatDashboard` -- a fully client-side disaster response dashboard that manages scan zones, tracks +`MatDashboard` - a fully client-side disaster response dashboard that manages scan zones, tracks survivors, generates triage alerts, and renders to an HTML Canvas element. The crate also provides utility functions (`init`, `getVersion`, `isMatEnabled`, `getTimestamp`) and @@ -18,17 +18,17 @@ a logging bridge that routes Rust `log` output to the browser console. ## Features -- **MatDashboard** -- Create disaster events, add rectangular and circular scan zones, subscribe to +- **MatDashboard** - Create disaster events, add rectangular and circular scan zones, subscribe to survivor-detected and alert-generated callbacks, and render zone/survivor overlays on Canvas. -- **Real-time callbacks** -- Register JavaScript closures for `onSurvivorDetected` and +- **Real-time callbacks** - Register JavaScript closures for `onSurvivorDetected` and `onAlertGenerated` events, called from the Rust event loop. -- **Canvas rendering** -- Draw zone boundaries, survivor markers (colour-coded by triage status), +- **Canvas rendering** - Draw zone boundaries, survivor markers (colour-coded by triage status), and alert indicators directly to a `CanvasRenderingContext2d`. 
-- **WebSocket integration** -- Connect to a sensing server for live CSI data via `web-sys` WebSocket +- **WebSocket integration** - Connect to a sensing server for live CSI data via `web-sys` WebSocket bindings. -- **Panic hook** -- `console_error_panic_hook` provides human-readable stack traces in the browser +- **Panic hook** - `console_error_panic_hook` provides human-readable stack traces in the browser console on panic. -- **Optimised WASM** -- Release profile uses `-O4` wasm-opt with mutable globals for minimal binary +- **Optimised WASM** - Release profile uses `-O4` wasm-opt with mutable globals for minimal binary size. ### Feature flags diff --git a/rust-port/wifi-densepose-rs/crates/wifi-densepose-wifiscan/README.md b/rust-port/wifi-densepose-rs/crates/wifi-densepose-wifiscan/README.md index fe90c6b48..87e62f12c 100644 --- a/rust-port/wifi-densepose-rs/crates/wifi-densepose-wifiscan/README.md +++ b/rust-port/wifi-densepose-rs/crates/wifi-densepose-wifiscan/README.md @@ -18,16 +18,16 @@ pluggable platform adapters. ## Features -- **BSSID registry** -- Tracks observed access points with running RSSI statistics, band/radio +- **BSSID registry** - Tracks observed access points with running RSSI statistics, band/radio type classification, and metadata. Types: `BssidId`, `BssidObservation`, `BssidRegistry`, `BssidEntry`. -- **Netsh adapter** (Tier 1) -- Parses `netsh wlan show networks mode=bssid` output into +- **Netsh adapter** (Tier 1) - Parses `netsh wlan show networks mode=bssid` output into structured `BssidObservation` records. Zero platform dependencies. -- **WLAN API scanner** (Tier 2, `wlanapi` feature) -- Async scanning via the Windows WLAN API +- **WLAN API scanner** (Tier 2, `wlanapi` feature) - Async scanning via the Windows WLAN API with `tokio` integration. 
-- **Multi-AP frame** -- `MultiApFrame` aggregates observations from multiple BSSIDs into a single +- **Multi-AP frame** - `MultiApFrame` aggregates observations from multiple BSSIDs into a single timestamped frame for downstream processing. -- **Sensing pipeline** (`pipeline` feature) -- `WindowsWifiPipeline` orchestrates motion +- **Sensing pipeline** (`pipeline` feature) - `WindowsWifiPipeline` orchestrates motion detection, breathing estimation, attention-weighted AP selection, and location fingerprint matching. @@ -72,16 +72,16 @@ let pipeline = WindowsWifiPipeline::new(); ```text wifi-densepose-wifiscan/src/ - lib.rs -- Re-exports, feature gates + lib.rs - Re-exports, feature gates domain/ - bssid.rs -- BssidId, BssidObservation, BandType, RadioType - registry.rs -- BssidRegistry, BssidEntry, BssidMeta, RunningStats - frame.rs -- MultiApFrame (multi-BSSID aggregated frame) - result.rs -- EnhancedSensingResult - port.rs -- WlanScanPort trait (platform abstraction) - adapter.rs -- NetshBssidScanner (Tier 1), WlanApiScanner (Tier 2) - pipeline.rs -- WindowsWifiPipeline (motion, breathing, attention, fingerprint) - error.rs -- WifiScanError + bssid.rs - BssidId, BssidObservation, BandType, RadioType + registry.rs - BssidRegistry, BssidEntry, BssidMeta, RunningStats + frame.rs - MultiApFrame (multi-BSSID aggregated frame) + result.rs - EnhancedSensingResult + port.rs - WlanScanPort trait (platform abstraction) + adapter.rs - NetshBssidScanner (Tier 1), WlanApiScanner (Tier 2) + pipeline.rs - WindowsWifiPipeline (motion, breathing, attention, fingerprint) + error.rs - WifiScanError ``` ## Related Crates diff --git a/rust-port/wifi-densepose-rs/patches/ruvector-crv/README.md b/rust-port/wifi-densepose-rs/patches/ruvector-crv/README.md index 7c3acb575..915a4bd7f 100644 --- a/rust-port/wifi-densepose-rs/patches/ruvector-crv/README.md +++ b/rust-port/wifi-densepose-rs/patches/ruvector-crv/README.md @@ -54,7 +54,7 @@ accumulated session graph, separating distinct target 
aspects. ## Cross-Session Convergence Multiple sessions targeting the same coordinate can be analyzed for -convergence — agreement between independent viewers strengthens the +convergence - agreement between independent viewers strengthens the signal validity: ```rust diff --git a/scripts/generate-witness-bundle.sh b/scripts/generate-witness-bundle.sh index 915fd5bfc..143ff16f9 100644 --- a/scripts/generate-witness-bundle.sh +++ b/scripts/generate-witness-bundle.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# generate-witness-bundle.sh — Create a self-contained RVF witness bundle +# generate-witness-bundle.sh - Create a self-contained RVF witness bundle # # Produces: witness-bundle-ADR028-.tar.gz # Contains: witness log, ADR, proof hash, test results, firmware manifest, @@ -42,7 +42,7 @@ mkdir -p "$BUNDLE_DIR/proof" cp "$REPO_ROOT/v1/data/proof/verify.py" "$BUNDLE_DIR/proof/" cp "$REPO_ROOT/v1/data/proof/expected_features.sha256" "$BUNDLE_DIR/proof/" cp "$REPO_ROOT/v1/data/proof/generate_reference_signal.py" "$BUNDLE_DIR/proof/" -# Reference signal is large (~10 MB) — include metadata only +# Reference signal is large (~10 MB) - include metadata only python3 -c " import json, os with open('$REPO_ROOT/v1/data/proof/sample_csi_data.json') as f: @@ -53,7 +53,7 @@ meta['first_frame_keys'] = list(d['frames'][0].keys()) meta['file_size_bytes'] = os.path.getsize('$REPO_ROOT/v1/data/proof/sample_csi_data.json') with open('$BUNDLE_DIR/proof/reference_signal_metadata.json', 'w') as f: json.dump(meta, f, indent=2) -" 2>/dev/null && echo " Reference signal metadata extracted." || echo " (Python not available — metadata skipped)" +" 2>/dev/null && echo " Reference signal metadata extracted." || echo " (Python not available - metadata skipped)" # --------------------------------------------------------------- # 3. 
Run Rust tests and capture output @@ -90,7 +90,7 @@ if [ -d "$REPO_ROOT/firmware/esp32-csi-node/main" ]; then > "$BUNDLE_DIR/firmware-manifest/source-hashes.txt" 2>/dev/null || true echo " Firmware source files hashed." else - echo " (No firmware directory found — skipped)" + echo " (No firmware directory found - skipped)" fi # --------------------------------------------------------------- @@ -113,7 +113,7 @@ cat "$BUNDLE_DIR/crate-manifest/versions.txt" echo "[7/7] Creating VERIFY.sh..." cat > "$BUNDLE_DIR/VERIFY.sh" << 'VERIFY_EOF' #!/usr/bin/env bash -# VERIFY.sh — Recipient verification script for WiFi-DensePose Witness Bundle +# VERIFY.sh - Recipient verification script for WiFi-DensePose Witness Bundle # # Run this script after cloning the repository at the witnessed commit. # It re-runs all verification steps and compares against the bundled results. @@ -192,7 +192,7 @@ echo " Results: ${PASS_COUNT} passed, ${FAIL_COUNT} failed" if [ "$FAIL_COUNT" -eq 0 ]; then echo " VERDICT: ALL CHECKS PASSED" else - echo " VERDICT: ${FAIL_COUNT} CHECK(S) FAILED — investigate" + echo " VERDICT: ${FAIL_COUNT} CHECK(S) FAILED - investigate" fi echo "================================================================" VERIFY_EOF diff --git a/scripts/install-qemu.sh b/scripts/install-qemu.sh index 0cc7089d2..a37697414 100644 --- a/scripts/install-qemu.sh +++ b/scripts/install-qemu.sh @@ -1,5 +1,5 @@ #!/bin/bash -# install-qemu.sh — Install QEMU with ESP32-S3 support (Espressif fork) +# install-qemu.sh - Install QEMU with ESP32-S3 support (Espressif fork) # Usage: bash scripts/install-qemu.sh [OPTIONS] set -euo pipefail @@ -25,7 +25,7 @@ QEMU_REPO="https://github.com/espressif/qemu.git" # ── Usage ───────────────────────────────────────────────────────────────────── usage() { cat <&1 | tail -20 if [ ! 
-x "$BUILD_DIR/qemu-system-xtensa" ]; then - err "Build failed — qemu-system-xtensa binary not found" + err "Build failed - qemu-system-xtensa binary not found" err "Troubleshooting:" err " 1. Check build output above for errors" err " 2. Ensure all dependencies are installed: re-run without --skip-deps" diff --git a/scripts/qemu-chaos-test.sh b/scripts/qemu-chaos-test.sh index 7cdd57766..d87b1f2e8 100755 --- a/scripts/qemu-chaos-test.sh +++ b/scripts/qemu-chaos-test.sh @@ -1,17 +1,17 @@ #!/bin/bash -# QEMU Chaos / Fault Injection Test Runner — ADR-061 Layer 9 +# QEMU Chaos / Fault Injection Test Runner - ADR-061 Layer 9 # # Launches firmware under QEMU and injects a series of faults to verify # the firmware's resilience. Each fault is injected via the QEMU monitor # socket (or GDB stub), followed by a recovery window and health check. # # Fault types: -# 1. wifi_kill — Pause/resume VM to simulate WiFi reconnect -# 2. ring_flood — Inject 1000 rapid mock frames (ring buffer stress) -# 3. heap_exhaust — Write to heap metadata to simulate low memory -# 4. timer_starvation — Pause VM for 500ms to starve FreeRTOS timers -# 5. corrupt_frame — Inject a CSI frame with bad magic bytes -# 6. nvs_corrupt — Write garbage to NVS flash region +# 1. wifi_kill - Pause/resume VM to simulate WiFi reconnect +# 2. ring_flood - Inject 1000 rapid mock frames (ring buffer stress) +# 3. heap_exhaust - Write to heap metadata to simulate low memory +# 4. timer_starvation - Pause VM for 500ms to starve FreeRTOS timers +# 5. corrupt_frame - Inject a CSI frame with bad magic bytes +# 6. 
nvs_corrupt - Write garbage to NVS flash region # # Environment variables: # QEMU_PATH - Path to qemu-system-xtensa (default: qemu-system-xtensa) @@ -20,10 +20,10 @@ # FAULT_WAIT - Seconds to wait after fault injection (default: 5) # # Exit codes: -# 0 PASS — all checks passed -# 1 WARN — non-critical checks failed -# 2 FAIL — critical checks failed -# 3 FATAL — build error, crash, or infrastructure failure +# 0 PASS - all checks passed +# 1 WARN - non-critical checks failed +# 2 FAIL - critical checks failed +# 3 FATAL - build error, crash, or infrastructure failure # ── Help ────────────────────────────────────────────────────────────── usage() { @@ -57,10 +57,10 @@ Examples: FLASH_IMAGE=/path/to/image.bin ./qemu-chaos-test.sh Exit codes: - 0 PASS — all checks passed - 1 WARN — non-critical checks failed - 2 FAIL — critical checks failed - 3 FATAL — build error, crash, or infrastructure failure + 0 PASS - all checks passed + 1 WARN - non-critical checks failed + 2 FAIL - critical checks failed + 3 FATAL - build error, crash, or infrastructure failure HELP exit 0 } @@ -193,7 +193,7 @@ inject_nvs_corrupt() { # Pre-flight checks # ────────────────────────────────────────────────────────────────────── -echo "=== QEMU Chaos Test Runner — ADR-061 Layer 9 ===" +echo "=== QEMU Chaos Test Runner - ADR-061 Layer 9 ===" echo "QEMU binary: $QEMU_BIN" echo "Flash image: $FLASH_IMAGE" echo "Boot timeout: ${BOOT_TIMEOUT}s" @@ -344,9 +344,9 @@ for fault in "${FAULTS[@]}"; do --after-fault "$fault" || fault_exit=$? 
case "$fault_exit" in - 0) echo " [result] HEALTHY — firmware recovered gracefully" ;; - 1) echo " [result] DEGRADED — firmware running but with issues" ;; - *) echo " [result] UNHEALTHY — firmware in bad state" ;; + 0) echo " [result] HEALTHY - firmware recovered gracefully" ;; + 1) echo " [result] DEGRADED - firmware running but with issues" ;; + *) echo " [result] UNHEALTHY - firmware in bad state" ;; esac FAULT_RESULTS+=("${fault}:${fault_exit}") diff --git a/scripts/qemu-cli.sh b/scripts/qemu-cli.sh index 43ac39008..97372d645 100644 --- a/scripts/qemu-cli.sh +++ b/scripts/qemu-cli.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # ============================================================================ -# qemu-cli.sh — Unified QEMU ESP32-S3 testing CLI (ADR-061) +# qemu-cli.sh - Unified QEMU ESP32-S3 testing CLI (ADR-061) # Version: 1.0.0 # # Single entry point for all QEMU testing operations. @@ -58,7 +58,7 @@ detect_python() { # --- Command: help --------------------------------------------------------- cmd_help() { cat < [options] @@ -251,7 +251,7 @@ cmd_health() { # --- Command: status -------------------------------------------------------- cmd_status() { - # Status should never fail — disable errexit locally + # Status should never fail - disable errexit locally set +e echo -e "${BOLD}=== QEMU ESP32-S3 Testing Status ===${RST}" echo "" @@ -324,7 +324,7 @@ _qemu_cli_completions() { local cmds="install test mesh swarm snapshot chaos fuzz nvs health status help" local cur="${COMP_WORDS[COMP_CWORD]}" if [[ $COMP_CWORD -eq 1 ]]; then - COMPREPLY=( $(compgen -W "$cmds" -- "$cur") ) + COMPREPLY=( $(compgen -W "$cmds" -- "$cur") ) fi } complete -F _qemu_cli_completions qemu-cli.sh diff --git a/scripts/qemu-esp32s3-test.sh b/scripts/qemu-esp32s3-test.sh index d5420cca6..1c93c42a8 100755 --- a/scripts/qemu-esp32s3-test.sh +++ b/scripts/qemu-esp32s3-test.sh @@ -12,10 +12,10 @@ # NVS_BIN - Path to a pre-built NVS binary to inject (optional) # # Exit codes: -# 0 PASS — all
checks passed -# 1 WARN — non-critical checks failed -# 2 FAIL — critical checks failed -# 3 FATAL — build error, crash, or infrastructure failure +# 0 PASS - all checks passed +# 1 WARN - non-critical checks failed +# 2 FAIL - critical checks failed +# 3 FATAL - build error, crash, or infrastructure failure # ── Help ────────────────────────────────────────────────────────────── usage() { @@ -41,10 +41,10 @@ Examples: QEMU_PATH=/opt/qemu/bin/qemu-system-xtensa QEMU_TIMEOUT=120 ./qemu-esp32s3-test.sh Exit codes: - 0 PASS — all checks passed - 1 WARN — non-critical checks failed - 2 FAIL — critical checks failed - 3 FATAL — build error, crash, or infrastructure failure + 0 PASS - all checks passed + 1 WARN - non-critical checks failed + 2 FAIL - critical checks failed + 3 FATAL - build error, crash, or infrastructure failure HELP exit 0 } @@ -194,7 +194,7 @@ fi echo "------- End QEMU output -------" echo "" -# timeout returns 124 when the process is killed by timeout — that's expected +# timeout returns 124 when the process is killed by timeout - that's expected if [ "$QEMU_EXIT" -eq 124 ]; then echo "QEMU exited via timeout (expected for firmware that loops forever)." 
elif [ "$QEMU_EXIT" -ne 0 ]; then diff --git a/scripts/qemu-mesh-test.sh b/scripts/qemu-mesh-test.sh index 7dc25fc75..fffc9e70a 100644 --- a/scripts/qemu-mesh-test.sh +++ b/scripts/qemu-mesh-test.sh @@ -25,10 +25,10 @@ # - Rust workspace with wifi-densepose-hardware crate (aggregator binary) # # Exit codes: -# 0 PASS — all checks passed -# 1 WARN — non-critical checks failed -# 2 FAIL — critical checks failed -# 3 FATAL — build error, crash, or infrastructure failure +# 0 PASS - all checks passed +# 1 WARN - non-critical checks failed +# 2 FAIL - critical checks failed +# 3 FATAL - build error, crash, or infrastructure failure # ── Help ────────────────────────────────────────────────────────────── usage() { @@ -62,10 +62,10 @@ Examples: sudo SKIP_BUILD=1 ./qemu-mesh-test.sh 4 Exit codes: - 0 PASS — all checks passed - 1 WARN — non-critical checks failed - 2 FAIL — critical checks failed - 3 FATAL — build error, crash, or infrastructure failure + 0 PASS - all checks passed + 1 WARN - non-critical checks failed + 2 FAIL - critical checks failed + 3 FATAL - build error, crash, or infrastructure failure HELP exit 0 } @@ -161,7 +161,7 @@ fi mkdir -p "$BUILD_DIR" # --------------------------------------------------------------------------- -# Cleanup trap — runs on EXIT regardless of success/failure +# Cleanup trap - runs on EXIT regardless of success/failure # --------------------------------------------------------------------------- QEMU_PIDS=() AGG_PID="" @@ -315,7 +315,7 @@ echo "[5/6] Starting aggregator and $N_NODES QEMU nodes..." 
# Start Rust aggregator in background echo " Starting aggregator: listen=0.0.0.0:$AGG_PORT expect-nodes=$N_NODES" cargo run --manifest-path "$RUST_DIR/Cargo.toml" \ - -p wifi-densepose-hardware --bin aggregator -- \ + -p wifi-densepose-hardware --bin aggregator -- \ --listen "0.0.0.0:$AGG_PORT" \ --expect-nodes "$N_NODES" \ --output "$RESULTS_FILE" \ diff --git a/scripts/qemu-snapshot-test.sh b/scripts/qemu-snapshot-test.sh index 9ce8fa4ae..ca6921b93 100755 --- a/scripts/qemu-snapshot-test.sh +++ b/scripts/qemu-snapshot-test.sh @@ -1,5 +1,5 @@ #!/bin/bash -# QEMU Snapshot-Based Test Runner — ADR-061 Layer 8 +# QEMU Snapshot-Based Test Runner - ADR-061 Layer 8 # # Uses QEMU VM snapshots to accelerate repeated test runs. # Instead of rebooting and re-initializing for each test scenario, @@ -16,10 +16,10 @@ # SKIP_SNAPSHOT - Set to "1" to run without snapshots (baseline timing) # # Exit codes: -# 0 PASS — all checks passed -# 1 WARN — non-critical checks failed -# 2 FAIL — critical checks failed -# 3 FATAL — build error, crash, or infrastructure failure +# 0 PASS - all checks passed +# 1 WARN - non-critical checks failed +# 2 FAIL - critical checks failed +# 3 FATAL - build error, crash, or infrastructure failure # ── Help ────────────────────────────────────────────────────────────── usage() { @@ -45,10 +45,10 @@ Examples: FLASH_IMAGE=/path/to/image.bin ./qemu-snapshot-test.sh Exit codes: - 0 PASS — all checks passed - 1 WARN — non-critical checks failed - 2 FAIL — critical checks failed - 3 FATAL — build error, crash, or infrastructure failure + 0 PASS - all checks passed + 1 WARN - non-critical checks failed + 2 FAIL - critical checks failed + 3 FATAL - build error, crash, or infrastructure failure HELP exit 0 } @@ -193,7 +193,7 @@ restore_snapshot() { # Pre-flight checks # ────────────────────────────────────────────────────────────────────── -echo "=== QEMU Snapshot Test Runner — ADR-061 Layer 8 ===" +echo "=== QEMU Snapshot Test Runner - ADR-061 Layer 8 ===" echo 
"QEMU binary: $QEMU_BIN" echo "Flash image: $FLASH_IMAGE" echo "Timeout/test: ${TIMEOUT_SEC}s" diff --git a/ui/.eslintrc.json b/ui/.eslintrc.json new file mode 100644 index 000000000..8e5a89e77 --- /dev/null +++ b/ui/.eslintrc.json @@ -0,0 +1,33 @@ +{ + "env": { + "browser": true, + "es2022": true + }, + "parserOptions": { + "ecmaVersion": 2022, + "sourceType": "module" + }, + "rules": { + "no-unused-vars": ["warn", { "argsIgnorePattern": "^_" }], + "no-undef": "error", + "no-var": "error", + "prefer-const": "warn", + "eqeqeq": ["error", "always"], + "no-eval": "error", + "no-implied-eval": "error", + "no-new-func": "error", + "no-script-url": "error", + "no-alert": "warn", + "no-console": ["warn", { "allow": ["warn", "error", "info"] }], + "curly": ["warn", "multi-line"], + "no-throw-literal": "error", + "prefer-template": "warn", + "no-duplicate-imports": "error" + }, + "ignorePatterns": [ + "node_modules/", + "mobile/", + "vendor/", + "*.min.js" + ] +} diff --git a/ui/README.md b/ui/README.md index e337ad5a0..4a7bbe341 100644 --- a/ui/README.md +++ b/ui/README.md @@ -96,12 +96,12 @@ Simulated frames include a `_simulated: true` marker so code can detect syntheti ### Rust Sensing Server (primary) The Rust-based `wifi-densepose-sensing-server` serves the UI and provides: -- `GET /health` — server health -- `GET /api/v1/sensing/latest` — latest sensing features -- `GET /api/v1/vital-signs` — vital sign estimates (HR/RR) -- `GET /api/v1/model/info` — RVF model container info -- `WS /ws/sensing` — real-time sensing data stream -- `WS /api/v1/stream/pose` — real-time pose keypoint stream +- `GET /health` - server health +- `GET /api/v1/sensing/latest` - latest sensing features +- `GET /api/v1/vital-signs` - vital sign estimates (HR/RR) +- `GET /api/v1/model/info` - RVF model container info +- `WS /ws/sensing` - real-time sensing data stream +- `WS /api/v1/stream/pose` - real-time pose keypoint stream ### Python FastAPI (legacy) The original Python backend on port 8000 
is still supported. The UI auto-detects which backend is available via `backend-detector.js`. diff --git a/ui/app.js b/ui/app.js index a1c94ded1..b13b9f804 100644 --- a/ui/app.js +++ b/ui/app.js @@ -10,6 +10,14 @@ import { wsService } from './services/websocket.service.js'; import { healthService } from './services/health.service.js'; import { sensingService } from './services/sensing.service.js'; import { backendDetector } from './utils/backend-detector.js'; +import { KeyboardShortcuts } from './utils/keyboard-shortcuts.js'; +import { PerfMonitor } from './utils/perf-monitor.js'; +import { toastManager } from './utils/toast.js'; +import { ThemeToggle } from './utils/theme-toggle.js'; +import { i18n } from './utils/i18n.js'; +import { ScreenshotTool } from './utils/screenshot.js'; +import { UptimeClock } from './utils/uptime-clock.js'; +import { QuickSettings } from './utils/quick-settings.js'; class WiFiDensePoseApp { constructor() { @@ -30,10 +38,13 @@ class WiFiDensePoseApp { // Initialize UI components this.initializeComponents(); - + + // Initialize enhancements + this.initializeEnhancements(); + // Set up global event listeners this.setupEventListeners(); - + this.isInitialized = true; console.log('WiFi DensePose UI initialized successfully'); @@ -74,7 +85,7 @@ class WiFiDensePoseApp { this.showBackendStatus('Connected to Rust sensing server', 'success'); } catch (error) { console.warn('⚠️ Backend not available:', error.message); - this.showBackendStatus('Backend unavailable — start sensing-server', 'warning'); + this.showBackendStatus('Backend unavailable - start sensing-server', 'warning'); } // Start the sensing WebSocket service early so the dashboard and @@ -167,6 +178,42 @@ class WiFiDensePoseApp { } } + // Initialize enhancement modules (keyboard shortcuts, perf monitor, toast, theme) + initializeEnhancements() { + // Toast notifications + toastManager.init(); + + // Theme toggle + this.themeToggle = new ThemeToggle(); + this.themeToggle.init(); + + // 
Performance monitor + this.perfMonitor = new PerfMonitor(); + this.perfMonitor.init(); + + // Screenshot tool + this.screenshotTool = new ScreenshotTool(); + this.screenshotTool.init(); + + // Uptime clock + this.uptimeClock = new UptimeClock(); + this.uptimeClock.init(); + + // Quick settings panel + this.quickSettings = new QuickSettings(this); + this.quickSettings.init(); + + // Internationalization (EN/PL) + i18n.init(); + + // Keyboard shortcuts (pass app reference for tab switching) + this.keyboardShortcuts = new KeyboardShortcuts(this); + this.keyboardShortcuts.register('s', 'Take screenshot', () => { + document.dispatchEvent(new CustomEvent('take-screenshot')); + }); + this.keyboardShortcuts.init(); + } + // Handle tab changes handleTabChange(newTab, oldTab) { console.log(`Tab changed from ${oldTab} to ${newTab}`); @@ -272,45 +319,17 @@ class WiFiDensePoseApp { }); } - // Show backend status notification + // Show backend status notification (uses enhanced toast system) showBackendStatus(message, type) { - // Create status notification if it doesn't exist - let statusToast = document.getElementById('backendStatusToast'); - if (!statusToast) { - statusToast = document.createElement('div'); - statusToast.id = 'backendStatusToast'; - statusToast.className = 'backend-status-toast'; - document.body.appendChild(statusToast); - } - - statusToast.textContent = message; - statusToast.className = `backend-status-toast ${type}`; - statusToast.classList.add('show'); - - // Auto-hide success messages, keep warnings and errors longer - const timeout = type === 'success' ? 3000 : 8000; - setTimeout(() => { - statusToast.classList.remove('show'); - }, timeout); + const toastType = type === 'success' ? 'success' : 'warning'; + toastManager[toastType](message, { + duration: type === 'success' ? 
3000 : 8000 + }); } - // Show global error message + // Show global error message (uses enhanced toast system) showGlobalError(message) { - // Create error toast if it doesn't exist - let errorToast = document.getElementById('globalErrorToast'); - if (!errorToast) { - errorToast = document.createElement('div'); - errorToast.id = 'globalErrorToast'; - errorToast.className = 'error-toast'; - document.body.appendChild(errorToast); - } - - errorToast.textContent = message; - errorToast.classList.add('show'); - - setTimeout(() => { - errorToast.classList.remove('show'); - }, 5000); + toastManager.error(message, { duration: 6000 }); } // Clean up resources @@ -326,9 +345,19 @@ class WiFiDensePoseApp { // Disconnect all WebSocket connections wsService.disconnectAll(); - + // Stop health monitoring healthService.dispose(); + + // Dispose enhancements + if (this.keyboardShortcuts) this.keyboardShortcuts.dispose(); + if (this.perfMonitor) this.perfMonitor.dispose(); + if (this.themeToggle) this.themeToggle.dispose(); + if (this.screenshotTool) this.screenshotTool.dispose(); + if (this.uptimeClock) this.uptimeClock.dispose(); + if (this.quickSettings) this.quickSettings.dispose(); + i18n.dispose(); + toastManager.dispose(); } // Public API diff --git a/ui/components/DashboardTab.js b/ui/components/DashboardTab.js index 9ecd02262..1bb1bac01 100644 --- a/ui/components/DashboardTab.js +++ b/ui/components/DashboardTab.js @@ -52,7 +52,7 @@ export class DashboardTab { this.updateStats(stats); } catch (error) { - // DensePose API may not be running (sensing-only mode) — fail silently + // DensePose API may not be running (sensing-only mode) - fail silently console.log('Dashboard: DensePose API not available (sensing-only mode)'); } } @@ -68,7 +68,7 @@ export class DashboardTab { this._sensingUnsub = sensingService.onStateChange(() => { this.updateDataSourceIndicator(); }); - // Also update on data — catches source changes mid-stream + // Also update on data - catches source changes 
mid-stream this._sensingDataUnsub = sensingService.onData(() => { this.updateDataSourceIndicator(); }); diff --git a/ui/components/LiveDemoTab.js b/ui/components/LiveDemoTab.js index 4dec767d2..18c06a2bb 100644 --- a/ui/components/LiveDemoTab.js +++ b/ui/components/LiveDemoTab.js @@ -146,7 +146,7 @@ export class LiveDemoTab { // Create enhanced structure if it doesn't exist const enhancedHTML = `
- +
Detecting data source...
@@ -1035,7 +1035,7 @@ export class LiveDemoTab { stopBtn.addEventListener('click', () => this.stopDemo()); } - // Offline demo button — runs client-side animated demo (no server needed) + // Offline demo button - runs client-side animated demo (no server needed) const offlineDemoBtn = this.container.querySelector('#run-offline-demo'); if (offlineDemoBtn) { offlineDemoBtn.addEventListener('click', () => { diff --git a/ui/components/SensingTab.js b/ui/components/SensingTab.js index 6c3115c12..fe25de5e7 100644 --- a/ui/components/SensingTab.js +++ b/ui/components/SensingTab.js @@ -1,5 +1,5 @@ /** - * SensingTab — Live WiFi Sensing Visualization + * SensingTab - Live WiFi Sensing Visualization * * Connects to the sensing WebSocket service and renders: * 1. A 3D Gaussian-splat signal field (via gaussian-splats.js) @@ -34,7 +34,7 @@ export class SensingTab { this.container.innerHTML = `

Live WiFi Sensing

- +
RECONNECTING... diff --git a/ui/components/TabManager.js b/ui/components/TabManager.js index d559c2eac..c2d352976 100644 --- a/ui/components/TabManager.js +++ b/ui/components/TabManager.js @@ -19,6 +19,33 @@ export class TabManager { tab.addEventListener('click', () => this.switchTab(tab)); }); + // Arrow key navigation within tab bar (WCAG) + const nav = this.container.querySelector('.nav-tabs'); + if (nav) { + nav.addEventListener('keydown', (e) => { + const buttonTabs = this.tabs.filter(t => t.tagName === 'BUTTON' && !t.disabled); + const currentIndex = buttonTabs.indexOf(document.activeElement); + if (currentIndex === -1) return; + + let nextIndex = -1; + if (e.key === 'ArrowRight' || e.key === 'ArrowDown') { + nextIndex = (currentIndex + 1) % buttonTabs.length; + } else if (e.key === 'ArrowLeft' || e.key === 'ArrowUp') { + nextIndex = (currentIndex - 1 + buttonTabs.length) % buttonTabs.length; + } else if (e.key === 'Home') { + nextIndex = 0; + } else if (e.key === 'End') { + nextIndex = buttonTabs.length - 1; + } + + if (nextIndex >= 0) { + e.preventDefault(); + buttonTabs[nextIndex].focus(); + this.switchTab(buttonTabs[nextIndex]); + } + }); + } + // Activate first tab if none active const activeTab = this.tabs.find(tab => tab.classList.contains('active')); if (activeTab) { @@ -36,14 +63,22 @@ export class TabManager { return; } - // Update tab states + // Update tab states and ARIA attributes this.tabs.forEach(tab => { - tab.classList.toggle('active', tab === tabElement); + const isActive = tab === tabElement; + tab.classList.toggle('active', isActive); + if (tab.hasAttribute('aria-selected')) { + tab.setAttribute('aria-selected', String(isActive)); + } }); - // Update content visibility + // Update content visibility and ARIA this.tabContents.forEach(content => { - content.classList.toggle('active', content.id === tabId); + const isActive = content.id === tabId; + content.classList.toggle('active', isActive); + if (content.hasAttribute('role')) { + 
content.setAttribute('aria-hidden', String(!isActive)); + } }); // Update active tab diff --git a/ui/components/gaussian-splats.js b/ui/components/gaussian-splats.js index ecab6e481..2a4d6dfdd 100644 --- a/ui/components/gaussian-splats.js +++ b/ui/components/gaussian-splats.js @@ -87,7 +87,7 @@ export class GaussianSplatRenderer { this.scene = new THREE.Scene(); this.scene.background = new THREE.Color(0x0a0a12); - // Camera — perspective looking down at the room + // Camera - perspective looking down at the room this.camera = new THREE.PerspectiveCamera(55, this.width / this.height, 0.1, 200); this.camera.position.set(0, 14, 14); this.camera.lookAt(0, 0, 0); @@ -183,14 +183,14 @@ export class GaussianSplatRenderer { } _createNodeMarkers(THREE) { - // Router at center — green sphere + // Router at center - green sphere const routerGeo = new THREE.SphereGeometry(0.3, 16, 16); const routerMat = new THREE.MeshBasicMaterial({ color: 0x00ff88, transparent: true, opacity: 0.8 }); this.routerMarker = new THREE.Mesh(routerGeo, routerMat); this.routerMarker.position.set(0, 0.5, 0); this.scene.add(this.routerMarker); - // ESP32 node — cyan sphere (default position, updated from data) + // ESP32 node - cyan sphere (default position, updated from data) const nodeGeo = new THREE.SphereGeometry(0.25, 16, 16); const nodeMat = new THREE.MeshBasicMaterial({ color: 0x00ccff, transparent: true, opacity: 0.8 }); this.nodeMarker = new THREE.Mesh(nodeGeo, nodeMat); @@ -303,7 +303,7 @@ export class GaussianSplatRenderer { const signalField = data.signal_field || {}; const nodes = data.nodes || []; - // -- Update signal field splats ---------------------------------------- + // - Update signal field splats ---------------------------------------- if (signalField.values && this.fieldPoints) { const geo = this.fieldPoints.geometry; const clr = geo.attributes.splatColor.array; @@ -327,7 +327,7 @@ export class GaussianSplatRenderer { geo.attributes.splatOpacity.needsUpdate = true; } - // -- 
Update body blob -------------------------------------------------- + // - Update body blob -------------------------------------------------- if (this.bodyBlob) { const bGeo = this.bodyBlob.geometry; const bOpac = bGeo.attributes.splatOpacity.array; @@ -369,7 +369,7 @@ export class GaussianSplatRenderer { bGeo.attributes.splatSize.needsUpdate = true; } - // -- Update node positions --------------------------------------------- + // - Update node positions --------------------------------------------- if (nodes.length > 0 && nodes[0].position) { const pos = nodes[0].position; this.nodeMarker.position.set(pos[0], 0.5, pos[2]); diff --git a/ui/config/api.config.js b/ui/config/api.config.js index a8109182d..6c4f98b0f 100644 --- a/ui/config/api.config.js +++ b/ui/config/api.config.js @@ -15,7 +15,7 @@ export const API_CONFIG = { // Mock server configuration (only for testing) MOCK_SERVER: { ENABLED: false, // Set to true only for testing without backend - AUTO_DETECT: false, // Disabled — sensing tab uses its own WebSocket on :8765 + AUTO_DETECT: false, // Disabled - sensing tab uses its own WebSocket on :8765 }, // API Endpoints diff --git a/ui/index.html b/ui/index.html index a68dc7990..f139a7684 100644 --- a/ui/index.html +++ b/ui/index.html @@ -7,36 +7,39 @@ + + Skip to main content +
-
+ -