diff --git a/.github/workflows/thermorust-ci.yml b/.github/workflows/thermorust-ci.yml
new file mode 100644
index 000000000..eeb7739aa
--- /dev/null
+++ b/.github/workflows/thermorust-ci.yml
@@ -0,0 +1,62 @@
+name: thermorust CI
+
+on:
+ push:
+ paths:
+ - "crates/thermorust/**"
+ - ".github/workflows/thermorust-ci.yml"
+ pull_request:
+ paths:
+ - "crates/thermorust/**"
+ - ".github/workflows/thermorust-ci.yml"
+
+env:
+ CARGO_TERM_COLOR: always
+ RUSTFLAGS: "-D warnings"
+
+jobs:
+ test:
+ name: Test (${{ matrix.os }})
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install Rust stable
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ components: clippy, rustfmt
+
+ - name: Cache cargo registry
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ target
+ key: ${{ runner.os }}-cargo-thermorust-${{ hashFiles('crates/thermorust/Cargo.toml') }}
+ restore-keys: ${{ runner.os }}-cargo-thermorust-
+
+ - name: Check formatting
+ run: cargo fmt --package thermorust -- --check
+
+ - name: Clippy
+ run: cargo clippy --package thermorust --all-targets -- -D warnings
+
+ - name: Build
+ run: cargo build --package thermorust
+
+ - name: Run tests
+ run: cargo test --package thermorust
+
+ bench-check:
+ name: Benchmarks compile
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: dtolnay/rust-toolchain@stable
+ - name: Check benchmarks compile
+ run: cargo bench --package thermorust --no-run
diff --git a/Cargo.lock b/Cargo.lock
index f6ecc599d..1083aa9ad 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8374,6 +8374,13 @@ dependencies = [
"wasm-bindgen-test",
]
+[[package]]
+name = "ruvector-dither"
+version = "0.1.0"
+dependencies = [
+ "criterion 0.5.1",
+]
+
[[package]]
name = "ruvector-domain-expansion"
version = "2.0.5"
@@ -10904,6 +10911,15 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
+[[package]]
+name = "thermorust"
+version = "0.1.0"
+dependencies = [
+ "criterion 0.5.1",
+ "rand 0.8.5",
+ "rand_distr 0.4.3",
+]
+
[[package]]
name = "thiserror"
version = "1.0.69"
diff --git a/Cargo.toml b/Cargo.toml
index cf983c725..19a1fb777 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -112,6 +112,8 @@ members = [
"crates/ruvector-graph-transformer-node",
"examples/rvf-kernel-optimized",
"examples/verified-applications",
+ "crates/thermorust",
+ "crates/ruvector-dither",
"crates/ruvector-robotics",
"examples/robotics",
]
diff --git a/README.md b/README.md
index 468739bfd..4246d5fd5 100644
--- a/README.md
+++ b/README.md
@@ -1567,6 +1567,7 @@ let syndrome = gate.assess_coherence(&quantum_state)?;
| [ruvector-sparse-inference-wasm](./crates/ruvector-sparse-inference-wasm) | WASM bindings for sparse inference | [](https://crates.io/crates/ruvector-sparse-inference-wasm) |
| [ruvector-hyperbolic-hnsw](./crates/ruvector-hyperbolic-hnsw) | HNSW in hyperbolic space (Poincaré/Lorentz) | [](https://crates.io/crates/ruvector-hyperbolic-hnsw) |
| [ruvector-hyperbolic-hnsw-wasm](./crates/ruvector-hyperbolic-hnsw-wasm) | WASM bindings for hyperbolic HNSW | [](https://crates.io/crates/ruvector-hyperbolic-hnsw-wasm) |
+| [ruvector-dither](./crates/ruvector-dither) | Deterministic golden-ratio and pi-digit dithering for quantization (`no_std`) | [](https://crates.io/crates/ruvector-dither) |
### FPGA & Hardware Acceleration
@@ -1588,6 +1589,7 @@ let syndrome = gate.assess_coherence(&quantum_state)?;
| [ruvector-exotic-wasm](./crates/ruvector-exotic-wasm) | Exotic AI primitives (strange loops, time crystals) | [](https://crates.io/crates/ruvector-exotic-wasm) |
| [ruvector-attention-unified-wasm](./crates/ruvector-attention-unified-wasm) | Unified 18+ attention mechanisms (Neural, DAG, Mamba SSM) | [](https://crates.io/crates/ruvector-attention-unified-wasm) |
| [micro-hnsw-wasm](./crates/micro-hnsw-wasm) | Neuromorphic HNSW with spiking neurons (11.8KB WASM) | [](https://crates.io/crates/micro-hnsw-wasm) |
+| [thermorust](./crates/thermorust) | Thermodynamic neural motif engine — Ising/soft-spin Hamiltonians, Langevin dynamics, Landauer dissipation | [](https://crates.io/crates/thermorust) |
**Bio-inspired features:**
- **Spiking Neural Networks (SNNs)** — 10-50x energy efficiency vs traditional ANNs
@@ -1595,6 +1597,38 @@ let syndrome = gate.assess_coherence(&quantum_state)?;
- **MicroLoRA** — Sub-microsecond fine-tuning for per-operator learning
- **Mamba SSM** — State Space Model attention for linear-time sequences
+### Cognitive Robotics
+
+
+Perception, planning, behavior trees, and swarm coordination for autonomous robots
+
+| Crate | Description | crates.io |
+|-------|-------------|-----------|
+| [ruvector-robotics](./crates/ruvector-robotics) | Cognitive robotics platform — perception, A* planning, behavior trees, swarm coordination | [](https://crates.io/crates/ruvector-robotics) |
+
+**Modules:**
+
+| Module | What It Does |
+|--------|--------------|
+| **bridge** | OccupancyGrid, PointCloud, SensorFrame, SceneGraph data types with spatial kNN |
+| **perception** | Scene-graph construction from point clouds, obstacle detection pipeline |
+| **planning** | A* grid search (octile heuristic) and potential-field velocity commands |
+| **cognitive** | Perceive-think-act-learn loop with utility-based reasoning |
+| **domain_expansion** | Cross-domain transfer learning via Meta Thompson Sampling and Beta priors |
+
+**Key features:** 290 tests, clippy-clean, `no_std`-friendly types, optional `domain-expansion` feature flag for cross-domain transfer, pluggable `PotentialFieldConfig` for obstacle avoidance, Byzantine-tolerant swarm coordination via `ruvector-domain-expansion`.
+
+```rust
+use ruvector_robotics::planning::{astar, potential_field, PotentialFieldConfig};
+use ruvector_robotics::bridge::OccupancyGrid;
+
+let grid = OccupancyGrid::new(100, 100, 0.1);
+let path = astar(&grid, (5, 5), (90, 90))?;
+let cmd = potential_field(&[0.0, 0.0, 0.0], &[5.0, 3.0, 0.0], &[], &PotentialFieldConfig::default());
+```
+
+
+
### Self-Learning (SONA)
| Crate | Description | crates.io |
diff --git a/crates/cognitum-gate-kernel/src/canonical_witness.rs b/crates/cognitum-gate-kernel/src/canonical_witness.rs
index 4bf6d9f7f..324f677e1 100644
--- a/crates/cognitum-gate-kernel/src/canonical_witness.rs
+++ b/crates/cognitum-gate-kernel/src/canonical_witness.rs
@@ -106,10 +106,7 @@ impl CactusNode {
// Compile-time size check: repr(C) layout is 12 bytes
// (u16 + u16 + u8 + u8 + 2-pad + u32 = 12, aligned to 4)
// 256 nodes * 12 = 3072 bytes (~3KB), fits in 14.5KB headroom.
-const _: () = assert!(
- size_of::<CactusNode>() == 12,
- "CactusNode must be 12 bytes"
-);
+const _: () = assert!(size_of::<CactusNode>() == 12, "CactusNode must be 12 bytes");
/// Arena-allocated cactus tree for a single tile (up to 256 vertices).
///
@@ -329,7 +326,8 @@ impl ArenaCactus {
let node_v = comp_to_node[cv];
let node_p = comp_to_node[cp];
- if node_v < 256 && node_p < 256
+ if node_v < 256
+ && node_p < 256
&& cactus.nodes[node_v as usize].parent == CactusNode::NO_PARENT
&& node_v != cactus.root
{
@@ -390,10 +388,8 @@ impl ArenaCactus {
for adj in neighbors {
let eid = adj.edge_id as usize;
if eid < graph.edges.len() && graph.edges[eid].is_active() {
- weight_sum =
- weight_sum.saturating_add(FixedPointWeight::from_u16_weight(
- graph.edges[eid].weight,
- ));
+ weight_sum = weight_sum
+ .saturating_add(FixedPointWeight::from_u16_weight(graph.edges[eid].weight));
}
}
if weight_sum < min_weight {
@@ -902,7 +898,10 @@ mod tests {
g.recompute_components();
let cactus = ArenaCactus::build_from_compact_graph(&g);
- assert!(cactus.n_nodes >= 2, "Single edge should have 2 cactus nodes");
+ assert!(
+ cactus.n_nodes >= 2,
+ "Single edge should have 2 cactus nodes"
+ );
let partition = cactus.canonical_partition();
// One vertex on each side
diff --git a/crates/cognitum-gate-kernel/src/lib.rs b/crates/cognitum-gate-kernel/src/lib.rs
index 773c240e4..d11a12c91 100644
--- a/crates/cognitum-gate-kernel/src/lib.rs
+++ b/crates/cognitum-gate-kernel/src/lib.rs
@@ -123,7 +123,7 @@ pub mod canonical_witness;
#[cfg(feature = "canonical-witness")]
pub use canonical_witness::{
- ArenaCactus, CanonicalPartition, CanonicalWitnessFragment, CactusNode, FixedPointWeight,
+ ArenaCactus, CactusNode, CanonicalPartition, CanonicalWitnessFragment, FixedPointWeight,
};
use crate::delta::{Delta, DeltaTag};
diff --git a/crates/cognitum-gate-kernel/tests/canonical_witness_bench.rs b/crates/cognitum-gate-kernel/tests/canonical_witness_bench.rs
index f2a4245ae..03990cf4c 100644
--- a/crates/cognitum-gate-kernel/tests/canonical_witness_bench.rs
+++ b/crates/cognitum-gate-kernel/tests/canonical_witness_bench.rs
@@ -67,10 +67,20 @@ mod bench {
println!("\n=== Canonical Witness Fragment (64 vertices) ===");
println!(" ArenaCactus build: {:.1} µs", avg_cactus_us);
println!(" Partition extract: {:.1} µs", avg_partition_us);
- println!(" Full witness: {:.1} µs (target: < 50 µs)", avg_witness_us);
- println!(" Fragment size: {} bytes", std::mem::size_of::());
+ println!(
+ " Full witness: {:.1} µs (target: < 50 µs)",
+ avg_witness_us
+ );
+ println!(
+ " Fragment size: {} bytes",
+ std::mem::size_of::<CanonicalWitnessFragment>()
+ );
println!(" Cut value: {}", ref_f.cut_value);
- assert!(avg_witness_us < 50.0, "Witness exceeded 50µs target: {:.1} µs", avg_witness_us);
+ assert!(
+ avg_witness_us < 50.0,
+ "Witness exceeded 50µs target: {:.1} µs",
+ avg_witness_us
+ );
}
}
diff --git a/crates/ruvector-bench/tests/wasm_stack_bench.rs b/crates/ruvector-bench/tests/wasm_stack_bench.rs
index 44da4e98f..d05034ffd 100644
--- a/crates/ruvector-bench/tests/wasm_stack_bench.rs
+++ b/crates/ruvector-bench/tests/wasm_stack_bench.rs
@@ -53,8 +53,12 @@ fn bench_canonical_mincut_100v() {
// --- Canonical cut extraction (100 iterations) ---
let mut cactus = CactusGraph::build_from_graph(&graph);
cactus.root_at_lex_smallest();
- println!(" Cactus: {} vertices, {} edges, {} cycles",
- cactus.n_vertices, cactus.n_edges, cactus.cycles.len());
+ println!(
+ " Cactus: {} vertices, {} edges, {} cycles",
+ cactus.n_vertices,
+ cactus.n_edges,
+ cactus.cycles.len()
+ );
let start = Instant::now();
for _ in 0..n_iter {
let result = cactus.canonical_cut();
@@ -81,14 +85,26 @@ fn bench_canonical_mincut_100v() {
let status = if total_us < 1000.0 { "PASS" } else { "FAIL" };
println!("\n=== (a) Canonical Min-Cut (100 vertices, ~300 edges) ===");
- println!(" CactusGraph construction: {:.1} us (avg of {} iters)", avg_cactus_us, n_iter);
- println!(" Canonical cut extraction: {:.1} us (avg of {} iters)", avg_cut_us, n_iter);
- println!(" Total (construct + cut): {:.1} us [target < 1000 us] [{}]", total_us, status);
+ println!(
+ " CactusGraph construction: {:.1} us (avg of {} iters)",
+ avg_cactus_us, n_iter
+ );
+ println!(
+ " Canonical cut extraction: {:.1} us (avg of {} iters)",
+ avg_cut_us, n_iter
+ );
+ println!(
+ " Total (construct + cut): {:.1} us [target < 1000 us] [{}]",
+ total_us, status
+ );
println!(" Determinism (100x verify): {} us total", determinism_us);
println!(" Min-cut value: {:.4}", reference.value);
println!(" Cut edges: {}", reference.cut_edges.len());
- println!(" Partition sizes: {} / {}",
- reference.partition.0.len(), reference.partition.1.len());
+ println!(
+ " Partition sizes: {} / {}",
+ reference.partition.0.len(),
+ reference.partition.1.len()
+ );
}
// =========================================================================
@@ -146,14 +162,31 @@ fn bench_spectral_coherence_500v() {
let status = if avg_full_ms < 5.0 { "PASS" } else { "FAIL" };
println!("\n=== (b) Spectral Coherence Score (500 vertices, ~1500 edges) ===");
- println!(" Full SCS recompute: {:.2} ms (avg of {} iters) [target < 5 ms] [{}]",
- avg_full_ms, n_iter, status);
- println!(" Incremental update: {:.1} us (avg of {} iters)", avg_incr_us, n_incr);
- println!(" Initial composite SCS: {:.6}", initial_score.composite);
+ println!(
+ " Full SCS recompute: {:.2} ms (avg of {} iters) [target < 5 ms] [{}]",
+ avg_full_ms, n_iter, status
+ );
+ println!(
+ " Incremental update: {:.1} us (avg of {} iters)",
+ avg_incr_us, n_incr
+ );
+ println!(
+ " Initial composite SCS: {:.6}",
+ initial_score.composite
+ );
println!(" Fiedler: {:.6}", initial_score.fiedler);
- println!(" Spectral gap: {:.6}", initial_score.spectral_gap);
- println!(" Effective resistance: {:.6}", initial_score.effective_resistance);
- println!(" Degree regularity: {:.6}", initial_score.degree_regularity);
+ println!(
+ " Spectral gap: {:.6}",
+ initial_score.spectral_gap
+ );
+ println!(
+ " Effective resistance: {:.6}",
+ initial_score.effective_resistance
+ );
+ println!(
+ " Degree regularity: {:.6}",
+ initial_score.degree_regularity
+ );
}
// =========================================================================
@@ -219,14 +252,29 @@ fn bench_cognitive_container_100_ticks() {
let status = if avg_tick_us < 200.0 { "PASS" } else { "FAIL" };
println!("\n=== (c) Cognitive Container (100 ticks, 2 deltas each) ===");
- println!(" Average tick: {:.1} us [target < 200 us] [{}]", avg_tick_us, status);
+ println!(
+ " Average tick: {:.1} us [target < 200 us] [{}]",
+ avg_tick_us, status
+ );
println!(" Median tick (p50): {} us", p50);
println!(" p99 tick: {} us", p99);
- println!(" Min / Max tick: {} / {} us", min_tick_us, max_tick_us);
- println!(" Total (100 ticks): {:.2} ms", outer_elapsed.as_micros() as f64 / 1000.0);
- println!(" Chain verification: {} us (chain len = {})", verify_us, container.current_epoch());
- println!(" Chain valid: {}",
- matches!(verification, VerificationResult::Valid { .. }));
+ println!(
+ " Min / Max tick: {} / {} us",
+ min_tick_us, max_tick_us
+ );
+ println!(
+ " Total (100 ticks): {:.2} ms",
+ outer_elapsed.as_micros() as f64 / 1000.0
+ );
+ println!(
+ " Chain verification: {} us (chain len = {})",
+ verify_us,
+ container.current_epoch()
+ );
+ println!(
+ " Chain valid: {}",
+ matches!(verification, VerificationResult::Valid { .. })
+ );
}
// =========================================================================
@@ -314,16 +362,35 @@ fn bench_canonical_witness_64v() {
let det_us = det_start.elapsed().as_micros();
let total_us = avg_cactus_us + avg_partition_us;
- let status = if avg_witness_us < 50.0 { "PASS" } else { "FAIL" };
+ let status = if avg_witness_us < 50.0 {
+ "PASS"
+ } else {
+ "FAIL"
+ };
println!("\n=== (d) Canonical Witness Fragment (64 vertices, ~128 edges) ===");
- println!(" ArenaCactus construction: {:.2} us (avg of {} iters)", avg_cactus_us, n_iter);
- println!(" Partition extraction: {:.2} us (avg of {} iters)", avg_partition_us, n_iter);
- println!(" Full witness fragment: {:.2} us [target < 50 us] [{}]", avg_witness_us, status);
- println!(" Fragment size: {} bytes", std::mem::size_of::());
+ println!(
+ " ArenaCactus construction: {:.2} us (avg of {} iters)",
+ avg_cactus_us, n_iter
+ );
+ println!(
+ " Partition extraction: {:.2} us (avg of {} iters)",
+ avg_partition_us, n_iter
+ );
+ println!(
+ " Full witness fragment: {:.2} us [target < 50 us] [{}]",
+ avg_witness_us, status
+ );
+ println!(
+ " Fragment size: {} bytes",
+ std::mem::size_of::<CanonicalWitnessFragment>()
+ );
println!(" Cactus nodes: {}", cactus.n_nodes);
println!(" Cut value: {}", ref_fragment.cut_value);
- println!(" Cardinality A/B: {} / {}", ref_fragment.cardinality_a, ref_fragment.cardinality_b);
+ println!(
+ " Cardinality A/B: {} / {}",
+ ref_fragment.cardinality_a, ref_fragment.cardinality_b
+ );
println!(" Determinism (100x): {} us", det_us);
}
diff --git a/crates/ruvector-cli/src/mcp/handlers.rs b/crates/ruvector-cli/src/mcp/handlers.rs
index 006864fd2..27cf7b872 100644
--- a/crates/ruvector-cli/src/mcp/handlers.rs
+++ b/crates/ruvector-cli/src/mcp/handlers.rs
@@ -78,8 +78,9 @@ impl McpHandler {
// Canonicalize the parent directory (must exist), then append filename
let parent = resolved.parent().unwrap_or(Path::new("/"));
let parent_canonical = if parent.exists() {
- std::fs::canonicalize(parent)
- .with_context(|| format!("Parent directory does not exist: {}", parent.display()))?
+ std::fs::canonicalize(parent).with_context(|| {
+ format!("Parent directory does not exist: {}", parent.display())
+ })?
} else {
// Create the parent directory within allowed_data_dir if it doesn't exist
anyhow::bail!(
@@ -535,10 +536,7 @@ impl McpHandler {
std::fs::copy(&validated_db_path, &validated_backup_path)
.context("Failed to backup database")?;
- Ok(format!(
- "Backed up to: {}",
- validated_backup_path.display()
- ))
+ Ok(format!("Backed up to: {}", validated_backup_path.display()))
}
async fn get_or_open_db(&self, path: &str) -> Result<Arc<VectorDB>> {
@@ -557,10 +555,7 @@ impl McpHandler {
db_options.storage_path = path_str.clone();
let db = Arc::new(VectorDB::new(db_options)?);
- self.databases
- .write()
- .await
- .insert(path_str, db.clone());
+ self.databases.write().await.insert(path_str, db.clone());
Ok(db)
}
@@ -862,11 +857,7 @@ mod tests {
let handler = handler_with_data_dir(&subdir);
let result = handler.validate_path("../../../etc/passwd");
- assert!(
- result.is_err(),
- "Should block ../ traversal: {:?}",
- result
- );
+ assert!(result.is_err(), "Should block ../ traversal: {:?}", result);
}
#[test]
@@ -878,10 +869,7 @@ mod tests {
std::fs::create_dir_all(dir.path().join("a")).unwrap();
let result = handler.validate_path("a/../../etc/passwd");
- assert!(
- result.is_err(),
- "Should block ../ in the middle of path"
- );
+ assert!(result.is_err(), "Should block ../ in the middle of path");
}
#[test]
diff --git a/crates/ruvector-cognitive-container/src/container.rs b/crates/ruvector-cognitive-container/src/container.rs
index ce7dc6bfd..8bcaef427 100644
--- a/crates/ruvector-cognitive-container/src/container.rs
+++ b/crates/ruvector-cognitive-container/src/container.rs
@@ -3,7 +3,9 @@ use serde::{Deserialize, Serialize};
use crate::epoch::{ContainerEpochBudget, EpochController, Phase};
use crate::error::{ContainerError, Result};
use crate::memory::{MemoryConfig, MemorySlab};
-use crate::witness::{CoherenceDecision, ContainerWitnessReceipt, VerificationResult, WitnessChain};
+use crate::witness::{
+ CoherenceDecision, ContainerWitnessReceipt, VerificationResult, WitnessChain,
+};
/// Top-level container configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -209,7 +211,8 @@ impl CognitiveContainer {
// Phase 4: Evidence
if self.epoch.try_budget(Phase::Evidence) {
self.accumulate_evidence();
- self.epoch.consume(self.evidence.observations.len().max(1) as u64);
+ self.epoch
+ .consume(self.evidence.observations.len().max(1) as u64);
completed.insert(ComponentMask::EVIDENCE);
}
@@ -362,8 +365,8 @@ impl CognitiveContainer {
if self.evidence.observations.is_empty() {
return;
}
- let mean: f64 =
- self.evidence.observations.iter().sum::<f64>() / self.evidence.observations.len() as f64;
+ let mean: f64 = self.evidence.observations.iter().sum::<f64>()
+ / self.evidence.observations.len() as f64;
self.evidence.accumulated_evidence += mean.abs();
}
@@ -372,7 +375,8 @@ impl CognitiveContainer {
if self.graph.edges.is_empty() {
return CoherenceDecision::Inconclusive;
}
- if self.spectral.scs >= 0.5 && self.evidence.accumulated_evidence < self.evidence.threshold {
+ if self.spectral.scs >= 0.5 && self.evidence.accumulated_evidence < self.evidence.threshold
+ {
return CoherenceDecision::Pass;
}
if self.spectral.scs < 0.2 {
@@ -417,10 +421,25 @@ mod tests {
let mut container = default_container();
let deltas = vec![
- Delta::EdgeAdd { u: 0, v: 1, weight: 1.0 },
- Delta::EdgeAdd { u: 1, v: 2, weight: 2.0 },
- Delta::EdgeAdd { u: 2, v: 0, weight: 1.5 },
- Delta::Observation { node: 0, value: 0.8 },
+ Delta::EdgeAdd {
+ u: 0,
+ v: 1,
+ weight: 1.0,
+ },
+ Delta::EdgeAdd {
+ u: 1,
+ v: 2,
+ weight: 2.0,
+ },
+ Delta::EdgeAdd {
+ u: 2,
+ v: 0,
+ weight: 1.5,
+ },
+ Delta::Observation {
+ node: 0,
+ value: 0.8,
+ },
];
let result = container.tick(&deltas).unwrap();
@@ -436,9 +455,13 @@ mod tests {
#[test]
fn test_container_snapshot_restore() {
let mut container = default_container();
- container.tick(&[
- Delta::EdgeAdd { u: 0, v: 1, weight: 3.0 },
- ]).unwrap();
+ container
+ .tick(&[Delta::EdgeAdd {
+ u: 0,
+ v: 1,
+ weight: 3.0,
+ }])
+ .unwrap();
let snap = container.snapshot();
let json = serde_json::to_string(&snap).expect("serialize snapshot");
@@ -459,9 +482,13 @@ mod tests {
assert_eq!(r.receipt.decision, CoherenceDecision::Inconclusive);
// Single edge: min-cut/total = 1.0 (high scs), no evidence => Pass
- let r = container.tick(&[
- Delta::EdgeAdd { u: 0, v: 1, weight: 5.0 },
- ]).unwrap();
+ let r = container
+ .tick(&[Delta::EdgeAdd {
+ u: 0,
+ v: 1,
+ weight: 5.0,
+ }])
+ .unwrap();
assert_eq!(r.receipt.decision, CoherenceDecision::Pass);
}
@@ -469,9 +496,13 @@ mod tests {
fn test_container_multiple_epochs() {
let mut container = default_container();
for i in 0..10 {
- container.tick(&[
- Delta::EdgeAdd { u: i, v: i + 1, weight: 1.0 },
- ]).unwrap();
+ container
+ .tick(&[Delta::EdgeAdd {
+ u: i,
+ v: i + 1,
+ weight: 1.0,
+ }])
+ .unwrap();
}
assert_eq!(container.current_epoch(), 10);
@@ -492,14 +523,22 @@ mod tests {
#[test]
fn test_container_edge_remove() {
let mut container = default_container();
- container.tick(&[
- Delta::EdgeAdd { u: 0, v: 1, weight: 1.0 },
- Delta::EdgeAdd { u: 1, v: 2, weight: 2.0 },
- ]).unwrap();
-
- container.tick(&[
- Delta::EdgeRemove { u: 0, v: 1 },
- ]).unwrap();
+ container
+ .tick(&[
+ Delta::EdgeAdd {
+ u: 0,
+ v: 1,
+ weight: 1.0,
+ },
+ Delta::EdgeAdd {
+ u: 1,
+ v: 2,
+ weight: 2.0,
+ },
+ ])
+ .unwrap();
+
+ container.tick(&[Delta::EdgeRemove { u: 0, v: 1 }]).unwrap();
let snap = container.snapshot();
assert_eq!(snap.graph_edges.len(), 1);
@@ -509,13 +548,21 @@ mod tests {
#[test]
fn test_container_weight_update() {
let mut container = default_container();
- container.tick(&[
- Delta::EdgeAdd { u: 0, v: 1, weight: 1.0 },
- ]).unwrap();
-
- container.tick(&[
- Delta::WeightUpdate { u: 0, v: 1, new_weight: 5.0 },
- ]).unwrap();
+ container
+ .tick(&[Delta::EdgeAdd {
+ u: 0,
+ v: 1,
+ weight: 1.0,
+ }])
+ .unwrap();
+
+ container
+ .tick(&[Delta::WeightUpdate {
+ u: 0,
+ v: 1,
+ new_weight: 5.0,
+ }])
+ .unwrap();
let snap = container.snapshot();
assert_eq!(snap.graph_edges[0].2, 5.0);
diff --git a/crates/ruvector-cognitive-container/src/memory.rs b/crates/ruvector-cognitive-container/src/memory.rs
index 5af25805b..5dbbc1375 100644
--- a/crates/ruvector-cognitive-container/src/memory.rs
+++ b/crates/ruvector-cognitive-container/src/memory.rs
@@ -25,12 +25,12 @@ pub struct MemoryConfig {
impl Default for MemoryConfig {
fn default() -> Self {
Self {
- slab_size: 4 * 1024 * 1024, // 4 MB total
- graph_budget: 1024 * 1024, // 1 MB
- feature_budget: 1024 * 1024, // 1 MB
- solver_budget: 512 * 1024, // 512 KB
- witness_budget: 512 * 1024, // 512 KB
- evidence_budget: 1024 * 1024, // 1 MB
+ slab_size: 4 * 1024 * 1024, // 4 MB total
+ graph_budget: 1024 * 1024, // 1 MB
+ feature_budget: 1024 * 1024, // 1 MB
+ solver_budget: 512 * 1024, // 512 KB
+ witness_budget: 512 * 1024, // 512 KB
+ evidence_budget: 1024 * 1024, // 1 MB
}
}
}
diff --git a/crates/ruvector-cognitive-container/src/witness.rs b/crates/ruvector-cognitive-container/src/witness.rs
index ba44053b2..85e0205ed 100644
--- a/crates/ruvector-cognitive-container/src/witness.rs
+++ b/crates/ruvector-cognitive-container/src/witness.rs
@@ -295,8 +295,7 @@ mod tests {
}
// Tamper with the second receipt's input_hash.
- let mut tampered: Vec<ContainerWitnessReceipt> =
- chain.receipt_chain().to_vec();
+ let mut tampered: Vec<ContainerWitnessReceipt> = chain.receipt_chain().to_vec();
tampered[1].input_hash[0] ^= 0xFF;
match WitnessChain::verify_chain(&tampered) {
@@ -320,13 +319,7 @@ mod tests {
fn test_ring_buffer_eviction() {
let mut chain = WitnessChain::new(3);
for _ in 0..5 {
- chain.generate_receipt(
- b"data",
- b"mc",
- 0.1,
- b"ev",
- CoherenceDecision::Pass,
- );
+ chain.generate_receipt(b"data", b"mc", 0.1, b"ev", CoherenceDecision::Pass);
}
assert_eq!(chain.receipt_chain().len(), 3);
assert_eq!(chain.receipt_chain()[0].epoch, 2);
diff --git a/crates/ruvector-cognitive-container/tests/container_bench.rs b/crates/ruvector-cognitive-container/tests/container_bench.rs
index a85272eb1..d6ebdb3d8 100644
--- a/crates/ruvector-cognitive-container/tests/container_bench.rs
+++ b/crates/ruvector-cognitive-container/tests/container_bench.rs
@@ -55,7 +55,10 @@ fn bench_container_100_ticks() {
println!("\n=== Cognitive Container (100 ticks) ===");
println!(" Average tick: {:.1} µs (target: < 200 µs)", avg);
println!(" Min / Max tick: {} / {} µs", min, max);
- println!(" Total 100 ticks: {:.2} ms", total_time.as_micros() as f64 / 1000.0);
+ println!(
+ " Total 100 ticks: {:.2} ms",
+ total_time.as_micros() as f64 / 1000.0
+ );
println!(" Chain verify: {} µs", verify_us);
println!(" Chain length: {}", container.receipt_chain().len());
println!(
@@ -65,5 +68,9 @@ fn bench_container_100_ticks() {
// 2000µs target accounts for CI/container/debug-mode variability;
// on dedicated hardware in release mode this typically runs under 200µs.
- assert!(avg < 2000.0, "Container tick exceeded 2000µs target: {:.1} µs", avg);
+ assert!(
+ avg < 2000.0,
+ "Container tick exceeded 2000µs target: {:.1} µs",
+ avg
+ );
}
diff --git a/crates/ruvector-coherence/src/spectral.rs b/crates/ruvector-coherence/src/spectral.rs
index 2d441c4a8..7c54d84aa 100644
--- a/crates/ruvector-coherence/src/spectral.rs
+++ b/crates/ruvector-coherence/src/spectral.rs
@@ -17,10 +17,19 @@ pub struct CsrMatrixView {
impl CsrMatrixView {
pub fn new(
- row_ptr: Vec<usize>, col_indices: Vec<usize>, values: Vec<f64>,
- rows: usize, cols: usize,
+ row_ptr: Vec<usize>,
+ col_indices: Vec<usize>,
+ values: Vec<f64>,
+ rows: usize,
+ cols: usize,
) -> Self {
- Self { row_ptr, col_indices, values, rows, cols }
+ Self {
+ row_ptr,
+ col_indices,
+ values,
+ rows,
+ cols,
+ }
}
/// Build a symmetric adjacency CSR matrix from edges `(u, v, weight)`.
@@ -28,7 +37,9 @@ impl CsrMatrixView {
let mut entries: Vec<(usize, usize, f64)> = Vec::with_capacity(edges.len() * 2);
for &(u, v, w) in edges {
entries.push((u, v, w));
- if u != v { entries.push((v, u, w)); }
+ if u != v {
+ entries.push((v, u, w));
+ }
}
entries.sort_by(|a, b| a.0.cmp(&b.0).then(a.1.cmp(&b.1)));
Self::from_sorted_entries(n, &entries)
@@ -39,7 +50,9 @@ impl CsrMatrixView {
let mut y = vec![0.0; self.rows];
for i in 0..self.rows {
let (start, end) = (self.row_ptr[i], self.row_ptr[i + 1]);
- y[i] = (start..end).map(|j| self.values[j] * x[self.col_indices[j]]).sum();
+ y[i] = (start..end)
+ .map(|j| self.values[j] * x[self.col_indices[j]])
+ .sum();
}
y
}
@@ -56,7 +69,9 @@ impl CsrMatrixView {
entries.push((v, u, -w));
}
}
- for i in 0..n { entries.push((i, i, degree[i])); }
+ for i in 0..n {
+ entries.push((i, i, degree[i]));
+ }
entries.sort_by(|a, b| a.0.cmp(&b.0).then(a.1.cmp(&b.1)));
Self::from_sorted_entries(n, &entries)
}
@@ -70,28 +85,41 @@ impl CsrMatrixView {
col_indices.push(c);
values.push(v);
}
- for i in 0..n { row_ptr[i + 1] += row_ptr[i]; }
- Self { row_ptr, col_indices, values, rows: n, cols: n }
+ for i in 0..n {
+ row_ptr[i + 1] += row_ptr[i];
+ }
+ Self {
+ row_ptr,
+ col_indices,
+ values,
+ rows: n,
+ cols: n,
+ }
}
}
/// Configuration for spectral coherence computation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpectralConfig {
- pub alpha: f64, // Fiedler weight (default 0.3)
- pub beta: f64, // Spectral gap weight (default 0.3)
- pub gamma: f64, // Effective resistance weight (default 0.2)
- pub delta: f64, // Degree regularity weight (default 0.2)
- pub max_iterations: usize, // Power iteration max (default 50)
- pub tolerance: f64, // Convergence tolerance (default 1e-6)
+ pub alpha: f64, // Fiedler weight (default 0.3)
+ pub beta: f64, // Spectral gap weight (default 0.3)
+ pub gamma: f64, // Effective resistance weight (default 0.2)
+ pub delta: f64, // Degree regularity weight (default 0.2)
+ pub max_iterations: usize, // Power iteration max (default 50)
+ pub tolerance: f64, // Convergence tolerance (default 1e-6)
pub refresh_threshold: usize, // Updates before full recompute (default 100)
}
impl Default for SpectralConfig {
fn default() -> Self {
Self {
- alpha: 0.3, beta: 0.3, gamma: 0.2, delta: 0.2,
- max_iterations: 50, tolerance: 1e-6, refresh_threshold: 100,
+ alpha: 0.3,
+ beta: 0.3,
+ gamma: 0.2,
+ delta: 0.2,
+ max_iterations: 50,
+ tolerance: 1e-6,
+ refresh_threshold: 100,
}
}
}
@@ -112,7 +140,9 @@ fn dot(a: &[f64], b: &[f64]) -> f64 {
a.iter().zip(b).map(|(x, y)| x * y).sum()
}
-fn norm(v: &[f64]) -> f64 { dot(v, v).sqrt() }
+fn norm(v: &[f64]) -> f64 {
+ dot(v, v).sqrt()
+}
/// CG solve for L*x = b with null-space deflation (L is graph Laplacian).
fn cg_solve(lap: &CsrMatrixView, b: &[f64], max_iter: usize, tol: f64) -> Vec<f64> {
@@ -124,19 +124,30 @@ fn cg_solve(lap: &CsrMatrixView, b: &[f64], max_iter: usize, tol: f64) -> Vec<f64> {
let ap_mean = ap.iter().sum::<f64>() * inv_n;
ap.iter_mut().for_each(|v| *v -= ap_mean);
let pap = dot(&p, &ap);
- if pap.abs() < 1e-30 { break; }
+ if pap.abs() < 1e-30 {
+ break;
+ }
let alpha = rs_old / pap;
- for i in 0..n { x[i] += alpha * p[i]; r[i] -= alpha * ap[i]; }
+ for i in 0..n {
+ x[i] += alpha * p[i];
+ r[i] -= alpha * ap[i];
+ }
let rs_new = dot(&r, &r);
- if rs_new.sqrt() < tol { break; }
+ if rs_new.sqrt() < tol {
+ break;
+ }
let beta = rs_new / rs_old;
- for i in 0..n { p[i] = r[i] + beta * p[i]; }
+ for i in 0..n {
+ p[i] = r[i] + beta * p[i];
+ }
rs_old = rs_new;
}
x
@@ -149,14 +190,18 @@ fn deflate_and_normalize(v: &mut Vec<f64>) {
let proj: f64 = v.iter().sum::<f64>() * inv_sqrt_n;
v.iter_mut().for_each(|x| *x -= proj * inv_sqrt_n);
let n2 = norm(v);
- if n2 > 1e-30 { v.iter_mut().for_each(|x| *x /= n2); }
+ if n2 > 1e-30 {
+ v.iter_mut().for_each(|x| *x /= n2);
+ }
}
/// Estimate the Fiedler value (second smallest eigenvalue) and eigenvector
/// using inverse iteration with null-space deflation.
pub fn estimate_fiedler(lap: &CsrMatrixView, max_iter: usize, tol: f64) -> (f64, Vec<f64>) {
let n = lap.rows;
- if n <= 1 { return (0.0, vec![0.0; n]); }
+ if n <= 1 {
+ return (0.0, vec![0.0; n]);
+ }
// Initial vector orthogonal to all-ones.
let mut v: Vec<f64> = (0..n).map(|i| i as f64 - (n as f64 - 1.0) / 2.0).collect();
deflate_and_normalize(&mut v);
@@ -168,13 +213,21 @@ pub fn estimate_fiedler(lap: &CsrMatrixView, max_iter: usize, tol: f64) -> (f64, Vec<f64>) {
for _ in 0..outer {
let mut w = cg_solve(lap, &v, inner, tol * 0.1);
deflate_and_normalize(&mut w);
- if norm(&w) < 1e-30 { break; }
+ if norm(&w) < 1e-30 {
+ break;
+ }
let lv = lap.spmv(&w);
eigenvalue = dot(&w, &lv);
- let residual: f64 = lv.iter().zip(w.iter())
- .map(|(li, wi)| (li - eigenvalue * wi).powi(2)).sum::<f64>().sqrt();
+ let residual: f64 = lv
+ .iter()
+ .zip(w.iter())
+ .map(|(li, wi)| (li - eigenvalue * wi).powi(2))
+ .sum::<f64>()
+ .sqrt();
v = w;
- if residual < tol { break; }
+ if residual < tol {
+ break;
+ }
}
(eigenvalue.max(0.0), v)
}
@@ -182,7 +235,9 @@ pub fn estimate_fiedler(lap: &CsrMatrixView, max_iter: usize, tol: f64) -> (f64, Vec<f64>) {
/// Estimate the largest eigenvalue of the Laplacian via power iteration.
pub fn estimate_largest_eigenvalue(lap: &CsrMatrixView, max_iter: usize) -> f64 {
let n = lap.rows;
- if n == 0 { return 0.0; }
+ if n == 0 {
+ return 0.0;
+ }
let mut v = vec![1.0 / (n as f64).sqrt(); n];
let mut ev = 0.0;
// Power iteration converges fast for the largest eigenvalue
@@ -190,28 +245,44 @@ pub fn estimate_largest_eigenvalue(lap: &CsrMatrixView, max_iter: usize) -> f64
for _ in 0..iters {
let w = lap.spmv(&v);
let wn = norm(&w);
- if wn < 1e-30 { return 0.0; }
+ if wn < 1e-30 {
+ return 0.0;
+ }
ev = dot(&v, &w);
- v.iter_mut().zip(w.iter()).for_each(|(vi, wi)| *vi = wi / wn);
+ v.iter_mut()
+ .zip(w.iter())
+ .for_each(|(vi, wi)| *vi = wi / wn);
}
ev.max(0.0)
}
/// Spectral gap ratio: fiedler / largest eigenvalue.
pub fn estimate_spectral_gap(fiedler: f64, largest: f64) -> f64 {
- if largest < 1e-30 { 0.0 } else { (fiedler / largest).clamp(0.0, 1.0) }
+ if largest < 1e-30 {
+ 0.0
+ } else {
+ (fiedler / largest).clamp(0.0, 1.0)
+ }
}
/// Degree regularity: 1 - (std_dev / mean) of vertex degrees. 1.0 = perfectly regular.
pub fn compute_degree_regularity(lap: &CsrMatrixView) -> f64 {
let n = lap.rows;
- if n == 0 { return 1.0; }
- let degrees: Vec<f64> = (0..n).map(|i| {
- let (s, e) = (lap.row_ptr[i], lap.row_ptr[i + 1]);
- (s..e).find(|&j| lap.col_indices[j] == i).map_or(0.0, |j| lap.values[j])
- }).collect();
+ if n == 0 {
+ return 1.0;
+ }
+ let degrees: Vec<f64> = (0..n)
+ .map(|i| {
+ let (s, e) = (lap.row_ptr[i], lap.row_ptr[i + 1]);
+ (s..e)
+ .find(|&j| lap.col_indices[j] == i)
+ .map_or(0.0, |j| lap.values[j])
+ })
+ .collect();
 let mean = degrees.iter().sum::<f64>() / n as f64;
- if mean < 1e-30 { return 1.0; }
+ if mean < 1e-30 {
+ return 1.0;
+ }
 let std = (degrees.iter().map(|d| (d - mean).powi(2)).sum::<f64>() / n as f64).sqrt();
(1.0 - std / mean).clamp(0.0, 1.0)
}
@@ -219,9 +290,15 @@ pub fn compute_degree_regularity(lap: &CsrMatrixView) -> f64 {
/// Estimate average effective resistance by deterministic sampling of vertex pairs.
pub fn estimate_effective_resistance_sampled(lap: &CsrMatrixView, n_samples: usize) -> f64 {
let n = lap.rows;
- if n < 2 { return 0.0; }
+ if n < 2 {
+ return 0.0;
+ }
let total_pairs = n * (n - 1) / 2;
- let step = if total_pairs <= n_samples { 1 } else { total_pairs / n_samples };
+ let step = if total_pairs <= n_samples {
+ 1
+ } else {
+ total_pairs / n_samples
+ };
let max_s = n_samples.min(total_pairs);
// Fewer CG iterations for resistance estimation (approximate is fine)
let cg_iters = 10;
@@ -235,12 +312,18 @@ pub fn estimate_effective_resistance_sampled(lap: &CsrMatrixView, n_samples: usi
let x = cg_solve(lap, &rhs, cg_iters, 1e-6);
total += (x[u] - x[v]).abs();
sampled += 1;
- if sampled >= max_s { break 'outer; }
+ if sampled >= max_s {
+ break 'outer;
+ }
}
idx += 1;
}
}
- if sampled == 0 { 0.0 } else { total / sampled as f64 }
+ if sampled == 0 {
+ 0.0
+ } else {
+ total / sampled as f64
+ }
}
/// Tracks spectral coherence incrementally, recomputing fully when needed.
@@ -257,9 +340,13 @@ pub struct SpectralTracker {
impl SpectralTracker {
pub fn new(config: SpectralConfig) -> Self {
Self {
- config, fiedler_estimate: 0.0, gap_estimate: 0.0,
- resistance_estimate: 0.0, regularity: 1.0,
- updates_since_refresh: 0, fiedler_vector: None,
+ config,
+ fiedler_estimate: 0.0,
+ gap_estimate: 0.0,
+ resistance_estimate: 0.0,
+ regularity: 1.0,
+ updates_since_refresh: 0,
+ fiedler_vector: None,
}
}
@@ -279,7 +366,8 @@ impl SpectralTracker {
if let Some(ref fv) = self.fiedler_vector {
if u < fv.len() && v < fv.len() {
let diff = fv[u] - fv[v];
- self.fiedler_estimate = (self.fiedler_estimate + weight_delta * diff * diff).max(0.0);
+ self.fiedler_estimate =
+ (self.fiedler_estimate + weight_delta * diff * diff).max(0.0);
let largest = estimate_largest_eigenvalue(lap, self.config.max_iterations);
self.gap_estimate = estimate_spectral_gap(self.fiedler_estimate, largest);
}
@@ -287,13 +375,20 @@ impl SpectralTracker {
self.regularity = compute_degree_regularity(lap);
}
- pub fn score(&self) -> f64 { self.build_score().composite }
+ pub fn score(&self) -> f64 {
+ self.build_score().composite
+ }
pub fn full_recompute(&mut self, lap: &CsrMatrixView) {
- let (fiedler_raw, fv) = estimate_fiedler(lap, self.config.max_iterations, self.config.tolerance);
+ let (fiedler_raw, fv) =
+ estimate_fiedler(lap, self.config.max_iterations, self.config.tolerance);
let largest = estimate_largest_eigenvalue(lap, self.config.max_iterations);
let n = lap.rows;
- self.fiedler_estimate = if n > 0 { (fiedler_raw / n as f64).clamp(0.0, 1.0) } else { 0.0 };
+ self.fiedler_estimate = if n > 0 {
+ (fiedler_raw / n as f64).clamp(0.0, 1.0)
+ } else {
+ 0.0
+ };
self.gap_estimate = estimate_spectral_gap(fiedler_raw, largest);
let r_raw = estimate_effective_resistance_sampled(lap, 3.min(n * (n - 1) / 2));
self.resistance_estimate = 1.0 / (1.0 + r_raw);
@@ -312,8 +407,10 @@ impl SpectralTracker {
+ self.config.gamma * self.resistance_estimate
+ self.config.delta * self.regularity;
SpectralCoherenceScore {
- fiedler: self.fiedler_estimate, spectral_gap: self.gap_estimate,
- effective_resistance: self.resistance_estimate, degree_regularity: self.regularity,
+ fiedler: self.fiedler_estimate,
+ spectral_gap: self.gap_estimate,
+ effective_resistance: self.resistance_estimate,
+ degree_regularity: self.regularity,
composite: c.clamp(0.0, 1.0),
}
}
@@ -342,8 +439,10 @@ impl HnswHealthMonitor {
pub fn new(config: SpectralConfig) -> Self {
Self {
tracker: SpectralTracker::new(config),
- min_fiedler: 0.05, min_spectral_gap: 0.01,
- max_resistance: 0.95, min_composite_scs: 0.3,
+ min_fiedler: 0.05,
+ min_spectral_gap: 0.01,
+ max_resistance: 0.95,
+ min_composite_scs: 0.3,
}
}
@@ -361,32 +460,47 @@ impl HnswHealthMonitor {
alerts.push(HealthAlert::FragileIndex { fiedler: s.fiedler });
}
if s.spectral_gap < self.min_spectral_gap {
- alerts.push(HealthAlert::PoorExpansion { gap: s.spectral_gap });
+ alerts.push(HealthAlert::PoorExpansion {
+ gap: s.spectral_gap,
+ });
}
if s.effective_resistance > self.max_resistance {
- alerts.push(HealthAlert::HighResistance { resistance: s.effective_resistance });
+ alerts.push(HealthAlert::HighResistance {
+ resistance: s.effective_resistance,
+ });
}
if s.composite < self.min_composite_scs {
alerts.push(HealthAlert::LowCoherence { scs: s.composite });
}
if alerts.len() >= 2 {
alerts.push(HealthAlert::RebuildRecommended {
- reason: format!("{} health issues detected. Full rebuild recommended.", alerts.len()),
+ reason: format!(
+ "{} health issues detected. Full rebuild recommended.",
+ alerts.len()
+ ),
});
}
alerts
}
- pub fn score(&self) -> SpectralCoherenceScore { self.tracker.build_score() }
+ pub fn score(&self) -> SpectralCoherenceScore {
+ self.tracker.build_score()
+ }
}
#[cfg(test)]
mod tests {
use super::*;
- fn triangle() -> Vec<(usize, usize, f64)> { vec![(0,1,1.0),(1,2,1.0),(0,2,1.0)] }
- fn path4() -> Vec<(usize, usize, f64)> { vec![(0,1,1.0),(1,2,1.0),(2,3,1.0)] }
- fn cycle4() -> Vec<(usize, usize, f64)> { vec![(0,1,1.0),(1,2,1.0),(2,3,1.0),(3,0,1.0)] }
+ fn triangle() -> Vec<(usize, usize, f64)> {
+ vec![(0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0)]
+ }
+ fn path4() -> Vec<(usize, usize, f64)> {
+ vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0)]
+ }
+ fn cycle4() -> Vec<(usize, usize, f64)> {
+ vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0), (3, 0, 1.0)]
+ }
#[test]
fn test_laplacian_construction() {
@@ -396,7 +510,10 @@ mod tests {
let (s, e) = (lap.row_ptr[i], lap.row_ptr[i + 1]);
let row_sum: f64 = lap.values[s..e].iter().sum();
assert!(row_sum.abs() < 1e-10, "Row {} sum = {}", i, row_sum);
- let diag = (s..e).find(|&j| lap.col_indices[j] == i).map(|j| lap.values[j]).unwrap();
+ let diag = (s..e)
+ .find(|&j| lap.col_indices[j] == i)
+ .map(|j| lap.values[j])
+ .unwrap();
assert!((diag - 2.0).abs() < 1e-10, "Diag[{}] = {}", i, diag);
}
}
@@ -406,7 +523,11 @@ mod tests {
// K3 eigenvalues: 0, 3, 3. Fiedler = 3.0.
let lap = CsrMatrixView::build_laplacian(3, &triangle());
let (f, _) = estimate_fiedler(&lap, 200, 1e-8);
- assert!((f - 3.0).abs() < 0.15, "Triangle Fiedler = {} (expected ~3.0)", f);
+ assert!(
+ (f - 3.0).abs() < 0.15,
+ "Triangle Fiedler = {} (expected ~3.0)",
+ f
+ );
}
#[test]
@@ -415,7 +536,12 @@ mod tests {
let lap = CsrMatrixView::build_laplacian(4, &path4());
let (f, _) = estimate_fiedler(&lap, 200, 1e-8);
let expected = 2.0 - std::f64::consts::SQRT_2;
- assert!((f - expected).abs() < 0.15, "Path Fiedler = {} (expected ~{})", f, expected);
+ assert!(
+ (f - expected).abs() < 0.15,
+ "Path Fiedler = {} (expected ~{})",
+ f,
+ expected
+ );
}
#[test]
@@ -437,26 +563,53 @@ mod tests {
#[test]
fn test_scs_monotonicity() {
- let full = vec![(0,1,1.0),(0,2,1.0),(0,3,1.0),(1,2,1.0),(1,3,1.0),(2,3,1.0)];
- let sparse = vec![(0,1,1.0),(1,2,1.0),(2,3,1.0)];
+ let full = vec![
+ (0, 1, 1.0),
+ (0, 2, 1.0),
+ (0, 3, 1.0),
+ (1, 2, 1.0),
+ (1, 3, 1.0),
+ (2, 3, 1.0),
+ ];
+ let sparse = vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0)];
let mut tf = SpectralTracker::new(SpectralConfig::default());
let mut ts = SpectralTracker::new(SpectralConfig::default());
let sf = tf.compute(&CsrMatrixView::build_laplacian(4, &full));
let ss = ts.compute(&CsrMatrixView::build_laplacian(4, &sparse));
- assert!(sf.composite >= ss.composite, "Full {} < sparse {}", sf.composite, ss.composite);
+ assert!(
+ sf.composite >= ss.composite,
+ "Full {} < sparse {}",
+ sf.composite,
+ ss.composite
+ );
}
#[test]
fn test_tracker_incremental() {
- let edges = vec![(0,1,1.0),(1,2,1.0),(2,3,1.0),(3,0,1.0),(0,2,1.0),(1,3,1.0)];
+ let edges = vec![
+ (0, 1, 1.0),
+ (1, 2, 1.0),
+ (2, 3, 1.0),
+ (3, 0, 1.0),
+ (0, 2, 1.0),
+ (1, 3, 1.0),
+ ];
let mut tracker = SpectralTracker::new(SpectralConfig::default());
let lap = CsrMatrixView::build_laplacian(4, &edges);
tracker.compute(&lap);
// Small perturbation for accurate first-order approximation.
let delta = 0.05;
- let updated: Vec<_> = edges.iter()
- .map(|&(u,v,w)| if u == 1 && v == 3 { (u,v,w+delta) } else { (u,v,w) }).collect();
+ let updated: Vec<_> = edges
+ .iter()
+ .map(|&(u, v, w)| {
+ if u == 1 && v == 3 {
+ (u, v, w + delta)
+ } else {
+ (u, v, w)
+ }
+ })
+ .collect();
let lap_u = CsrMatrixView::build_laplacian(4, &updated);
tracker.update_edge(&lap_u, 1, 3, delta);
let si = tracker.score();
@@ -464,25 +617,43 @@ mod tests {
let mut tf = SpectralTracker::new(SpectralConfig::default());
let sf = tf.compute(&lap_u).composite;
let diff = (si - sf).abs();
- assert!(diff < 0.5 * sf.max(0.01), "Incremental {} vs full {} (diff {})", si, sf, diff);
+ assert!(
+ diff < 0.5 * sf.max(0.01),
+ "Incremental {} vs full {} (diff {})",
+ si,
+ sf,
+ diff
+ );
// Verify forced refresh matches full recompute closely.
- let mut tr = SpectralTracker::new(SpectralConfig { refresh_threshold: 1, ..Default::default() });
+ let mut tr = SpectralTracker::new(SpectralConfig {
+ refresh_threshold: 1,
+ ..Default::default()
+ });
tr.compute(&lap);
tr.updates_since_refresh = 1;
tr.update_edge(&lap_u, 1, 3, delta);
- assert!((tr.score() - sf).abs() < 0.05, "Refreshed {} vs full {}", tr.score(), sf);
+ assert!(
+ (tr.score() - sf).abs() < 0.05,
+ "Refreshed {} vs full {}",
+ tr.score(),
+ sf
+ );
}
#[test]
fn test_health_alerts() {
- let weak = vec![(0,1,0.01),(1,2,0.01)];
+ let weak = vec![(0, 1, 0.01), (1, 2, 0.01)];
let mut m = HnswHealthMonitor::new(SpectralConfig::default());
m.update(&CsrMatrixView::build_laplacian(3, &weak), None);
let alerts = m.check_health();
assert!(
- alerts.iter().any(|a| matches!(a, HealthAlert::FragileIndex { .. } | HealthAlert::LowCoherence { .. })),
- "Weak graph should trigger alerts. Got: {:?}", alerts
+ alerts.iter().any(|a| matches!(
+ a,
+ HealthAlert::FragileIndex { .. } | HealthAlert::LowCoherence { .. }
+ )),
+ "Weak graph should trigger alerts. Got: {:?}",
+ alerts
);
let mut ms = HnswHealthMonitor::new(SpectralConfig::default());
ms.update(&CsrMatrixView::build_laplacian(3, &triangle()), None);
diff --git a/crates/ruvector-coherence/tests/spectral_bench.rs b/crates/ruvector-coherence/tests/spectral_bench.rs
index d1db5896f..f54d5429b 100644
--- a/crates/ruvector-coherence/tests/spectral_bench.rs
+++ b/crates/ruvector-coherence/tests/spectral_bench.rs
@@ -46,7 +46,10 @@ mod bench {
let avg_incr_us = start.elapsed().as_micros() as f64 / n_iter as f64;
println!("\n=== Spectral Coherence Score (500 vertices) ===");
- println!(" Full SCS recompute: {:.2} ms (target: < 6 ms)", avg_full_ms);
+ println!(
+ " Full SCS recompute: {:.2} ms (target: < 6 ms)",
+ avg_full_ms
+ );
println!(" Incremental update: {:.1} µs", avg_incr_us);
println!(" Composite SCS: {:.4}", initial.composite);
println!(" Fiedler: {:.6}", initial.fiedler);
@@ -55,6 +58,10 @@ mod bench {
// 50ms target accounts for CI/container/debug-mode variability;
// on dedicated hardware in release mode this typically runs under 6ms.
- assert!(avg_full_ms < 50.0, "SCS exceeded 50ms target: {:.2} ms", avg_full_ms);
+ assert!(
+ avg_full_ms < 50.0,
+ "SCS exceeded 50ms target: {:.2} ms",
+ avg_full_ms
+ );
}
}
diff --git a/crates/ruvector-crv/src/stage_iii.rs b/crates/ruvector-crv/src/stage_iii.rs
index d0dfcd0f8..cdd4d3747 100644
--- a/crates/ruvector-crv/src/stage_iii.rs
+++ b/crates/ruvector-crv/src/stage_iii.rs
@@ -32,8 +32,8 @@ impl StageIIIEncoder {
let dim = config.dimensions;
// Single GNN layer: input_dim -> hidden_dim, 1 head
// heads=1 always divides any dim, and dropout=0.0 is always valid
- let gnn_layer = RuvectorLayer::new(dim, dim, 1, 0.0)
- .expect("dim is always divisible by 1 head");
+ let gnn_layer =
+ RuvectorLayer::new(dim, dim, 1, 0.0).expect("dim is always divisible by 1 head");
Self { dim, gnn_layer }
}
diff --git a/crates/ruvector-dither/Cargo.toml b/crates/ruvector-dither/Cargo.toml
new file mode 100644
index 000000000..ec7b487c3
--- /dev/null
+++ b/crates/ruvector-dither/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "ruvector-dither"
+version = "0.1.0"
+edition = "2021"
+license = "MIT OR Apache-2.0"
+authors = ["rUv "]
+repository = "https://github.com/ruvnet/ruvector"
+homepage = "https://ruv.io"
+documentation = "https://docs.rs/ruvector-dither"
+description = "Deterministic low-discrepancy dithering for low-bit quantization: golden-ratio and π-digit sequences for blue-noise error shaping"
+keywords = ["quantization", "dither", "golden-ratio", "inference", "wasm"]
+categories = ["science", "algorithms", "no-std"]
+readme = "README.md"
+
+[features]
+default = []
+# Enable no_std mode (requires an allocator)
+no_std = []
+
+[dependencies]
+# No runtime deps — fully no_std compatible
+
+[dev-dependencies]
+criterion = { version = "0.5", features = ["html_reports"] }
+
+[[bench]]
+name = "dither_bench"
+harness = false
diff --git a/crates/ruvector-dither/README.md b/crates/ruvector-dither/README.md
new file mode 100644
index 000000000..a07807426
--- /dev/null
+++ b/crates/ruvector-dither/README.md
@@ -0,0 +1,75 @@
+# ruvector-dither
+
+Deterministic, low-discrepancy **pre-quantization dithering** for low-bit
+neural network inference on tiny devices (WASM, Seed, STM32).
+
+## Why dither?
+
+Quantizers at 3/5/7 bits can align with power-of-two boundaries, producing
+idle tones, sticky activations, and periodic errors that degrade accuracy.
+A sub-LSB pre-quantization offset:
+
+- Decorrelates the signal from grid boundaries.
+- Pushes quantization error toward high frequencies (blue-noise-like),
+ which average out downstream.
+- Uses **no RNG** -- outputs are deterministic, reproducible across
+ platforms (WASM / x86 / ARM), and cache-friendly.
+
+## Features
+
+- **Golden-ratio sequence** -- best 1-D equidistribution, irrational increment (never repeats).
+- **Pi-digit table** -- 256-byte cyclic lookup, exact reproducibility from a tensor/layer ID.
+- **Per-channel dither pools** -- structurally decorrelated channels without any randomness.
+- **Scalar, slice, and integer-code quantization** helpers included.
+- **`no_std`-compatible** -- zero runtime dependencies; enable with `features = ["no_std"]`.
+
+## Quick start
+
+```rust
+use ruvector_dither::{GoldenRatioDither, PiDither, quantize_dithered};
+
+// Golden-ratio dither, 8-bit, epsilon = 0.5 LSB
+let mut gr = GoldenRatioDither::new(0.0);
+let q = quantize_dithered(0.314, 8, 0.5, &mut gr);
+assert!(q >= -1.0 && q <= 1.0);
+
+// Pi-digit dither, 5-bit
+let mut pi = PiDither::new(0);
+let q2 = quantize_dithered(0.271, 5, 0.5, &mut pi);
+assert!(q2 >= -1.0 && q2 <= 1.0);
+```
+
+### Per-channel batch quantization
+
+```rust
+use ruvector_dither::ChannelDither;
+
+let mut cd = ChannelDither::new(/*layer_id=*/ 0, /*channels=*/ 8, /*bits=*/ 5, /*eps=*/ 0.5);
+let mut activations = vec![0.5_f32; 64]; // shape [batch=8, channels=8]
+cd.quantize_batch(&mut activations);
+```
+
+## Modules
+
+| Module | Description |
+|--------|-------------|
+| `golden` | `GoldenRatioDither` -- additive golden-ratio quasi-random sequence |
+| `pi` | `PiDither` -- cyclic 256-byte table derived from digits of pi |
+| `quantize` | `quantize_dithered`, `quantize_slice_dithered`, `quantize_to_code` |
+| `channel` | `ChannelDither` -- per-channel dither pool seeded from layer/channel IDs |
+
+## Trait: `DitherSource`
+
+Implement `DitherSource` to plug in your own deterministic sequence:
+
+```rust
+pub trait DitherSource {
+ /// Return the next zero-mean offset in [-0.5, +0.5].
+ fn next_unit(&mut self) -> f32;
+}
+```
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+or [MIT License](http://opensource.org/licenses/MIT) at your option.
diff --git a/crates/ruvector-dither/benches/dither_bench.rs b/crates/ruvector-dither/benches/dither_bench.rs
new file mode 100644
index 000000000..f0385eab2
--- /dev/null
+++ b/crates/ruvector-dither/benches/dither_bench.rs
@@ -0,0 +1,60 @@
+use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
+use ruvector_dither::{
+ channel::ChannelDither, quantize_dithered, quantize_slice_dithered, GoldenRatioDither, PiDither,
+};
+
+fn bench_single_quantize(c: &mut Criterion) {
+ let mut group = c.benchmark_group("quantize_dithered_single");
+ for bits in [5u32, 7, 8] {
+ group.bench_with_input(BenchmarkId::from_parameter(bits), &bits, |b, &bits| {
+ let mut d = GoldenRatioDither::new(0.0);
+ b.iter(|| quantize_dithered(black_box(0.314_f32), bits, 0.5, &mut d));
+ });
+ }
+ group.finish();
+}
+
+fn bench_slice_quantize(c: &mut Criterion) {
+ let mut group = c.benchmark_group("quantize_slice");
+ for n in [64usize, 256, 1024] {
+ group.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
+ let input: Vec<f32> = (0..n).map(|i| (i as f32 / n as f32) * 2.0 - 1.0).collect();
+ b.iter(|| {
+ let mut buf = input.clone();
+ let mut d = GoldenRatioDither::new(0.0);
+ quantize_slice_dithered(black_box(&mut buf), 8, 0.5, &mut d);
+ black_box(buf)
+ });
+ });
+ }
+ group.finish();
+}
+
+fn bench_pi_dither(c: &mut Criterion) {
+ c.bench_function("pi_dither_1k", |b| {
+ let mut d = PiDither::new(0);
+ let mut buf: Vec<f32> = vec![0.5; 1024];
+ b.iter(|| {
+ quantize_slice_dithered(black_box(&mut buf), 7, 0.5, &mut d);
+ });
+ });
+}
+
+fn bench_channel_dither(c: &mut Criterion) {
+ c.bench_function("channel_dither_256activations_32ch", |b| {
+ let mut cd = ChannelDither::new(0, 32, 8, 0.5);
+ let mut acts: Vec<f32> = vec![0.314; 256];
+ b.iter(|| {
+ cd.quantize_batch(black_box(&mut acts));
+ });
+ });
+}
+
+criterion_group!(
+ benches,
+ bench_single_quantize,
+ bench_slice_quantize,
+ bench_pi_dither,
+ bench_channel_dither
+);
+criterion_main!(benches);
diff --git a/crates/ruvector-dither/src/channel.rs b/crates/ruvector-dither/src/channel.rs
new file mode 100644
index 000000000..86aa299ec
--- /dev/null
+++ b/crates/ruvector-dither/src/channel.rs
@@ -0,0 +1,92 @@
+//! Per-channel and per-layer dither management.
+//!
+//! `ChannelDither` bundles one `GoldenRatioDither` state per channel,
+//! seeded from `(layer_id, channel_id)` pairs so every channel is
+//! structurally decorrelated without any RNG.
+
+use crate::{DitherSource, GoldenRatioDither};
+
+/// Per-channel dither pool seeded from `(layer_id, channel_id)` pairs.
+///
+/// Allocates one `GoldenRatioDither` per channel; each is independently
+/// advanced, so channels cannot constructively interfere.
+pub struct ChannelDither {
+ channels: Vec<GoldenRatioDither>,
+ bits: u32,
+ eps: f32,
+}
+
+impl ChannelDither {
+ /// Build a pool of `n_channels` dithers for `layer_id` / `bits` / `eps`.
+ pub fn new(layer_id: u32, n_channels: usize, bits: u32, eps: f32) -> Self {
+ let channels = (0..n_channels)
+ .map(|ch| GoldenRatioDither::from_ids(layer_id, ch as u32))
+ .collect();
+ Self {
+ channels,
+ bits,
+ eps,
+ }
+ }
+
+ /// Quantize `activations` in-place. Each column (channel dimension) uses
+ /// its own independent dither state.
+ ///
+ /// `activations` is a flat row-major tensor of shape `[batch, channels]`.
+ /// If the slice is not a multiple of `n_channels`, the remainder is
+ /// processed using channel 0.
+ pub fn quantize_batch(&mut self, activations: &mut [f32]) {
+ assert!(
+ !self.channels.is_empty(),
+ "ChannelDither must have >= 1 channel"
+ );
+ assert!(self.bits >= 2 && self.bits <= 31, "bits must be in [2, 31]");
+ let nc = self.channels.len();
+ let qmax = ((1u32 << (self.bits - 1)) - 1) as f32;
+ let lsb = 1.0 / qmax;
+ for (i, x) in activations.iter_mut().enumerate() {
+ let ch = i % nc;
+ let d = self.channels[ch].next(self.eps * lsb);
+ *x = ((*x + d) * qmax).round().clamp(-qmax, qmax) / qmax;
+ }
+ }
+
+ /// Number of channels in this pool.
+ pub fn n_channels(&self) -> usize {
+ self.channels.len()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn channel_dither_correct_count() {
+ let cd = ChannelDither::new(0, 16, 8, 0.5);
+ assert_eq!(cd.n_channels(), 16);
+ }
+
+ #[test]
+ fn channel_dither_in_bounds() {
+ let mut cd = ChannelDither::new(1, 8, 5, 0.5);
+ let mut acts: Vec<f32> = (0..64).map(|i| (i as f32 / 63.0) * 2.0 - 1.0).collect();
+ cd.quantize_batch(&mut acts);
+ for v in acts {
+ assert!(v >= -1.0 && v <= 1.0, "out of bounds: {v}");
+ }
+ }
+
+ #[test]
+ fn different_layers_produce_different_outputs() {
+ let input: Vec<f32> = vec![0.5; 16];
+ let mut buf0 = input.clone();
+ let mut buf1 = input.clone();
+ ChannelDither::new(0, 8, 8, 0.5).quantize_batch(&mut buf0);
+ ChannelDither::new(99, 8, 8, 0.5).quantize_batch(&mut buf1);
+ assert_ne!(
+ buf0, buf1,
+ "different layer_ids must yield different dithered outputs"
+ );
+ }
+}
diff --git a/crates/ruvector-dither/src/golden.rs b/crates/ruvector-dither/src/golden.rs
new file mode 100644
index 000000000..4501e286f
--- /dev/null
+++ b/crates/ruvector-dither/src/golden.rs
@@ -0,0 +1,100 @@
+//! Golden-ratio quasi-random dither sequence.
+//!
+//! State update: `state = frac(state + φ)` where φ = (√5−1)/2 ≈ 0.618…
+//!
+//! This is the Kronecker (Weyl) additive recurrence with irrational step φ —
+//! it achieves the best possible equidistribution among 1-D low-discrepancy sequences.
+
+use crate::DitherSource;
+
+/// Additive golden-ratio dither with zero-mean output in `[-0.5, 0.5]`.
+///
+/// The increment φ is irrational, so the sequence never exactly repeats.
+/// Two instances with different seeds stay decorrelated.
+#[derive(Clone, Debug)]
+pub struct GoldenRatioDither {
+ state: f32,
+}
+
+/// φ = (√5 − 1) / 2
+const PHI: f32 = 0.618_033_98_f32;
+
+impl GoldenRatioDither {
+ /// Create a new sequence seeded at `initial_state` ∈ [0, 1).
+ ///
+ /// For per-layer / per-channel decorrelation, seed with
+ /// `frac(layer_id × φ + channel_id × φ²)`.
+ #[inline]
+ pub fn new(initial_state: f32) -> Self {
+ Self {
+ state: initial_state.abs().fract(),
+ }
+ }
+
+ /// Construct from a `(layer_id, channel_id)` pair for structural decorrelation.
+ #[inline]
+ pub fn from_ids(layer_id: u32, channel_id: u32) -> Self {
+ let s = ((layer_id as f32) * PHI + (channel_id as f32) * PHI * PHI).fract();
+ Self { state: s }
+ }
+
+ /// Current state (useful for serialisation / checkpointing).
+ #[inline]
+ pub fn state(&self) -> f32 {
+ self.state
+ }
+}
+
+impl DitherSource for GoldenRatioDither {
+ /// Advance and return next value in `[-0.5, 0.5]`.
+ #[inline]
+ fn next_unit(&mut self) -> f32 {
+ self.state = (self.state + PHI).fract();
+ self.state - 0.5
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::DitherSource;
+
+ #[test]
+ fn output_is_in_range() {
+ let mut d = GoldenRatioDither::new(0.0);
+ for _ in 0..10_000 {
+ let v = d.next_unit();
+ assert!(v >= -0.5 && v <= 0.5, "out of range: {v}");
+ }
+ }
+
+ #[test]
+ fn mean_is_near_zero() {
+ let mut d = GoldenRatioDither::new(0.0);
+ let n = 100_000;
+ let mean: f32 = (0..n).map(|_| d.next_unit()).sum::<f32>() / n as f32;
+ assert!(mean.abs() < 0.01, "mean too large: {mean}");
+ }
+
+ #[test]
+ fn from_ids_decorrelates() {
+ let mut d0 = GoldenRatioDither::from_ids(0, 0);
+ let mut d1 = GoldenRatioDither::from_ids(1, 7);
+ // Confirm they start at different states
+ let v0 = d0.next_unit();
+ let v1 = d1.next_unit();
+ assert!(
+ (v0 - v1).abs() > 1e-4,
+ "distinct seeds should produce distinct first values"
+ );
+ }
+
+ #[test]
+ fn deterministic_across_calls() {
+ let mut d1 = GoldenRatioDither::new(0.123);
+ let mut d2 = GoldenRatioDither::new(0.123);
+ for _ in 0..1000 {
+ assert_eq!(d1.next_unit(), d2.next_unit());
+ }
+ }
+}
diff --git a/crates/ruvector-dither/src/lib.rs b/crates/ruvector-dither/src/lib.rs
new file mode 100644
index 000000000..2b0a08064
--- /dev/null
+++ b/crates/ruvector-dither/src/lib.rs
@@ -0,0 +1,63 @@
+//! # ruvector-dither
+//!
+//! Deterministic, low-discrepancy **pre-quantization dithering** for low-bit
+//! inference on tiny devices (WASM, Seed, STM32).
+//!
+//! ## Why dither?
+//!
+//! Quantizers at 3 / 5 / 7 bits can align with power-of-two boundaries and
+//! produce idle tones / limit cycles — sticky activations and periodic errors
+//! that degrade accuracy. A sub-LSB pre-quantization offset:
+//!
+//! - Decorrelates the signal from grid boundaries.
+//! - Pushes quantization error toward high frequencies (blue-noise-like),
+//! which average out downstream.
+//! - Uses **no RNG** — outputs are deterministic, reproducible across
+//! platforms (WASM / x86 / ARM), and cache-friendly.
+//!
+//! ## Sequences
+//!
+//! | Type | State update | Properties |
+//! |------|-------------|------------|
+//! | [`GoldenRatioDither`] | frac(state + φ) | Best 1-D equidistribution |
+//! | [`PiDither`] | table of π bytes | Reproducible, period = 256 |
+//!
+//! ## Quick start
+//!
+//! ```
+//! use ruvector_dither::{GoldenRatioDither, PiDither, quantize_dithered};
+//!
+//! // Quantize with golden-ratio dither, 8-bit, ε = 0.5 LSB
+//! let mut gr = GoldenRatioDither::new(0.0);
+//! let q = quantize_dithered(0.314, 8, 0.5, &mut gr);
+//! assert!(q >= -1.0 && q <= 1.0);
+//!
+//! // Quantize with π-digit dither
+//! let mut pi = PiDither::new(0);
+//! let q2 = quantize_dithered(0.271, 5, 0.5, &mut pi);
+//! assert!(q2 >= -1.0 && q2 <= 1.0);
+//! ```
+
+#![cfg_attr(feature = "no_std", no_std)]
+
+pub mod channel;
+pub mod golden;
+pub mod pi;
+pub mod quantize;
+
+pub use channel::ChannelDither;
+pub use golden::GoldenRatioDither;
+pub use pi::PiDither;
+pub use quantize::{quantize_dithered, quantize_slice_dithered};
+
+/// Trait implemented by any deterministic dither source.
+pub trait DitherSource {
+ /// Advance the sequence and return the next zero-mean offset in `[-0.5, +0.5]`.
+ fn next_unit(&mut self) -> f32;
+
+ /// Scale output to ε × LSB amplitude.
+ #[inline]
+ fn next(&mut self, eps_lsb: f32) -> f32 {
+ self.next_unit() * eps_lsb
+ }
+}
diff --git a/crates/ruvector-dither/src/pi.rs b/crates/ruvector-dither/src/pi.rs
new file mode 100644
index 000000000..6090f1475
--- /dev/null
+++ b/crates/ruvector-dither/src/pi.rs
@@ -0,0 +1,110 @@
+//! π-digit dither: cyclic table of 256 bytes from the hexadecimal expansion of π, scaled to [-0.5, 0.5].
+//!
+//! Period = 256. Each entry is an independent offset making the sequence
+//! suitable for small buffers where you want exact reproducibility from a
+//! named tensor / layer rather than a stateful RNG.
+
+use crate::DitherSource;
+
+/// First 256 bytes of π (hex digits 3.243F6A8885A308D3…).
+///
+/// Each byte spans [0, 255]; we map to [-0.5, 0.5] by `(b as f32 / 255.0) - 0.5`.
+#[rustfmt::skip]
+const PI_BYTES: [u8; 256] = [
+ 0x32, 0x43, 0xF6, 0xA8, 0x88, 0x5A, 0x30, 0x8D, 0x31, 0x31, 0x98, 0xA2,
+ 0xE0, 0x37, 0x07, 0x34, 0x4A, 0x40, 0x93, 0x82, 0x22, 0x99, 0xF3, 0x1D,
+ 0x00, 0x82, 0xEF, 0xA9, 0x8E, 0xC4, 0xE6, 0xC8, 0x94, 0x52, 0x21, 0xE6,
+ 0x38, 0xD0, 0x13, 0x77, 0xBE, 0x54, 0x66, 0xCF, 0x34, 0xE9, 0x0C, 0x6C,
+ 0xC0, 0xAC, 0x29, 0xB7, 0xC9, 0x7C, 0x50, 0xDD, 0x3F, 0x84, 0xD5, 0xB5,
+ 0xB5, 0x47, 0x09, 0x17, 0x92, 0x16, 0xD5, 0xD9, 0x89, 0x79, 0xFB, 0x1B,
+ 0xD1, 0x31, 0x0B, 0xA6, 0x98, 0xDF, 0xB5, 0xAC, 0x2F, 0xFD, 0x72, 0xDB,
+ 0xD0, 0x1A, 0xDF, 0xB7, 0xB8, 0xE1, 0xAF, 0xED, 0x6A, 0x26, 0x7E, 0x96,
+ 0xBA, 0x7C, 0x90, 0x45, 0xF1, 0x2C, 0x7F, 0x99, 0x24, 0xA1, 0x99, 0x47,
+ 0xB3, 0x91, 0x6C, 0xF7, 0x08, 0x01, 0xF2, 0xE2, 0x85, 0x8E, 0xFC, 0x16,
+ 0x63, 0x69, 0x20, 0xD8, 0x71, 0x57, 0x4E, 0x69, 0xA4, 0x58, 0xFE, 0xA3,
+ 0xF4, 0x93, 0x3D, 0x7E, 0x0D, 0x95, 0x74, 0x8F, 0x72, 0x8E, 0xB6, 0x58,
+ 0x71, 0x8B, 0xCD, 0x58, 0x82, 0x15, 0x4A, 0xEE, 0x7B, 0x54, 0xA4, 0x1D,
+ 0xC2, 0x5A, 0x59, 0xB5, 0x9C, 0x30, 0xD5, 0x39, 0x2A, 0xF2, 0x60, 0x13,
+ 0xC5, 0xD1, 0xB0, 0x23, 0x28, 0x60, 0x85, 0xF0, 0xCA, 0x41, 0x79, 0x18,
+ 0xB8, 0xDB, 0x38, 0xEF, 0x8E, 0x79, 0xDC, 0xB0, 0x60, 0x3A, 0x18, 0x0E,
+ 0x6C, 0x9E, 0xD0, 0xE8, 0x9D, 0x44, 0x8F, 0x39, 0xF9, 0x93, 0xDB, 0x07,
+ 0x3A, 0xA3, 0x45, 0x22, 0x7E, 0xD8, 0xAC, 0x87, 0x2F, 0x85, 0x5D, 0x28,
+ 0x55, 0xB0, 0x89, 0x73, 0x36, 0xF3, 0xEB, 0xCD, 0xF6, 0x00, 0x4A, 0xDB,
+ 0x36, 0x47, 0xDB, 0xF7, 0x82, 0x48, 0xDB, 0xF3, 0xD3, 0x7C, 0x45, 0x10,
+ 0xC6, 0x7A, 0x70, 0xAA, 0x56, 0x78, 0x5A, 0xC6, 0x37, 0x10, 0xA2, 0x44,
+ 0x32, 0x34, 0xFE, 0x08,
+];
+
+/// Cyclic π-digit dither. Period = 256; the `u8` index wraps via `wrapping_add`.
+#[derive(Clone, Debug)]
+pub struct PiDither {
+ idx: u8,
+}
+
+impl PiDither {
+ /// Create a new instance starting at `offset` (0–255).
+ #[inline]
+ pub fn new(offset: u8) -> Self {
+ Self { idx: offset }
+ }
+
+ /// Construct from a tensor/layer identifier for structural reproducibility.
+ #[inline]
+ pub fn from_tensor_id(tensor_id: u32) -> Self {
+ // Mix bits so different tensor IDs get distinct offsets
+ let mixed = tensor_id
+ .wrapping_mul(0x9E37_79B9)
+ .wrapping_add(tensor_id >> 16);
+ Self {
+ idx: (mixed & 0xFF) as u8,
+ }
+ }
+}
+
+impl DitherSource for PiDither {
+ /// Advance and return next value in `[-0.5, 0.5]`.
+ #[inline]
+ fn next_unit(&mut self) -> f32 {
+ let b = PI_BYTES[self.idx as usize];
+ self.idx = self.idx.wrapping_add(1);
+ (b as f32 / 255.0) - 0.5
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::DitherSource;
+
+ #[test]
+ fn output_is_in_range() {
+ let mut d = PiDither::new(0);
+ for _ in 0..256 * 4 {
+ let v = d.next_unit();
+ assert!(v >= -0.5 && v <= 0.5, "out of range: {v}");
+ }
+ }
+
+ #[test]
+ fn period_is_256() {
+ let mut d = PiDither::new(0);
+ let first: Vec<f32> = (0..256).map(|_| d.next_unit()).collect();
+ let second: Vec<f32> = (0..256).map(|_| d.next_unit()).collect();
+ assert_eq!(first, second);
+ }
+
+ #[test]
+ fn mean_is_near_zero() {
+ let mut d = PiDither::new(0);
+ let sum: f32 = (0..256).map(|_| d.next_unit()).sum();
+ let mean = sum / 256.0;
+ assert!(mean.abs() < 0.05, "π-digit mean too large: {mean}");
+ }
+
+ #[test]
+ fn from_tensor_id_gives_distinct_offsets() {
+ let d0 = PiDither::from_tensor_id(0);
+ let d1 = PiDither::from_tensor_id(1);
+ assert_ne!(d0.idx, d1.idx);
+ }
+}
diff --git a/crates/ruvector-dither/src/quantize.rs b/crates/ruvector-dither/src/quantize.rs
new file mode 100644
index 000000000..9d24c4dc7
--- /dev/null
+++ b/crates/ruvector-dither/src/quantize.rs
@@ -0,0 +1,134 @@
+//! Drop-in quantization helpers that apply dither before rounding.
+
+use crate::DitherSource;
+
+/// Quantize a single value with deterministic dither.
+///
+/// # Arguments
+/// - `x` – input activation in `[-1.0, 1.0]`
+/// - `bits` – quantizer bit-width (e.g. 3, 5, 7, 8)
+/// - `eps` – dither amplitude in LSB units (0.0 = no dither, 0.5 = half-LSB recommended)
+/// - `source` – stateful dither sequence
+///
+/// Returns the quantized value in `[-1.0, 1.0]`.
+///
+/// # Example
+/// ```
+/// use ruvector_dither::{GoldenRatioDither, quantize_dithered};
+/// let mut d = GoldenRatioDither::new(0.0);
+/// let q = quantize_dithered(0.314, 8, 0.5, &mut d);
+/// assert!(q >= -1.0 && q <= 1.0);
+/// ```
+#[inline]
+pub fn quantize_dithered(x: f32, bits: u32, eps: f32, source: &mut impl DitherSource) -> f32 {
+ assert!(bits >= 2 && bits <= 31, "bits must be in [2, 31]");
+ let qmax = ((1u32 << (bits - 1)) - 1) as f32;
+ let lsb = 1.0 / qmax;
+ let dither = source.next(eps * lsb);
+ let shifted = (x + dither) * qmax;
+ let rounded = shifted.round().clamp(-qmax, qmax);
+ rounded / qmax
+}
+
+/// Quantize a slice in-place with deterministic dither.
+///
+/// Each element gets an independent dither sample from `source`.
+///
+/// # Example
+/// ```
+/// use ruvector_dither::{GoldenRatioDither, quantize_slice_dithered};
+/// let mut vals = vec![0.1_f32, 0.5, -0.3, 0.9, -0.8];
+/// let mut d = GoldenRatioDither::new(0.0);
+/// quantize_slice_dithered(&mut vals, 5, 0.5, &mut d);
+/// for &v in &vals {
+/// assert!(v >= -1.0 && v <= 1.0);
+/// }
+/// ```
+pub fn quantize_slice_dithered(
+ xs: &mut [f32],
+ bits: u32,
+ eps: f32,
+ source: &mut impl DitherSource,
+) {
+ assert!(bits >= 2 && bits <= 31, "bits must be in [2, 31]");
+ let qmax = ((1u32 << (bits - 1)) - 1) as f32;
+ let lsb = 1.0 / qmax;
+ for x in xs.iter_mut() {
+ let dither = source.next(eps * lsb);
+ let shifted = (*x + dither) * qmax;
+ *x = shifted.round().clamp(-qmax, qmax) / qmax;
+ }
+}
+
+/// Quantize to a raw integer code (signed, in `[-(2^(bits-1)), 2^(bits-1)-1]`).
+///
+/// Useful when you need the integer representation rather than a re-scaled float.
+#[inline]
+pub fn quantize_to_code(x: f32, bits: u32, eps: f32, source: &mut impl DitherSource) -> i32 {
+ assert!(bits >= 2 && bits <= 31, "bits must be in [2, 31]");
+ let qmax = ((1u32 << (bits - 1)) - 1) as f32;
+ let lsb = 1.0 / qmax;
+ let dither = source.next(eps * lsb);
+ ((x + dither) * qmax).round().clamp(-qmax, qmax) as i32
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{GoldenRatioDither, PiDither};
+
+ #[test]
+ fn output_in_unit_range() {
+ let mut d = GoldenRatioDither::new(0.0);
+ for bits in [3u32, 5, 7, 8] {
+ for &x in &[-1.0_f32, -0.5, 0.0, 0.5, 1.0] {
+ let q = quantize_dithered(x, bits, 0.5, &mut d);
+ assert!(q >= -1.0 && q <= 1.0, "bits={bits}, x={x}, q={q}");
+ }
+ }
+ }
+
+ #[test]
+ fn dither_reduces_idle_tones() {
+ // A constant signal at exactly 0.5 * LSB without dither quantizes
+ // to the same code every time (idle tone). With dither the code
+ // alternates, so the variance of codes should be > 0.
+ let bits = 5u32;
+ let qmax = ((1u32 << (bits - 1)) - 1) as f32;
+ let lsb = 1.0 / qmax;
+ let x = 0.5 * lsb; // exactly half an LSB
+
+ let mut codes_with: Vec<i32> = Vec::with_capacity(256);
+ let mut d = GoldenRatioDither::new(0.0);
+ for _ in 0..256 {
+ codes_with.push(quantize_to_code(x, bits, 0.5, &mut d));
+ }
+ let unique: std::collections::HashSet<i32> = codes_with.iter().copied().collect();
+ assert!(
+ unique.len() > 1,
+ "dithered signal must produce >1 unique code"
+ );
+ }
+
+ #[test]
+ fn slice_quantize_in_bounds() {
+ let mut vals: Vec<f32> = (-50..=50).map(|i| i as f32 * 0.02).collect();
+ let mut pi = PiDither::new(0);
+ quantize_slice_dithered(&mut vals, 7, 0.5, &mut pi);
+ for v in vals {
+ assert!(v >= -1.0 && v <= 1.0, "out of range: {v}");
+ }
+ }
+
+ #[test]
+ fn deterministic_with_same_seed() {
+ let input = vec![0.1_f32, 0.4, -0.7, 0.9];
+ let quantize = |input: &[f32]| {
+ let mut buf = input.to_vec();
+ let mut d = GoldenRatioDither::new(0.5);
+ quantize_slice_dithered(&mut buf, 8, 0.5, &mut d);
+ buf
+ };
+ assert_eq!(quantize(&input), quantize(&input));
+ }
+}
diff --git a/crates/ruvector-gnn/src/cold_tier.rs b/crates/ruvector-gnn/src/cold_tier.rs
index b00fd6730..0b79a83c9 100644
--- a/crates/ruvector-gnn/src/cold_tier.rs
+++ b/crates/ruvector-gnn/src/cold_tier.rs
@@ -113,7 +113,10 @@ impl FeatureStorage {
features.len().to_string(),
));
}
- let file = self.file.as_mut().ok_or_else(|| GnnError::other("file not open"))?;
+ let file = self
+ .file
+ .as_mut()
+ .ok_or_else(|| GnnError::other("file not open"))?;
let offset = HEADER_SIZE + (node_id as u64) * (self.block_size as u64);
file.seek(SeekFrom::Start(offset))?;
let bytes: &[u8] = unsafe {
@@ -131,7 +134,10 @@ impl FeatureStorage {
node_id, self.num_nodes
)));
}
- let file = self.file.as_mut().ok_or_else(|| GnnError::other("file not open"))?;
+ let file = self
+ .file
+ .as_mut()
+ .ok_or_else(|| GnnError::other("file not open"))?;
let offset = HEADER_SIZE + (node_id as u64) * (self.block_size as u64);
file.seek(SeekFrom::Start(offset))?;
let mut buf = vec![0u8; self.dim * F32_SIZE];
@@ -521,8 +527,11 @@ impl ColdTierTrainer {
let features = &batch.features[i];
// Simple L2 loss for demonstration
- let loss: f64 =
- features.iter().map(|&x| (x as f64) * (x as f64)).sum::<f64>() * 0.5;
+ let loss: f64 = features
+ .iter()
+ .map(|&x| (x as f64) * (x as f64))
+ .sum::<f64>()
+ * 0.5;
epoch_loss += loss;
// Gradient: d(0.5 * x^2)/dx = x; step: x' = x - lr * x
@@ -680,11 +689,7 @@ impl ColdTierEwc {
/// Compute Fisher information diagonal from gradient samples.
///
/// Each entry in `gradients` is one sample's gradient for one parameter row.
- pub fn compute_fisher(
- &mut self,
- gradients: &[Vec<f32>],
- sample_count: usize,
- ) -> Result<()> {
+ pub fn compute_fisher(&mut self, gradients: &[Vec<f32>], sample_count: usize) -> Result<()> {
if gradients.is_empty() {
return Ok(());
}
@@ -749,11 +754,7 @@ impl ColdTierEwc {
}
/// Compute the EWC gradient for a specific parameter row.
- pub fn gradient(
- &mut self,
- current_weights: &[Vec<f32>],
- param_idx: usize,
- ) -> Result<Vec<f32>> {
+ pub fn gradient(&mut self, current_weights: &[Vec<f32>], param_idx: usize) -> Result<Vec<f32>> {
if !self.active || param_idx >= self.num_params {
return Ok(vec![0.0; self.dim]);
}
@@ -868,8 +869,7 @@ mod tests {
..Default::default()
};
- let mut trainer =
- ColdTierTrainer::new(&storage_path, dim, num_nodes, config).unwrap();
+ let mut trainer = ColdTierTrainer::new(&storage_path, dim, num_nodes, config).unwrap();
// Write initial features
for nid in 0..num_nodes {
@@ -879,8 +879,9 @@ mod tests {
trainer.storage.flush().unwrap();
// Build a simple chain adjacency
- let adjacency: Vec<(usize, usize)> =
- (0..num_nodes.saturating_sub(1)).map(|i| (i, i + 1)).collect();
+ let adjacency: Vec<(usize, usize)> = (0..num_nodes.saturating_sub(1))
+ .map(|i| (i, i + 1))
+ .collect();
let result = trainer.train_epoch(&adjacency, 0.1);
diff --git a/crates/ruvector-gnn/src/mmap.rs b/crates/ruvector-gnn/src/mmap.rs
index 559a58a59..5254675e5 100644
--- a/crates/ruvector-gnn/src/mmap.rs
+++ b/crates/ruvector-gnn/src/mmap.rs
@@ -485,7 +485,8 @@ impl MmapGradientAccumulator {
"Gradient length must match d_embed"
);
- let offset = self.grad_offset(node_id)
+ let offset = self
+ .grad_offset(node_id)
.expect("node_id out of bounds or offset overflow");
let lock_idx = (node_id as usize) / self.lock_granularity;
@@ -495,8 +496,10 @@ impl MmapGradientAccumulator {
// Safety: We validated node_id bounds and offset above, and hold the write lock
unsafe {
let mmap = &mut *self.grad_mmap.get();
- assert!(offset + self.d_embed * std::mem::size_of::<f32>() <= mmap.len(),
- "gradient write would exceed mmap bounds");
+ assert!(
+ offset + self.d_embed * std::mem::size_of::<f32>() <= mmap.len(),
+ "gradient write would exceed mmap bounds"
+ );
let ptr = mmap.as_mut_ptr().add(offset) as *mut f32;
let grad_slice = std::slice::from_raw_parts_mut(ptr, self.d_embed);
@@ -555,7 +558,8 @@ impl MmapGradientAccumulator {
/// # Returns
/// Slice containing the gradient vector
pub fn get_grad(&self, node_id: u64) -> &[f32] {
- let offset = self.grad_offset(node_id)
+ let offset = self
+ .grad_offset(node_id)
.expect("node_id out of bounds or offset overflow");
let lock_idx = (node_id as usize) / self.lock_granularity;
@@ -565,8 +569,10 @@ impl MmapGradientAccumulator {
// Safety: We validated node_id bounds and offset above, and hold the read lock
unsafe {
let mmap = &*self.grad_mmap.get();
- assert!(offset + self.d_embed * std::mem::size_of::<f32>() <= mmap.len(),
- "gradient read would exceed mmap bounds");
+ assert!(
+ offset + self.d_embed * std::mem::size_of::<f32>() <= mmap.len(),
+ "gradient read would exceed mmap bounds"
+ );
let ptr = mmap.as_ptr().add(offset) as *const f32;
std::slice::from_raw_parts(ptr, self.d_embed)
}
diff --git a/crates/ruvector-graph-transformer-node/src/lib.rs b/crates/ruvector-graph-transformer-node/src/lib.rs
index a8588ab11..603db1507 100644
--- a/crates/ruvector-graph-transformer-node/src/lib.rs
+++ b/crates/ruvector-graph-transformer-node/src/lib.rs
@@ -14,9 +14,7 @@ mod transformer;
use napi::bindgen_prelude::*;
use napi_derive::napi;
-use transformer::{
- CoreGraphTransformer, Edge as CoreEdge, PipelineStage as CorePipelineStage,
-};
+use transformer::{CoreGraphTransformer, Edge as CoreEdge, PipelineStage as CorePipelineStage};
/// Graph Transformer with proof-gated operations for Node.js.
///
@@ -107,9 +105,10 @@ impl GraphTransformer {
/// ```
#[napi]
pub fn prove_dimension(&mut self, expected: u32, actual: u32) -> Result<serde_json::Value> {
- let result = self.inner.prove_dimension(expected, actual).map_err(|e| {
- Error::new(Status::GenericFailure, format!("{}", e))
- })?;
+ let result = self
+ .inner
+ .prove_dimension(expected, actual)
+ .map_err(|e| Error::new(Status::GenericFailure, format!("{}", e)))?;
serde_json::to_value(&result).map_err(|e| {
Error::new(
Status::GenericFailure,
@@ -156,10 +155,7 @@ impl GraphTransformer {
/// console.log(composed.chain_name); // "embed >> align"
/// ```
#[napi]
- pub fn compose_proofs(
- &mut self,
- stages: Vec<serde_json::Value>,
- ) -> Result<serde_json::Value> {
+ pub fn compose_proofs(&mut self, stages: Vec<serde_json::Value>) -> Result<serde_json::Value> {
let rust_stages: Vec<CorePipelineStage> = stages
.into_iter()
.map(|v| {
@@ -337,9 +333,8 @@ impl GraphTransformer {
let rust_edges: Vec<CoreEdge> = edges
.into_iter()
.map(|v| {
- serde_json::from_value(v).map_err(|e| {
- Error::new(Status::InvalidArg, format!("Invalid edge: {}", e))
- })
+ serde_json::from_value(v)
+ .map_err(|e| Error::new(Status::InvalidArg, format!("Invalid edge: {}", e)))
})
.collect::<Result<Vec<_>>>()?;
@@ -546,12 +541,7 @@ impl GraphTransformer {
/// const d = gt.productManifoldDistance([1, 0, 0, 1], [0, 1, 1, 0], [0.0, -1.0]);
/// ```
#[napi]
- pub fn product_manifold_distance(
- &self,
- a: Vec<f64>,
- b: Vec<f64>,
- curvatures: Vec<f64>,
- ) -> f64 {
+ pub fn product_manifold_distance(&self, a: Vec<f64>, b: Vec<f64>, curvatures: Vec<f64>) -> f64 {
self.inner.product_manifold_distance(&a, &b, &curvatures)
}
@@ -583,16 +573,15 @@ impl GraphTransformer {
let rust_edges: Vec<CoreEdge> = edges
.into_iter()
.map(|v| {
- serde_json::from_value(v).map_err(|e| {
- Error::new(Status::InvalidArg, format!("Invalid edge: {}", e))
- })
+ serde_json::from_value(v)
+ .map_err(|e| Error::new(Status::InvalidArg, format!("Invalid edge: {}", e)))
})
.collect::<Result<Vec<_>>>()?;
let curvatures = vec![0.0, -1.0]; // default mixed curvatures
- let result =
- self.inner
- .product_manifold_attention(&features, &rust_edges, &curvatures);
+ let result = self
+ .inner
+ .product_manifold_attention(&features, &rust_edges, &curvatures);
serde_json::to_value(&result).map_err(|e| {
Error::new(
@@ -669,9 +658,8 @@ impl GraphTransformer {
let rust_edges: Vec<CoreEdge> = edges
.into_iter()
.map(|v| {
- serde_json::from_value(v).map_err(|e| {
- Error::new(Status::InvalidArg, format!("Invalid edge: {}", e))
- })
+ serde_json::from_value(v)
+ .map_err(|e| Error::new(Status::InvalidArg, format!("Invalid edge: {}", e)))
})
.collect::<Result<Vec<_>>>()?;
@@ -751,15 +739,12 @@ impl GraphTransformer {
let rust_edges: Vec<CoreEdge> = edges
.into_iter()
.map(|v| {
- serde_json::from_value(v).map_err(|e| {
- Error::new(Status::InvalidArg, format!("Invalid edge: {}", e))
- })
+ serde_json::from_value(v)
+ .map_err(|e| Error::new(Status::InvalidArg, format!("Invalid edge: {}", e)))
})
.collect::<Result<Vec<_>>>()?;
- let result = self
- .inner
- .game_theoretic_attention(&features, &rust_edges);
+ let result = self.inner.game_theoretic_attention(&features, &rust_edges);
serde_json::to_value(&result).map_err(|e| {
Error::new(
diff --git a/crates/ruvector-graph-transformer-node/src/transformer.rs b/crates/ruvector-graph-transformer-node/src/transformer.rs
index b6c3dd9d0..c3bce720e 100644
--- a/crates/ruvector-graph-transformer-node/src/transformer.rs
+++ b/crates/ruvector-graph-transformer-node/src/transformer.rs
@@ -80,8 +80,7 @@ impl Attestation {
u64::from_le_bytes(data[64..72].try_into().map_err(|_| "bad timestamp")?);
let verifier_version =
u32::from_le_bytes(data[72..76].try_into().map_err(|_| "bad version")?);
- let reduction_steps =
- u32::from_le_bytes(data[76..80].try_into().map_err(|_| "bad steps")?);
+ let reduction_steps = u32::from_le_bytes(data[76..80].try_into().map_err(|_| "bad steps")?);
let cache_hit_rate_bps =
u16::from_le_bytes(data[80..82].try_into().map_err(|_| "bad rate")?);
@@ -414,7 +413,11 @@ impl CoreGraphTransformer {
let normalized: Vec<f64> = exps.iter().map(|e| e / sum_exp).collect();
let indices: Vec<u32> = top_k.iter().map(|(i, _)| *i as u32).collect();
- let sparsity = if n > 0 { 1.0 - (k as f64 / n as f64) } else { 0.0 };
+ let sparsity = if n > 0 {
+ 1.0 - (k as f64 / n as f64)
+ } else {
+ 0.0
+ };
self.stats.attention_ops += 1;
Ok(AttentionResult {
@@ -452,8 +455,7 @@ impl CoreGraphTransformer {
}
}
for i in 0..n {
- scores[i] =
- alpha * (if i == src { 1.0 } else { 0.0 }) + (1.0 - alpha) * next[i];
+ scores[i] = alpha * (if i == src { 1.0 } else { 0.0 }) + (1.0 - alpha) * next[i];
}
}
scores
@@ -612,7 +614,11 @@ impl CoreGraphTransformer {
for i in 0..n {
for j in 0..n {
let idx = i * n + j;
- let w = if idx < adjacency.len() { adjacency[idx] } else { 0.0 };
+ let w = if idx < adjacency.len() {
+ adjacency[idx]
+ } else {
+ 0.0
+ };
let dw = if spikes[i] && spikes[j] {
0.01
} else if spikes[i] && !spikes[j] {
@@ -980,7 +986,11 @@ impl CoreGraphTransformer {
let f_stat = if rss_u > 1e-10 && df_denom > 0.0 && df_diff > 0.0 {
let raw = ((rss_r - rss_u) / df_diff) / (rss_u / df_denom);
- if raw.is_finite() { raw.max(0.0) } else { 0.0 }
+ if raw.is_finite() {
+ raw.max(0.0)
+ } else {
+ 0.0
+ }
} else {
0.0
};
@@ -1185,8 +1195,16 @@ mod tests {
fn test_compose() {
let mut gt = CoreGraphTransformer::new();
let stages = vec![
- PipelineStage { name: "a".into(), input_type_id: 1, output_type_id: 2 },
- PipelineStage { name: "b".into(), input_type_id: 2, output_type_id: 3 },
+ PipelineStage {
+ name: "a".into(),
+ input_type_id: 1,
+ output_type_id: 2,
+ },
+ PipelineStage {
+ name: "b".into(),
+ input_type_id: 2,
+ output_type_id: 3,
+ },
];
let r = gt.compose_proofs(&stages).unwrap();
assert_eq!(r.stages_verified, 2);
@@ -1195,7 +1213,9 @@ mod tests {
#[test]
fn test_sublinear() {
let mut gt = CoreGraphTransformer::new();
- let r = gt.sublinear_attention(&[1.0, 0.5], &[vec![1], vec![0]], 2, 1).unwrap();
+ let r = gt
+ .sublinear_attention(&[1.0, 0.5], &[vec![1], vec![0]], 2, 1)
+ .unwrap();
assert_eq!(r.scores.len(), 1);
}
@@ -1290,10 +1310,7 @@ mod tests {
let mut gt = CoreGraphTransformer::new();
let features = vec![1.0, 0.5, 0.8];
let timestamps = vec![1.0, 2.0, 3.0];
- let edges = vec![
- Edge { src: 0, tgt: 1 },
- Edge { src: 1, tgt: 2 },
- ];
+ let edges = vec![Edge { src: 0, tgt: 1 }, Edge { src: 1, tgt: 2 }];
let out = gt.causal_attention_graph(&features, &timestamps, &edges);
assert_eq!(out.len(), 3);
}
@@ -1304,7 +1321,11 @@ mod tests {
let mut history = Vec::new();
for t in 0..10 {
let x = (t as f64 * 0.5).sin();
- let y = if t > 0 { ((t - 1) as f64 * 0.5).sin() * 0.8 } else { 0.0 };
+ let y = if t > 0 {
+ ((t - 1) as f64 * 0.5).sin() * 0.8
+ } else {
+ 0.0
+ };
history.push(x);
history.push(y);
}
@@ -1316,10 +1337,7 @@ mod tests {
fn test_game_theoretic_attention() {
let mut gt = CoreGraphTransformer::new();
let features = vec![1.0, 0.5, 0.8];
- let edges = vec![
- Edge { src: 0, tgt: 1 },
- Edge { src: 1, tgt: 2 },
- ];
+ let edges = vec![Edge { src: 0, tgt: 1 }, Edge { src: 1, tgt: 2 }];
let result = gt.game_theoretic_attention(&features, &edges);
assert_eq!(result.allocations.len(), 3);
assert_eq!(result.utilities.len(), 3);
diff --git a/crates/ruvector-graph-transformer-wasm/src/lib.rs b/crates/ruvector-graph-transformer-wasm/src/lib.rs
index 51bb70042..ed3d0146d 100644
--- a/crates/ruvector-graph-transformer-wasm/src/lib.rs
+++ b/crates/ruvector-graph-transformer-wasm/src/lib.rs
@@ -45,9 +45,7 @@
mod transformer;
mod utils;
-use transformer::{
- CoreGraphTransformer, Edge, PipelineStage as CorePipelineStage,
-};
+use transformer::{CoreGraphTransformer, Edge, PipelineStage as CorePipelineStage};
use wasm_bindgen::prelude::*;
// ---------------------------------------------------------------------------
@@ -108,18 +106,18 @@ impl JsGraphTransformer {
/// Returns a serialized `ProofGate` object.
pub fn create_proof_gate(&mut self, dim: u32) -> Result {
let gate = self.inner.create_proof_gate(dim);
- serde_wasm_bindgen::to_value(&gate)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&gate).map_err(|e| JsError::new(&e.to_string()))
}
/// Prove that two dimensions are equal.
///
/// Returns `{ proof_id, expected, actual, verified }`.
pub fn prove_dimension(&mut self, expected: u32, actual: u32) -> Result {
- let result = self.inner.prove_dimension(expected, actual)
+ let result = self
+ .inner
+ .prove_dimension(expected, actual)
.map_err(|e| JsError::new(&format!("{e}")))?;
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Create a proof attestation for a given proof ID.
@@ -142,13 +140,13 @@ impl JsGraphTransformer {
/// `stages` is a JS array of `{ name, input_type_id, output_type_id }`.
/// Returns a composed proof with the overall input/output types.
pub fn compose_proofs(&mut self, stages: JsValue) -> Result {
- let rust_stages: Vec<CorePipelineStage> =
- serde_wasm_bindgen::from_value(stages)
- .map_err(|e| JsError::new(&format!("invalid stages: {e}")))?;
- let result = self.inner.compose_proofs(&rust_stages)
+ let rust_stages: Vec<CorePipelineStage> = serde_wasm_bindgen::from_value(stages)
+ .map_err(|e| JsError::new(&format!("invalid stages: {e}")))?;
+ let result = self
+ .inner
+ .compose_proofs(&rust_stages)
.map_err(|e| JsError::new(&format!("{e}")))?;
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
// ===================================================================
@@ -170,10 +168,11 @@ impl JsGraphTransformer {
.map_err(|e| JsError::new(&format!("invalid query: {e}")))?;
let ed: Vec<Vec<u32>> = serde_wasm_bindgen::from_value(edges)
.map_err(|e| JsError::new(&format!("invalid edges: {e}")))?;
- let result = self.inner.sublinear_attention(&q, &ed, dim, k)
+ let result = self
+ .inner
+ .sublinear_attention(&q, &ed, dim, k)
.map_err(|e| JsError::new(&format!("{e}")))?;
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Compute personalized PageRank scores from a source node.
@@ -188,8 +187,7 @@ impl JsGraphTransformer {
let adj: Vec> = serde_wasm_bindgen::from_value(adjacency)
.map_err(|e| JsError::new(&format!("invalid adjacency: {e}")))?;
let scores = self.inner.ppr_scores(source, &adj, alpha);
- serde_wasm_bindgen::to_value(&scores)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&scores).map_err(|e| JsError::new(&e.to_string()))
}
// ===================================================================
@@ -213,10 +211,11 @@ impl JsGraphTransformer {
.map_err(|e| JsError::new(&format!("invalid momenta: {e}")))?;
let ed: Vec<Edge> = serde_wasm_bindgen::from_value(edges)
.map_err(|e| JsError::new(&format!("invalid edges: {e}")))?;
- let result = self.inner.hamiltonian_step_graph(&pos, &mom, &ed, 0.01)
+ let result = self
+ .inner
+ .hamiltonian_step_graph(&pos, &mom, &ed, 0.01)
.map_err(|e| JsError::new(&format!("{e}")))?;
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Verify energy conservation between two states.
@@ -228,9 +227,10 @@ impl JsGraphTransformer {
after: f64,
tolerance: f64,
) -> Result {
- let v = self.inner.verify_energy_conservation(before, after, tolerance);
- serde_wasm_bindgen::to_value(&v)
- .map_err(|e| JsError::new(&e.to_string()))
+ let v = self
+ .inner
+ .verify_energy_conservation(before, after, tolerance);
+ serde_wasm_bindgen::to_value(&v).map_err(|e| JsError::new(&e.to_string()))
}
// ===================================================================
@@ -251,8 +251,7 @@ impl JsGraphTransformer {
let adj: Vec<f64> = serde_wasm_bindgen::from_value(adjacency)
.map_err(|e| JsError::new(&format!("invalid adjacency: {e}")))?;
let result = self.inner.spiking_step(&feats, &adj, 1.0);
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Hebbian weight update.
@@ -271,8 +270,7 @@ impl JsGraphTransformer {
let w: Vec<f64> = serde_wasm_bindgen::from_value(weights)
.map_err(|e| JsError::new(&format!("invalid weights: {e}")))?;
let result = self.inner.hebbian_update(&pre_v, &post_v, &w, 0.01);
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
// ===================================================================
@@ -297,8 +295,7 @@ impl JsGraphTransformer {
let ed: Vec<Edge> = serde_wasm_bindgen::from_value(edges)
.map_err(|e| JsError::new(&format!("invalid edges: {e}")))?;
let result = self.inner.causal_attention_graph(&feats, &ts, &ed);
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Extract Granger causality DAG from attention history.
@@ -314,8 +311,7 @@ impl JsGraphTransformer {
let hist: Vec<f64> = serde_wasm_bindgen::from_value(attention_history)
.map_err(|e| JsError::new(&format!("invalid attention_history: {e}")))?;
let dag = self.inner.granger_extract(&hist, num_nodes, num_steps);
- serde_wasm_bindgen::to_value(&dag)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&dag).map_err(|e| JsError::new(&e.to_string()))
}
// ===================================================================
@@ -337,9 +333,10 @@ impl JsGraphTransformer {
let ed: Vec<Edge> = serde_wasm_bindgen::from_value(edges)
.map_err(|e| JsError::new(&format!("invalid edges: {e}")))?;
let curvatures = vec![0.0, -1.0]; // default mixed curvatures
- let result = self.inner.product_manifold_attention(&feats, &ed, &curvatures);
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ let result = self
+ .inner
+ .product_manifold_attention(&feats, &ed, &curvatures);
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Product manifold distance between two points.
@@ -381,10 +378,11 @@ impl JsGraphTransformer {
.map_err(|e| JsError::new(&format!("invalid targets: {e}")))?;
let w: Vec<f64> = serde_wasm_bindgen::from_value(weights)
.map_err(|e| JsError::new(&format!("invalid weights: {e}")))?;
- let result = self.inner.verified_training_step(&f, &t, &w, 0.001)
+ let result = self
+ .inner
+ .verified_training_step(&f, &t, &w, 0.001)
.map_err(|e| JsError::new(&format!("{e}")))?;
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// A single verified SGD step (raw weights + gradients).
@@ -400,10 +398,11 @@ impl JsGraphTransformer {
.map_err(|e| JsError::new(&format!("invalid weights: {e}")))?;
let g: Vec<f64> = serde_wasm_bindgen::from_value(gradients)
.map_err(|e| JsError::new(&format!("invalid gradients: {e}")))?;
- let result = self.inner.verified_step(&w, &g, lr)
+ let result = self
+ .inner
+ .verified_step(&w, &g, lr)
.map_err(|e| JsError::new(&format!("{e}")))?;
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
// ===================================================================
@@ -424,8 +423,7 @@ impl JsGraphTransformer {
let ed: Vec<Edge> = serde_wasm_bindgen::from_value(edges)
.map_err(|e| JsError::new(&format!("invalid edges: {e}")))?;
let result = self.inner.game_theoretic_attention(&feats, &ed);
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
// ===================================================================
@@ -438,8 +436,7 @@ impl JsGraphTransformer {
/// cache_misses, attention_ops, physics_ops, bio_ops, training_steps }`.
pub fn stats(&self) -> Result {
let s = self.inner.stats();
- serde_wasm_bindgen::to_value(&s)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&s).map_err(|e| JsError::new(&e.to_string()))
}
/// Reset all internal state (caches, counters, gates).
diff --git a/crates/ruvector-graph-transformer-wasm/src/transformer.rs b/crates/ruvector-graph-transformer-wasm/src/transformer.rs
index 1ddf7ee4c..6040134c5 100644
--- a/crates/ruvector-graph-transformer-wasm/src/transformer.rs
+++ b/crates/ruvector-graph-transformer-wasm/src/transformer.rs
@@ -90,8 +90,7 @@ impl Attestation {
u64::from_le_bytes(data[64..72].try_into().map_err(|_| "bad timestamp")?);
let verifier_version =
u32::from_le_bytes(data[72..76].try_into().map_err(|_| "bad version")?);
- let reduction_steps =
- u32::from_le_bytes(data[76..80].try_into().map_err(|_| "bad steps")?);
+ let reduction_steps = u32::from_le_bytes(data[76..80].try_into().map_err(|_| "bad steps")?);
let cache_hit_rate_bps =
u16::from_le_bytes(data[80..82].try_into().map_err(|_| "bad rate")?);
@@ -429,7 +428,11 @@ impl CoreGraphTransformer {
let normalized: Vec<f64> = exps.iter().map(|e| e / sum_exp).collect();
let indices: Vec<u32> = top_k.iter().map(|(i, _)| *i as u32).collect();
- let sparsity = if n > 0 { 1.0 - (k as f64 / n as f64) } else { 0.0 };
+ let sparsity = if n > 0 {
+ 1.0 - (k as f64 / n as f64)
+ } else {
+ 0.0
+ };
self.stats.attention_ops += 1;
Ok(AttentionResult {
@@ -467,8 +470,7 @@ impl CoreGraphTransformer {
}
}
for i in 0..n {
- scores[i] =
- alpha * (if i == src { 1.0 } else { 0.0 }) + (1.0 - alpha) * next[i];
+ scores[i] = alpha * (if i == src { 1.0 } else { 0.0 }) + (1.0 - alpha) * next[i];
}
}
scores
@@ -656,7 +658,11 @@ impl CoreGraphTransformer {
for i in 0..n {
for j in 0..n {
let idx = i * n + j;
- let w = if idx < adjacency.len() { adjacency[idx] } else { 0.0 };
+ let w = if idx < adjacency.len() {
+ adjacency[idx]
+ } else {
+ 0.0
+ };
let dw = if spikes[i] && spikes[j] {
0.01 // co-activation potentiation
} else if spikes[i] && !spikes[j] {
@@ -1047,7 +1053,11 @@ impl CoreGraphTransformer {
let f_stat = if rss_u > 1e-10 && df_denom > 0.0 && df_diff > 0.0 {
let raw = ((rss_r - rss_u) / df_diff) / (rss_u / df_denom);
- if raw.is_finite() { raw.max(0.0) } else { 0.0 }
+ if raw.is_finite() {
+ raw.max(0.0)
+ } else {
+ 0.0
+ }
} else {
0.0
};
@@ -1268,8 +1278,16 @@ mod tests {
fn test_compose() {
let mut gt = CoreGraphTransformer::new();
let stages = vec![
- PipelineStage { name: "a".into(), input_type_id: 1, output_type_id: 2 },
- PipelineStage { name: "b".into(), input_type_id: 2, output_type_id: 3 },
+ PipelineStage {
+ name: "a".into(),
+ input_type_id: 1,
+ output_type_id: 2,
+ },
+ PipelineStage {
+ name: "b".into(),
+ input_type_id: 2,
+ output_type_id: 3,
+ },
];
let r = gt.compose_proofs(&stages).unwrap();
assert_eq!(r.stages_verified, 2);
@@ -1278,7 +1296,9 @@ mod tests {
#[test]
fn test_sublinear() {
let mut gt = CoreGraphTransformer::new();
- let r = gt.sublinear_attention(&[1.0, 0.5], &[vec![1], vec![0]], 2, 1).unwrap();
+ let r = gt
+ .sublinear_attention(&[1.0, 0.5], &[vec![1], vec![0]], 2, 1)
+ .unwrap();
assert_eq!(r.scores.len(), 1);
}
@@ -1373,10 +1393,7 @@ mod tests {
let mut gt = CoreGraphTransformer::new();
let features = vec![1.0, 0.5, 0.8];
let timestamps = vec![1.0, 2.0, 3.0];
- let edges = vec![
- Edge { src: 0, tgt: 1 },
- Edge { src: 1, tgt: 2 },
- ];
+ let edges = vec![Edge { src: 0, tgt: 1 }, Edge { src: 1, tgt: 2 }];
let out = gt.causal_attention_graph(&features, &timestamps, &edges);
assert_eq!(out.len(), 3);
}
@@ -1388,7 +1405,11 @@ mod tests {
let mut history = Vec::new();
for t in 0..10 {
let x = (t as f64 * 0.5).sin();
- let y = if t > 0 { ((t - 1) as f64 * 0.5).sin() * 0.8 } else { 0.0 };
+ let y = if t > 0 {
+ ((t - 1) as f64 * 0.5).sin() * 0.8
+ } else {
+ 0.0
+ };
history.push(x);
history.push(y);
}
@@ -1400,10 +1421,7 @@ mod tests {
fn test_game_theoretic_attention() {
let mut gt = CoreGraphTransformer::new();
let features = vec![1.0, 0.5, 0.8];
- let edges = vec![
- Edge { src: 0, tgt: 1 },
- Edge { src: 1, tgt: 2 },
- ];
+ let edges = vec![Edge { src: 0, tgt: 1 }, Edge { src: 1, tgt: 2 }];
let result = gt.game_theoretic_attention(&features, &edges);
assert_eq!(result.allocations.len(), 3);
assert_eq!(result.utilities.len(), 3);
diff --git a/crates/ruvector-graph-transformer-wasm/tests/web.rs b/crates/ruvector-graph-transformer-wasm/tests/web.rs
index 6b77c11e4..b09b752b4 100644
--- a/crates/ruvector-graph-transformer-wasm/tests/web.rs
+++ b/crates/ruvector-graph-transformer-wasm/tests/web.rs
@@ -23,9 +23,7 @@ fn test_proof_gate_roundtrip() {
// Prove with some data
let data: Vec = vec![0.5; 64];
- let att = gt
- .prove_and_mutate(gate, &data)
- .expect("prove_and_mutate");
+ let att = gt.prove_and_mutate(gate, &data).expect("prove_and_mutate");
assert!(!att.is_undefined());
assert!(!att.is_null());
diff --git a/crates/ruvector-graph-transformer/src/biological.rs b/crates/ruvector-graph-transformer/src/biological.rs
index ac72642f3..ebedbef56 100644
--- a/crates/ruvector-graph-transformer/src/biological.rs
+++ b/crates/ruvector-graph-transformer/src/biological.rs
@@ -16,7 +16,9 @@
//! - [`DendriticAttention`]: Multi-compartment dendritic attention model
#[cfg(feature = "biological")]
-use ruvector_verified::{ProofEnvironment, prove_dim_eq, proof_store::create_attestation, ProofAttestation};
+use ruvector_verified::{
+ proof_store::create_attestation, prove_dim_eq, ProofAttestation, ProofEnvironment,
+};
#[cfg(feature = "biological")]
use crate::config::BiologicalConfig;
@@ -65,7 +67,9 @@ impl EffectiveOperator {
}
// Initialize random-ish vector (deterministic for reproducibility)
- let mut v: Vec<f32> = (0..n).map(|i| ((i as f32 + 1.0).sin()).abs() + 0.1).collect();
+ let mut v: Vec<f32> = (0..n)
+ .map(|i| ((i as f32 + 1.0).sin()).abs() + 0.1)
+ .collect();
let mut eigenvalue_estimates = Vec::with_capacity(self.num_iterations);
for _ in 0..self.num_iterations {
@@ -102,8 +106,8 @@ impl EffectiveOperator {
}
let estimated = *eigenvalue_estimates.last().unwrap();
- let mean: f32 = eigenvalue_estimates.iter().sum::<f32>()
- / eigenvalue_estimates.len() as f32;
+ let mean: f32 =
+ eigenvalue_estimates.iter().sum::<f32>() / eigenvalue_estimates.len() as f32;
let variance: f32 = eigenvalue_estimates
.iter()
.map(|x| (x - mean).powi(2))
@@ -388,7 +392,11 @@ impl HebbianRule {
// theta slides toward mean post^2 but we use theta_init as fixed approx
lr * pre * post * (post - theta_init)
}
- HebbianRule::STDP { a_plus, a_minus, tau } => {
+ HebbianRule::STDP {
+ a_plus,
+ a_minus,
+ tau,
+ } => {
if let Some(dt) = dt_spike {
if dt > 0.0 {
a_plus * (-dt / tau).exp() * lr
@@ -563,18 +571,15 @@ impl StdpEdgeUpdater {
}
}
- let new_edges_list: Vec<(usize, usize)> =
- keep_indices.iter().map(|&i| edges[i]).collect();
- let new_weights_list: Vec =
- keep_indices.iter().map(|&i| weights[i]).collect();
+ let new_edges_list: Vec<(usize, usize)> = keep_indices.iter().map(|&i| edges[i]).collect();
+ let new_weights_list: Vec = keep_indices.iter().map(|&i| weights[i]).collect();
*edges = new_edges_list;
*weights = new_weights_list;
// Phase 2: Grow new edges between highly active but unconnected nodes
let mut grown = Vec::new();
- let existing: std::collections::HashSet<(usize, usize)> =
- edges.iter().cloned().collect();
+ let existing: std::collections::HashSet<(usize, usize)> = edges.iter().cloned().collect();
// Find highly active nodes
let mut active_nodes: Vec<(usize, f32)> = node_activity
@@ -593,7 +598,8 @@ impl StdpEdgeUpdater {
}
let (ni, _) = active_nodes[i];
let (nj, _) = active_nodes[j];
- if ni < num_nodes && nj < num_nodes
+ if ni < num_nodes
+ && nj < num_nodes
&& !existing.contains(&(ni, nj))
&& !existing.contains(&(nj, ni))
{
@@ -699,10 +705,7 @@ impl DendriticAttention {
/// Each neuron's input features are split across dendritic branches according
/// to the assignment strategy. Branch activations are computed as weighted sums,
/// then integrated non-linearly at the soma.
- pub fn forward(
- &mut self,
- node_features: &[Vec],
- ) -> Result {
+ pub fn forward(&mut self, node_features: &[Vec]) -> Result {
let n = node_features.len();
if n == 0 {
return Ok(DendriticResult {
@@ -752,9 +755,7 @@ impl DendriticAttention {
} else {
// Subthreshold: linear weighted sum
let total_activation: f32 = branch_activations.iter().sum();
- let scale = (total_activation / self.num_branches as f32)
- .abs()
- .min(1.0);
+ let scale = (total_activation / self.num_branches as f32).abs().min(1.0);
features.iter().map(|&x| x * scale).collect()
};
@@ -946,8 +947,11 @@ impl SpikingGraphAttention {
}
// Apply inhibition strategy
- self.inhibition
- .apply(&mut self.membrane_potentials, &mut spikes, self.config.threshold);
+ self.inhibition.apply(
+ &mut self.membrane_potentials,
+ &mut spikes,
+ self.config.threshold,
+ );
// Update weights via STDP
let mut new_weights = weights.to_vec();
@@ -985,9 +989,9 @@ impl SpikingGraphAttention {
}
// Verify weight bounds
- let all_bounded = new_weights.iter().all(|row| {
- row.iter().all(|&w| w.abs() <= self.config.max_weight)
- });
+ let all_bounded = new_weights
+ .iter()
+ .all(|row| row.iter().all(|&w| w.abs() <= self.config.max_weight));
let attestation = if all_bounded {
let dim_u32 = self.dim as u32;
@@ -1070,8 +1074,8 @@ impl HebbianLayer {
let decay = 0.01;
for i in 0..weights.len().min(self.dim) {
- let hebb = pre_activity[i % pre_activity.len()]
- * post_activity[i % post_activity.len()];
+ let hebb =
+ pre_activity[i % pre_activity.len()] * post_activity[i % post_activity.len()];
weights[i] += self.learning_rate * (hebb - decay * weights[i]);
weights[i] = weights[i].clamp(-self.max_weight, self.max_weight);
}
@@ -1211,16 +1215,15 @@ mod tests {
max_weight: 5.0,
};
let mut sga = SpikingGraphAttention::with_inhibition(
- 10, 4, config, InhibitionStrategy::WinnerTakeAll { k: 3 },
+ 10,
+ 4,
+ config,
+ InhibitionStrategy::WinnerTakeAll { k: 3 },
);
// Create features that will cause many spikes
- let features: Vec> = (0..10)
- .map(|i| vec![0.5 + 0.1 * i as f32; 4])
- .collect();
- let weights: Vec> = (0..10)
- .map(|_| vec![0.1; 10])
- .collect();
+ let features: Vec> = (0..10).map(|i| vec![0.5 + 0.1 * i as f32; 4]).collect();
+ let weights: Vec> = (0..10).map(|_| vec![0.1; 10]).collect();
let adjacency: Vec<(usize, usize)> = (0..10)
.flat_map(|i| (0..10).filter(move |&j| i != j).map(move |j| (i, j)))
.collect();
@@ -1255,12 +1258,13 @@ mod tests {
max_weight: 5.0,
};
let mut sga = SpikingGraphAttention::with_inhibition(
- 5, 4, config, InhibitionStrategy::Lateral { strength: 0.8 },
+ 5,
+ 4,
+ config,
+ InhibitionStrategy::Lateral { strength: 0.8 },
);
- let features: Vec> = (0..5)
- .map(|_| vec![0.6; 4])
- .collect();
+ let features: Vec> = (0..5).map(|_| vec![0.6; 4]).collect();
let weights = vec![vec![0.1; 5]; 5];
let adjacency = vec![(0, 1), (1, 2), (2, 3), (3, 4)];
@@ -1284,13 +1288,16 @@ mod tests {
max_weight: 5.0,
};
let mut sga = SpikingGraphAttention::with_inhibition(
- 8, 4, config,
- InhibitionStrategy::BalancedEI { ei_ratio: 0.5, dale_law: true },
+ 8,
+ 4,
+ config,
+ InhibitionStrategy::BalancedEI {
+ ei_ratio: 0.5,
+ dale_law: true,
+ },
);
- let features: Vec> = (0..8)
- .map(|i| vec![0.4 + 0.05 * i as f32; 4])
- .collect();
+ let features: Vec> = (0..8).map(|i| vec![0.4 + 0.05 * i as f32; 4]).collect();
let weights = vec![vec![0.1; 8]; 8];
let adjacency: Vec<(usize, usize)> = (0..8)
.flat_map(|i| (0..8).filter(move |&j| i != j).map(move |j| (i, j)))
@@ -1316,17 +1323,19 @@ mod tests {
#[test]
fn test_stdp_edge_updater_weight_update() {
let mut updater = StdpEdgeUpdater::new(
- 0.001, // prune_threshold
- 0.5, // growth_threshold
+ 0.001, // prune_threshold
+ 0.5, // growth_threshold
(-1.0, 1.0), // weight_bounds
- 5, // max_new_edges_per_epoch
+ 5, // max_new_edges_per_epoch
);
let edges = vec![(0, 1), (1, 2), (0, 2)];
let mut weights = vec![0.5, 0.3, 0.1];
let spike_times = vec![1.0, 2.0, 1.5]; // node 0 spikes at t=1, node 1 at t=2, etc.
- let att = updater.update_weights(&edges, &mut weights, &spike_times).unwrap();
+ let att = updater
+ .update_weights(&edges, &mut weights, &spike_times)
+ .unwrap();
// Weights should have been modified by STDP
assert!(weights[0] != 0.5 || weights[1] != 0.3 || weights[2] != 0.1);
@@ -1341,10 +1350,10 @@ mod tests {
#[test]
fn test_stdp_edge_updater_rewire_topology() {
let mut updater = StdpEdgeUpdater::new(
- 0.05, // prune_threshold: prune edges with |w| < 0.05
- 0.3, // growth_threshold: nodes with activity > 0.3 can grow edges
+ 0.05, // prune_threshold: prune edges with |w| < 0.05
+ 0.3, // growth_threshold: nodes with activity > 0.3 can grow edges
(-1.0, 1.0),
- 3, // max 3 new edges per epoch
+ 3, // max 3 new edges per epoch
);
let mut edges = vec![(0, 1), (1, 2), (2, 3), (0, 3)];
@@ -1358,11 +1367,22 @@ mod tests {
assert!(scope_att.is_valid());
let (pruned, grown, att) = updater
- .rewire_topology(&mut edges, &mut weights, num_nodes, &node_activity, &scope_att)
+ .rewire_topology(
+ &mut edges,
+ &mut weights,
+ num_nodes,
+ &node_activity,
+ &scope_att,
+ )
.unwrap();
// Should have pruned edges with weight < 0.05
- assert_eq!(pruned.len(), 2, "expected 2 pruned edges, got {}", pruned.len());
+ assert_eq!(
+ pruned.len(),
+ 2,
+ "expected 2 pruned edges, got {}",
+ pruned.len()
+ );
assert!(pruned.contains(&(1, 2)));
assert!(pruned.contains(&(0, 3)));
@@ -1394,9 +1414,8 @@ mod tests {
// happy path works correctly.
let mut env = ProofEnvironment::new();
let scope_att = ScopeTransitionAttestation::create(&mut env, "test_scope").unwrap();
- let result = updater.rewire_topology(
- &mut edges, &mut weights, 2, &node_activity, &scope_att,
- );
+ let result =
+ updater.rewire_topology(&mut edges, &mut weights, 2, &node_activity, &scope_att);
assert!(result.is_ok());
}
@@ -1417,11 +1436,14 @@ mod tests {
// Run many updates with Oja's rule
for _ in 0..100 {
hebb.update_with_rule(
- &pre, &post, &mut weights,
+ &pre,
+ &post,
+ &mut weights,
&HebbianRule::Oja,
Some(&norm_bound),
None,
- ).unwrap();
+ )
+ .unwrap();
}
// Norm should be within the bound
@@ -1453,11 +1475,14 @@ mod tests {
for _ in 0..200 {
hebb.update_with_rule(
- &pre, &post, &mut weights,
+ &pre,
+ &post,
+ &mut weights,
&HebbianRule::BCM { theta_init: 0.5 },
Some(&norm_bound),
Some(&fisher),
- ).unwrap();
+ )
+ .unwrap();
}
// Fisher-weighted norm should be within bound
@@ -1467,10 +1492,10 @@ mod tests {
#[test]
fn test_dendritic_attention_basic_forward() {
let mut da = DendriticAttention::new(
- 3, // 3 dendritic branches
- 6, // feature dim
+ 3, // 3 dendritic branches
+ 6, // feature dim
BranchAssignment::RoundRobin,
- 0.5, // plateau threshold
+ 0.5, // plateau threshold
);
let features = vec![
@@ -1494,37 +1519,25 @@ mod tests {
#[test]
fn test_dendritic_attention_feature_clustered() {
- let mut da = DendriticAttention::new(
- 2,
- 4,
- BranchAssignment::FeatureClustered,
- 0.3,
- );
+ let mut da = DendriticAttention::new(2, 4, BranchAssignment::FeatureClustered, 0.3);
- let features = vec![
- vec![1.0, 0.9, 0.1, 0.05],
- ];
+ let features = vec![vec![1.0, 0.9, 0.1, 0.05]];
let result = da.forward(&features).unwrap();
assert_eq!(result.output.len(), 1);
assert_eq!(result.output[0].len(), 4);
// High values in first branch should trigger plateau
- assert!(result.plateaus[0], "expected plateau from high-valued features");
+ assert!(
+ result.plateaus[0],
+ "expected plateau from high-valued features"
+ );
}
#[test]
fn test_dendritic_attention_learned_assignment() {
- let mut da = DendriticAttention::new(
- 4,
- 8,
- BranchAssignment::Learned,
- 0.4,
- );
+ let mut da = DendriticAttention::new(4, 8, BranchAssignment::Learned, 0.4);
- let features = vec![
- vec![0.5; 8],
- vec![0.1; 8],
- ];
+ let features = vec![vec![0.5; 8], vec![0.1; 8]];
let result = da.forward(&features).unwrap();
assert_eq!(result.output.len(), 2);
diff --git a/crates/ruvector-graph-transformer/src/economic.rs b/crates/ruvector-graph-transformer/src/economic.rs
index 8126a81fd..6335a878a 100644
--- a/crates/ruvector-graph-transformer/src/economic.rs
+++ b/crates/ruvector-graph-transformer/src/economic.rs
@@ -6,8 +6,9 @@
#[cfg(feature = "economic")]
use ruvector_verified::{
- ProofEnvironment, prove_dim_eq, proof_store::create_attestation, ProofAttestation,
gated::{route_proof, ProofKind, TierDecision},
+ proof_store::create_attestation,
+ prove_dim_eq, ProofAttestation, ProofEnvironment,
};
#[cfg(feature = "economic")]
@@ -141,10 +142,13 @@ impl GameTheoreticAttention {
}
// Compute utility-weighted logits
- let logits: Vec = neighbors.iter().map(|&j| {
- let util = self.config.utility_weight * similarities[i][j];
- util / temperature
- }).collect();
+ let logits: Vec = neighbors
+ .iter()
+ .map(|&j| {
+ let util = self.config.utility_weight * similarities[i][j];
+ util / temperature
+ })
+ .collect();
// Softmax
let max_logit = logits.iter().copied().fold(f32::NEG_INFINITY, f32::max);
@@ -152,7 +156,11 @@ impl GameTheoreticAttention {
let sum_exp: f32 = exp_logits.iter().sum();
for (idx, &j) in neighbors.iter().enumerate() {
- let new_w = if sum_exp > 1e-10 { exp_logits[idx] / sum_exp } else { 1.0 / neighbors.len() as f32 };
+ let new_w = if sum_exp > 1e-10 {
+ exp_logits[idx] / sum_exp
+ } else {
+ 1.0 / neighbors.len() as f32
+ };
let delta = (new_w - weights[i][j]).abs();
max_delta = max_delta.max(delta);
new_weights[i][j] = new_w;
@@ -195,11 +203,24 @@ impl GameTheoreticAttention {
fn compute_similarities(&self, features: &[Vec], n: usize) -> Vec> {
let mut sims = vec![vec![0.0f32; n]; n];
for i in 0..n {
- let norm_i: f32 = features[i].iter().map(|x| x * x).sum::().sqrt().max(1e-8);
+ let norm_i: f32 = features[i]
+ .iter()
+ .map(|x| x * x)
+ .sum::()
+ .sqrt()
+ .max(1e-8);
for j in (i + 1)..n {
- let norm_j: f32 = features[j].iter().map(|x| x * x).sum::().sqrt().max(1e-8);
- let dot: f32 = features[i].iter().zip(features[j].iter())
- .map(|(a, b)| a * b).sum();
+ let norm_j: f32 = features[j]
+ .iter()
+ .map(|x| x * x)
+ .sum::()
+ .sqrt()
+ .max(1e-8);
+ let dot: f32 = features[i]
+ .iter()
+ .zip(features[j].iter())
+ .map(|(a, b)| a * b)
+ .sum();
let sim = dot / (norm_i * norm_j);
sims[i][j] = sim;
sims[j][i] = sim;
@@ -357,7 +378,11 @@ impl ShapleyAttention {
};
// Compute output features weighted by normalized Shapley values
- let total_sv: f32 = shapley_values.iter().map(|v| v.abs()).sum::().max(1e-8);
+ let total_sv: f32 = shapley_values
+ .iter()
+ .map(|v| v.abs())
+ .sum::()
+ .max(1e-8);
let mut output = vec![vec![0.0f32; self.dim]; n];
for i in 0..n {
let weight = shapley_values[i].abs() / total_sv;
@@ -469,7 +494,8 @@ impl IncentiveAlignedMPNN {
if n != stakes.len() {
return Err(GraphTransformerError::Config(format!(
"stakes length mismatch: features={}, stakes={}",
- n, stakes.len(),
+ n,
+ stakes.len(),
)));
}
@@ -487,9 +513,7 @@ impl IncentiveAlignedMPNN {
let mut output = features.to_vec();
// Determine which nodes can participate
- let participating: Vec = stakes.iter()
- .map(|&s| s >= self.min_stake)
- .collect();
+ let participating: Vec = stakes.iter().map(|&s| s >= self.min_stake).collect();
// Compute messages along edges
for &(u, v) in adjacency {
@@ -506,12 +530,8 @@ impl IncentiveAlignedMPNN {
let stake_weight_u = stakes[u] / (stakes[u] + stakes[v]).max(1e-8);
let stake_weight_v = stakes[v] / (stakes[u] + stakes[v]).max(1e-8);
- let msg_u_to_v: Vec = features[u].iter()
- .map(|&x| x * stake_weight_u)
- .collect();
- let msg_v_to_u: Vec = features[v].iter()
- .map(|&x| x * stake_weight_v)
- .collect();
+ let msg_u_to_v: Vec = features[u].iter().map(|&x| x * stake_weight_u).collect();
+ let msg_v_to_u: Vec = features[v].iter().map(|&x| x * stake_weight_v).collect();
// Validate messages
let u_valid = msg_u_to_v.iter().all(|x| x.is_finite());
@@ -644,16 +664,16 @@ mod tests {
};
let mut gta = GameTheoreticAttention::new(2, config);
- let features = vec![
- vec![1.0, 0.0],
- vec![0.0, 1.0],
- vec![0.5, 0.5],
- ];
+ let features = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![0.5, 0.5]];
let edges = vec![(0, 1), (1, 2), (0, 2)];
let result = gta.compute(&features, &edges).unwrap();
// With sufficient iterations, should converge
- assert!(result.converged, "did not converge: max_delta={}", result.max_delta);
+ assert!(
+ result.converged,
+ "did not converge: max_delta={}",
+ result.max_delta
+ );
assert!(result.attestation.is_some());
}
@@ -697,10 +717,7 @@ mod tests {
#[test]
fn test_shapley_efficiency_axiom() {
let mut shapley = ShapleyAttention::new(2, 500);
- let features = vec![
- vec![1.0, 2.0],
- vec![3.0, 4.0],
- ];
+ let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
let mut rng = rand::thread_rng();
let result = shapley.compute(&features, &mut rng).unwrap();
@@ -709,7 +726,8 @@ mod tests {
assert!(
(result.value_sum - result.coalition_value).abs() < tolerance,
"efficiency violated: sum={}, coalition={}",
- result.value_sum, result.coalition_value,
+ result.value_sum,
+ result.coalition_value,
);
assert!(result.efficiency_satisfied);
assert!(result.attestation.is_some());
@@ -737,7 +755,8 @@ mod tests {
assert!(
(result.shapley_values[0] - expected_value).abs() < 1.0,
"single node Shapley: {}, expected ~{}",
- result.shapley_values[0], expected_value,
+ result.shapley_values[0],
+ expected_value,
);
}
@@ -777,10 +796,7 @@ mod tests {
fn test_incentive_mpnn_insufficient_stake() {
let mut mpnn = IncentiveAlignedMPNN::new(2, 5.0, 0.2);
- let features = vec![
- vec![1.0, 2.0],
- vec![3.0, 4.0],
- ];
+ let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
let stakes = vec![10.0, 1.0]; // node 1 below min_stake
let edges = vec![(0, 1)];
@@ -809,10 +825,7 @@ mod tests {
fn test_incentive_mpnn_stake_weighted() {
let mut mpnn = IncentiveAlignedMPNN::new(2, 0.1, 0.1);
- let features = vec![
- vec![1.0, 0.0],
- vec![0.0, 1.0],
- ];
+ let features = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
let stakes = vec![9.0, 1.0]; // node 0 has much higher stake
let edges = vec![(0, 1)];
@@ -824,7 +837,11 @@ mod tests {
let node1_d0 = result.output[1][0];
// Node 0 has stake_weight 0.9, so msg_0_to_1 = [0.9, 0.0]
// Node 1 output = [0.0, 1.0] + [0.9, 0.0] = [0.9, 1.0]
- assert!(node1_d0 > 0.5, "node 1 should receive strong message from node 0: {}", node1_d0);
+ assert!(
+ node1_d0 > 0.5,
+ "node 1 should receive strong message from node 0: {}",
+ node1_d0
+ );
}
#[test]
diff --git a/crates/ruvector-graph-transformer/src/lib.rs b/crates/ruvector-graph-transformer/src/lib.rs
index 2ecc482de..32a337236 100644
--- a/crates/ruvector-graph-transformer/src/lib.rs
+++ b/crates/ruvector-graph-transformer/src/lib.rs
@@ -28,8 +28,8 @@
//! - `economic`: Game-theoretic and incentive-aligned attention
//! - `full`: All features enabled
-pub mod error;
pub mod config;
+pub mod error;
pub mod proof_gated;
#[cfg(feature = "sublinear")]
@@ -57,60 +57,51 @@ pub mod temporal;
pub mod economic;
// Re-exports
-pub use error::{GraphTransformerError, Result};
pub use config::GraphTransformerConfig;
-pub use proof_gated::{ProofGate, ProofGatedMutation, AttestationChain};
+pub use error::{GraphTransformerError, Result};
+pub use proof_gated::{AttestationChain, ProofGate, ProofGatedMutation};
#[cfg(feature = "sublinear")]
pub use sublinear_attention::SublinearGraphAttention;
#[cfg(feature = "physics")]
pub use physics::{
- HamiltonianGraphNet, HamiltonianState, HamiltonianOutput,
- GaugeEquivariantMP, GaugeOutput,
- LagrangianAttention, LagrangianOutput,
- ConservativePdeAttention, PdeOutput,
+ ConservativePdeAttention, GaugeEquivariantMP, GaugeOutput, HamiltonianGraphNet,
+ HamiltonianOutput, HamiltonianState, LagrangianAttention, LagrangianOutput, PdeOutput,
};
#[cfg(feature = "biological")]
pub use biological::{
- SpikingGraphAttention, HebbianLayer,
- EffectiveOperator, InhibitionStrategy, HebbianNormBound,
- HebbianRule, StdpEdgeUpdater, DendriticAttention, BranchAssignment,
- ScopeTransitionAttestation,
+ BranchAssignment, DendriticAttention, EffectiveOperator, HebbianLayer, HebbianNormBound,
+ HebbianRule, InhibitionStrategy, ScopeTransitionAttestation, SpikingGraphAttention,
+ StdpEdgeUpdater,
};
#[cfg(feature = "self-organizing")]
-pub use self_organizing::{MorphogeneticField, DevelopmentalProgram, GraphCoarsener};
+pub use self_organizing::{DevelopmentalProgram, GraphCoarsener, MorphogeneticField};
#[cfg(feature = "verified-training")]
pub use verified_training::{
- VerifiedTrainer, TrainingCertificate, TrainingInvariant,
- RollbackStrategy, InvariantStats, ProofClass, TrainingStepResult,
- EnergyGateResult,
+ EnergyGateResult, InvariantStats, ProofClass, RollbackStrategy, TrainingCertificate,
+ TrainingInvariant, TrainingStepResult, VerifiedTrainer,
};
#[cfg(feature = "manifold")]
pub use manifold::{
- ProductManifoldAttention, ManifoldType, CurvatureAdaptiveRouter,
- GeodesicMessagePassing, RiemannianAdamOptimizer,
- LieGroupEquivariantAttention, LieGroupType,
+ CurvatureAdaptiveRouter, GeodesicMessagePassing, LieGroupEquivariantAttention, LieGroupType,
+ ManifoldType, ProductManifoldAttention, RiemannianAdamOptimizer,
};
#[cfg(feature = "temporal")]
pub use temporal::{
- CausalGraphTransformer, MaskStrategy,
- RetrocausalAttention, BatchModeToken, SmoothedOutput,
- ContinuousTimeODE, OdeOutput,
- GrangerCausalityExtractor, GrangerGraph, GrangerEdge, GrangerCausalityResult,
- AttentionSnapshot,
- TemporalEdgeEvent, EdgeEventType,
- TemporalEmbeddingStore, StorageTier,
- TemporalAttentionResult,
+ AttentionSnapshot, BatchModeToken, CausalGraphTransformer, ContinuousTimeODE, EdgeEventType,
+ GrangerCausalityExtractor, GrangerCausalityResult, GrangerEdge, GrangerGraph, MaskStrategy,
+ OdeOutput, RetrocausalAttention, SmoothedOutput, StorageTier, TemporalAttentionResult,
+ TemporalEdgeEvent, TemporalEmbeddingStore,
};
#[cfg(feature = "economic")]
-pub use economic::{GameTheoreticAttention, ShapleyAttention, IncentiveAlignedMPNN};
+pub use economic::{GameTheoreticAttention, IncentiveAlignedMPNN, ShapleyAttention};
/// Unified graph transformer entry point.
///
diff --git a/crates/ruvector-graph-transformer/src/manifold.rs b/crates/ruvector-graph-transformer/src/manifold.rs
index a9fe89dac..1f0e23ca8 100644
--- a/crates/ruvector-graph-transformer/src/manifold.rs
+++ b/crates/ruvector-graph-transformer/src/manifold.rs
@@ -19,16 +19,14 @@
#[cfg(feature = "manifold")]
use ruvector_attention::{
- ScaledDotProductAttention, HyperbolicAttention, HyperbolicAttentionConfig,
- Attention,
+ Attention, HyperbolicAttention, HyperbolicAttentionConfig, ScaledDotProductAttention,
};
#[cfg(feature = "manifold")]
use ruvector_verified::{
- ProofEnvironment, ProofAttestation,
- prove_dim_eq,
- proof_store::create_attestation,
gated::{route_proof, ProofKind},
+ proof_store::create_attestation,
+ prove_dim_eq, ProofAttestation, ProofEnvironment,
};
#[cfg(feature = "manifold")]
@@ -262,15 +260,21 @@ impl ProductManifoldAttention {
let q_s_proj = project_to_sphere(q_s);
let k_s_proj: Vec> = k_s.iter().map(|k| project_to_sphere(k)).collect();
let k_s_refs: Vec<&[f32]> = k_s_proj.iter().map(|k| k.as_slice()).collect();
- let out_s = self.spherical_attention.compute(&q_s_proj, &k_s_refs, &v_s)
+ let out_s = self
+ .spherical_attention
+ .compute(&q_s_proj, &k_s_refs, &v_s)
.map_err(GraphTransformerError::Attention)?;
// Hyperbolic attention
- let out_h = self.hyperbolic_attention.compute(q_h, &k_h, &v_h)
+ let out_h = self
+ .hyperbolic_attention
+ .compute(q_h, &k_h, &v_h)
.map_err(GraphTransformerError::Attention)?;
// Euclidean attention
- let out_e = self.euclidean_attention.compute(q_e, &k_e, &v_e)
+ let out_e = self
+ .euclidean_attention
+ .compute(q_e, &k_e, &v_e)
.map_err(GraphTransformerError::Attention)?;
// Apply learned mixing weights and normalize
@@ -291,7 +295,11 @@ impl ProductManifoldAttention {
euclidean: 0.0,
};
- Ok(ManifoldAttentionResult { output, curvatures, attestation })
+ Ok(ManifoldAttentionResult {
+ output,
+ curvatures,
+ attestation,
+ })
}
/// Get the total embedding dimension.
@@ -308,7 +316,9 @@ impl ProductManifoldAttention {
pub fn manifold_type(&self) -> ManifoldType {
ManifoldType::Product(vec![
ManifoldType::Sphere,
- ManifoldType::PoincareBall { curvature: self.config.curvature.abs() },
+ ManifoldType::PoincareBall {
+ curvature: self.config.curvature.abs(),
+ },
ManifoldType::PoincareBall { curvature: 0.0 }, // flat = Euclidean
])
}
@@ -490,11 +500,7 @@ impl GeodesicMessagePassing {
}
/// Create with custom Frechet mean parameters.
- pub fn with_frechet_params(
- manifold: ManifoldType,
- max_iter: usize,
- tol: f32,
- ) -> Self {
+ pub fn with_frechet_params(manifold: ManifoldType, max_iter: usize, tol: f32) -> Self {
Self {
manifold,
frechet_max_iter: max_iter,
@@ -537,12 +543,7 @@ impl GeodesicMessagePassing {
/// Parallel transport for spherical manifold.
/// Uses the standard formula for S^n.
- pub fn parallel_transport_sphere(
- &self,
- v: &[f32],
- from: &[f32],
- to: &[f32],
- ) -> Vec {
+ pub fn parallel_transport_sphere(&self, v: &[f32], from: &[f32], to: &[f32]) -> Vec {
let d = dot(from, to).clamp(-1.0, 1.0);
let angle = d.acos();
if angle.abs() < EPS {
@@ -554,7 +555,10 @@ impl GeodesicMessagePassing {
let sum: Vec = from.iter().zip(to.iter()).map(|(&a, &b)| a + b).collect();
let dot_sv = dot(&sum, v);
let coeff = dot_sv / (1.0 + d).max(EPS);
- v.iter().zip(sum.iter()).map(|(&vi, &si)| vi - coeff * si).collect()
+ v.iter()
+ .zip(sum.iter())
+ .map(|(&vi, &si)| vi - coeff * si)
+ .collect()
}
/// Perform one round of geodesic message passing.
@@ -591,22 +595,16 @@ impl GeodesicMessagePassing {
let mut transported: Vec> = Vec::with_capacity(adj[i].len());
for &j in &adj[i] {
let msg = match &self.manifold {
- ManifoldType::PoincareBall { curvature } => {
- self.parallel_transport_poincare(
- &node_features[j],
- &node_features[j],
- &node_features[i],
- *curvature,
- )
- }
+ ManifoldType::PoincareBall { curvature } => self.parallel_transport_poincare(
+ &node_features[j],
+ &node_features[j],
+ &node_features[i],
+ *curvature,
+ ),
ManifoldType::Sphere => {
let from_proj = project_to_sphere(&node_features[j]);
let to_proj = project_to_sphere(&node_features[i]);
- self.parallel_transport_sphere(
- &node_features[j],
- &from_proj,
- &to_proj,
- )
+ self.parallel_transport_sphere(&node_features[j], &from_proj, &to_proj)
}
_ => {
// Euclidean or other: no transport needed.
@@ -760,11 +758,7 @@ impl RiemannianAdamOptimizer {
/// 4. Apply via exponential map.
/// 5. Project back to manifold.
/// 6. Proof gate: verify manifold membership.
- pub fn step(
- &mut self,
- params: &[f32],
- euclidean_grad: &[f32],
- ) -> Result {
+ pub fn step(&mut self, params: &[f32], euclidean_grad: &[f32]) -> Result {
if params.len() != euclidean_grad.len() || params.len() != self.m.len() {
return Err(GraphTransformerError::DimensionMismatch {
expected: self.m.len(),
@@ -784,12 +778,17 @@ impl RiemannianAdamOptimizer {
// Riemannian gradient = (1 - c||x||^2)^2 / 4 * euclidean_grad
let factor = (1.0 - c * norm_sq_p).max(EPS);
let scale = factor * factor / 4.0;
- euclidean_grad.iter().map(|&g| scale * g).collect::>()
+ euclidean_grad
+ .iter()
+ .map(|&g| scale * g)
+ .collect::>()
}
ManifoldType::Sphere => {
// Project gradient to tangent space: g_tan = g - x
let dp = dot(euclidean_grad, params);
- euclidean_grad.iter().zip(params.iter())
+ euclidean_grad
+ .iter()
+ .zip(params.iter())
.map(|(&g, &p)| g - dp * p)
.collect::>()
}
@@ -799,7 +798,8 @@ impl RiemannianAdamOptimizer {
// Update biased first and second moment estimates.
for i in 0..dim {
self.m[i] = self.beta1 * self.m[i] + (1.0 - self.beta1) * riemannian_grad[i];
- self.v[i] = self.beta2 * self.v[i] + (1.0 - self.beta2) * riemannian_grad[i] * riemannian_grad[i];
+ self.v[i] = self.beta2 * self.v[i]
+ + (1.0 - self.beta2) * riemannian_grad[i] * riemannian_grad[i];
}
// Bias correction.
@@ -828,7 +828,11 @@ impl RiemannianAdamOptimizer {
}
_ => {
// Euclidean: just add.
- params.iter().zip(update.iter()).map(|(&p, &u)| p + u).collect()
+ params
+ .iter()
+ .zip(update.iter())
+ .map(|(&p, &u)| p + u)
+ .collect()
}
};
@@ -856,9 +860,7 @@ impl RiemannianAdamOptimizer {
let c = curvature.abs().max(EPS);
norm_sq(params) < 1.0 / c
}
- ManifoldType::Sphere => {
- (norm(params) - 1.0).abs() < 0.01
- }
+ ManifoldType::Sphere => (norm(params) - 1.0).abs() < 0.01,
_ => true,
}
}
@@ -989,9 +991,7 @@ impl LieGroupEquivariantAttention {
}
let scale = (self.scalar_dim as f32).sqrt();
- let scores: Vec = keys.iter()
- .map(|k| dot(query, k) / scale)
- .collect();
+ let scores: Vec = keys.iter().map(|k| dot(query, k) / scale).collect();
softmax(&scores)
}
@@ -1035,7 +1035,11 @@ fn sigmoid(x: f32) -> f32 {
/// Euclidean distance between two vectors.
#[cfg(feature = "manifold")]
fn euclidean_distance(a: &[f32], b: &[f32]) -> f32 {
- a.iter().zip(b.iter()).map(|(&x, &y)| (x - y).powi(2)).sum::().sqrt()
+ a.iter()
+ .zip(b.iter())
+ .map(|(&x, &y)| (x - y).powi(2))
+ .sum::()
+ .sqrt()
}
/// Compute the centroid (Euclidean mean) of a set of vectors.
@@ -1128,7 +1132,11 @@ fn sphere_log_map(q: &[f32], p: &[f32]) -> Vec {
}
// v = (q - d*p) normalized, scaled by angle
- let mut v: Vec = q.iter().zip(p.iter()).map(|(&qi, &pi)| qi - d * pi).collect();
+ let mut v: Vec = q
+ .iter()
+ .zip(p.iter())
+ .map(|(&qi, &pi)| qi - d * pi)
+ .collect();
let v_norm = norm(&v);
if v_norm < EPS {
return vec![0.0; p.len()];
@@ -1148,7 +1156,8 @@ fn sphere_exp_map(v: &[f32], p: &[f32]) -> Vec {
}
let cos_t = v_norm.cos();
let sin_t = v_norm.sin();
- p.iter().zip(v.iter())
+ p.iter()
+ .zip(v.iter())
.map(|(&pi, &vi)| cos_t * pi + sin_t * vi / v_norm)
.collect()
}
@@ -1166,7 +1175,9 @@ fn mobius_add_internal(u: &[f32], v: &[f32], c: f32) -> Vec {
let coef_v = 1.0 - c * norm_u_sq;
let denom = 1.0 + 2.0 * c * dot_uv + c * c * norm_u_sq * norm_v_sq;
- let result: Vec = u.iter().zip(v.iter())
+ let result: Vec = u
+ .iter()
+ .zip(v.iter())
.map(|(&ui, &vi)| (coef_u * ui + coef_v * vi) / denom.max(EPS))
.collect();
@@ -1293,16 +1304,8 @@ mod tests {
// 4-node graph: query is node 0, keys/values are neighbors 1..3.
let query = vec![0.5; 12];
- let keys = vec![
- vec![0.3; 12],
- vec![0.7; 12],
- vec![0.1; 12],
- ];
- let values = vec![
- vec![1.0; 12],
- vec![2.0; 12],
- vec![0.5; 12],
- ];
+ let keys = vec![vec![0.3; 12], vec![0.7; 12], vec![0.1; 12]];
+ let values = vec![vec![1.0; 12], vec![2.0; 12], vec![0.5; 12]];
let result = attn.compute(&query, &keys, &values);
assert!(result.is_ok(), "compute failed: {:?}", result.err());
@@ -1473,11 +1476,7 @@ mod tests {
let mut gmp = GeodesicMessagePassing::new(manifold);
// Small features that lie inside the Poincare ball (||x|| < 1).
- let features = vec![
- vec![0.1, 0.2],
- vec![0.3, 0.1],
- vec![-0.1, 0.3],
- ];
+ let features = vec![vec![0.1, 0.2], vec![0.3, 0.1], vec![-0.1, 0.3]];
let edges = vec![(0, 1), (1, 2), (0, 2)];
let result = gmp.propagate(&features, &edges);
@@ -1534,10 +1533,7 @@ mod tests {
let manifold = ManifoldType::Lorentz { curvature: 1.0 }; // falls to Euclidean branch
let mut gmp = GeodesicMessagePassing::new(manifold);
- let features = vec![
- vec![1.0, 2.0],
- vec![3.0, 4.0],
- ];
+ let features = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
let edges = vec![(0, 1)];
let result = gmp.propagate(&features, &edges).unwrap();
diff --git a/crates/ruvector-graph-transformer/src/physics.rs b/crates/ruvector-graph-transformer/src/physics.rs
index d4abde7d1..e6a7f68d4 100644
--- a/crates/ruvector-graph-transformer/src/physics.rs
+++ b/crates/ruvector-graph-transformer/src/physics.rs
@@ -13,10 +13,9 @@
#[cfg(feature = "physics")]
use ruvector_verified::{
- ProofEnvironment, ProofAttestation,
- prove_dim_eq,
- proof_store::create_attestation,
gated::{route_proof, ProofKind, ProofTier},
+ proof_store::create_attestation,
+ prove_dim_eq, ProofAttestation, ProofEnvironment,
};
#[cfg(feature = "physics")]
@@ -111,9 +110,10 @@ impl HamiltonianGraphNet {
// Reject NaN / Inf in input
for &v in feat {
if !v.is_finite() {
- return Err(GraphTransformerError::NumericalError(
- format!("non-finite value in node_features[{}]", i),
- ));
+ return Err(GraphTransformerError::NumericalError(format!(
+ "non-finite value in node_features[{}]",
+ i
+ )));
}
}
}
@@ -247,11 +247,7 @@ impl HamiltonianGraphNet {
}
/// Compute gradient of H with respect to q (= dV/dq).
- fn compute_grad_q(
- &self,
- q: &[Vec],
- adjacency: &[(usize, usize, f32)],
- ) -> Vec> {
+ fn compute_grad_q(&self, q: &[Vec], adjacency: &[(usize, usize, f32)]) -> Vec> {
let n = q.len();
let mut grad = vec![vec![0.0f32; self.dim]; n];
@@ -366,9 +362,10 @@ impl GaugeEquivariantMP {
for (idx, (src, dst, conn)) in edges.iter().enumerate() {
if *src >= n || *dst >= n {
- return Err(GraphTransformerError::InvariantViolation(
- format!("edge {} references out-of-bounds node ({}, {})", idx, src, dst),
- ));
+ return Err(GraphTransformerError::InvariantViolation(format!(
+ "edge {} references out-of-bounds node ({}, {})",
+ idx, src, dst
+ )));
}
if conn.len() != d * d {
return Err(GraphTransformerError::DimensionMismatch {
@@ -706,10 +703,7 @@ impl ConservativePdeAttention {
let d = node_features[0].len();
// Compute mass before diffusion
- let mass_before: f32 = node_features
- .iter()
- .flat_map(|f| f.iter())
- .sum();
+ let mass_before: f32 = node_features.iter().flat_map(|f| f.iter()).sum();
// Perform diffusion step: f_new = f + dt * alpha * L * f
// where L is the graph Laplacian (symmetric, row-sum-zero).
@@ -727,10 +721,7 @@ impl ConservativePdeAttention {
}
// Compute mass after diffusion
- let mass_after: f32 = output
- .iter()
- .flat_map(|f| f.iter())
- .sum();
+ let mass_after: f32 = output.iter().flat_map(|f| f.iter()).sum();
let mass_diff = (mass_after - mass_before).abs();
let mass_conserved = mass_diff < self.mass_tolerance;
@@ -781,10 +772,7 @@ mod tests {
};
let hgn = HamiltonianGraphNet::new(4, config);
- let features = vec![
- vec![1.0, 0.0, 0.0, 0.0],
- vec![0.0, 1.0, 0.0, 0.0],
- ];
+ let features = vec![vec![1.0, 0.0, 0.0, 0.0], vec![0.0, 1.0, 0.0, 0.0]];
let state = hgn.init_state(&features).unwrap();
assert_eq!(state.q.len(), 2);
assert_eq!(state.p.len(), 2);
@@ -810,19 +798,16 @@ mod tests {
let state = hgn.init_state(&features).unwrap();
// Ring edges: 0-1, 1-2, 2-3, 3-0
- let edges = vec![
- (0, 1, 0.5),
- (1, 2, 0.5),
- (2, 3, 0.5),
- (3, 0, 0.5),
- ];
+ let edges = vec![(0, 1, 0.5), (1, 2, 0.5), (2, 3, 0.5), (3, 0, 0.5)];
let output = hgn.forward(&state, &edges).unwrap();
let drift = output.drift_ratio;
assert!(
drift < 0.05,
"energy drift ratio too large: {} (initial={}, final={})",
- drift, output.initial_energy, output.final_energy,
+ drift,
+ output.initial_energy,
+ output.final_energy,
);
assert!(
output.attestation.is_some(),
@@ -895,11 +880,7 @@ mod tests {
vec![7.0, 8.0, 9.0],
];
// Triangle graph
- let edges = vec![
- (0, 1, 1.0),
- (1, 2, 1.0),
- (0, 2, 1.0),
- ];
+ let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0)];
let output = pde.forward(&features, &edges).unwrap();
assert!(
@@ -917,7 +898,10 @@ mod tests {
.iter()
.zip(features.iter())
.any(|(new_f, old_f)| {
- new_f.iter().zip(old_f.iter()).any(|(a, b)| (a - b).abs() > 1e-8)
+ new_f
+ .iter()
+ .zip(old_f.iter())
+ .any(|(a, b)| (a - b).abs() > 1e-8)
});
assert!(features_changed, "diffusion should modify features");
}
@@ -944,10 +928,7 @@ mod tests {
#[test]
fn test_pde_mass_values() {
let mut pde = ConservativePdeAttention::new(0.5, 0.1, 1e-3);
- let features = vec![
- vec![10.0, 0.0],
- vec![0.0, 10.0],
- ];
+ let features = vec![vec![10.0, 0.0], vec![0.0, 10.0]];
let edges = vec![(0, 1, 1.0)];
let output = pde.forward(&features, &edges).unwrap();
@@ -974,11 +955,7 @@ mod tests {
];
// Identity connections (parallel transport is trivial)
- let identity: Vec = vec![
- 1.0, 0.0, 0.0,
- 0.0, 1.0, 0.0,
- 0.0, 0.0, 1.0,
- ];
+ let identity: Vec = vec![1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0];
let edges = vec![
(0, 1, identity.clone()),
@@ -1041,11 +1018,7 @@ mod tests {
#[test]
fn test_lagrangian_basic() {
let mut lagr = LagrangianAttention::new(1.0, 0.1, 100.0);
- let features = vec![
- vec![1.0, 0.0],
- vec![0.0, 1.0],
- vec![1.0, 1.0],
- ];
+ let features = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![1.0, 1.0]];
let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0)];
let output = lagr.forward(&features, &edges).unwrap();
diff --git a/crates/ruvector-graph-transformer/src/proof_gated.rs b/crates/ruvector-graph-transformer/src/proof_gated.rs
index d43ded706..1e3df40bd 100644
--- a/crates/ruvector-graph-transformer/src/proof_gated.rs
+++ b/crates/ruvector-graph-transformer/src/proof_gated.rs
@@ -15,11 +15,10 @@
//! - [`ProofClass`]: Formal vs statistical proof classification
use ruvector_verified::{
- ProofEnvironment, ProofAttestation, VerifiedOp,
- prove_dim_eq,
- proof_store::create_attestation,
- gated::{route_proof, ProofKind, TierDecision, ProofTier},
+ gated::{route_proof, ProofKind, ProofTier, TierDecision},
pipeline::compose_chain,
+ proof_store::create_attestation,
+ prove_dim_eq, ProofAttestation, ProofEnvironment, VerifiedOp,
};
use crate::error::Result;
@@ -104,8 +103,7 @@ impl ProofGate {
stages: &[(String, u32, u32)],
f: impl FnOnce(&mut T),
) -> Result {
- let (_input_type, _output_type, proof_id) =
- compose_chain(stages, &mut self.env)?;
+ let (_input_type, _output_type, proof_id) = compose_chain(stages, &mut self.env)?;
f(&mut self.value);
let attestation = create_attestation(&self.env, proof_id);
self.attestation_chain.append(attestation.clone());
@@ -280,12 +278,10 @@ impl MutationLedger {
/// The running `chain_hash` is recomputed over the single seal so
/// that `verify_integrity()` remains consistent.
pub fn compact(&mut self) -> ProofAttestation {
- let total_steps: u32 = self.attestations
- .iter()
- .map(|a| a.reduction_steps)
- .sum();
+ let total_steps: u32 = self.attestations.iter().map(|a| a.reduction_steps).sum();
- let total_cache: u64 = self.attestations
+ let total_cache: u64 = self
+ .attestations
.iter()
.map(|a| a.cache_hit_rate_bps as u64)
.sum();
@@ -298,12 +294,11 @@ impl MutationLedger {
// Encode the pre-compaction chain hash and count into proof_term_hash.
let mut proof_hash = [0u8; 32];
proof_hash[0..8].copy_from_slice(&self.chain_hash.to_le_bytes());
- proof_hash[8..16].copy_from_slice(
- &(self.attestations.len() as u64).to_le_bytes(),
- );
+ proof_hash[8..16].copy_from_slice(&(self.attestations.len() as u64).to_le_bytes());
// Use the last attestation's environment hash, or zeros.
- let env_hash = self.attestations
+ let env_hash = self
+ .attestations
.last()
.map(|a| a.environment_hash)
.unwrap_or([0u8; 32]);
@@ -396,11 +391,7 @@ pub struct ProofScope {
impl ProofScope {
/// Create a new proof scope for the given partition.
- pub fn new(
- partition_id: u32,
- boundary_nodes: Vec,
- compaction_threshold: usize,
- ) -> Self {
+ pub fn new(partition_id: u32, boundary_nodes: Vec, compaction_threshold: usize) -> Self {
Self {
partition_id,
boundary_nodes,
@@ -572,10 +563,7 @@ impl EpochBoundary {
///
/// Compacts the ledger, advances the epoch, and returns the
/// boundary record.
- pub fn seal(
- ledger: &mut MutationLedger,
- new_config: ProofEnvironmentConfig,
- ) -> Self {
+ pub fn seal(ledger: &mut MutationLedger, new_config: ProofEnvironmentConfig) -> Self {
let from_epoch = ledger.epoch();
let to_epoch = from_epoch + 1;
let seal_att = ledger.compact();
@@ -615,24 +603,39 @@ impl ProofRequirement {
/// Map this requirement to a [`ProofKind`] for tier routing.
pub fn to_proof_kind(&self) -> ProofKind {
match self {
- ProofRequirement::DimensionMatch { expected } => ProofKind::DimensionEquality { expected: *expected, actual: *expected },
+ ProofRequirement::DimensionMatch { expected } => ProofKind::DimensionEquality {
+ expected: *expected,
+ actual: *expected,
+ },
ProofRequirement::TypeMatch { .. } => ProofKind::TypeApplication { depth: 1 },
- ProofRequirement::InvariantPreserved { .. } => ProofKind::Custom { estimated_complexity: 100 },
- ProofRequirement::CoherenceBound { .. } => ProofKind::Custom { estimated_complexity: 100 },
+ ProofRequirement::InvariantPreserved { .. } => ProofKind::Custom {
+ estimated_complexity: 100,
+ },
+ ProofRequirement::CoherenceBound { .. } => ProofKind::Custom {
+ estimated_complexity: 100,
+ },
ProofRequirement::Composite(subs) => {
// Use the highest-complexity sub-requirement for routing.
- if subs.iter().any(|r| matches!(
- r,
- ProofRequirement::InvariantPreserved { .. }
- | ProofRequirement::CoherenceBound { .. }
- )) {
- ProofKind::Custom { estimated_complexity: 100 }
- } else if subs.iter().any(|r| {
- matches!(r, ProofRequirement::TypeMatch { .. })
+ if subs.iter().any(|r| {
+ matches!(
+ r,
+ ProofRequirement::InvariantPreserved { .. }
+ | ProofRequirement::CoherenceBound { .. }
+ )
}) {
+ ProofKind::Custom {
+ estimated_complexity: 100,
+ }
+ } else if subs
+ .iter()
+ .any(|r| matches!(r, ProofRequirement::TypeMatch { .. }))
+ {
ProofKind::TypeApplication { depth: 1 }
} else {
- ProofKind::DimensionEquality { expected: 0, actual: 0 }
+ ProofKind::DimensionEquality {
+ expected: 0,
+ actual: 0,
+ }
}
}
}
@@ -641,9 +644,7 @@ impl ProofRequirement {
/// Count the number of leaf requirements (non-composite).
pub fn leaf_count(&self) -> usize {
match self {
- ProofRequirement::Composite(subs) => {
- subs.iter().map(|s| s.leaf_count()).sum()
- }
+ ProofRequirement::Composite(subs) => subs.iter().map(|s| s.leaf_count()).sum(),
_ => 1,
}
}
@@ -698,8 +699,7 @@ impl ComplexityBound {
/// Check whether this bound fits within the Reflex tier budget.
pub fn fits_reflex(&self) -> bool {
- self.complexity_class == ComplexityClass::Constant
- && self.ops_upper_bound <= 10
+ self.complexity_class == ComplexityClass::Constant && self.ops_upper_bound <= 10
}
/// Check whether this bound fits within the Standard tier budget.
@@ -786,12 +786,7 @@ mod tests {
#[test]
fn test_proof_gate_routed_mutation() {
let mut gate = ProofGate::new(100i32);
- let result = gate.mutate_with_routed_proof(
- ProofKind::Reflexivity,
- 5,
- 5,
- |v| *v += 1,
- );
+ let result = gate.mutate_with_routed_proof(ProofKind::Reflexivity, 5, 5, |v| *v += 1);
assert!(result.is_ok());
let (decision, _att) = result.unwrap();
assert_eq!(decision.tier, ProofTier::Reflex);
@@ -879,8 +874,7 @@ mod tests {
assert!(!ledger.needs_compaction());
// The seal's proof_term_hash encodes the chain hash.
- let encoded_hash =
- u64::from_le_bytes(seal.proof_term_hash[0..8].try_into().unwrap());
+ let encoded_hash = u64::from_le_bytes(seal.proof_term_hash[0..8].try_into().unwrap());
assert_ne!(encoded_hash, 0);
// Integrity holds after compaction.
@@ -1038,10 +1032,7 @@ mod tests {
let sp = SupersessionProof::new(7, att.clone(), 99);
assert_eq!(sp.superseded_position, 7);
assert_eq!(sp.soundness_proof_id, 99);
- assert_eq!(
- sp.replacement.content_hash(),
- att.content_hash(),
- );
+ assert_eq!(sp.replacement.content_hash(), att.content_hash(),);
}
// -----------------------------------------------------------------------
@@ -1051,10 +1042,16 @@ mod tests {
#[test]
fn test_proof_requirement_to_proof_kind() {
let dim = ProofRequirement::DimensionMatch { expected: 128 };
- assert!(matches!(dim.to_proof_kind(), ProofKind::DimensionEquality { .. }));
+ assert!(matches!(
+ dim.to_proof_kind(),
+ ProofKind::DimensionEquality { .. }
+ ));
let ty = ProofRequirement::TypeMatch { schema_id: 1 };
- assert!(matches!(ty.to_proof_kind(), ProofKind::TypeApplication { .. }));
+ assert!(matches!(
+ ty.to_proof_kind(),
+ ProofKind::TypeApplication { .. }
+ ));
let inv = ProofRequirement::InvariantPreserved { invariant_id: 5 };
assert!(matches!(inv.to_proof_kind(), ProofKind::Custom { .. }));
@@ -1119,23 +1116,19 @@ mod tests {
assert!(reflex.fits_reflex());
assert!(reflex.fits_standard());
- let too_many_ops =
- ComplexityBound::new(20, 64, ComplexityClass::Constant);
+ let too_many_ops = ComplexityBound::new(20, 64, ComplexityClass::Constant);
assert!(!too_many_ops.fits_reflex());
- let wrong_class =
- ComplexityBound::new(5, 64, ComplexityClass::Linear);
+ let wrong_class = ComplexityBound::new(5, 64, ComplexityClass::Linear);
assert!(!wrong_class.fits_reflex());
}
#[test]
fn test_complexity_bound_fits_standard() {
- let standard =
- ComplexityBound::new(500, 4096, ComplexityClass::Logarithmic);
+ let standard = ComplexityBound::new(500, 4096, ComplexityClass::Logarithmic);
assert!(standard.fits_standard());
- let too_expensive =
- ComplexityBound::new(501, 4096, ComplexityClass::Quadratic);
+ let too_expensive = ComplexityBound::new(501, 4096, ComplexityClass::Quadratic);
assert!(!too_expensive.fits_standard());
}
diff --git a/crates/ruvector-graph-transformer/src/self_organizing.rs b/crates/ruvector-graph-transformer/src/self_organizing.rs
index 079120141..dd2128ec7 100644
--- a/crates/ruvector-graph-transformer/src/self_organizing.rs
+++ b/crates/ruvector-graph-transformer/src/self_organizing.rs
@@ -8,7 +8,9 @@
use ruvector_coherence::quality_check;
#[cfg(feature = "self-organizing")]
-use ruvector_verified::{ProofEnvironment, prove_dim_eq, proof_store::create_attestation, ProofAttestation};
+use ruvector_verified::{
+ proof_store::create_attestation, prove_dim_eq, ProofAttestation, ProofEnvironment,
+};
#[cfg(feature = "self-organizing")]
use crate::config::SelfOrganizingConfig;
@@ -86,10 +88,7 @@ impl MorphogeneticField {
/// dB/dt = D_b * laplacian(B) + A*B^2 - (f+k)*B
///
/// Proof gate: all concentrations remain in [0.0, 2.0].
- pub fn step(
- &mut self,
- adjacency: &[(usize, usize)],
- ) -> Result {
+ pub fn step(&mut self, adjacency: &[(usize, usize)]) -> Result {
let n = self.num_nodes;
let dt = 1.0;
let d_a = self.config.diffusion_rate; // diffusion_activator
@@ -136,8 +135,7 @@ impl MorphogeneticField {
// Check coherence using ruvector-coherence
let quality = quality_check(&new_a, &new_b, self.config.coherence_threshold as f64);
let coherence = quality.cosine_sim.abs() as f32;
- let topology_maintained = quality.passes_threshold
- || quality.l2_dist < 1.0;
+ let topology_maintained = quality.passes_threshold || quality.l2_dist < 1.0;
Ok(MorphogeneticStepResult {
activator: new_a,
@@ -266,8 +264,7 @@ impl DevelopmentalProgram {
if growth_used >= self.max_growth_budget {
break;
}
- if activator[i] >= rule.activator_threshold
- && degrees[i] < rule.max_degree
+ if activator[i] >= rule.activator_threshold && degrees[i] < rule.max_degree
{
// Split: create a new node connected to the original
let new_id = next_node_id;
@@ -283,8 +280,7 @@ impl DevelopmentalProgram {
if growth_used >= self.max_growth_budget {
break;
}
- if activator[i] >= rule.activator_threshold
- && degrees[i] < rule.max_degree
+ if activator[i] >= rule.activator_threshold && degrees[i] < rule.max_degree
{
// Find closest non-neighbor by activator similarity
let mut best_j = None;
@@ -294,16 +290,16 @@ impl DevelopmentalProgram {
if i == j {
continue;
}
- let edge_exists = existing_edges.iter().any(|&(u, v)| {
- (u == i && v == j) || (u == j && v == i)
- });
+ let edge_exists = existing_edges
+ .iter()
+ .any(|&(u, v)| (u == i && v == j) || (u == j && v == i));
if edge_exists {
continue;
}
// Already scheduled for addition
- let already_added = new_edges.iter().any(|&(u, v, _)| {
- (u == i && v == j) || (u == j && v == i)
- });
+ let already_added = new_edges
+ .iter()
+ .any(|&(u, v, _)| (u == i && v == j) || (u == j && v == i));
if already_added {
continue;
}
@@ -331,9 +327,9 @@ impl DevelopmentalProgram {
let both_below = activator[u] < rule.activator_threshold
&& activator[v] < rule.activator_threshold;
if both_below {
- let already_removed = removed_edges.iter().any(|&(a, b)| {
- (a == u && b == v) || (a == v && b == u)
- });
+ let already_removed = removed_edges
+ .iter()
+ .any(|&(a, b)| (a == u && b == v) || (a == v && b == u));
if !already_removed {
removed_edges.push((u, v));
growth_used += 1;
@@ -483,12 +479,8 @@ impl GraphCoarsener {
// Aggregate features
let dim = features[0].len();
- let coarse_features = self.aggregate_features(
- features,
- &cluster_to_nodes,
- num_clusters,
- dim,
- );
+ let coarse_features =
+ self.aggregate_features(features, &cluster_to_nodes, num_clusters, dim);
// Build coarse edges (edges between different clusters)
let mut coarse_edge_set = std::collections::HashSet::new();
@@ -531,7 +523,11 @@ impl GraphCoarsener {
num_original_nodes: usize,
) -> UncoarsenResult {
let num_clusters = coarse_features.len();
- let dim = if coarse_features.is_empty() { 0 } else { coarse_features[0].len() };
+ let dim = if coarse_features.is_empty() {
+ 0
+ } else {
+ coarse_features[0].len()
+ };
// Build cluster membership lists
let mut cluster_to_nodes: Vec> = vec![Vec::new(); num_clusters];
@@ -642,13 +638,16 @@ impl GraphCoarsener {
}
AggregationStrategy::AttentionPooling => {
// Compute attention weights via feature magnitudes
- let magnitudes: Vec = nodes.iter().map(|&node| {
- if node < features.len() {
- features[node].iter().map(|x| x * x).sum::().sqrt()
- } else {
- 0.0
- }
- }).collect();
+ let magnitudes: Vec = nodes
+ .iter()
+ .map(|&node| {
+ if node < features.len() {
+ features[node].iter().map(|x| x * x).sum::().sqrt()
+ } else {
+ 0.0
+ }
+ })
+ .collect();
let total_mag: f32 = magnitudes.iter().sum::().max(1e-8);
let weights: Vec = magnitudes.iter().map(|m| m / total_mag).collect();
@@ -662,15 +661,19 @@ impl GraphCoarsener {
}
AggregationStrategy::TopK(k) => {
// Select top-k nodes by feature magnitude
- let mut scored: Vec<(f32, usize)> = nodes.iter().map(|&node| {
- let mag = if node < features.len() {
- features[node].iter().map(|x| x * x).sum::().sqrt()
- } else {
- 0.0
- };
- (mag, node)
- }).collect();
- scored.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));
+ let mut scored: Vec<(f32, usize)> = nodes
+ .iter()
+ .map(|&node| {
+ let mag = if node < features.len() {
+ features[node].iter().map(|x| x * x).sum::().sqrt()
+ } else {
+ 0.0
+ };
+ (mag, node)
+ })
+ .collect();
+ scored
+ .sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));
let top_k = scored.iter().take(*k).collect::>();
let count = top_k.len().max(1) as f32;
for &&(_, node) in &top_k {
@@ -699,11 +702,7 @@ impl GraphCoarsener {
///
/// L = D - A where D is the degree matrix and A is the adjacency matrix.
#[cfg(feature = "self-organizing")]
-fn graph_laplacian_action(
- x: &[f32],
- adjacency: &[(usize, usize)],
- n: usize,
-) -> Vec {
+fn graph_laplacian_action(x: &[f32], adjacency: &[(usize, usize)], n: usize) -> Vec {
let mut result = vec![0.0f32; n];
let mut degrees = vec![0usize; n];
diff --git a/crates/ruvector-graph-transformer/src/sublinear_attention.rs b/crates/ruvector-graph-transformer/src/sublinear_attention.rs
index 631396eb6..b6f9f4d58 100644
--- a/crates/ruvector-graph-transformer/src/sublinear_attention.rs
+++ b/crates/ruvector-graph-transformer/src/sublinear_attention.rs
@@ -9,7 +9,7 @@
//! `ruvector-mincut` for graph structure operations.
#[cfg(feature = "sublinear")]
-use ruvector_attention::{ScaledDotProductAttention, Attention};
+use ruvector_attention::{Attention, ScaledDotProductAttention};
// ruvector_mincut is available for advanced sparsification strategies.
#[cfg(feature = "sublinear")]
@@ -45,10 +45,7 @@ impl SublinearGraphAttention {
/// Hashes node features into buckets and computes attention only
/// within each bucket, reducing complexity from O(n^2) to O(n * B)
/// where B is the bucket size.
- pub fn lsh_attention(
- &self,
- node_features: &[Vec],
- ) -> Result>> {
+ pub fn lsh_attention(&self, node_features: &[Vec]) -> Result>> {
if node_features.is_empty() {
return Ok(Vec::new());
}
@@ -95,7 +92,9 @@ impl SublinearGraphAttention {
continue;
}
- let result = self.attention.compute(query, &keys, &values)
+ let result = self
+ .attention
+ .compute(query, &keys, &values)
.map_err(GraphTransformerError::Attention)?;
outputs[query_idx] = result;
}
@@ -149,7 +148,9 @@ impl SublinearGraphAttention {
.collect();
let values: Vec<&[f32]> = keys.clone();
- let result = self.attention.compute(query, &keys, &values)
+ let result = self
+ .attention
+ .compute(query, &keys, &values)
.map_err(GraphTransformerError::Attention)?;
outputs[i] = result;
}
@@ -205,7 +206,9 @@ impl SublinearGraphAttention {
.collect();
let values: Vec<&[f32]> = keys.clone();
- let result = self.attention.compute(query, &keys, &values)
+ let result = self
+ .attention
+ .compute(query, &keys, &values)
.map_err(GraphTransformerError::Attention)?;
outputs[i] = result;
}
@@ -233,12 +236,7 @@ fn lsh_hash(features: &[f32], num_buckets: usize) -> usize {
/// Sample neighbors via short random walks (PPR approximation).
#[cfg(feature = "sublinear")]
-fn ppr_sample(
- adj: &[Vec],
- source: usize,
- k: usize,
- rng: &mut impl rand::Rng,
-) -> Vec {
+fn ppr_sample(adj: &[Vec], source: usize, k: usize, rng: &mut impl rand::Rng) -> Vec {
use std::collections::HashSet;
let alpha = 0.15; // teleportation probability
@@ -284,12 +282,7 @@ mod tests {
};
let attn = SublinearGraphAttention::new(8, config);
- let features = vec![
- vec![1.0; 8],
- vec![0.5; 8],
- vec![0.3; 8],
- vec![0.8; 8],
- ];
+ let features = vec![vec![1.0; 8], vec![0.5; 8], vec![0.3; 8], vec![0.8; 8]];
let result = attn.lsh_attention(&features);
assert!(result.is_ok());
@@ -324,12 +317,7 @@ mod tests {
vec![0.0, 0.0, 1.0, 0.0],
vec![0.0, 0.0, 0.0, 1.0],
];
- let edges = vec![
- (0, 1, 1.0),
- (1, 2, 1.0),
- (2, 3, 1.0),
- (3, 0, 1.0),
- ];
+ let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0), (3, 0, 1.0)];
let result = attn.ppr_attention(&features, &edges);
assert!(result.is_ok());
@@ -351,11 +339,7 @@ mod tests {
vec![0.0, 1.0, 0.0, 0.0],
vec![0.0, 0.0, 1.0, 0.0],
];
- let edges = vec![
- (0, 1, 2.0),
- (1, 2, 1.0),
- (0, 2, 0.5),
- ];
+ let edges = vec![(0, 1, 2.0), (1, 2, 1.0), (0, 2, 0.5)];
let result = attn.spectral_attention(&features, &edges);
assert!(result.is_ok());
diff --git a/crates/ruvector-graph-transformer/src/temporal.rs b/crates/ruvector-graph-transformer/src/temporal.rs
index 80f9ba972..2d471428b 100644
--- a/crates/ruvector-graph-transformer/src/temporal.rs
+++ b/crates/ruvector-graph-transformer/src/temporal.rs
@@ -10,13 +10,13 @@
//! See ADR-053: Temporal and Causal Graph Transformer Layers.
#[cfg(feature = "temporal")]
-use ruvector_attention::{ScaledDotProductAttention, Attention};
+use ruvector_attention::{Attention, ScaledDotProductAttention};
#[cfg(feature = "temporal")]
use ruvector_verified::{
- ProofEnvironment,
- proof_store::create_attestation,
gated::{route_proof, ProofKind},
+ proof_store::create_attestation,
+ ProofEnvironment,
};
#[cfg(feature = "temporal")]
@@ -233,20 +233,26 @@ impl CausalGraphTransformer {
let keys: Vec<&[f32]> = candidates.iter().map(|&j| features[j].as_slice()).collect();
// Compute decay weights.
- let decay: Vec = candidates.iter().map(|&j| {
- let dt = (t_i - timestamps[j]) as f32;
- self.discount.powf(dt.max(0.0))
- }).collect();
+ let decay: Vec = candidates
+ .iter()
+ .map(|&j| {
+ let dt = (t_i - timestamps[j]) as f32;
+ self.discount.powf(dt.max(0.0))
+ })
+ .collect();
// Scale keys by decay.
- let scaled_keys: Vec> = keys.iter()
+ let scaled_keys: Vec> = keys
+ .iter()
.zip(decay.iter())
.map(|(k, &w)| k.iter().map(|&x| x * w).collect())
.collect();
let scaled_refs: Vec<&[f32]> = scaled_keys.iter().map(|k| k.as_slice()).collect();
let values: Vec<&[f32]> = keys.clone();
- let out = self.attention.compute(query, &scaled_refs, &values)
+ let out = self
+ .attention
+ .compute(query, &scaled_refs, &values)
.map_err(GraphTransformerError::Attention)?;
// Record weights.
@@ -310,10 +316,7 @@ impl CausalGraphTransformer {
/// Each time step can only attend to itself and previous time steps.
/// Attention weights decay exponentially with temporal distance.
/// (Legacy API preserved for backward compatibility.)
- pub fn temporal_attention(
- &self,
- sequence: &[Vec],
- ) -> Result {
+ pub fn temporal_attention(&self, sequence: &[Vec]) -> Result {
let t = sequence.len();
if t == 0 {
return Ok(TemporalAttentionResult {
@@ -339,9 +342,7 @@ impl CausalGraphTransformer {
let start = if i >= max_lag { i - max_lag + 1 } else { 0 };
let query = &sequence[i];
- let keys: Vec<&[f32]> = (start..=i)
- .map(|j| sequence[j].as_slice())
- .collect();
+ let keys: Vec<&[f32]> = (start..=i).map(|j| sequence[j].as_slice()).collect();
let values: Vec<&[f32]> = keys.clone();
// Apply exponential decay masking
@@ -353,15 +354,16 @@ impl CausalGraphTransformer {
.collect();
// Scale keys by decay weights
- let scaled_keys: Vec> = keys.iter()
+ let scaled_keys: Vec> = keys
+ .iter()
.zip(decay_weights.iter())
.map(|(k, &w)| k.iter().map(|&x| x * w).collect())
.collect();
- let scaled_refs: Vec<&[f32]> = scaled_keys.iter()
- .map(|k| k.as_slice())
- .collect();
+ let scaled_refs: Vec<&[f32]> = scaled_keys.iter().map(|k| k.as_slice()).collect();
- let out = self.attention.compute(query, &scaled_refs, &values)
+ let out = self
+ .attention
+ .compute(query, &scaled_refs, &values)
.map_err(GraphTransformerError::Attention)?;
// Record attention weights for this time step
@@ -406,7 +408,9 @@ impl CausalGraphTransformer {
if source >= time_series[0].len() || target >= time_series[0].len() {
return Err(GraphTransformerError::Config(format!(
"node index out of bounds: source={}, target={}, dim={}",
- source, target, time_series[0].len(),
+ source,
+ target,
+ time_series[0].len(),
)));
}
@@ -424,9 +428,13 @@ impl CausalGraphTransformer {
let df_denom = n - p_unrestricted;
let f_stat = if rss_unrestricted > 1e-10 && df_denom > 0.0 && df_diff > 0.0 {
- let raw = ((rss_restricted - rss_unrestricted) / df_diff)
- / (rss_unrestricted / df_denom);
- if raw.is_finite() { raw.max(0.0) } else { 0.0 }
+ let raw =
+ ((rss_restricted - rss_unrestricted) / df_diff) / (rss_unrestricted / df_denom);
+ if raw.is_finite() {
+ raw.max(0.0)
+ } else {
+ 0.0
+ }
} else {
0.0
};
@@ -805,7 +813,8 @@ impl ContinuousTimeODE {
// Standard ODE error check: error <= atol + rtol * |y_max|.
// We use max_error as the local truncation error estimate and
// compute a reference scale from the state norms.
- let y_scale: f64 = state.iter()
+ let y_scale: f64 = state
+ .iter()
.flat_map(|row| row.iter())
.map(|&v| (v as f64).abs())
.fold(0.0f64, f64::max)
@@ -1223,7 +1232,11 @@ impl TemporalEmbeddingStore {
let is_base = delta.len() > self.dim / 2;
self.chains[node].push(DeltaEntry {
timestamp: time,
- base: if is_base { Some(embedding.to_vec()) } else { None },
+ base: if is_base {
+ Some(embedding.to_vec())
+ } else {
+ None
+ },
delta: if is_base { Vec::new() } else { delta },
tier: StorageTier::Hot,
});
@@ -1244,14 +1257,10 @@ impl TemporalEmbeddingStore {
}
// Find the last entry at or before time t.
- let target_idx = chain
- .iter()
- .rposition(|e| e.timestamp <= time)?;
+ let target_idx = chain.iter().rposition(|e| e.timestamp <= time)?;
// Find the most recent base at or before target_idx.
- let base_idx = (0..=target_idx)
- .rev()
- .find(|&i| chain[i].base.is_some())?;
+ let base_idx = (0..=target_idx).rev().find(|&i| chain[i].base.is_some())?;
// Start from base and apply deltas forward.
let mut embedding = chain[base_idx].base.as_ref().unwrap().clone();
@@ -1422,7 +1431,11 @@ mod tests {
let mut series = Vec::new();
for t in 0..20 {
let x = (t as f32 * 0.1).sin();
- let y = if t > 0 { (((t - 1) as f32) * 0.1).sin() * 0.8 } else { 0.0 };
+ let y = if t > 0 {
+ (((t - 1) as f32) * 0.1).sin() * 0.8
+ } else {
+ 0.0
+ };
series.push(vec![x, y, 0.0, 0.0]);
}
@@ -1462,12 +1475,8 @@ mod tests {
max_lag: 10,
granger_lags: 3,
};
- let mut transformer = CausalGraphTransformer::with_strategy(
- 4,
- config,
- MaskStrategy::Strict,
- 0.9,
- );
+ let mut transformer =
+ CausalGraphTransformer::with_strategy(4, config, MaskStrategy::Strict, 0.9);
let features = vec![
vec![1.0, 0.0, 0.0, 0.0], // node 0, t=0
@@ -1478,10 +1487,18 @@ mod tests {
let timestamps = vec![0.0, 1.0, 2.0, 3.0];
// Fully connected edges.
let edges: Vec<(usize, usize)> = vec![
- (0, 1), (0, 2), (0, 3),
- (1, 0), (1, 2), (1, 3),
- (2, 0), (2, 1), (2, 3),
- (3, 0), (3, 1), (3, 2),
+ (0, 1),
+ (0, 2),
+ (0, 3),
+ (1, 0),
+ (1, 2),
+ (1, 3),
+ (2, 0),
+ (2, 1),
+ (2, 3),
+ (3, 0),
+ (3, 1),
+ (3, 2),
];
let result = transformer.forward(&features, ×tamps, &edges).unwrap();
@@ -1500,9 +1517,18 @@ mod tests {
);
// Node at t=0 must NOT see any future nodes.
- assert!(weights[0][1].abs() < 1e-8, "node 0 (t=0) leaked to node 1 (t=1)");
- assert!(weights[0][2].abs() < 1e-8, "node 0 (t=0) leaked to node 2 (t=2)");
- assert!(weights[0][3].abs() < 1e-8, "node 0 (t=0) leaked to node 3 (t=3)");
+ assert!(
+ weights[0][1].abs() < 1e-8,
+ "node 0 (t=0) leaked to node 1 (t=1)"
+ );
+ assert!(
+ weights[0][2].abs() < 1e-8,
+ "node 0 (t=0) leaked to node 2 (t=2)"
+ );
+ assert!(
+ weights[0][3].abs() < 1e-8,
+ "node 0 (t=0) leaked to node 3 (t=3)"
+ );
// But node at t=3 CAN see nodes at t=0,1,2.
// At least the self-weight must be non-zero.
@@ -1531,11 +1557,7 @@ mod tests {
vec![0.5, 0.5], // t=3
];
let timestamps = vec![0.0, 1.0, 2.0, 3.0];
- let edges: Vec<(usize, usize)> = vec![
- (0, 1), (0, 2), (0, 3),
- (1, 2), (1, 3),
- (2, 3),
- ];
+ let edges: Vec<(usize, usize)> = vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)];
let result = transformer.forward(&features, ×tamps, &edges).unwrap();
let weights = &result.read().attention_weights;
@@ -1543,8 +1565,14 @@ mod tests {
// Node at t=3 with window_size=1.5 can see t=2 and t=3 (self), but NOT t=0 or t=1.
// t=3 - t=0 = 3.0 > 1.5 => cannot see.
// t=3 - t=1 = 2.0 > 1.5 => cannot see.
- assert!(weights[3][0].abs() < 1e-8, "node 3 should not see node 0 (outside window)");
- assert!(weights[3][1].abs() < 1e-8, "node 3 should not see node 1 (outside window)");
+ assert!(
+ weights[3][0].abs() < 1e-8,
+ "node 3 should not see node 0 (outside window)"
+ );
+ assert!(
+ weights[3][1].abs() < 1e-8,
+ "node 3 should not see node 1 (outside window)"
+ );
}
/// RetrocausalAttention: requires BatchModeToken.
@@ -1608,11 +1636,7 @@ mod tests {
// Use reasonable tolerances for graph diffusion (O(1) state changes).
let mut ode = ContinuousTimeODE::new(2, 1.0, 0.5, 100);
- let features = vec![
- vec![1.0, 0.0],
- vec![0.0, 1.0],
- vec![0.5, 0.5],
- ];
+ let features = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![0.5, 0.5]];
let events = vec![
TemporalEdgeEvent {
@@ -1792,7 +1816,9 @@ mod tests {
// Entry at t=25 (age=5) -> Warm.
// (Tier is internal; we just verify no crash and retrieval still works.)
- let emb = store.retrieve(0, 25.0).expect("should still retrieve after compaction");
+ let emb = store
+ .retrieve(0, 25.0)
+ .expect("should still retrieve after compaction");
assert!((emb[0] - 0.5).abs() < 1e-6);
}
diff --git a/crates/ruvector-graph-transformer/src/verified_training.rs b/crates/ruvector-graph-transformer/src/verified_training.rs
index 8d9e8afff..7f3c8aa0d 100644
--- a/crates/ruvector-graph-transformer/src/verified_training.rs
+++ b/crates/ruvector-graph-transformer/src/verified_training.rs
@@ -16,14 +16,13 @@
//! | `PermutationEquivariance` | Deep | No -- statistical test |
//! | `EnergyGate` | Standard | Yes -- threshold comparison |
+#[cfg(feature = "verified-training")]
+use ruvector_gnn::RuvectorLayer;
#[cfg(feature = "verified-training")]
use ruvector_verified::{
- ProofEnvironment, ProofAttestation,
- prove_dim_eq, proof_store::create_attestation,
- gated::ProofTier,
+ gated::ProofTier, proof_store::create_attestation, prove_dim_eq, ProofAttestation,
+ ProofEnvironment,
};
-#[cfg(feature = "verified-training")]
-use ruvector_gnn::RuvectorLayer;
#[cfg(feature = "verified-training")]
use crate::config::VerifiedTrainingConfig;
@@ -276,12 +275,12 @@ pub struct TrainingCertificate {
fn blake3_hash(data: &[u8]) -> [u8; 32] {
// BLAKE3 IV constants (first 8 primes, fractional parts of square roots)
const IV: [u32; 8] = [
- 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
- 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
+ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB,
+ 0x5BE0CD19,
];
const MSG_SCHEDULE: [u32; 8] = [
- 0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
- 0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
+ 0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0xA4093822, 0x299F31D0, 0x082EFA98,
+ 0xEC4E6C89,
];
let mut state = IV;
@@ -299,15 +298,12 @@ fn blake3_hash(data: &[u8]) -> [u8; 32] {
.wrapping_add(*byte as u32)
.wrapping_add(MSG_SCHEDULE[idx]);
// Quarter-round mixing
- state[idx] = state[idx].rotate_right(7)
- ^ state[(idx + 1) % 8].wrapping_mul(0x9E3779B9);
+ state[idx] = state[idx].rotate_right(7) ^ state[(idx + 1) % 8].wrapping_mul(0x9E3779B9);
}
// Additional diffusion
for i in 0..8 {
- state[i] = state[i]
- .wrapping_add(state[(i + 3) % 8])
- .rotate_right(11);
+ state[i] = state[i].wrapping_add(state[(i + 3) % 8]).rotate_right(11);
}
offset = end;
@@ -321,10 +317,7 @@ fn blake3_hash(data: &[u8]) -> [u8; 32] {
// Final mixing rounds
for _ in 0..4 {
for i in 0..8 {
- state[i] = state[i]
- .wrapping_mul(0x85EBCA6B)
- .rotate_right(13)
- ^ state[(i + 5) % 8];
+ state[i] = state[i].wrapping_mul(0x85EBCA6B).rotate_right(13) ^ state[(i + 5) % 8];
}
}
@@ -480,7 +473,11 @@ impl VerifiedTrainer {
.map(|&v| (v as f64).abs())
.sum();
let count = proposed_weights.iter().map(|w| w.len()).sum::();
- if count > 0 { total / count as f64 } else { 0.0 }
+ if count > 0 {
+ total / count as f64
+ } else {
+ 0.0
+ }
};
// Compute weight norm (L2)
@@ -590,10 +587,7 @@ impl VerifiedTrainer {
let attestation = create_attestation(&self.env, proof_id);
// Compute hashes
- let weights_bytes: Vec = final_weights
- .iter()
- .flat_map(|f| f.to_le_bytes())
- .collect();
+ let weights_bytes: Vec = final_weights.iter().flat_map(|f| f.to_le_bytes()).collect();
let weights_hash = blake3_hash(&weights_bytes);
let config_bytes = format!("{:?}", self.config).into_bytes();
@@ -799,9 +793,7 @@ fn check_invariant(
fn invariant_name(inv: &TrainingInvariant) -> String {
match inv {
TrainingInvariant::LossStabilityBound { .. } => "LossStabilityBound".to_string(),
- TrainingInvariant::PermutationEquivariance { .. } => {
- "PermutationEquivariance".to_string()
- }
+ TrainingInvariant::PermutationEquivariance { .. } => "PermutationEquivariance".to_string(),
TrainingInvariant::LipschitzBound { .. } => "LipschitzBound".to_string(),
TrainingInvariant::WeightNormBound { .. } => "WeightNormBound".to_string(),
TrainingInvariant::EnergyGate { .. } => "EnergyGate".to_string(),
@@ -813,13 +805,14 @@ fn invariant_name(inv: &TrainingInvariant) -> String {
fn invariant_proof_class(inv: &TrainingInvariant) -> ProofClass {
match inv {
TrainingInvariant::LossStabilityBound { .. } => ProofClass::Formal,
- TrainingInvariant::PermutationEquivariance { rng_seed, tolerance } => {
- ProofClass::Statistical {
- rng_seed: Some(*rng_seed),
- iterations: 1,
- tolerance: *tolerance,
- }
- }
+ TrainingInvariant::PermutationEquivariance {
+ rng_seed,
+ tolerance,
+ } => ProofClass::Statistical {
+ rng_seed: Some(*rng_seed),
+ iterations: 1,
+ tolerance: *tolerance,
+ },
TrainingInvariant::LipschitzBound {
tolerance,
max_power_iterations,
@@ -855,7 +848,11 @@ fn max_tier(a: ProofTier, b: ProofTier) -> ProofTier {
ProofTier::Deep => 2,
}
}
- if tier_rank(&b) > tier_rank(&a) { b } else { a }
+ if tier_rank(&b) > tier_rank(&a) {
+ b
+ } else {
+ a
+ }
}
// ---------------------------------------------------------------------------
@@ -933,7 +930,12 @@ mod tests {
}
/// Helper: create simple test data.
- fn test_data() -> (Vec>, Vec>>, Vec>, Vec>) {
+ fn test_data() -> (
+ Vec>,
+ Vec>>,
+ Vec>,
+ Vec>,
+ ) {
let features = vec![vec![1.0, 0.5, 0.0, 0.0]];
let neighbors = vec![vec![vec![0.0, 1.0, 0.5, 0.0]]];
let weights = vec![vec![1.0]];
@@ -1132,8 +1134,14 @@ mod tests {
assert!(cert.attestation.verification_timestamp_ns > 0);
// Verify hash binding
- assert_ne!(cert.weights_hash, [0u8; 32], "weights hash should be non-zero");
- assert_ne!(cert.config_hash, [0u8; 32], "config hash should be non-zero");
+ assert_ne!(
+ cert.weights_hash, [0u8; 32],
+ "weights hash should be non-zero"
+ );
+ assert_ne!(
+ cert.config_hash, [0u8; 32],
+ "config hash should be non-zero"
+ );
assert_eq!(
cert.dataset_manifest_hash,
Some([0xABu8; 32]),
@@ -1146,10 +1154,7 @@ mod tests {
);
// Verify deterministic hash: same weights => same hash
- let weights_bytes: Vec = final_weights
- .iter()
- .flat_map(|f| f.to_le_bytes())
- .collect();
+ let weights_bytes: Vec = final_weights.iter().flat_map(|f| f.to_le_bytes()).collect();
let expected_hash = blake3_hash(&weights_bytes);
assert_eq!(
cert.weights_hash, expected_hash,
diff --git a/crates/ruvector-graph-transformer/tests/integration.rs b/crates/ruvector-graph-transformer/tests/integration.rs
index f28c25cc6..6e5232563 100644
--- a/crates/ruvector-graph-transformer/tests/integration.rs
+++ b/crates/ruvector-graph-transformer/tests/integration.rs
@@ -3,11 +3,12 @@
//! Tests the composition of all modules through proof-gated operations.
use ruvector_graph_transformer::{
- GraphTransformer, GraphTransformerConfig, ProofGate, AttestationChain,
+ AttestationChain, GraphTransformer, GraphTransformerConfig, ProofGate,
};
use ruvector_verified::{
- ProofEnvironment, proof_store::create_attestation,
gated::{ProofKind, ProofTier},
+ proof_store::create_attestation,
+ ProofEnvironment,
};
// ---- Proof-gated tests ----
@@ -49,12 +50,7 @@ fn test_proof_gate_dim_mutation_fails_on_mismatch() {
#[test]
fn test_proof_gate_routed_mutation() {
let mut gate = ProofGate::new(100i32);
- let result = gate.mutate_with_routed_proof(
- ProofKind::Reflexivity,
- 5,
- 5,
- |v| *v += 50,
- );
+ let result = gate.mutate_with_routed_proof(ProofKind::Reflexivity, 5, 5, |v| *v += 50);
assert!(result.is_ok());
let (decision, attestation) = result.unwrap();
assert_eq!(decision.tier, ProofTier::Reflex);
@@ -95,8 +91,8 @@ fn test_attestation_chain_integrity() {
#[cfg(feature = "sublinear")]
mod sublinear_tests {
- use ruvector_graph_transformer::SublinearGraphAttention;
use ruvector_graph_transformer::config::SublinearConfig;
+ use ruvector_graph_transformer::SublinearGraphAttention;
#[test]
fn test_lsh_attention_basic() {
@@ -107,9 +103,7 @@ mod sublinear_tests {
};
let attn = SublinearGraphAttention::new(8, config);
- let features: Vec> = (0..10)
- .map(|i| vec![i as f32 * 0.1; 8])
- .collect();
+ let features: Vec> = (0..10).map(|i| vec![i as f32 * 0.1; 8]).collect();
let result = attn.lsh_attention(&features);
assert!(result.is_ok());
@@ -163,11 +157,7 @@ mod sublinear_tests {
vec![0.5, 1.0, 0.4, 0.2],
vec![0.3, 0.4, 1.0, 0.5],
];
- let edges = vec![
- (0, 1, 2.0),
- (1, 2, 1.0),
- (0, 2, 0.5),
- ];
+ let edges = vec![(0, 1, 2.0), (1, 2, 1.0), (0, 2, 0.5)];
let result = attn.spectral_attention(&features, &edges);
assert!(result.is_ok());
@@ -178,8 +168,8 @@ mod sublinear_tests {
#[cfg(feature = "physics")]
mod physics_tests {
- use ruvector_graph_transformer::HamiltonianGraphNet;
use ruvector_graph_transformer::config::PhysicsConfig;
+ use ruvector_graph_transformer::HamiltonianGraphNet;
#[test]
fn test_hamiltonian_step_energy_conservation() {
@@ -190,10 +180,7 @@ mod physics_tests {
};
let mut hgn = HamiltonianGraphNet::new(4, config);
- let features = vec![
- vec![0.1, 0.2, 0.3, 0.4],
- vec![0.4, 0.3, 0.2, 0.1],
- ];
+ let features = vec![vec![0.1, 0.2, 0.3, 0.4], vec![0.4, 0.3, 0.2, 0.1]];
let state = hgn.init_state(&features).unwrap();
let edges = vec![(0, 1, 0.1)];
@@ -201,7 +188,8 @@ mod physics_tests {
let energy_diff = (result.energy_after - result.energy_before).abs();
assert!(
energy_diff < 0.1,
- "energy not conserved: diff={}", energy_diff
+ "energy not conserved: diff={}",
+ energy_diff
);
assert!(result.energy_conserved);
assert!(result.attestation.is_some());
@@ -212,8 +200,8 @@ mod physics_tests {
#[cfg(feature = "biological")]
mod biological_tests {
- use ruvector_graph_transformer::{SpikingGraphAttention, HebbianLayer};
use ruvector_graph_transformer::config::BiologicalConfig;
+ use ruvector_graph_transformer::{HebbianLayer, SpikingGraphAttention};
#[test]
fn test_spiking_attention_update() {
@@ -266,9 +254,9 @@ mod biological_tests {
#[cfg(feature = "self-organizing")]
mod self_organizing_tests {
- use ruvector_graph_transformer::{MorphogeneticField, DevelopmentalProgram};
use ruvector_graph_transformer::config::SelfOrganizingConfig;
use ruvector_graph_transformer::self_organizing::{GrowthRule, GrowthRuleKind};
+ use ruvector_graph_transformer::{DevelopmentalProgram, MorphogeneticField};
#[test]
fn test_morphogenetic_step_topology_invariants() {
@@ -320,11 +308,9 @@ mod self_organizing_tests {
#[cfg(feature = "verified-training")]
mod verified_training_tests {
- use ruvector_graph_transformer::{
- VerifiedTrainer, TrainingInvariant, RollbackStrategy,
- };
- use ruvector_graph_transformer::config::VerifiedTrainingConfig;
use ruvector_gnn::RuvectorLayer;
+ use ruvector_graph_transformer::config::VerifiedTrainingConfig;
+ use ruvector_graph_transformer::{RollbackStrategy, TrainingInvariant, VerifiedTrainer};
#[test]
fn test_verified_training_single_step_certificate() {
@@ -334,12 +320,10 @@ mod verified_training_tests {
learning_rate: 0.001,
..Default::default()
};
- let invariants = vec![
- TrainingInvariant::WeightNormBound {
- max_norm: 1000.0,
- rollback_strategy: RollbackStrategy::DeltaApply,
- },
- ];
+ let invariants = vec![TrainingInvariant::WeightNormBound {
+ max_norm: 1000.0,
+ rollback_strategy: RollbackStrategy::DeltaApply,
+ }];
let mut trainer = VerifiedTrainer::new(4, 8, config, invariants);
let layer = RuvectorLayer::new(4, 8, 2, 0.0).unwrap();
@@ -365,20 +349,24 @@ mod verified_training_tests {
learning_rate: 0.001,
..Default::default()
};
- let invariants = vec![
- TrainingInvariant::WeightNormBound {
- max_norm: 1000.0,
- rollback_strategy: RollbackStrategy::DeltaApply,
- },
- ];
+ let invariants = vec![TrainingInvariant::WeightNormBound {
+ max_norm: 1000.0,
+ rollback_strategy: RollbackStrategy::DeltaApply,
+ }];
let mut trainer = VerifiedTrainer::new(4, 8, config, invariants);
let layer = RuvectorLayer::new(4, 8, 2, 0.0).unwrap();
for _ in 0..3 {
- let result = trainer.train_step(
- &[vec![1.0; 4]], &[vec![]], &[vec![]], &[vec![0.0; 8]], &layer,
- ).unwrap();
+ let result = trainer
+ .train_step(
+ &[vec![1.0; 4]],
+ &[vec![]],
+ &[vec![]],
+ &[vec![0.0; 8]],
+ &layer,
+ )
+ .unwrap();
assert!(result.weights_committed);
}
@@ -391,9 +379,9 @@ mod verified_training_tests {
#[cfg(feature = "manifold")]
mod manifold_tests {
- use ruvector_graph_transformer::ProductManifoldAttention;
use ruvector_graph_transformer::config::ManifoldConfig;
- use ruvector_graph_transformer::manifold::{spherical_geodesic, hyperbolic_geodesic};
+ use ruvector_graph_transformer::manifold::{hyperbolic_geodesic, spherical_geodesic};
+ use ruvector_graph_transformer::ProductManifoldAttention;
#[test]
fn test_product_manifold_attention_curvature() {
@@ -441,8 +429,8 @@ mod manifold_tests {
#[cfg(feature = "temporal")]
mod temporal_tests {
- use ruvector_graph_transformer::CausalGraphTransformer;
use ruvector_graph_transformer::config::TemporalConfig;
+ use ruvector_graph_transformer::CausalGraphTransformer;
#[test]
fn test_causal_attention_ordering() {
diff --git a/crates/ruvector-mincut/src/canonical/mod.rs b/crates/ruvector-mincut/src/canonical/mod.rs
index ae3e0d4ac..cfa6b0059 100644
--- a/crates/ruvector-mincut/src/canonical/mod.rs
+++ b/crates/ruvector-mincut/src/canonical/mod.rs
@@ -204,16 +204,10 @@ impl CactusGraph {
// Run Stoer-Wagner to find global min-cut value and all min-cut
// partitions (simplified: we find the min-cut value and one
// partition, then enumerate by vertex removal).
- let (min_cut_value, min_cut_partitions) =
- Self::stoer_wagner_all_cuts(&adj);
+ let (min_cut_value, min_cut_partitions) = Self::stoer_wagner_all_cuts(&adj);
// Build cactus from discovered min-cuts
- Self::build_cactus_from_cuts(
- &vertices_ids,
- &adj,
- min_cut_value,
- &min_cut_partitions,
- )
+ Self::build_cactus_from_cuts(&vertices_ids, &adj, min_cut_value, &min_cut_partitions)
}
/// Root the cactus at the vertex containing the lexicographically
@@ -733,10 +727,7 @@ impl CactusGraph {
}
/// Simple cycle detection in the cactus graph.
- fn detect_cycles(
- vertices: &[CactusVertex],
- edges: &mut [CactusEdge],
- ) -> Vec {
+ fn detect_cycles(vertices: &[CactusVertex], edges: &mut [CactusEdge]) -> Vec {
if vertices.is_empty() || edges.is_empty() {
return Vec::new();
}
@@ -880,16 +871,32 @@ impl CactusGraph {
fn compute_cut_value_from_partition(&self, part_s: &[usize]) -> f64 {
let s_set: HashSet = part_s.iter().copied().collect();
// Build id -> index map for O(1) lookup
- let id_map: HashMap = self.vertices.iter().enumerate()
- .map(|(i, cv)| (cv.id, i)).collect();
+ let id_map: HashMap = self
+ .vertices
+ .iter()
+ .enumerate()
+ .map(|(i, cv)| (cv.id, i))
+ .collect();
let mut total = 0.0f64;
for e in &self.edges {
- let src_in_s = id_map.get(&e.source)
- .map(|&i| self.vertices[i].original_vertices.iter().any(|v| s_set.contains(v)))
+ let src_in_s = id_map
+ .get(&e.source)
+ .map(|&i| {
+ self.vertices[i]
+ .original_vertices
+ .iter()
+ .any(|v| s_set.contains(v))
+ })
.unwrap_or(false);
- let tgt_in_s = id_map.get(&e.target)
- .map(|&i| self.vertices[i].original_vertices.iter().any(|v| s_set.contains(v)))
+ let tgt_in_s = id_map
+ .get(&e.target)
+ .map(|&i| {
+ self.vertices[i]
+ .original_vertices
+ .iter()
+ .any(|v| s_set.contains(v))
+ })
.unwrap_or(false);
if src_in_s != tgt_in_s {
@@ -904,8 +911,12 @@ impl CactusGraph {
fn compute_cut_edges(&self, part_s: &[usize]) -> Vec<(usize, usize, f64)> {
let s_set: HashSet = part_s.iter().copied().collect();
// Build id -> index map for O(1) lookup
- let id_map: HashMap = self.vertices.iter().enumerate()
- .map(|(i, cv)| (cv.id, i)).collect();
+ let id_map: HashMap = self
+ .vertices
+ .iter()
+ .enumerate()
+ .map(|(i, cv)| (cv.id, i))
+ .collect();
let mut cut_edges = Vec::new();
for e in &self.edges {
@@ -913,10 +924,20 @@ impl CactusGraph {
let tgt_idx = id_map.get(&e.target).copied();
let src_in_s = src_idx
- .map(|i| self.vertices[i].original_vertices.iter().any(|v| s_set.contains(v)))
+ .map(|i| {
+ self.vertices[i]
+ .original_vertices
+ .iter()
+ .any(|v| s_set.contains(v))
+ })
.unwrap_or(false);
let tgt_in_s = tgt_idx
- .map(|i| self.vertices[i].original_vertices.iter().any(|v| s_set.contains(v)))
+ .map(|i| {
+ self.vertices[i]
+ .original_vertices
+ .iter()
+ .any(|v| s_set.contains(v))
+ })
.unwrap_or(false);
if src_in_s != tgt_in_s {
diff --git a/crates/ruvector-mincut/src/canonical/tests.rs b/crates/ruvector-mincut/src/canonical/tests.rs
index 2d916d546..d39fee2a7 100644
--- a/crates/ruvector-mincut/src/canonical/tests.rs
+++ b/crates/ruvector-mincut/src/canonical/tests.rs
@@ -166,11 +166,7 @@ fn test_canonical_determinism() {
// All keys must be identical
let first = keys[0];
for (i, key) in keys.iter().enumerate() {
- assert_eq!(
- *key, first,
- "Run {} produced different canonical key",
- i
- );
+ assert_eq!(*key, first, "Run {} produced different canonical key", i);
}
}
@@ -294,12 +290,7 @@ fn test_canonical_value_correctness_bridge() {
fn test_canonical_partition_covers_all_vertices() {
let mc = crate::MinCutBuilder::new()
.exact()
- .with_edges(vec![
- (1, 2, 1.0),
- (2, 3, 1.0),
- (3, 4, 1.0),
- (4, 1, 1.0),
- ])
+ .with_edges(vec![(1, 2, 1.0), (2, 3, 1.0), (3, 4, 1.0), (4, 1, 1.0)])
.build()
.unwrap();
@@ -338,11 +329,7 @@ fn test_witness_receipt() {
#[test]
fn test_witness_receipt_epoch_increments() {
- let mut canonical = CanonicalMinCutImpl::with_edges(vec![
- (1, 2, 1.0),
- (2, 3, 1.0),
- ])
- .unwrap();
+ let mut canonical = CanonicalMinCutImpl::with_edges(vec![(1, 2, 1.0), (2, 3, 1.0)]).unwrap();
let r1 = canonical.witness_receipt();
assert_eq!(r1.epoch, 0);
@@ -378,12 +365,8 @@ fn test_dynamic_canonical_insert() {
#[test]
fn test_dynamic_canonical_delete_preserves_property() {
- let mut canonical = CanonicalMinCutImpl::with_edges(vec![
- (1, 2, 1.0),
- (2, 3, 1.0),
- (3, 1, 1.0),
- ])
- .unwrap();
+ let mut canonical =
+ CanonicalMinCutImpl::with_edges(vec![(1, 2, 1.0), (2, 3, 1.0), (3, 1, 1.0)]).unwrap();
assert_eq!(canonical.min_cut_value(), 2.0);
@@ -400,11 +383,7 @@ fn test_dynamic_canonical_delete_preserves_property() {
#[test]
fn test_dynamic_canonical_insert_delete_cycle() {
- let mut canonical = CanonicalMinCutImpl::with_edges(vec![
- (1, 2, 1.0),
- (2, 3, 1.0),
- ])
- .unwrap();
+ let mut canonical = CanonicalMinCutImpl::with_edges(vec![(1, 2, 1.0), (2, 3, 1.0)]).unwrap();
let key_before = canonical.canonical_cut().canonical_key;
@@ -413,7 +392,10 @@ fn test_dynamic_canonical_insert_delete_cycle() {
canonical.delete_edge(3, 4).unwrap();
let key_after = canonical.canonical_cut().canonical_key;
- assert_eq!(key_before, key_after, "Insert+delete should restore canonical state");
+ assert_eq!(
+ key_before, key_after,
+ "Insert+delete should restore canonical state"
+ );
}
// ---------------------------------------------------------------------------
@@ -436,11 +418,7 @@ fn test_canonical_impl_default() {
#[test]
fn test_canonical_impl_with_edges() {
- let c = CanonicalMinCutImpl::with_edges(vec![
- (1, 2, 1.0),
- (2, 3, 1.0),
- ])
- .unwrap();
+ let c = CanonicalMinCutImpl::with_edges(vec![(1, 2, 1.0), (2, 3, 1.0)]).unwrap();
assert_eq!(c.num_vertices(), 3);
assert_eq!(c.num_edges(), 2);
@@ -450,12 +428,7 @@ fn test_canonical_impl_with_edges() {
#[test]
fn test_canonical_impl_cactus_graph() {
- let c = CanonicalMinCutImpl::with_edges(vec![
- (1, 2, 1.0),
- (2, 3, 1.0),
- (3, 1, 1.0),
- ])
- .unwrap();
+ let c = CanonicalMinCutImpl::with_edges(vec![(1, 2, 1.0), (2, 3, 1.0), (3, 1, 1.0)]).unwrap();
let cactus = c.cactus_graph();
assert!(cactus.n_vertices >= 1);
@@ -544,5 +517,8 @@ fn test_canonical_complete_k4() {
let result = canonical.canonical_cut();
// K4 min-cut = 3 (isolate one vertex)
let (ref s, ref t) = result.partition;
- assert!(s.len() == 1 || t.len() == 1, "K4 min-cut isolates one vertex");
+ assert!(
+ s.len() == 1 || t.len() == 1,
+ "K4 min-cut isolates one vertex"
+ );
}
diff --git a/crates/ruvector-mincut/src/lib.rs b/crates/ruvector-mincut/src/lib.rs
index c492f3f4f..de162bebf 100644
--- a/crates/ruvector-mincut/src/lib.rs
+++ b/crates/ruvector-mincut/src/lib.rs
@@ -386,7 +386,7 @@ pub use integration::AgenticAnalyzer;
#[cfg(feature = "canonical")]
pub use canonical::{
- CactusGraph, CactusCycle, CactusEdge, CactusVertex, CanonicalCutResult, CanonicalMinCut,
+ CactusCycle, CactusEdge, CactusGraph, CactusVertex, CanonicalCutResult, CanonicalMinCut,
CanonicalMinCutImpl, FixedWeight, WitnessReceipt,
};
@@ -508,7 +508,7 @@ pub mod prelude {
#[cfg(feature = "canonical")]
pub use crate::{
- CactusGraph, CactusCycle, CactusEdge, CactusVertex, CanonicalCutResult, CanonicalMinCut,
+ CactusCycle, CactusEdge, CactusGraph, CactusVertex, CanonicalCutResult, CanonicalMinCut,
CanonicalMinCutImpl, FixedWeight, WitnessReceipt,
};
diff --git a/crates/ruvector-mincut/tests/canonical_bench.rs b/crates/ruvector-mincut/tests/canonical_bench.rs
index dfa5f5fd6..38b7981d6 100644
--- a/crates/ruvector-mincut/tests/canonical_bench.rs
+++ b/crates/ruvector-mincut/tests/canonical_bench.rs
@@ -59,13 +59,20 @@ mod bench {
println!("\n=== Canonical Min-Cut (30v, ~90e) ===");
println!(" CactusGraph build: {:.1} µs", avg_cactus_us);
println!(" Canonical cut: {:.1} µs", avg_cut_us);
- println!(" Total: {:.1} µs (target: < 3000 µs native)", total);
+ println!(
+ " Total: {:.1} µs (target: < 3000 µs native)",
+ total
+ );
println!(" Cut value: {}", reference.value);
println!(" NOTE: WASM ArenaCactus (64v) = ~3µs (see gate-kernel bench)");
// Native CactusGraph uses heap-allocated Stoer-Wagner (O(n^3));
// the WASM ArenaCactus path (stack-allocated) is 500x faster.
- assert!(total < 3000.0, "Exceeded 3ms native target: {:.1} µs", total);
+ assert!(
+ total < 3000.0,
+ "Exceeded 3ms native target: {:.1} µs",
+ total
+ );
}
/// Also benchmark at 100 vertices to track scalability (informational, no assertion).
@@ -94,7 +101,10 @@ mod bench {
let avg_total_us = start.elapsed().as_micros() as f64 / n_iter as f64;
println!("\n=== Canonical Min-Cut Scalability (100v, ~300e) ===");
- println!(" Total (build+cut): {:.1} µs (informational)", avg_total_us);
+ println!(
+ " Total (build+cut): {:.1} µs (informational)",
+ avg_total_us
+ );
println!(" Stoer-Wagner is O(n^3), scales cubically with graph size");
}
}
diff --git a/crates/ruvector-verified-wasm/src/lib.rs b/crates/ruvector-verified-wasm/src/lib.rs
index ca31534a0..a9884e7e9 100644
--- a/crates/ruvector-verified-wasm/src/lib.rs
+++ b/crates/ruvector-verified-wasm/src/lib.rs
@@ -26,12 +26,10 @@
mod utils;
use ruvector_verified::{
- ProofEnvironment,
- fast_arena::FastTermArena,
cache::ConversionCache,
+ fast_arena::FastTermArena,
gated::{self, ProofKind, ProofTier},
- proof_store,
- vector_types,
+ proof_store, vector_types, ProofEnvironment,
};
use serde::Serialize;
use wasm_bindgen::prelude::*;
@@ -87,8 +85,7 @@ impl JsProofEnv {
/// Build a `RuVec n` type term. Returns term ID.
pub fn mk_vector_type(&mut self, dim: u32) -> Result {
- vector_types::mk_vector_type(&mut self.env, dim)
- .map_err(|e| JsError::new(&e.to_string()))
+ vector_types::mk_vector_type(&mut self.env, dim).map_err(|e| JsError::new(&e.to_string()))
}
/// Build a distance metric type term. Supported: "L2", "Cosine", "Dot".
@@ -108,16 +105,13 @@ impl JsProofEnv {
///
/// `flat_vectors` is a contiguous f32 array; each vector is `dim` elements.
/// Returns the number of vectors verified.
- pub fn verify_batch_flat(
- &mut self,
- dim: u32,
- flat_vectors: &[f32],
- ) -> Result {
+ pub fn verify_batch_flat(&mut self, dim: u32, flat_vectors: &[f32]) -> Result {
let d = dim as usize;
if flat_vectors.len() % d != 0 {
return Err(JsError::new(&format!(
"flat_vectors length {} not divisible by dim {}",
- flat_vectors.len(), dim
+ flat_vectors.len(),
+ dim
)));
}
let slices: Vec<&[f32]> = flat_vectors.chunks_exact(d).collect();
@@ -137,9 +131,14 @@ impl JsProofEnv {
pub fn route_proof(&self, kind: &str) -> Result {
let proof_kind = match kind {
"reflexivity" => ProofKind::Reflexivity,
- "dimension" => ProofKind::DimensionEquality { expected: 0, actual: 0 },
+ "dimension" => ProofKind::DimensionEquality {
+ expected: 0,
+ actual: 0,
+ },
"pipeline" => ProofKind::PipelineComposition { stages: 1 },
- other => ProofKind::Custom { estimated_complexity: other.parse().unwrap_or(10) },
+ other => ProofKind::Custom {
+ estimated_complexity: other.parse().unwrap_or(10),
+ },
};
let decision = gated::route_proof(proof_kind, &self.env);
let tier_name = match decision.tier {
@@ -152,8 +151,7 @@ impl JsProofEnv {
reason: decision.reason.to_string(),
estimated_steps: decision.estimated_steps,
};
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Create a proof attestation (82 bytes). Returns serializable object.
@@ -168,8 +166,7 @@ impl JsProofEnv {
reduction_steps: att.reduction_steps,
cache_hit_rate_bps: att.cache_hit_rate_bps,
};
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Get verification statistics.
@@ -187,8 +184,7 @@ impl JsProofEnv {
arena_hit_rate: arena_stats.cache_hit_rate(),
conversion_cache_hit_rate: cache_stats.hit_rate(),
};
- serde_wasm_bindgen::to_value(&result)
- .map_err(|e| JsError::new(&e.to_string()))
+ serde_wasm_bindgen::to_value(&result).map_err(|e| JsError::new(&e.to_string()))
}
/// Reset the environment (clears cache, resets counters, re-registers builtins).
diff --git a/crates/ruvector-verified/benches/arena_throughput.rs b/crates/ruvector-verified/benches/arena_throughput.rs
index 76c90270f..0ca3c8b4a 100644
--- a/crates/ruvector-verified/benches/arena_throughput.rs
+++ b/crates/ruvector-verified/benches/arena_throughput.rs
@@ -1,21 +1,17 @@
//! Arena throughput benchmarks.
-use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_env_alloc_sequential(c: &mut Criterion) {
let mut group = c.benchmark_group("env_alloc_sequential");
for count in [100, 1000, 10_000] {
- group.bench_with_input(
- BenchmarkId::from_parameter(count),
- &count,
- |b, &count| {
- b.iter(|| {
- let mut env = ruvector_verified::ProofEnvironment::new();
- for _ in 0..count {
- env.alloc_term();
- }
- });
- },
- );
+ group.bench_with_input(BenchmarkId::from_parameter(count), &count, |b, &count| {
+ b.iter(|| {
+ let mut env = ruvector_verified::ProofEnvironment::new();
+ for _ in 0..count {
+ env.alloc_term();
+ }
+ });
+ });
}
group.finish();
}
@@ -69,9 +65,7 @@ fn bench_pool_acquire_release(c: &mut Criterion) {
fn bench_attestation_roundtrip(c: &mut Criterion) {
c.bench_function("attestation_roundtrip", |b| {
- let att = ruvector_verified::ProofAttestation::new(
- [1u8; 32], [2u8; 32], 42, 9500,
- );
+ let att = ruvector_verified::ProofAttestation::new([1u8; 32], [2u8; 32], 42, 9500);
b.iter(|| {
let bytes = att.to_bytes();
ruvector_verified::proof_store::ProofAttestation::from_bytes(&bytes).unwrap();
diff --git a/crates/ruvector-verified/benches/proof_generation.rs b/crates/ruvector-verified/benches/proof_generation.rs
index 5cad8a04f..bca7f5ad8 100644
--- a/crates/ruvector-verified/benches/proof_generation.rs
+++ b/crates/ruvector-verified/benches/proof_generation.rs
@@ -1,19 +1,15 @@
//! Proof generation benchmarks.
-use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_prove_dim_eq(c: &mut Criterion) {
let mut group = c.benchmark_group("prove_dim_eq");
for dim in [32, 128, 384, 512, 1024, 4096] {
- group.bench_with_input(
- BenchmarkId::from_parameter(dim),
- &dim,
- |b, &dim| {
- b.iter(|| {
- let mut env = ruvector_verified::ProofEnvironment::new();
- ruvector_verified::prove_dim_eq(&mut env, dim, dim).unwrap();
- });
- },
- );
+ group.bench_with_input(BenchmarkId::from_parameter(dim), &dim, |b, &dim| {
+ b.iter(|| {
+ let mut env = ruvector_verified::ProofEnvironment::new();
+ ruvector_verified::prove_dim_eq(&mut env, dim, dim).unwrap();
+ });
+ });
}
group.finish();
}
@@ -32,47 +28,34 @@ fn bench_prove_dim_eq_cached(c: &mut Criterion) {
fn bench_mk_vector_type(c: &mut Criterion) {
let mut group = c.benchmark_group("mk_vector_type");
for dim in [128, 384, 768, 1536] {
- group.bench_with_input(
- BenchmarkId::from_parameter(dim),
- &dim,
- |b, &dim| {
- b.iter(|| {
- let mut env = ruvector_verified::ProofEnvironment::new();
- ruvector_verified::mk_vector_type(&mut env, dim).unwrap();
- });
- },
- );
+ group.bench_with_input(BenchmarkId::from_parameter(dim), &dim, |b, &dim| {
+ b.iter(|| {
+ let mut env = ruvector_verified::ProofEnvironment::new();
+ ruvector_verified::mk_vector_type(&mut env, dim).unwrap();
+ });
+ });
}
group.finish();
}
fn bench_proof_env_creation(c: &mut Criterion) {
c.bench_function("ProofEnvironment::new", |b| {
- b.iter(|| {
- ruvector_verified::ProofEnvironment::new()
- });
+ b.iter(|| ruvector_verified::ProofEnvironment::new());
});
}
fn bench_batch_verify(c: &mut Criterion) {
let mut group = c.benchmark_group("batch_verify");
for count in [10, 100, 1000] {
- group.bench_with_input(
- BenchmarkId::from_parameter(count),
- &count,
- |b, &count| {
- let vecs: Vec> = (0..count)
- .map(|_| vec![0.0f32; 128])
- .collect();
- let refs: Vec<&[f32]> = vecs.iter().map(|v| v.as_slice()).collect();
- b.iter(|| {
- let mut env = ruvector_verified::ProofEnvironment::new();
- ruvector_verified::vector_types::verify_batch_dimensions(
- &mut env, 128, &refs
- ).unwrap();
- });
- },
- );
+ group.bench_with_input(BenchmarkId::from_parameter(count), &count, |b, &count| {
+ let vecs: Vec> = (0..count).map(|_| vec![0.0f32; 128]).collect();
+ let refs: Vec<&[f32]> = vecs.iter().map(|v| v.as_slice()).collect();
+ b.iter(|| {
+ let mut env = ruvector_verified::ProofEnvironment::new();
+ ruvector_verified::vector_types::verify_batch_dimensions(&mut env, 128, &refs)
+ .unwrap();
+ });
+ });
}
group.finish();
}
diff --git a/crates/ruvector-verified/src/cache.rs b/crates/ruvector-verified/src/cache.rs
index 3758dc03f..1f2f119dd 100644
--- a/crates/ruvector-verified/src/cache.rs
+++ b/crates/ruvector-verified/src/cache.rs
@@ -31,7 +31,11 @@ pub struct CacheStats {
impl CacheStats {
pub fn hit_rate(&self) -> f64 {
let total = self.hits + self.misses;
- if total == 0 { 0.0 } else { self.hits as f64 / total as f64 }
+ if total == 0 {
+ 0.0
+ } else {
+ self.hits as f64 / total as f64
+ }
}
}
@@ -62,7 +66,9 @@ impl ConversionCache {
if entry.key_hash == hash && entry.key_hash != 0 {
self.stats.hits += 1;
self.history.push_back(hash);
- if self.history.len() > 64 { self.history.pop_front(); }
+ if self.history.len() > 64 {
+ self.history.pop_front();
+ }
Some(entry.result_id)
} else {
self.stats.misses += 1;
@@ -103,7 +109,9 @@ impl ConversionCache {
h = h.wrapping_mul(0x517cc1b727220a95);
h ^= ctx_len as u64;
h = h.wrapping_mul(0x6c62272e07bb0142);
- if h == 0 { h = 1; } // Reserve 0 for empty
+ if h == 0 {
+ h = 1;
+ } // Reserve 0 for empty
h
}
}
@@ -162,7 +170,9 @@ mod tests {
}
let mut hits = 0u32;
for i in 0..1000u32 {
- if cache.get(i, 0).is_some() { hits += 1; }
+ if cache.get(i, 0).is_some() {
+ hits += 1;
+ }
}
// Due to collisions, not all will be found, but most should
assert!(hits > 500, "expected >50% hit rate, got {hits}/1000");
diff --git a/crates/ruvector-verified/src/error.rs b/crates/ruvector-verified/src/error.rs
index d4e5a7eda..178ff90df 100644
--- a/crates/ruvector-verified/src/error.rs
+++ b/crates/ruvector-verified/src/error.rs
@@ -9,10 +9,7 @@ use thiserror::Error;
pub enum VerificationError {
/// Vector dimension does not match the index dimension.
#[error("dimension mismatch: expected {expected}, got {actual}")]
- DimensionMismatch {
- expected: u32,
- actual: u32,
- },
+ DimensionMismatch { expected: u32, actual: u32 },
/// The lean-agentic type checker rejected the proof term.
#[error("type check failed: {0}")]
@@ -24,9 +21,7 @@ pub enum VerificationError {
/// The conversion engine exhausted its fuel budget.
#[error("conversion timeout: exceeded {max_reductions} reduction steps")]
- ConversionTimeout {
- max_reductions: u32,
- },
+ ConversionTimeout { max_reductions: u32 },
/// Unification of proof constraints failed.
#[error("unification failed: {0}")]
@@ -34,15 +29,11 @@ pub enum VerificationError {
/// The arena ran out of term slots.
#[error("arena exhausted: {allocated} terms allocated")]
- ArenaExhausted {
- allocated: u32,
- },
+ ArenaExhausted { allocated: u32 },
/// A required declaration was not found in the proof environment.
#[error("declaration not found: {name}")]
- DeclarationNotFound {
- name: String,
- },
+ DeclarationNotFound { name: String },
/// Ed25519 proof signing or verification failed.
#[error("attestation error: {0}")]
@@ -58,7 +49,10 @@ mod tests {
#[test]
fn error_display_dimension_mismatch() {
- let e = VerificationError::DimensionMismatch { expected: 128, actual: 256 };
+ let e = VerificationError::DimensionMismatch {
+ expected: 128,
+ actual: 256,
+ };
assert_eq!(e.to_string(), "dimension mismatch: expected 128, got 256");
}
@@ -70,8 +64,13 @@ mod tests {
#[test]
fn error_display_timeout() {
- let e = VerificationError::ConversionTimeout { max_reductions: 10000 };
- assert_eq!(e.to_string(), "conversion timeout: exceeded 10000 reduction steps");
+ let e = VerificationError::ConversionTimeout {
+ max_reductions: 10000,
+ };
+ assert_eq!(
+ e.to_string(),
+ "conversion timeout: exceeded 10000 reduction steps"
+ );
}
#[test]
diff --git a/crates/ruvector-verified/src/fast_arena.rs b/crates/ruvector-verified/src/fast_arena.rs
index b3ee9c11d..53f91ba64 100644
--- a/crates/ruvector-verified/src/fast_arena.rs
+++ b/crates/ruvector-verified/src/fast_arena.rs
@@ -39,7 +39,11 @@ impl FastArenaStats {
/// Cache hit rate as a fraction (0.0 to 1.0).
pub fn cache_hit_rate(&self) -> f64 {
let total = self.cache_hits + self.cache_misses;
- if total == 0 { 0.0 } else { self.cache_hits as f64 / total as f64 }
+ if total == 0 {
+ 0.0
+ } else {
+ self.cache_hits as f64 / total as f64
+ }
}
}
diff --git a/crates/ruvector-verified/src/gated.rs b/crates/ruvector-verified/src/gated.rs
index a050a2de8..d3174357d 100644
--- a/crates/ruvector-verified/src/gated.rs
+++ b/crates/ruvector-verified/src/gated.rs
@@ -56,10 +56,7 @@ pub enum ProofKind {
/// - Single binder (lambda/pi): Standard(500)
/// - Nested binders or unknown: Deep
#[cfg(feature = "gated-proofs")]
-pub fn route_proof(
- proof_kind: ProofKind,
- _env: &ProofEnvironment,
-) -> TierDecision {
+pub fn route_proof(proof_kind: ProofKind, _env: &ProofEnvironment) -> TierDecision {
match proof_kind {
ProofKind::Reflexivity => TierDecision {
tier: ProofTier::Reflex,
@@ -77,14 +74,18 @@ pub fn route_proof(
estimated_steps: depth * 10,
},
ProofKind::TypeApplication { depth } => TierDecision {
- tier: ProofTier::Standard { max_fuel: depth * 100 },
+ tier: ProofTier::Standard {
+ max_fuel: depth * 100,
+ },
reason: "deep type application",
estimated_steps: depth * 50,
},
ProofKind::PipelineComposition { stages } => {
if stages <= 3 {
TierDecision {
- tier: ProofTier::Standard { max_fuel: stages * 200 },
+ tier: ProofTier::Standard {
+ max_fuel: stages * 200,
+ },
reason: "short pipeline composition",
estimated_steps: stages * 100,
}
@@ -96,7 +97,9 @@ pub fn route_proof(
}
}
}
- ProofKind::Custom { estimated_complexity } => {
+ ProofKind::Custom {
+ estimated_complexity,
+ } => {
if estimated_complexity < 10 {
TierDecision {
tier: ProofTier::Standard { max_fuel: 100 },
@@ -129,8 +132,12 @@ pub fn verify_tiered(
return Ok(env.alloc_term());
}
// Escalate to Standard
- verify_tiered(env, expected_id, actual_id,
- ProofTier::Standard { max_fuel: 100 })
+ verify_tiered(
+ env,
+ expected_id,
+ actual_id,
+ ProofTier::Standard { max_fuel: 100 },
+ )
}
ProofTier::Standard { max_fuel } => {
// Simulate bounded verification
@@ -179,7 +186,10 @@ mod tests {
fn test_route_dimension_equality() {
let env = ProofEnvironment::new();
let decision = route_proof(
- ProofKind::DimensionEquality { expected: 128, actual: 128 },
+ ProofKind::DimensionEquality {
+ expected: 128,
+ actual: 128,
+ },
&env,
);
assert_eq!(decision.tier, ProofTier::Reflex);
@@ -188,20 +198,14 @@ mod tests {
#[test]
fn test_route_shallow_application() {
let env = ProofEnvironment::new();
- let decision = route_proof(
- ProofKind::TypeApplication { depth: 1 },
- &env,
- );
+ let decision = route_proof(ProofKind::TypeApplication { depth: 1 }, &env);
assert!(matches!(decision.tier, ProofTier::Standard { .. }));
}
#[test]
fn test_route_long_pipeline() {
let env = ProofEnvironment::new();
- let decision = route_proof(
- ProofKind::PipelineComposition { stages: 10 },
- &env,
- );
+ let decision = route_proof(ProofKind::PipelineComposition { stages: 10 }, &env);
assert_eq!(decision.tier, ProofTier::Deep);
}
diff --git a/crates/ruvector-verified/src/invariants.rs b/crates/ruvector-verified/src/invariants.rs
index 85b15fc60..438d31458 100644
--- a/crates/ruvector-verified/src/invariants.rs
+++ b/crates/ruvector-verified/src/invariants.rs
@@ -32,17 +32,61 @@ pub mod symbols {
/// - `PipelineStage` : Type -> Type -> Type
 pub fn builtin_declarations() -> Vec<BuiltinDecl> {
vec![
- BuiltinDecl { name: symbols::NAT, arity: 0, doc: "Natural numbers" },
- BuiltinDecl { name: symbols::RUVEC, arity: 1, doc: "Dimension-indexed vector" },
- BuiltinDecl { name: symbols::EQ, arity: 2, doc: "Propositional equality" },
- BuiltinDecl { name: symbols::EQ_REFL, arity: 1, doc: "Reflexivity proof" },
- BuiltinDecl { name: symbols::DISTANCE_METRIC, arity: 0, doc: "Distance metric enum" },
- BuiltinDecl { name: symbols::L2, arity: 0, doc: "L2 Euclidean distance" },
- BuiltinDecl { name: symbols::COSINE, arity: 0, doc: "Cosine distance" },
- BuiltinDecl { name: symbols::DOT, arity: 0, doc: "Dot product distance" },
- BuiltinDecl { name: symbols::HNSW_INDEX, arity: 2, doc: "HNSW index type" },
- BuiltinDecl { name: symbols::INSERT_RESULT, arity: 0, doc: "Insert result type" },
- BuiltinDecl { name: symbols::PIPELINE_STAGE, arity: 2, doc: "Typed pipeline stage" },
+ BuiltinDecl {
+ name: symbols::NAT,
+ arity: 0,
+ doc: "Natural numbers",
+ },
+ BuiltinDecl {
+ name: symbols::RUVEC,
+ arity: 1,
+ doc: "Dimension-indexed vector",
+ },
+ BuiltinDecl {
+ name: symbols::EQ,
+ arity: 2,
+ doc: "Propositional equality",
+ },
+ BuiltinDecl {
+ name: symbols::EQ_REFL,
+ arity: 1,
+ doc: "Reflexivity proof",
+ },
+ BuiltinDecl {
+ name: symbols::DISTANCE_METRIC,
+ arity: 0,
+ doc: "Distance metric enum",
+ },
+ BuiltinDecl {
+ name: symbols::L2,
+ arity: 0,
+ doc: "L2 Euclidean distance",
+ },
+ BuiltinDecl {
+ name: symbols::COSINE,
+ arity: 0,
+ doc: "Cosine distance",
+ },
+ BuiltinDecl {
+ name: symbols::DOT,
+ arity: 0,
+ doc: "Dot product distance",
+ },
+ BuiltinDecl {
+ name: symbols::HNSW_INDEX,
+ arity: 2,
+ doc: "HNSW index type",
+ },
+ BuiltinDecl {
+ name: symbols::INSERT_RESULT,
+ arity: 0,
+ doc: "Insert result type",
+ },
+ BuiltinDecl {
+ name: symbols::PIPELINE_STAGE,
+ arity: 2,
+ doc: "Typed pipeline stage",
+ },
]
}
@@ -76,7 +120,11 @@ mod tests {
#[test]
fn builtin_declarations_complete() {
let decls = builtin_declarations();
- assert!(decls.len() >= 11, "expected at least 11 builtins, got {}", decls.len());
+ assert!(
+ decls.len() >= 11,
+ "expected at least 11 builtins, got {}",
+ decls.len()
+ );
}
#[test]
diff --git a/crates/ruvector-verified/src/lib.rs b/crates/ruvector-verified/src/lib.rs
index df4b7bbdd..db4decce0 100644
--- a/crates/ruvector-verified/src/lib.rs
+++ b/crates/ruvector-verified/src/lib.rs
@@ -17,23 +17,23 @@
pub mod error;
pub mod invariants;
-pub mod vector_types;
-pub mod proof_store;
pub mod pipeline;
+pub mod proof_store;
+pub mod vector_types;
+pub mod cache;
#[cfg(feature = "fast-arena")]
pub mod fast_arena;
-pub mod pools;
-pub mod cache;
#[cfg(feature = "gated-proofs")]
pub mod gated;
+pub mod pools;
// Re-exports
-pub use error::{VerificationError, Result};
-pub use vector_types::{mk_vector_type, mk_nat_literal, prove_dim_eq};
-pub use proof_store::ProofAttestation;
-pub use pipeline::VerifiedStage;
+pub use error::{Result, VerificationError};
pub use invariants::BuiltinDecl;
+pub use pipeline::VerifiedStage;
+pub use proof_store::ProofAttestation;
+pub use vector_types::{mk_nat_literal, mk_vector_type, prove_dim_eq};
/// The proof environment bundles verification state.
///
@@ -92,7 +92,9 @@ impl ProofEnvironment {
/// Allocate a new proof term ID.
pub fn alloc_term(&mut self) -> u32 {
let id = self.term_counter;
- self.term_counter = self.term_counter.checked_add(1)
+ self.term_counter = self
+ .term_counter
+ .checked_add(1)
.ok_or(VerificationError::ArenaExhausted { allocated: id })
.expect("arena overflow");
self.stats.proofs_constructed += 1;
@@ -106,9 +108,10 @@ impl ProofEnvironment {
/// Require a symbol index, or return DeclarationNotFound.
 pub fn require_symbol(&self, name: &str) -> Result<u32> {
- self.symbol_id(name).ok_or_else(|| {
- VerificationError::DeclarationNotFound { name: name.to_string() }
- })
+ self.symbol_id(name)
+ .ok_or_else(|| VerificationError::DeclarationNotFound {
+ name: name.to_string(),
+ })
}
/// Check the proof cache for a previously verified proof.
@@ -218,7 +221,10 @@ mod tests {
#[test]
fn verified_op_copy() {
- let op = VerifiedOp { value: 42u32, proof_id: 1 };
+ let op = VerifiedOp {
+ value: 42u32,
+ proof_id: 1,
+ };
let op2 = op; // Copy
assert_eq!(op.value, op2.value);
}
diff --git a/crates/ruvector-verified/src/pipeline.rs b/crates/ruvector-verified/src/pipeline.rs
index dfd3265f9..74e35e9e4 100644
--- a/crates/ruvector-verified/src/pipeline.rs
+++ b/crates/ruvector-verified/src/pipeline.rs
@@ -3,9 +3,9 @@
//! Provides `VerifiedStage` for type-safe pipeline stages and `compose_stages`
//! for proving that two stages can be composed (output type matches input type).
-use std::marker::PhantomData;
use crate::error::{Result, VerificationError};
use crate::ProofEnvironment;
+use std::marker::PhantomData;
/// A verified pipeline stage with proven input/output type compatibility.
///
@@ -94,7 +94,7 @@ pub fn compose_chain(
) -> Result<(u32, u32, u32)> {
if stages.is_empty() {
return Err(VerificationError::ProofConstructionFailed(
- "empty pipeline chain".into()
+ "empty pipeline chain".into(),
));
}
@@ -145,8 +145,7 @@ mod tests {
fn test_compose_stages_matching() {
let mut env = ProofEnvironment::new();
- let f: VerifiedStage =
- VerifiedStage::new("embed", 0, 1, 2);
+ let f: VerifiedStage = VerifiedStage::new("embed", 0, 1, 2);
let g: VerifiedStage =
VerifiedStage::new("align", 1, 2, 3);
@@ -162,8 +161,7 @@ mod tests {
fn test_compose_stages_mismatch() {
let mut env = ProofEnvironment::new();
- let f: VerifiedStage =
- VerifiedStage::new("embed", 0, 1, 2);
+ let f: VerifiedStage = VerifiedStage::new("embed", 0, 1, 2);
let g: VerifiedStage =
VerifiedStage::new("align", 1, 99, 3); // 99 != 2
@@ -177,12 +175,10 @@ mod tests {
fn test_compose_three_stages() {
let mut env = ProofEnvironment::new();
- let f: VerifiedStage =
- VerifiedStage::new("embed", 0, 1, 2);
+ let f: VerifiedStage = VerifiedStage::new("embed", 0, 1, 2);
let g: VerifiedStage =
VerifiedStage::new("align", 1, 2, 3);
- let h: VerifiedStage =
- VerifiedStage::new("call", 2, 3, 4);
+ let h: VerifiedStage = VerifiedStage::new("call", 2, 3, 4);
let fg = compose_stages(&f, &g, &mut env).unwrap();
let fgh = compose_stages(&fg, &h, &mut env).unwrap();
diff --git a/crates/ruvector-verified/src/proof_store.rs b/crates/ruvector-verified/src/proof_store.rs
index f252ff001..ec6c17773 100644
--- a/crates/ruvector-verified/src/proof_store.rs
+++ b/crates/ruvector-verified/src/proof_store.rs
@@ -5,8 +5,8 @@
//! computed using SipHash-2-4 keyed MAC over actual proof content,
//! not placeholder values.
-use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
/// Witness type code for formal verification proofs.
/// Extends existing codes: 0x01=PROVENANCE, 0x02=COMPUTATION.
@@ -79,18 +79,13 @@ impl ProofAttestation {
let mut environment_hash = [0u8; 32];
environment_hash.copy_from_slice(&data[32..64]);
- let verification_timestamp_ns = u64::from_le_bytes(
- data[64..72].try_into().map_err(|_| "bad timestamp")?
- );
- let verifier_version = u32::from_le_bytes(
- data[72..76].try_into().map_err(|_| "bad version")?
- );
- let reduction_steps = u32::from_le_bytes(
- data[76..80].try_into().map_err(|_| "bad steps")?
- );
- let cache_hit_rate_bps = u16::from_le_bytes(
- data[80..82].try_into().map_err(|_| "bad rate")?
- );
+ let verification_timestamp_ns =
+ u64::from_le_bytes(data[64..72].try_into().map_err(|_| "bad timestamp")?);
+ let verifier_version =
+ u32::from_le_bytes(data[72..76].try_into().map_err(|_| "bad version")?);
+ let reduction_steps = u32::from_le_bytes(data[76..80].try_into().map_err(|_| "bad steps")?);
+ let cache_hit_rate_bps =
+ u16::from_le_bytes(data[80..82].try_into().map_err(|_| "bad rate")?);
Ok(Self {
proof_term_hash,
@@ -129,10 +124,7 @@ fn siphash_256(data: &[u8]) -> [u8; 32] {
///
/// Hashes are computed over actual proof and environment state, not placeholder
/// values, providing tamper detection for proof attestations (SEC-002 fix).
-pub fn create_attestation(
- env: &crate::ProofEnvironment,
- proof_id: u32,
-) -> ProofAttestation {
+pub fn create_attestation(env: &crate::ProofEnvironment, proof_id: u32) -> ProofAttestation {
// Build proof content buffer: proof_id + terms_allocated + all stats
let stats = env.stats();
let mut proof_content = Vec::with_capacity(64);
@@ -249,10 +241,16 @@ mod tests {
let env_nonzero = att.environment_hash.iter().filter(|&&b| b != 0).count();
// At least half the bytes should be non-zero for a proper hash
- assert!(proof_nonzero >= 16,
- "proof_term_hash has too many zero bytes: {}/32 non-zero", proof_nonzero);
- assert!(env_nonzero >= 16,
- "environment_hash has too many zero bytes: {}/32 non-zero", env_nonzero);
+ assert!(
+ proof_nonzero >= 16,
+ "proof_term_hash has too many zero bytes: {}/32 non-zero",
+ proof_nonzero
+ );
+ assert!(
+ env_nonzero >= 16,
+ "environment_hash has too many zero bytes: {}/32 non-zero",
+ env_nonzero
+ );
}
#[test]
diff --git a/crates/ruvector-verified/src/vector_types.rs b/crates/ruvector-verified/src/vector_types.rs
index ce4041aea..b60b20de7 100644
--- a/crates/ruvector-verified/src/vector_types.rs
+++ b/crates/ruvector-verified/src/vector_types.rs
@@ -58,11 +58,7 @@ pub fn mk_distance_metric(env: &mut ProofEnvironment, metric: &str) -> Result Result {
+pub fn mk_hnsw_index_type(env: &mut ProofEnvironment, dim: u32, metric: &str) -> Result<u32> {
let _idx_sym = env.require_symbol(symbols::HNSW_INDEX)?;
let _dim_term = mk_nat_literal(env, dim)?;
let _metric_term = mk_distance_metric(env, metric)?;
@@ -73,11 +69,7 @@ pub fn mk_hnsw_index_type(
///
/// If `expected != actual`, returns `DimensionMismatch` error.
/// If equal, constructs a `refl` proof term: `Eq.refl : expected = actual`.
-pub fn prove_dim_eq(
- env: &mut ProofEnvironment,
- expected: u32,
- actual: u32,
-) -> Result {
+pub fn prove_dim_eq(env: &mut ProofEnvironment, expected: u32, actual: u32) -> Result<u32> {
if expected != actual {
return Err(VerificationError::DimensionMismatch { expected, actual });
}
diff --git a/crates/thermorust/Cargo.toml b/crates/thermorust/Cargo.toml
new file mode 100644
index 000000000..524e4adee
--- /dev/null
+++ b/crates/thermorust/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "thermorust"
+version = "0.1.0"
+edition = "2021"
+license = "MIT OR Apache-2.0"
+authors = ["rUv "]
+repository = "https://github.com/ruvnet/ruvector"
+homepage = "https://ruv.io"
+documentation = "https://docs.rs/thermorust"
+description = "Thermodynamic neural motif engine: energy-driven state transitions with Landauer dissipation and Langevin noise"
+keywords = ["thermodynamics", "neural", "ising", "langevin", "physics"]
+categories = ["science", "algorithms", "simulation"]
+readme = "README.md"
+
+[dependencies]
+rand = { version = "0.8", features = ["small_rng"] }
+rand_distr = "0.4"
+
+[dev-dependencies]
+criterion = { version = "0.5", features = ["html_reports"] }
+
+[[bench]]
+name = "motif_bench"
+harness = false
diff --git a/crates/thermorust/README.md b/crates/thermorust/README.md
new file mode 100644
index 000000000..b2ff34015
--- /dev/null
+++ b/crates/thermorust/README.md
@@ -0,0 +1,71 @@
+# thermorust
+
+A minimal thermodynamic neural-motif engine for Rust. Treats computation as
+**energy-driven state transitions** with Landauer-style dissipation tracking
+and Langevin/Metropolis noise baked in.
+
+## Features
+
+- **Ising and soft-spin Hamiltonians** with configurable coupling matrices and local fields.
+- **Metropolis-Hastings** (discrete) and **overdamped Langevin** (continuous) dynamics.
+- **Landauer dissipation accounting** -- every accepted irreversible transition charges
+ kT ln 2 of heat, giving a physical energy audit of your computation.
+- **Langevin and Poisson spike noise** sources satisfying the fluctuation-dissipation theorem.
+- **Thermodynamic observables** -- magnetisation, pattern overlap, binary entropy,
+ free energy, and running energy/dissipation traces.
+- **Pre-wired motif factories** -- ring, fully-connected, Hopfield memory, and
+ random soft-spin networks ready to simulate out of the box.
+- **Simulated annealing** helpers for both discrete and continuous models.
+
+## Quick start
+
+```rust
+use thermorust::{motifs::IsingMotif, dynamics::{Params, anneal_discrete}};
+use rand::SeedableRng;
+
+let mut motif = IsingMotif::ring(16, 0.2);
+let params = Params::default_n(16);
+let mut rng = rand::rngs::StdRng::seed_from_u64(42);
+
+let trace = anneal_discrete(
+ &motif.model, &mut motif.state, &params, 10_000, 100, &mut rng,
+);
+println!("Mean energy: {:.3}", trace.mean_energy());
+println!("Heat shed: {:.3e} J", trace.total_dissipation());
+```
+
+### Continuous soft-spin simulation
+
+```rust
+use thermorust::{motifs::SoftSpinMotif, dynamics::{Params, anneal_continuous}};
+use rand::SeedableRng;
+
+let mut motif = SoftSpinMotif::random(32, 1.0, 0.5, 42);
+let params = Params::default_n(32);
+let mut rng = rand::rngs::StdRng::seed_from_u64(7);
+
+let trace = anneal_continuous(
+ &motif.model, &mut motif.state, &params, 5_000, 50, &mut rng,
+);
+```
+
+## Modules
+
+| Module | Description |
+|--------|-------------|
+| `state` | `State` -- activation vector with cumulative dissipation counter |
+| `energy` | `EnergyModel` trait, `Ising`, `SoftSpin`, `Couplings` |
+| `dynamics` | `step_discrete` (MH), `step_continuous` (Langevin), annealers |
+| `noise` | Langevin Gaussian and Poisson spike noise sources |
+| `metrics` | Magnetisation, overlap, entropy, free energy, `Trace` |
+| `motifs` | Pre-wired ring, fully-connected, Hopfield, and soft-spin motifs |
+
+## Dependencies
+
+- `rand` 0.8 (with `small_rng`)
+- `rand_distr` 0.4
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+or [MIT License](http://opensource.org/licenses/MIT) at your option.
diff --git a/crates/thermorust/benches/motif_bench.rs b/crates/thermorust/benches/motif_bench.rs
new file mode 100644
index 000000000..b218d0003
--- /dev/null
+++ b/crates/thermorust/benches/motif_bench.rs
@@ -0,0 +1,98 @@
+//! Criterion microbenchmarks for thermorust motifs.
+
+use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
+use rand::SeedableRng;
+use thermorust::{
+ dynamics::{anneal_continuous, anneal_discrete, step_discrete, Params},
+ energy::{Couplings, EnergyModel, Ising},
+ motifs::{IsingMotif, SoftSpinMotif},
+ State,
+};
+
+fn bench_discrete_step(c: &mut Criterion) {
+ let mut group = c.benchmark_group("step_discrete");
+ for n in [8, 16, 32] {
+ group.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
+ let model = Ising::new(Couplings::ferromagnetic_ring(n, 0.2));
+ let p = Params::default_n(n);
+ let mut s = State::ones(n);
+ let mut rng = rand::rngs::SmallRng::seed_from_u64(1);
+ b.iter(|| {
+ step_discrete(
+ black_box(&model),
+ black_box(&mut s),
+ black_box(&p),
+ &mut rng,
+ );
+ });
+ });
+ }
+ group.finish();
+}
+
+fn bench_10k_steps(c: &mut Criterion) {
+ let mut group = c.benchmark_group("10k_steps");
+ for n in [16, 32] {
+ group.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
+ b.iter(|| {
+ let mut motif = IsingMotif::ring(n, 0.2);
+ let p = Params::default_n(n);
+ let mut rng = rand::rngs::SmallRng::seed_from_u64(123);
+ let _trace = anneal_discrete(
+ black_box(&motif.model),
+ black_box(&mut motif.state),
+ black_box(&p),
+ black_box(10_000),
+ 0,
+ &mut rng,
+ );
+ black_box(motif.state.dissipated_j)
+ });
+ });
+ }
+ group.finish();
+}
+
+fn bench_langevin_10k(c: &mut Criterion) {
+ let mut group = c.benchmark_group("langevin_10k");
+ for n in [8, 16] {
+ group.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
+ b.iter(|| {
+ let mut motif = SoftSpinMotif::random(n, 1.0, 0.5, 42);
+ let p = Params::default_n(n);
+ let mut rng = rand::rngs::SmallRng::seed_from_u64(77);
+ anneal_continuous(
+ black_box(&motif.model),
+ black_box(&mut motif.state),
+ black_box(&p),
+ black_box(10_000),
+ 0,
+ &mut rng,
+ );
+ black_box(motif.state.dissipated_j)
+ });
+ });
+ }
+ group.finish();
+}
+
+fn bench_energy_evaluation(c: &mut Criterion) {
+ let mut group = c.benchmark_group("energy_eval");
+ for n in [8, 16, 32] {
+ let model = Ising::new(Couplings::ferromagnetic_ring(n, 0.2));
+ let s = State::ones(n);
+ group.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, _| {
+ b.iter(|| black_box(model.energy(black_box(&s))));
+ });
+ }
+ group.finish();
+}
+
+criterion_group!(
+ benches,
+ bench_discrete_step,
+ bench_10k_steps,
+ bench_langevin_10k,
+ bench_energy_evaluation,
+);
+criterion_main!(benches);
diff --git a/crates/thermorust/src/dynamics.rs b/crates/thermorust/src/dynamics.rs
new file mode 100644
index 000000000..d530727b0
--- /dev/null
+++ b/crates/thermorust/src/dynamics.rs
@@ -0,0 +1,163 @@
+//! Stochastic dynamics: Metropolis-Hastings (discrete) and overdamped Langevin (continuous).
+
+use crate::energy::EnergyModel;
+use crate::noise::{langevin_noise, poisson_spike};
+use crate::state::State;
+use rand::Rng;
+
+/// Parameters governing thermal dynamics and Landauer dissipation accounting.
+#[derive(Clone, Debug)]
+pub struct Params {
+ /// Inverse temperature β = 1/(kT). Higher β → colder, less noise.
+ pub beta: f32,
+ /// Step size η for continuous (Langevin) updates.
+ pub eta: f32,
+ /// Joules of heat attributed to each accepted irreversible transition.
+ /// Landauer's limit: kT ln2 ≈ 2.87 × 10⁻²¹ J at 300 K.
+ pub irreversible_cost: f64,
+ /// Which unit indices are clamped (fixed inputs).
+ pub clamp_mask: Vec<bool>,
+}
+
+impl Params {
+ /// Sensible defaults: room-temperature Landauer limit, no clamping.
+ pub fn default_n(n: usize) -> Self {
+ Self {
+ beta: 2.0,
+ eta: 0.05,
+ irreversible_cost: 2.87e-21, // kT ln2 at 300 K in Joules
+ clamp_mask: vec![false; n],
+ }
+ }
+
+ #[inline]
+ fn is_clamped(&self, i: usize) -> bool {
+ self.clamp_mask.get(i).copied().unwrap_or(false)
+ }
+}
+
+/// **Metropolis-Hastings** single spin-flip update for *discrete* Ising states.
+///
+/// Proposes flipping spin `i` (chosen uniformly at random), accepts with the
+/// Boltzmann probability, and charges `p.irreversible_cost` on each accepted
+/// non-zero-ΔE transition.
+pub fn step_discrete<M: EnergyModel>(model: &M, s: &mut State, p: &Params, rng: &mut impl Rng) {
+ let n = s.x.len();
+ if n == 0 {
+ return;
+ }
+ let i: usize = rng.gen_range(0..n);
+ if p.is_clamped(i) {
+ return;
+ }
+
+ let old_e = model.energy(s);
+ let old_si = s.x[i];
+ s.x[i] = -old_si;
+ let new_e = model.energy(s);
+ let d_e = (new_e - old_e) as f64;
+
+ let accept = d_e <= 0.0 || {
+ let prob = (-p.beta as f64 * d_e).exp();
+ rng.gen::() < prob
+ };
+
+ if accept {
+ if d_e != 0.0 {
+ s.dissipated_j += p.irreversible_cost;
+ }
+ } else {
+ s.x[i] = old_si;
+ }
+}
+
+/// **Overdamped Langevin** update for *continuous* activations.
+///
+/// For each unclamped unit `i`:
+/// xᵢ ← xᵢ − η · ∂H/∂xᵢ + √(2/β) · ξ
+/// where ξ ~ N(0,1). The gradient is estimated by central differences.
+///
+/// Optionally clips activations to `[-1, 1]` after the update.
+pub fn step_continuous<M: EnergyModel>(model: &M, s: &mut State, p: &Params, rng: &mut impl Rng) {
+ let n = s.x.len();
+ let eps = 1e-3_f32;
+
+ for i in 0..n {
+ if p.is_clamped(i) {
+ continue;
+ }
+ let old = s.x[i];
+
+ // Central-difference gradient ∂H/∂xᵢ
+ s.x[i] = old + eps;
+ let e_plus = model.energy(s);
+ s.x[i] = old - eps;
+ let e_minus = model.energy(s);
+ s.x[i] = old;
+
+ let grad = (e_plus - e_minus) / (2.0 * eps);
+ let noise = langevin_noise(p.beta, rng);
+ let dx = -p.eta * grad + noise;
+
+ let old_e = model.energy(s);
+ s.x[i] = (old + dx).clamp(-1.0, 1.0);
+ let new_e = model.energy(s);
+
+ if (new_e as f64) < (old_e as f64) {
+ s.dissipated_j += p.irreversible_cost;
+ }
+ }
+}
+
+/// Run `steps` discrete Metropolis updates, recording every `record_every`th
+/// step into the optional `trace`.
+pub fn anneal_discrete<M: EnergyModel>(
+ model: &M,
+ s: &mut State,
+ p: &Params,
+ steps: usize,
+ record_every: usize,
+ rng: &mut impl Rng,
+) -> crate::metrics::Trace {
+ let mut trace = crate::metrics::Trace::new();
+ for step in 0..steps {
+ step_discrete(model, s, p, rng);
+ if record_every > 0 && step % record_every == 0 {
+ trace.push(model.energy(s), s.dissipated_j);
+ }
+ }
+ trace
+}
+
+/// Run `steps` Langevin updates, recording every `record_every`th step.
+pub fn anneal_continuous<M: EnergyModel>(
+ model: &M,
+ s: &mut State,
+ p: &Params,
+ steps: usize,
+ record_every: usize,
+ rng: &mut impl Rng,
+) -> crate::metrics::Trace {
+ let mut trace = crate::metrics::Trace::new();
+ for step in 0..steps {
+ step_continuous(model, s, p, rng);
+ if record_every > 0 && step % record_every == 0 {
+ trace.push(model.energy(s), s.dissipated_j);
+ }
+ }
+ trace
+}
+
+/// Inject Poisson spike noise into `s`, bypassing thermal Boltzmann acceptance.
+///
+/// Each unit has an independent probability `rate` (per step) of receiving a
+/// kick of magnitude `kick`, with a random sign.
+pub fn inject_spikes(s: &mut State, p: &Params, rate: f64, kick: f32, rng: &mut impl Rng) {
+ for (i, xi) in s.x.iter_mut().enumerate() {
+ if p.is_clamped(i) {
+ continue;
+ }
+ let dk = poisson_spike(rate, kick, rng);
+ *xi = (*xi + dk).clamp(-1.0, 1.0);
+ }
+}
diff --git a/crates/thermorust/src/energy.rs b/crates/thermorust/src/energy.rs
new file mode 100644
index 000000000..40268a0a2
--- /dev/null
+++ b/crates/thermorust/src/energy.rs
@@ -0,0 +1,126 @@
+//! Energy models: Ising/Hopfield Hamiltonian and the `EnergyModel` trait.
+
+use crate::state::State;
+
+/// Coupling weights and local fields for a fully-connected motif.
+///
+/// `j` is a flattened row-major `n×n` symmetric matrix; `h` is the `n`-vector
+/// of local (bias) fields.
+#[derive(Clone, Debug)]
+pub struct Couplings {
+ /// Symmetric coupling matrix J_ij (row-major, length n²).
+ pub j: Vec,
+ /// Local field h_i (length n).
+ pub h: Vec,
+}
+
+impl Couplings {
+ /// Build zero-coupling weights for `n` units.
+ pub fn zeros(n: usize) -> Self {
+ Self {
+ j: vec![0.0; n * n],
+ h: vec![0.0; n],
+ }
+ }
+
+ /// Build ferromagnetic ring couplings: J_{i, i+1} = strength.
+ pub fn ferromagnetic_ring(n: usize, strength: f32) -> Self {
+ let mut j = vec![0.0; n * n];
+ for i in 0..n {
+ let next = (i + 1) % n;
+ j[i * n + next] = strength;
+ j[next * n + i] = strength;
+ }
+ Self { j, h: vec![0.0; n] }
+ }
+
+ /// Build random Hopfield memory couplings from a list of patterns.
+ ///
+ /// Patterns should be `±1` binary vectors of length `n`.
+ pub fn hopfield_memory(n: usize, patterns: &[Vec<f32>]) -> Self {
+ let mut j = vec![0.0f32; n * n];
+ let scale = 1.0 / n as f32;
+ for pat in patterns {
+ assert_eq!(pat.len(), n, "pattern length must equal n");
+ for i in 0..n {
+ for k in (i + 1)..n {
+ let dj = scale * pat[i] * pat[k];
+ j[i * n + k] += dj;
+ j[k * n + i] += dj;
+ }
+ }
+ }
+ Self { j, h: vec![0.0; n] }
+ }
+}
+
+/// Trait implemented by any Hamiltonian that can return a scalar energy.
+pub trait EnergyModel {
+ /// Compute the total energy of `state`.
+ fn energy(&self, state: &State) -> f32;
+}
+
+/// Ising/Hopfield Hamiltonian:
+/// H = −Σᵢ hᵢ xᵢ − Σᵢ<ⱼ Jᵢⱼ xᵢ xⱼ
+#[derive(Clone, Debug)]
+pub struct Ising {
+ pub c: Couplings,
+}
+
+impl Ising {
+ pub fn new(c: Couplings) -> Self {
+ Self { c }
+ }
+}
+
+impl EnergyModel for Ising {
+ fn energy(&self, s: &State) -> f32 {
+ let n = s.x.len();
+ debug_assert_eq!(self.c.h.len(), n);
+ let mut e = 0.0_f32;
+ for i in 0..n {
+ e -= self.c.h[i] * s.x[i];
+ for j in (i + 1)..n {
+ e -= self.c.j[i * n + j] * s.x[i] * s.x[j];
+ }
+ }
+ e
+ }
+}
+
+/// Soft-spin (XY-like) model with continuous activations.
+///
+/// Adds a quartic double-well self-energy per unit: −a·x² + b·x⁴
+/// which promotes ±1 attractors.
+#[derive(Clone, Debug)]
+pub struct SoftSpin {
+ pub c: Couplings,
+ /// Well depth coefficient (>0 pushes spins toward ±1).
+ pub a: f32,
+ /// Quartic stiffness (>0 keeps spins bounded).
+ pub b: f32,
+}
+
+impl SoftSpin {
+ pub fn new(c: Couplings, a: f32, b: f32) -> Self {
+ Self { c, a, b }
+ }
+}
+
+impl EnergyModel for SoftSpin {
+ fn energy(&self, s: &State) -> f32 {
+ let n = s.x.len();
+ let mut e = 0.0_f32;
+ for i in 0..n {
+ let xi = s.x[i];
+ // Double-well self-energy
+ e += -self.a * xi * xi + self.b * xi * xi * xi * xi;
+ // Local field
+ e -= self.c.h[i] * xi;
+ for j in (i + 1)..n {
+ e -= self.c.j[i * n + j] * xi * s.x[j];
+ }
+ }
+ e
+ }
+}
diff --git a/crates/thermorust/src/lib.rs b/crates/thermorust/src/lib.rs
new file mode 100644
index 000000000..e2418e182
--- /dev/null
+++ b/crates/thermorust/src/lib.rs
@@ -0,0 +1,45 @@
+//! # thermorust
+//!
+//! A minimal thermodynamic neural-motif crate for Rust.
+//!
+//! Treats computation as **energy-driven state transitions** with
+//! Landauer-style dissipation and Langevin/Metropolis noise baked in.
+//!
+//! ## Core abstractions
+//!
+//! | Module | What it provides |
+//! |--------|-----------------|
+//! | [`state`] | `State` – activation vector + dissipated-joules counter |
+//! | [`energy`] | `EnergyModel` trait, `Ising`, `SoftSpin`, `Couplings` |
+//! | [`dynamics`] | `step_discrete` (MH), `step_continuous` (Langevin), annealers |
+//! | [`noise`] | Langevin & Poisson spike noise sources |
+//! | [`metrics`] | Magnetisation, overlap, entropy, free energy, `Trace` |
+//! | [`motifs`] | Pre-wired ring / fully-connected / Hopfield / soft-spin motifs |
+//!
+//! ## Quick start
+//!
+//! ```no_run
+//! use thermorust::{motifs::IsingMotif, dynamics::{Params, anneal_discrete}};
+//! use rand::SeedableRng;
+//!
+//! let mut motif = IsingMotif::ring(16, 0.2);
+//! let params = Params::default_n(16);
+//! let mut rng = rand::rngs::StdRng::seed_from_u64(42);
+//!
+//! let trace = anneal_discrete(&motif.model, &mut motif.state, &params, 10_000, 100, &mut rng);
+//! println!("Mean energy: {:.3}", trace.mean_energy());
+//! println!("Heat shed: {:.3e} J", trace.total_dissipation());
+//! ```
+
+pub mod dynamics;
+pub mod energy;
+pub mod metrics;
+pub mod motifs;
+pub mod noise;
+pub mod state;
+
+// Re-export the most commonly used items at the crate root.
+pub use dynamics::{anneal_continuous, anneal_discrete, step_continuous, step_discrete, Params};
+pub use energy::{Couplings, EnergyModel, Ising, SoftSpin};
+pub use metrics::{magnetisation, overlap, Trace};
+pub use state::State;
diff --git a/crates/thermorust/src/metrics.rs b/crates/thermorust/src/metrics.rs
new file mode 100644
index 000000000..cb01ed0cb
--- /dev/null
+++ b/crates/thermorust/src/metrics.rs
@@ -0,0 +1,95 @@
+//! Thermodynamic observables: magnetisation, entropy, free energy, overlap.
+
+use crate::state::State;
+
+/// Mean magnetisation: m = (1/n) Σᵢ xᵢ ∈ [−1, 1].
+pub fn magnetisation(s: &State) -> f32 {
+ if s.x.is_empty() {
+ return 0.0;
+ }
+ s.x.iter().sum::<f32>() / s.x.len() as f32
+}
+
+/// Mean-squared activation: ⟨x²⟩.
+pub fn mean_sq(s: &State) -> f32 {
+ if s.x.is_empty() {
+ return 0.0;
+ }
+ s.x.iter().map(|xi| xi * xi).sum::