diff --git a/.github/workflows/postgres-extension-ci.yml b/.github/workflows/postgres-extension-ci.yml
index 23a31d923..2b7bd275e 100644
--- a/.github/workflows/postgres-extension-ci.yml
+++ b/.github/workflows/postgres-extension-ci.yml
@@ -89,7 +89,7 @@ jobs:
${{ runner.os }}-cargo-build-target-${{ matrix.pg_version }}-
- name: Install cargo-pgrx
- run: cargo install cargo-pgrx --version 0.12.0 --locked
+ run: cargo install cargo-pgrx --version 0.12.9 --locked
- name: Initialize pgrx (Ubuntu)
if: runner.os == 'Linux'
@@ -114,7 +114,7 @@ jobs:
working-directory: crates/ruvector-postgres
- name: Run tests
- run: cargo pgrx test pg${{ matrix.pg_version }} --no-default-features
+ run: cargo pgrx test pg${{ matrix.pg_version }} --no-default-features --features pg${{ matrix.pg_version }}
working-directory: crates/ruvector-postgres
# Test with all features enabled
@@ -133,11 +133,13 @@ jobs:
- name: Install PostgreSQL
run: |
+ sudo install -d /usr/share/postgresql-common/pgdg && sudo wget --quiet -O /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc https://www.postgresql.org/media/keys/ACCC4CF8.asc
+ sudo sh -c 'echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
sudo apt-get update
sudo apt-get install -y postgresql-17 postgresql-server-dev-17
- name: Install cargo-pgrx
- run: cargo install cargo-pgrx --version 0.12.0 --locked
+ run: cargo install cargo-pgrx --version 0.12.9 --locked
- name: Initialize pgrx
run: cargo pgrx init --pg17=/usr/lib/postgresql/17/bin/pg_config
@@ -150,7 +152,7 @@ jobs:
- name: Test with all features
run: |
- cargo pgrx test pg17 --no-default-features --features index-all,quant-all
+ cargo pgrx test pg17 --no-default-features --features pg17,index-all,quant-all
working-directory: crates/ruvector-postgres
# Benchmark on pull requests
@@ -170,11 +172,13 @@ jobs:
- name: Install PostgreSQL
run: |
+ sudo install -d /usr/share/postgresql-common/pgdg && sudo wget --quiet -O /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc https://www.postgresql.org/media/keys/ACCC4CF8.asc
+ sudo sh -c 'echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
sudo apt-get update
sudo apt-get install -y postgresql-17 postgresql-server-dev-17
- name: Install cargo-pgrx
- run: cargo install cargo-pgrx --version 0.12.0 --locked
+ run: cargo install cargo-pgrx --version 0.12.9 --locked
- name: Initialize pgrx
run: cargo pgrx init --pg17=/usr/lib/postgresql/17/bin/pg_config
@@ -237,7 +241,7 @@ jobs:
sudo apt-get install -y postgresql-${{ matrix.pg_version }} postgresql-server-dev-${{ matrix.pg_version }}
- name: Install cargo-pgrx
- run: cargo install cargo-pgrx --version 0.12.0 --locked
+ run: cargo install cargo-pgrx --version 0.12.9 --locked
- name: Initialize pgrx
run: cargo pgrx init --pg${{ matrix.pg_version }}=/usr/lib/postgresql/${{ matrix.pg_version }}/bin/pg_config
diff --git a/.github/workflows/ruvector-postgres-ci.yml b/.github/workflows/ruvector-postgres-ci.yml
index a7a21c433..8d8271ea1 100644
--- a/.github/workflows/ruvector-postgres-ci.yml
+++ b/.github/workflows/ruvector-postgres-ci.yml
@@ -36,7 +36,7 @@ on:
env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
- PGRX_VERSION: '0.12.6'
+ PGRX_VERSION: '0.12.9'
RUST_VERSION: 'stable'
# Concurrency control - cancel in-progress runs for same PR
diff --git a/Cargo.lock b/Cargo.lock
index 6104fc5d9..e6d8326bc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8845,7 +8845,7 @@ dependencies = [
[[package]]
name = "ruvector-postgres"
-version = "2.0.3"
+version = "0.3.0"
dependencies = [
"approx",
"bincode 1.3.3",
@@ -8870,7 +8870,12 @@ dependencies = [
"rand_chacha 0.3.1",
"rayon",
"rkyv",
+ "ruvector-attention 0.1.32",
+ "ruvector-domain-expansion",
+ "ruvector-math",
"ruvector-mincut-gated-transformer 0.1.0",
+ "ruvector-solver",
+ "ruvector-sona 0.1.6",
"serde",
"serde_json",
"simsimd",
diff --git a/README.md b/README.md
index 175fd5bba..12b5c3c4e 100644
--- a/README.md
+++ b/README.md
@@ -32,10 +32,10 @@ Most vector databases are static — they store embeddings and search them. That
| 🌿 **Git-like branching** | ❌ | ✅ Branch your data like code — only changes are copied |
| ⚡ **Sublinear Solvers** | ❌ | ✅ O(log n) sparse linear systems, PageRank, spectral methods |
-**One package. Everything included:** vector search, graph queries, GNN learning, distributed clustering, local LLMs, 40+ attention mechanisms, cognitive containers ([RVF](./crates/rvf/README.md) — self-booting `.rvf` files with eBPF, witness chains, and COW branching), and WASM support.
+**One package. Everything included:** vector search, graph queries, GNN learning, distributed clustering, local LLMs, 46 attention mechanisms, cognitive containers ([RVF](./crates/rvf/README.md) — self-booting `.rvf` files with eBPF, witness chains, and COW branching), and WASM support.
-📋 See Full Capabilities (43 features)
+📋 See Full Capabilities (49 features)
**Core Vector Database**
| # | Capability | What It Does |
@@ -60,57 +60,63 @@ Most vector databases are static — they store embeddings and search them. That
| 10 | **Run LLMs locally** | ruvllm with GGUF, Metal/CUDA/ANE acceleration |
| 11 | **RuvLTRA models** | Pre-trained GGUF for routing & embeddings (<10ms) → [HuggingFace](https://huggingface.co/ruv/ruvltra) |
| 12 | **SONA learning** | Self-Optimizing Neural Architecture with LoRA, EWC++ |
-| 13 | **40+ attention mechanisms** | Flash, linear, graph, hyperbolic, mincut-gated (50% compute) |
+| 13 | **46 attention mechanisms** | Flash, linear, graph, hyperbolic, mincut-gated (50% compute) |
| 14 | **Spiking neural networks** | Event-driven neuromorphic computing |
| 15 | **Mincut-gated transformer** | Dynamic attention via graph min-cut optimization |
| 16 | **Route AI requests** | Semantic routing + FastGRNN for LLM optimization |
+| 17 | **Sublinear Solvers in SQL** | PageRank, CG, Laplacian solver — O(log n) to O(√n) via PostgreSQL |
+| 18 | **Math Distances in SQL** | Wasserstein, Sinkhorn OT, KL divergence, spectral clustering |
+| 19 | **Topological Data Analysis** | Persistent homology, Betti numbers, embedding drift detection |
+| 20 | **Sona Learning in SQL** | Micro-LoRA trajectory learning with EWC++ forgetting prevention |
+| 21 | **Domain Expansion** | Cross-domain transfer learning with contextual bandits |
+| 22 | **Extended Attention** | O(n) linear, MoE, hyperbolic, sliding window attention in SQL |
**Cognitive Containers ([RVF](./crates/rvf/README.md))**
| # | Capability | What It Does |
|---|------------|--------------|
-| 17 | **Self-boot as a microservice** | A `.rvf` file contains a real Linux kernel — drop it on a VM and it boots in 125 ms |
-| 18 | **eBPF acceleration** | Hot vectors served in kernel data path via XDP, socket filter, and TC programs |
-| 19 | **5.5 KB WASM runtime** | Same file runs queries in a browser tab with zero backend |
-| 20 | **COW branching** | Git-like copy-on-write — 1M-vector parent, 100 edits = ~2.5 MB child |
-| 21 | **Witness chains** | Tamper-evident hash-linked audit trail for every operation |
-| 22 | **Post-quantum signatures** | ML-DSA-65 and SLH-DSA-128s alongside Ed25519 |
-| 23 | **DNA-style lineage** | Track parent/child derivation chains with cryptographic hashes |
-| 24 | **24 segment types** | VEC, INDEX, KERNEL, EBPF, WASM, COW_MAP, WITNESS, CRYPTO, and 16 more |
+| 23 | **Self-boot as a microservice** | A `.rvf` file contains a real Linux kernel — drop it on a VM and it boots in 125 ms |
+| 24 | **eBPF acceleration** | Hot vectors served in kernel data path via XDP, socket filter, and TC programs |
+| 25 | **5.5 KB WASM runtime** | Same file runs queries in a browser tab with zero backend |
+| 26 | **COW branching** | Git-like copy-on-write — 1M-vector parent, 100 edits = ~2.5 MB child |
+| 27 | **Witness chains** | Tamper-evident hash-linked audit trail for every operation |
+| 28 | **Post-quantum signatures** | ML-DSA-65 and SLH-DSA-128s alongside Ed25519 |
+| 29 | **DNA-style lineage** | Track parent/child derivation chains with cryptographic hashes |
+| 30 | **24 segment types** | VEC, INDEX, KERNEL, EBPF, WASM, COW_MAP, WITNESS, CRYPTO, and 16 more |
**Specialized Processing**
| # | Capability | What It Does |
|---|------------|--------------|
-| 25 | **SciPix OCR** | LaTeX/MathML extraction from scientific documents |
-| 26 | **DAG workflows** | Self-learning directed acyclic graph execution |
-| 27 | **Cognitum Gate** | Cognitive AI gateway with TileZero acceleration |
-| 28 | **FPGA transformer** | Hardware-accelerated transformer inference |
-| 29 | **Quantum coherence** | ruQu for quantum error correction via dynamic min-cut |
-| 30 | **Sublinear Solvers** | 8 algorithms: Neumann, CG, Forward Push, TRUE, BMSSP — O(log n) to O(√n) |
+| 31 | **SciPix OCR** | LaTeX/MathML extraction from scientific documents |
+| 32 | **DAG workflows** | Self-learning directed acyclic graph execution |
+| 33 | **Cognitum Gate** | Cognitive AI gateway with TileZero acceleration |
+| 34 | **FPGA transformer** | Hardware-accelerated transformer inference |
+| 35 | **Quantum coherence** | ruQu for quantum error correction via dynamic min-cut |
+| 36 | **Sublinear Solvers** | 8 algorithms: Neumann, CG, Forward Push, TRUE, BMSSP — O(log n) to O(√n) |
**Genomics & Health**
| # | Capability | What It Does |
|---|------------|--------------|
-| 31 | **rvDNA genomic analysis** | Variant calling, protein translation, HNSW k-mer search in 12 ms |
-| 32 | **`.rvdna` file format** | AI-native binary with pre-computed vectors, tensors, and embeddings |
-| 33 | **Instant diagnostics** | Sickle cell, cancer mutations, drug dosing — runs on any device |
-| 34 | **Privacy-first WASM** | Browser-based genomics, data never leaves the device |
+| 37 | **rvDNA genomic analysis** | Variant calling, protein translation, HNSW k-mer search in 12 ms |
+| 38 | **`.rvdna` file format** | AI-native binary with pre-computed vectors, tensors, and embeddings |
+| 39 | **Instant diagnostics** | Sickle cell, cancer mutations, drug dosing — runs on any device |
+| 40 | **Privacy-first WASM** | Browser-based genomics, data never leaves the device |
**Platform & Integration**
| # | Capability | What It Does |
|---|------------|--------------|
-| 35 | **Run anywhere** | Node.js, browser (WASM), edge (rvLite), HTTP server, Rust, bare metal |
-| 36 | **Drop into Postgres** | pgvector-compatible extension with SIMD acceleration |
-| 37 | **MCP integration** | Model Context Protocol server for AI assistant tools |
-| 38 | **Cloud deployment** | One-click deploy to Cloud Run, Kubernetes |
-| 39 | **13 Rust crates + 4 npm packages** | [RVF SDK](./crates/rvf/README.md) published on [crates.io](https://crates.io/crates/rvf-runtime) and [npm](https://www.npmjs.com/package/@ruvector/rvf) |
+| 41 | **Run anywhere** | Node.js, browser (WASM), edge (rvLite), HTTP server, Rust, bare metal |
+| 42 | **Drop into Postgres** | pgvector-compatible extension with SIMD acceleration |
+| 43 | **MCP integration** | Model Context Protocol server for AI assistant tools |
+| 44 | **Cloud deployment** | One-click deploy to Cloud Run, Kubernetes |
+| 45 | **13 Rust crates + 4 npm packages** | [RVF SDK](./crates/rvf/README.md) published on [crates.io](https://crates.io/crates/rvf-runtime) and [npm](https://www.npmjs.com/package/@ruvector/rvf) |
**Self-Learning & Adaptation**
| # | Capability | What It Does |
|---|------------|--------------|
-| 40 | **Self-learning hooks** | Q-learning, neural patterns, HNSW memory |
-| 41 | **ReasoningBank** | Trajectory learning with verdict judgment |
-| 42 | **Economy system** | Tokenomics, CRDT-based distributed state |
-| 43 | **Agentic synthesis** | Multi-agent workflow composition |
+| 46 | **Self-learning hooks** | Q-learning, neural patterns, HNSW memory |
+| 47 | **ReasoningBank** | Trajectory learning with verdict judgment |
+| 48 | **Economy system** | Tokenomics, CRDT-based distributed state |
+| 49 | **Agentic synthesis** | Multi-agent workflow composition |
@@ -3217,7 +3223,7 @@ let distances = batch_distances(&query, &database); // 8-54x speedup
[](https://hub.docker.com/r/ruvnet/ruvector-postgres)
[](https://hub.docker.com/r/ruvnet/ruvector-postgres)
-**The most advanced PostgreSQL vector extension** — a drop-in pgvector replacement with 230+ SQL functions, hardware-accelerated SIMD operations, and built-in AI capabilities. Transform your existing PostgreSQL database into a full-featured vector search engine with GNN layers, attention mechanisms, and self-learning capabilities.
+**The most advanced PostgreSQL vector extension** — a drop-in pgvector replacement with 143 SQL functions, hardware-accelerated SIMD operations, and built-in AI capabilities. Transform your existing PostgreSQL database into a full-featured vector search engine with GNN layers, attention mechanisms, and self-learning capabilities.
```bash
# Quick Install from Docker Hub
@@ -3235,17 +3241,17 @@ CREATE EXTENSION ruvector;
**Why RuVector Postgres?**
- **Zero Migration** — Works with existing pgvector code, just swap the extension
-- **10x More Functions** — 230+ SQL functions vs pgvector's ~20
+- **~7x More Functions** — 143 SQL functions vs pgvector's ~20
- **2x Faster** — AVX-512/AVX2/NEON SIMD acceleration
-- **AI-Native** — GNN layers, 40+ attention mechanisms, local embeddings
+- **AI-Native** — GNN layers, 46 attention mechanisms, local embeddings
- **Self-Learning** — Improves search quality over time with ReasoningBank
| Feature | pgvector | RuVector Postgres |
|---------|----------|-------------------|
-| SQL Functions | ~20 | **230+** |
+| SQL Functions | ~20 | **143** |
| SIMD Acceleration | Basic | AVX-512/AVX2/NEON (~2x faster) |
| Index Types | HNSW, IVFFlat | HNSW, IVFFlat + Hyperbolic |
-| Attention Mechanisms | ❌ | 39 types (Flash, Linear, Graph) |
+| Attention Mechanisms | ❌ | 46 types (Flash, Linear, Graph) |
| GNN Layers | ❌ | GCN, GraphSAGE, GAT, GIN |
| Sparse Vectors | ❌ | BM25, TF-IDF, SPLADE |
| Self-Learning | ❌ | ReasoningBank, trajectory learning |
@@ -3298,8 +3304,9 @@ volumes:
```
**Available Tags:**
-- `ruvnet/ruvector-postgres:latest` - PostgreSQL + RuVector 2.0
-- `ruvnet/ruvector-postgres:2.0.0` - Specific version
+- `ruvnet/ruvector-postgres:latest` - PostgreSQL + RuVector 0.3.0
+- `ruvnet/ruvector-postgres:0.3.0` - Current release (143 SQL functions)
+- `ruvnet/ruvector-postgres:2.0.0` - Previous release
@@ -3401,15 +3408,21 @@ pg15 = ["ruvector-postgres/pg15"]
# AI features (opt-in)
ai-complete = ["ruvector-postgres/ai-complete"] # All AI features
learning = ["ruvector-postgres/learning"] # Self-learning
-attention = ["ruvector-postgres/attention"] # 40+ attention mechanisms
+attention = ["ruvector-postgres/attention"] # 46 attention mechanisms
gnn = ["ruvector-postgres/gnn"] # Graph neural networks
hyperbolic = ["ruvector-postgres/hyperbolic"] # Hyperbolic embeddings
embeddings = ["ruvector-postgres/embeddings"] # Local embedding generation
+solver = ["ruvector-postgres/solver"] # Sublinear solvers
+math-distances = ["ruvector-postgres/math-distances"] # Math distances & spectral
+tda = ["ruvector-postgres/tda"] # Topological data analysis
+sona-learning = ["ruvector-postgres/sona-learning"] # Sona learning
+domain-expansion = ["ruvector-postgres/domain-expansion"] # Domain expansion
+analytics-complete = ["solver", "math-distances", "tda"] # All analytics
```
**Build with all features:**
```bash
-cargo pgrx install --release --features "ai-complete,embeddings"
+cargo pgrx install --release --features "ai-complete,embeddings,analytics-complete,attention-extended,sona-learning,domain-expansion"
```
@@ -3466,7 +3479,7 @@ SELECT ruvector_flash_attention(query, key, value);
-See [ruvector-postgres README](./crates/ruvector-postgres/README.md) for full SQL API reference (230+ functions).
+See [ruvector-postgres README](./crates/ruvector-postgres/README.md) for full SQL API reference (143 functions).
diff --git a/crates/ruqu-algorithms/src/lib.rs b/crates/ruqu-algorithms/src/lib.rs
index e30e9f1aa..262b064f2 100644
--- a/crates/ruqu-algorithms/src/lib.rs
+++ b/crates/ruqu-algorithms/src/lib.rs
@@ -39,7 +39,7 @@ pub mod qaoa;
pub mod surface_code;
pub mod vqe;
-pub use grover::{GroverConfig, GroverResult, run_grover};
-pub use qaoa::{Graph, QaoaConfig, QaoaResult, run_qaoa};
-pub use surface_code::{SurfaceCodeConfig, SurfaceCodeResult, run_surface_code};
-pub use vqe::{VqeConfig, VqeResult, run_vqe};
+pub use grover::{run_grover, GroverConfig, GroverResult};
+pub use qaoa::{run_qaoa, Graph, QaoaConfig, QaoaResult};
+pub use surface_code::{run_surface_code, SurfaceCodeConfig, SurfaceCodeResult};
+pub use vqe::{run_vqe, VqeConfig, VqeResult};
diff --git a/crates/ruqu-algorithms/src/qaoa.rs b/crates/ruqu-algorithms/src/qaoa.rs
index ff2567d1f..353c8881c 100644
--- a/crates/ruqu-algorithms/src/qaoa.rs
+++ b/crates/ruqu-algorithms/src/qaoa.rs
@@ -123,7 +123,11 @@ pub struct QaoaResult {
///
/// `gammas` and `betas` must each have length `p`.
pub fn build_qaoa_circuit(graph: &Graph, gammas: &[f64], betas: &[f64]) -> QuantumCircuit {
- assert_eq!(gammas.len(), betas.len(), "gammas and betas must have equal length");
+ assert_eq!(
+ gammas.len(),
+ betas.len(),
+ "gammas and betas must have equal length"
+ );
let n = graph.num_nodes;
let p = gammas.len();
let mut circuit = QuantumCircuit::new(n);
diff --git a/crates/ruqu-algorithms/src/surface_code.rs b/crates/ruqu-algorithms/src/surface_code.rs
index 4699c33e9..34ac3dd61 100644
--- a/crates/ruqu-algorithms/src/surface_code.rs
+++ b/crates/ruqu-algorithms/src/surface_code.rs
@@ -320,9 +320,7 @@ fn most_common_data_qubit(
/// # Errors
///
/// Returns a [`ruqu_core::error::QuantumError`] on simulator failures.
-pub fn run_surface_code(
- config: &SurfaceCodeConfig,
-) -> ruqu_core::error::Result {
+pub fn run_surface_code(config: &SurfaceCodeConfig) -> ruqu_core::error::Result {
assert_eq!(
config.distance, 3,
"Only distance-3 surface codes are currently supported"
@@ -370,11 +368,9 @@ pub fn run_surface_code(
// row has odd parity -> logical error.
let mut row_parity = 1.0_f64;
for &q in &logical_row {
- let z_exp = state.expectation_value(
- &ruqu_core::types::PauliString {
- ops: vec![(q, ruqu_core::types::PauliOp::Z)],
- },
- );
+ let z_exp = state.expectation_value(&ruqu_core::types::PauliString {
+ ops: vec![(q, ruqu_core::types::PauliOp::Z)],
+ });
// Each Z expectation is in [-1, 1]. For a computational basis
// state, it is exactly +1 (|0>) or -1 (|1>). For superpositions
// we approximate: sign of the product captures parity.
@@ -428,7 +424,11 @@ mod tests {
}
// All 9 data qubits should be covered by X stabilizers.
for q in 0..9u32 {
- assert!(covered.contains(&q), "data qubit {} not covered by X stabilizers", q);
+ assert!(
+ covered.contains(&q),
+ "data qubit {} not covered by X stabilizers",
+ q
+ );
}
}
@@ -446,7 +446,10 @@ mod tests {
let layout = SurfaceCodeLayout::distance_3();
let syndrome = vec![false; 8];
let corrections = decode_syndrome(&syndrome, &layout);
- assert!(corrections.is_empty(), "no corrections when syndrome is trivial");
+ assert!(
+ corrections.is_empty(),
+ "no corrections when syndrome is trivial"
+ );
}
#[test]
@@ -471,10 +474,7 @@ mod tests {
#[test]
fn test_most_common_data_qubit() {
- let stabilizers = vec![
- vec![0, 1, 3, 4],
- vec![1, 2, 4, 5],
- ];
+ let stabilizers = vec![vec![0, 1, 3, 4], vec![1, 2, 4, 5]];
// Both stabilizers 0 and 1 triggered: qubit 1 and 4 appear in both.
let result = most_common_data_qubit(&stabilizers, &[0, 1]);
assert!(result == Some(1) || result == Some(4));
diff --git a/crates/ruqu-algorithms/src/vqe.rs b/crates/ruqu-algorithms/src/vqe.rs
index 3cffdef34..080372a2d 100644
--- a/crates/ruqu-algorithms/src/vqe.rs
+++ b/crates/ruqu-algorithms/src/vqe.rs
@@ -119,10 +119,7 @@ pub fn build_ansatz(num_qubits: u32, depth: u32, params: &[f64]) -> QuantumCircu
/// ansatz parameters.
///
/// Builds the ansatz, simulates it, and returns ``.
-pub fn evaluate_energy(
- config: &VqeConfig,
- params: &[f64],
-) -> ruqu_core::error::Result {
+pub fn evaluate_energy(config: &VqeConfig, params: &[f64]) -> ruqu_core::error::Result {
let circuit = build_ansatz(config.num_qubits, config.ansatz_depth, params);
let sim_config = SimConfig {
seed: config.seed,
diff --git a/crates/ruqu-algorithms/tests/test_algorithms.rs b/crates/ruqu-algorithms/tests/test_algorithms.rs
index 9320cef64..ee4025078 100644
--- a/crates/ruqu-algorithms/tests/test_algorithms.rs
+++ b/crates/ruqu-algorithms/tests/test_algorithms.rs
@@ -64,25 +64,37 @@ fn deutsch_algorithm(oracle: &str) -> bool {
#[test]
fn test_deutsch_f0_constant() {
// f(0) = 0, f(1) = 0 → constant → measure |0⟩
- assert!(!deutsch_algorithm("f0"), "f0 should be classified as constant");
+ assert!(
+ !deutsch_algorithm("f0"),
+ "f0 should be classified as constant"
+ );
}
#[test]
fn test_deutsch_f1_constant() {
// f(0) = 1, f(1) = 1 → constant → measure |0⟩
- assert!(!deutsch_algorithm("f1"), "f1 should be classified as constant");
+ assert!(
+ !deutsch_algorithm("f1"),
+ "f1 should be classified as constant"
+ );
}
#[test]
fn test_deutsch_f2_balanced() {
// f(0) = 0, f(1) = 1 → balanced → measure |1⟩
- assert!(deutsch_algorithm("f2"), "f2 should be classified as balanced");
+ assert!(
+ deutsch_algorithm("f2"),
+ "f2 should be classified as balanced"
+ );
}
#[test]
fn test_deutsch_f3_balanced() {
// f(0) = 1, f(1) = 0 → balanced → measure |1⟩
- assert!(deutsch_algorithm("f3"), "f3 should be classified as balanced");
+ assert!(
+ deutsch_algorithm("f3"),
+ "f3 should be classified as balanced"
+ );
}
#[test]
@@ -96,8 +108,12 @@ fn test_deutsch_deterministic_probabilities() {
match *oracle {
"f0" => {}
- "f1" => { state.apply_gate(&Gate::X(1)).unwrap(); }
- "f2" => { state.apply_gate(&Gate::CNOT(0, 1)).unwrap(); }
+ "f1" => {
+ state.apply_gate(&Gate::X(1)).unwrap();
+ }
+ "f2" => {
+ state.apply_gate(&Gate::CNOT(0, 1)).unwrap();
+ }
"f3" => {
state.apply_gate(&Gate::X(0)).unwrap();
state.apply_gate(&Gate::CNOT(0, 1)).unwrap();
@@ -151,7 +167,8 @@ fn test_deutsch_phase_kickback() {
assert!(
(amps[i].re - exp).abs() < EPSILON && amps[i].im.abs() < EPSILON,
"Amplitude mismatch at index {i}: got ({}, {}), expected ({exp}, 0)",
- amps[i].re, amps[i].im
+ amps[i].re,
+ amps[i].im
);
}
}
@@ -363,8 +380,7 @@ fn test_vqe_simple_z_hamiltonian() {
result.optimal_energy
);
assert!(
- result.optimal_energy >= -1.0 - ALGO_EPSILON
- && result.optimal_energy <= 1.0 + ALGO_EPSILON,
+ result.optimal_energy >= -1.0 - ALGO_EPSILON && result.optimal_energy <= 1.0 + ALGO_EPSILON,
"VQE energy should be in [-1, 1]; got {}",
result.optimal_energy
);
@@ -439,10 +455,7 @@ fn test_vqe_returns_optimal_params() {
fn test_h2_hamiltonian_structure() {
let h = vqe::h2_hamiltonian();
assert_eq!(h.num_qubits, 2);
- assert!(
- !h.terms.is_empty(),
- "H2 Hamiltonian should have terms"
- );
+ assert!(!h.terms.is_empty(), "H2 Hamiltonian should have terms");
}
// ===========================================================================
@@ -537,10 +550,7 @@ fn test_qaoa_build_circuit() {
let betas = vec![0.4, 0.2];
let circuit = qaoa::build_qaoa_circuit(&graph, &gammas, &betas);
assert_eq!(circuit.num_qubits(), 4);
- assert!(
- circuit.gate_count() > 0,
- "QAOA circuit should have gates"
- );
+ assert!(circuit.gate_count() > 0, "QAOA circuit should have gates");
}
#[test]
@@ -633,11 +643,7 @@ fn test_cut_value_triangle_bipartition() {
let graph = qaoa::Graph::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]);
// Partition {0} vs {1, 2}: edges (0,1) and (0,2) are cut = 2
let cv = qaoa::cut_value(&graph, &[true, false, false]);
- assert!(
- approx_eq(cv, 2.0),
- "Expected cut value 2; got {}",
- cv
- );
+ assert!(approx_eq(cv, 2.0), "Expected cut value 2; got {}", cv);
}
#[test]
diff --git a/crates/ruqu-core/benches/quantum_sim.rs b/crates/ruqu-core/benches/quantum_sim.rs
index fe0f65fe5..c44d0217c 100644
--- a/crates/ruqu-core/benches/quantum_sim.rs
+++ b/crates/ruqu-core/benches/quantum_sim.rs
@@ -1,4 +1,4 @@
-use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use ruqu_core::prelude::*;
fn bench_single_qubit_gates(c: &mut Criterion) {
@@ -45,16 +45,12 @@ fn bench_two_qubit_gates(c: &mut Criterion) {
},
);
- group.bench_with_input(
- BenchmarkId::new("rzz", num_qubits),
- &num_qubits,
- |b, &n| {
- b.iter(|| {
- let mut state = QuantumState::new(n).unwrap();
- state.apply_gate(&Gate::Rzz(0, 1, 0.5)).unwrap();
- });
- },
- );
+ group.bench_with_input(BenchmarkId::new("rzz", num_qubits), &num_qubits, |b, &n| {
+ b.iter(|| {
+ let mut state = QuantumState::new(n).unwrap();
+ state.apply_gate(&Gate::Rzz(0, 1, 0.5)).unwrap();
+ });
+ });
}
group.finish();
}
@@ -93,7 +89,8 @@ fn bench_grover_circuit(c: &mut Criterion) {
state.apply_gate(&Gate::H(q)).unwrap();
}
let target = 0usize;
- let iterations = (std::f64::consts::FRAC_PI_4 * ((1u64 << n) as f64).sqrt()) as u32;
+ let iterations =
+ (std::f64::consts::FRAC_PI_4 * ((1u64 << n) as f64).sqrt()) as u32;
for _ in 0..iterations {
// Oracle (simplified)
state.apply_gate(&Gate::Z(0)).unwrap();
@@ -129,10 +126,12 @@ fn bench_qaoa_layer(c: &mut Criterion) {
|b, &n| {
b.iter(|| {
let mut state = QuantumState::new(n).unwrap();
- for q in 0..n { state.apply_gate(&Gate::H(q)).unwrap(); }
+ for q in 0..n {
+ state.apply_gate(&Gate::H(q)).unwrap();
+ }
// Phase separation: linear chain
for q in 0..n.saturating_sub(1) {
- state.apply_gate(&Gate::Rzz(q, q+1, 0.5)).unwrap();
+ state.apply_gate(&Gate::Rzz(q, q + 1, 0.5)).unwrap();
}
// Mixing
for q in 0..n {
@@ -155,7 +154,9 @@ fn bench_expectation_value(c: &mut Criterion) {
|b, &n| {
let mut state = QuantumState::new(n).unwrap();
state.apply_gate(&Gate::H(0)).unwrap();
- let z = PauliString { ops: vec![(0, PauliOp::Z)] };
+ let z = PauliString {
+ ops: vec![(0, PauliOp::Z)],
+ };
b.iter(|| {
state.expectation_value(&z);
});
@@ -169,7 +170,9 @@ fn bench_expectation_value(c: &mut Criterion) {
let mut state = QuantumState::new(n).unwrap();
state.apply_gate(&Gate::H(0)).unwrap();
state.apply_gate(&Gate::CNOT(0, 1)).unwrap();
- let zz = PauliString { ops: vec![(0, PauliOp::Z), (1, PauliOp::Z)] };
+ let zz = PauliString {
+ ops: vec![(0, PauliOp::Z), (1, PauliOp::Z)],
+ };
b.iter(|| {
state.expectation_value(&zz);
});
diff --git a/crates/ruqu-core/src/benchmark.rs b/crates/ruqu-core/src/benchmark.rs
index 2a1daecd8..6259035af 100644
--- a/crates/ruqu-core/src/benchmark.rs
+++ b/crates/ruqu-core/src/benchmark.rs
@@ -13,8 +13,7 @@ use crate::backend::{analyze_circuit, BackendType};
use crate::circuit::QuantumCircuit;
use crate::confidence::total_variation_distance;
use crate::decoder::{
- PartitionedDecoder, StabilizerMeasurement, SurfaceCodeDecoder, SyndromeData,
- UnionFindDecoder,
+ PartitionedDecoder, StabilizerMeasurement, SurfaceCodeDecoder, SyndromeData, UnionFindDecoder,
};
use crate::decomposition::{classify_segment, decompose, estimate_segment_cost};
use crate::planner::{plan_execution, PlannerConfig};
@@ -267,10 +266,18 @@ fn gen_mixed_circuit(rng: &mut StdRng) -> QuantumCircuit {
for _ in 0..layers {
for q in 0..n {
match rng.gen_range(0..4) {
- 0 => { circ.h(q); }
- 1 => { circ.t(q); }
- 2 => { circ.s(q); }
- _ => { circ.x(q); }
+ 0 => {
+ circ.h(q);
+ }
+ 1 => {
+ circ.t(q);
+ }
+ 2 => {
+ circ.s(q);
+ }
+ _ => {
+ circ.x(q);
+ }
}
}
if n > 1 {
@@ -326,8 +333,7 @@ pub fn run_entanglement_benchmark(seed: u64, num_circuits: usize) -> Entanglemen
if active <= max_segment_qubits {
segments_within += 1;
} else {
- let violation = (active - max_segment_qubits) as f64
- / max_segment_qubits as f64;
+ let violation = (active - max_segment_qubits) as f64 / max_segment_qubits as f64;
if violation > max_violation {
max_violation = violation;
}
@@ -402,8 +408,7 @@ pub fn run_decoder_benchmark(
for &d in distances {
let uf_decoder = UnionFindDecoder::new(0);
let tile_size = (d / 2).max(2);
- let part_decoder =
- PartitionedDecoder::new(tile_size, Box::new(UnionFindDecoder::new(0)));
+ let part_decoder = PartitionedDecoder::new(tile_size, Box::new(UnionFindDecoder::new(0)));
let mut uf_total_ns = 0u64;
let mut part_total_ns = 0u64;
@@ -421,11 +426,7 @@ pub fn run_decoder_benchmark(
// A simple accuracy check: count defects and compare logical
// outcome expectation.
- let defect_count = syndrome
- .stabilizers
- .iter()
- .filter(|s| s.value)
- .count();
+ let defect_count = syndrome.stabilizers.iter().filter(|s| s.value).count();
let expected_logical = defect_count >= d as usize;
if uf_corr.logical_outcome == expected_logical {
uf_correct += 1;
@@ -567,10 +568,18 @@ fn gen_certifiable_circuit(rng: &mut StdRng) -> QuantumCircuit {
for _ in 0..extras {
let q = rng.gen_range(0..n);
match rng.gen_range(0..4) {
- 0 => { circ.h(q); }
- 1 => { circ.s(q); }
- 2 => { circ.x(q); }
- _ => { circ.z(q); }
+ 0 => {
+ circ.h(q);
+ }
+ 1 => {
+ circ.s(q);
+ }
+ 2 => {
+ circ.x(q);
+ }
+ _ => {
+ circ.z(q);
+ }
}
}
// Add measurements for all qubits.
@@ -604,8 +613,7 @@ pub fn run_full_benchmark(seed: u64) -> FullBenchmarkReport {
&[3, 5, 7, 9, 11, 13, 15, 17, 21, 25],
100,
);
- let certification =
- run_certification_benchmark(seed.wrapping_add(3), 100, 500);
+ let certification = run_certification_benchmark(seed.wrapping_add(3), 100, 500);
let total_time_ms = start.elapsed().as_millis() as u64;
diff --git a/crates/ruqu-core/src/clifford_t.rs b/crates/ruqu-core/src/clifford_t.rs
index a65430ec8..1f7aec306 100644
--- a/crates/ruqu-core/src/clifford_t.rs
+++ b/crates/ruqu-core/src/clifford_t.rs
@@ -347,7 +347,11 @@ impl CliffordTState {
let probe_meas = probe.measure(qubit)?;
let p0_k = if (probe_meas.probability - 1.0).abs() < 1e-10 {
- if !probe_meas.result { 1.0 } else { 0.0 }
+ if !probe_meas.result {
+ 1.0
+ } else {
+ 0.0
+ }
} else {
0.5
};
@@ -460,7 +464,11 @@ impl CliffordTState {
if let Ok(mut probe) = state.clone_with_seed(probe_seed) {
if let Ok(meas) = probe.measure(qubit) {
let z_k = if (meas.probability - 1.0).abs() < 1e-10 {
- if !meas.result { 1.0 } else { -1.0 }
+ if !meas.result {
+ 1.0
+ } else {
+ -1.0
+ }
} else {
0.0
};
diff --git a/crates/ruqu-core/src/confidence.rs b/crates/ruqu-core/src/confidence.rs
index 7469bc2fe..23a08b885 100644
--- a/crates/ruqu-core/src/confidence.rs
+++ b/crates/ruqu-core/src/confidence.rs
@@ -67,7 +67,7 @@ pub fn z_score(confidence: f64) -> f64 {
);
let p = (1.0 + confidence) / 2.0; // upper tail probability
- // 1 - p is the tail area; for p close to 1 this is small and positive.
+ // 1 - p is the tail area; for p close to 1 this is small and positive.
let tail = 1.0 - p;
// Rational approximation: for tail area `q`, set t = sqrt(-2 ln q).
@@ -323,10 +323,7 @@ pub fn expectation_confidence(
///
/// Panics if `epsilon` or `delta` is not in (0, 1).
pub fn required_shots(epsilon: f64, delta: f64) -> usize {
- assert!(
- epsilon > 0.0 && epsilon < 1.0,
- "epsilon must be in (0, 1)"
- );
+ assert!(epsilon > 0.0 && epsilon < 1.0, "epsilon must be in (0, 1)");
assert!(delta > 0.0 && delta < 1.0, "delta must be in (0, 1)");
let n = (2.0_f64 / delta).ln() / (2.0 * epsilon * epsilon);
@@ -493,8 +490,7 @@ fn normal_cdf(x: f64) -> f64 {
let poly = t
* (0.319381530
- + t * (-0.356563782
- + t * (1.781477937 + t * (-1.821255978 + t * 1.330274429))));
+ + t * (-0.356563782 + t * (1.781477937 + t * (-1.821255978 + t * 1.330274429))));
if sign > 0.0 {
1.0 - p * poly
@@ -533,14 +529,8 @@ impl ConvergenceMonitor {
}
let window = &self.estimates[self.estimates.len() - self.window_size..];
- let min = window
- .iter()
- .copied()
- .fold(f64::INFINITY, f64::min);
- let max = window
- .iter()
- .copied()
- .fold(f64::NEG_INFINITY, f64::max);
+ let min = window.iter().copied().fold(f64::INFINITY, f64::min);
+ let max = window.iter().copied().fold(f64::NEG_INFINITY, f64::max);
(max - min) < epsilon
}
@@ -599,7 +589,10 @@ mod tests {
fn wilson_contains_true_proportion() {
// 50 successes out of 100 trials, true p = 0.5
let ci = wilson_interval(50, 100, 0.95);
- assert!(ci.lower < 0.5 && ci.upper > 0.5, "Wilson CI should contain 0.5: {ci:?}");
+ assert!(
+ ci.lower < 0.5 && ci.upper > 0.5,
+ "Wilson CI should contain 0.5: {ci:?}"
+ );
assert_eq!(ci.method, "wilson");
assert!((ci.point_estimate - 0.5).abs() < 1e-12);
}
@@ -750,7 +743,10 @@ mod tests {
p.insert(vec![true, true], 250);
let tvd = total_variation_distance(&p, &p);
- assert!(tvd.abs() < 1e-12, "TVD of identical distributions should be 0, got {tvd}");
+ assert!(
+ tvd.abs() < 1e-12,
+ "TVD of identical distributions should be 0, got {tvd}"
+ );
}
#[test]
@@ -780,10 +776,7 @@ mod tests {
let tvd = total_variation_distance(&p, &q);
// |0.6 - 0.4| + |0.4 - 0.6| = 0.4, times 0.5 = 0.2
- assert!(
- (tvd - 0.2).abs() < 1e-12,
- "expected 0.2, got {tvd}"
- );
+ assert!((tvd - 0.2).abs() < 1e-12, "expected 0.2, got {tvd}");
}
#[test]
@@ -833,7 +826,11 @@ mod tests {
let result = chi_squared_test(&obs, &exp);
assert!(result.statistic > 100.0, "statistic should be large");
- assert!(result.p_value < 0.05, "p-value should be small: {}", result.p_value);
+ assert!(
+ result.p_value < 0.05,
+ "p-value should be small: {}",
+ result.p_value
+ );
assert!(result.significant);
}
@@ -857,7 +854,9 @@ mod tests {
fn convergence_detects_stable() {
let mut monitor = ConvergenceMonitor::new(5);
// Add a sequence that stabilises.
- for &v in &[0.5, 0.52, 0.49, 0.501, 0.499, 0.5001, 0.4999, 0.5002, 0.4998, 0.5001] {
+ for &v in &[
+ 0.5, 0.52, 0.49, 0.501, 0.499, 0.5001, 0.4999, 0.5002, 0.4998, 0.5001,
+ ] {
monitor.add_estimate(v);
}
assert!(
diff --git a/crates/ruqu-core/src/control_theory.rs b/crates/ruqu-core/src/control_theory.rs
index 87d73a8d6..2d67ddef0 100644
--- a/crates/ruqu-core/src/control_theory.rs
+++ b/crates/ruqu-core/src/control_theory.rs
@@ -50,12 +50,19 @@ pub struct ControlState {
impl ControlState {
pub fn new() -> Self {
- Self { logical_error_rate: 0.0, error_backlog: 0.0, rounds_decoded: 0, total_latency_ns: 0 }
+ Self {
+ logical_error_rate: 0.0,
+ error_backlog: 0.0,
+ rounds_decoded: 0,
+ total_latency_ns: 0,
+ }
}
}
impl Default for ControlState {
- fn default() -> Self { Self::new() }
+ fn default() -> Self {
+ Self::new()
+ }
}
// -- 2. Stability Analysis ---------------------------------------------------
@@ -84,17 +91,28 @@ pub fn analyze_stability(config: &QecControlLoop) -> StabilityCondition {
let acc = config.controller.accuracy;
let t_syndrome = syndrome_period_ns(d);
- let margin = if t_decode == 0 { f64::INFINITY }
- else { (t_syndrome as f64 / t_decode as f64) - 1.0 };
+ let margin = if t_decode == 0 {
+ f64::INFINITY
+ } else {
+ (t_syndrome as f64 / t_decode as f64) - 1.0
+ };
let is_stable = t_decode < t_syndrome;
let critical_latency_ns = t_syndrome;
let critical_error_rate = 0.01 * acc;
let error_injection = p * (d as f64);
let convergence_rate = if t_syndrome > 0 {
1.0 - (t_decode as f64 / t_syndrome as f64) - error_injection
- } else { -1.0 };
-
- StabilityCondition { is_stable, margin, critical_latency_ns, critical_error_rate, convergence_rate }
+ } else {
+ -1.0
+ };
+
+ StabilityCondition {
+ is_stable,
+ margin,
+ critical_latency_ns,
+ critical_error_rate,
+ convergence_rate,
+ }
}
/// Maximum code distance stable for a given controller and physical error rate.
@@ -102,8 +120,12 @@ pub fn analyze_stability(config: &QecControlLoop) -> StabilityCondition {
pub fn max_stable_distance(controller: &ClassicalController, error_rate: f64) -> u32 {
let mut best = 3u32;
for d in (3..=201).step_by(2) {
- if controller.decode_latency_ns >= syndrome_period_ns(d) { break; }
- if error_rate >= 0.01 * controller.accuracy { break; }
+ if controller.decode_latency_ns >= syndrome_period_ns(d) {
+ break;
+ }
+ if error_rate >= 0.01 * controller.accuracy {
+ break;
+ }
best = d;
}
best
@@ -112,7 +134,9 @@ pub fn max_stable_distance(controller: &ClassicalController, error_rate: f64) ->
/// Minimum decoder throughput (syndromes/sec) to keep up with the plant.
pub fn min_throughput(plant: &QuantumPlant) -> f64 {
let t_ns = syndrome_period_ns(plant.code_distance);
- if t_ns == 0 { return f64::INFINITY; }
+ if t_ns == 0 {
+ return f64::INFINITY;
+ }
1e9 / t_ns as f64
}
@@ -139,42 +163,66 @@ pub struct OptimalAllocation {
/// Enumerate Pareto-optimal resource allocations sorted by descending score.
pub fn optimize_allocation(
- budget: &ResourceBudget, error_rate: f64, min_logical: u32,
+ budget: &ResourceBudget,
+ error_rate: f64,
+ min_logical: u32,
) -> Vec<OptimalAllocation> {
let mut candidates = Vec::new();
for d in (3u32..=99).step_by(2) {
let qpl = 2 * d * d - 2 * d + 1;
- if qpl == 0 { continue; }
+ if qpl == 0 {
+ continue;
+ }
let max_logical = budget.total_physical_qubits / qpl;
- if max_logical < min_logical { continue; }
+ if max_logical < min_logical {
+ continue;
+ }
let decode_ns = if budget.classical_cores > 0 && budget.classical_clock_ghz > 0.0 {
- ((d as f64).powi(3) / (budget.classical_cores as f64 * budget.classical_clock_ghz)) as u64
- } else { u64::MAX };
+ ((d as f64).powi(3) / (budget.classical_cores as f64 * budget.classical_clock_ghz))
+ as u64
+ } else {
+ u64::MAX
+ };
let decode_threads = budget.classical_cores.min(max_logical);
let p_th = 0.01_f64;
let ratio = error_rate / p_th;
let exp = (d as f64 + 1.0) / 2.0;
- let p_logical = if ratio < 1.0 { 0.1 * ratio.powf(exp) }
- else { 1.0_f64.min(ratio.powf(exp)) };
+ let p_logical = if ratio < 1.0 {
+ 0.1 * ratio.powf(exp)
+ } else {
+ 1.0_f64.min(ratio.powf(exp))
+ };
let t_syn = syndrome_period_ns(d);
let round_time = t_syn.max(decode_ns);
let budget_ns = budget.total_time_budget_us * 1000;
- if round_time == 0 || budget_ns / round_time == 0 { continue; }
+ if round_time == 0 || budget_ns / round_time == 0 {
+ continue;
+ }
let score = if p_logical > 0.0 && max_logical > 0 {
(max_logical as f64).log2() - p_logical.log10()
- } else if max_logical > 0 { (max_logical as f64).log2() + 15.0 }
- else { 0.0 };
+ } else if max_logical > 0 {
+ (max_logical as f64).log2() + 15.0
+ } else {
+ 0.0
+ };
candidates.push(OptimalAllocation {
- code_distance: d, logical_qubits: max_logical, decode_threads,
- expected_logical_error_rate: p_logical, pareto_score: score,
+ code_distance: d,
+ logical_qubits: max_logical,
+ decode_threads,
+ expected_logical_error_rate: p_logical,
+ pareto_score: score,
});
}
- candidates.sort_by(|a, b| b.pareto_score.partial_cmp(&a.pareto_score).unwrap_or(std::cmp::Ordering::Equal));
+ candidates.sort_by(|a, b| {
+ b.pareto_score
+ .partial_cmp(&a.pareto_score)
+ .unwrap_or(std::cmp::Ordering::Equal)
+ });
candidates
}
@@ -196,8 +244,13 @@ pub fn plan_latency_budget(distance: u32, decode_ns_per_syndrome: u64) -> Latenc
let correction_ns: u64 = 20;
let total_round_ns = extraction_ns + decode_ns_per_syndrome + correction_ns;
let slack_ns = extraction_ns as i64 - (decode_ns_per_syndrome as i64 + correction_ns as i64);
- LatencyBudget { syndrome_extraction_ns: extraction_ns, decode_ns: decode_ns_per_syndrome,
- correction_ns, total_round_ns, slack_ns }
+ LatencyBudget {
+ syndrome_extraction_ns: extraction_ns,
+ decode_ns: decode_ns_per_syndrome,
+ correction_ns,
+ total_round_ns,
+ slack_ns,
+ }
}
// -- 5. Backlog Simulator ----------------------------------------------------
@@ -223,7 +276,9 @@ pub struct RoundSnapshot {
/// Monte Carlo simulation of the QEC control loop with seeded RNG.
pub fn simulate_control_loop(
- config: &QecControlLoop, num_rounds: u64, seed: u64,
+ config: &QecControlLoop,
+ num_rounds: u64,
+ seed: u64,
) -> SimulationTrace {
let mut rng = StdRng::seed_from_u64(seed);
let d = config.plant.code_distance;
@@ -239,7 +294,11 @@ pub fn simulate_control_loop(
for r in 0..num_rounds {
let mut errs: u32 = 0;
- for _ in 0..n_q { if rng.gen::<f64>() < p { errs += 1; } }
+ for _ in 0..n_q {
+ if rng.gen::<f64>() < p {
+ errs += 1;
+ }
+ }
let jitter = 0.8 + 0.4 * rng.gen::<f64>();
let actual_lat = (t_decode as f64 * jitter) as u64;
@@ -247,24 +306,48 @@ pub fn simulate_control_loop(
let corrected = if in_time {
let mut c = 0u32;
- for _ in 0..errs { if rng.gen::<f64>() < acc { c += 1; } }
+ for _ in 0..errs {
+ if rng.gen::<f64>() < acc {
+ c += 1;
+ }
+ }
c
- } else { 0 };
+ } else {
+ 0
+ };
let uncorrected = errs.saturating_sub(corrected);
backlog += uncorrected as f64;
- if in_time && backlog > 0.0 { backlog -= (backlog * acc).min(backlog); }
- if backlog > max_backlog { max_backlog = backlog; }
- if uncorrected > (d.saturating_sub(1)) / 2 { logical_errors += 1; }
+ if in_time && backlog > 0.0 {
+ backlog -= (backlog * acc).min(backlog);
+ }
+ if backlog > max_backlog {
+ max_backlog = backlog;
+ }
+ if uncorrected > (d.saturating_sub(1)) / 2 {
+ logical_errors += 1;
+ }
rounds.push(RoundSnapshot {
- round: r, errors_this_round: errs, errors_corrected: corrected,
- backlog, decode_latency_ns: actual_lat,
+ round: r,
+ errors_this_round: errs,
+ errors_corrected: corrected,
+ backlog,
+ decode_latency_ns: actual_lat,
});
}
- let final_logical_error_rate = if num_rounds > 0 { logical_errors as f64 / num_rounds as f64 } else { 0.0 };
- SimulationTrace { rounds, converged: backlog < 1.0, final_logical_error_rate, max_backlog }
+ let final_logical_error_rate = if num_rounds > 0 {
+ logical_errors as f64 / num_rounds as f64
+ } else {
+ 0.0
+ };
+ SimulationTrace {
+ rounds,
+ converged: backlog < 1.0,
+ final_logical_error_rate,
+ max_backlog,
+ }
}
// -- 6. Scaling Laws ---------------------------------------------------------
@@ -281,10 +364,26 @@ pub struct ScalingLaw {
/// Known: `"union_find"` O(n), `"mwpm"` O(n^3), `"neural"` O(n). Default: O(n^2).
pub fn classical_overhead_scaling(decoder_name: &str) -> ScalingLaw {
match decoder_name {
- "union_find" => ScalingLaw { name: "Union-Find decoder".into(), exponent: 1.0, prefactor: 1.0 },
- "mwpm" => ScalingLaw { name: "Minimum Weight Perfect Matching".into(), exponent: 3.0, prefactor: 0.5 },
- "neural" => ScalingLaw { name: "Neural network decoder".into(), exponent: 1.0, prefactor: 10.0 },
- _ => ScalingLaw { name: format!("Generic decoder ({})", decoder_name), exponent: 2.0, prefactor: 1.0 },
+ "union_find" => ScalingLaw {
+ name: "Union-Find decoder".into(),
+ exponent: 1.0,
+ prefactor: 1.0,
+ },
+ "mwpm" => ScalingLaw {
+ name: "Minimum Weight Perfect Matching".into(),
+ exponent: 3.0,
+ prefactor: 0.5,
+ },
+ "neural" => ScalingLaw {
+ name: "Neural network decoder".into(),
+ exponent: 1.0,
+ prefactor: 10.0,
+ },
+ _ => ScalingLaw {
+ name: format!("Generic decoder ({})", decoder_name),
+ exponent: 2.0,
+ prefactor: 1.0,
+ },
}
}
@@ -292,13 +391,25 @@ pub fn classical_overhead_scaling(decoder_name: &str) -> ScalingLaw {
/// Below threshold the exponent is the suppression factor lambda = -ln(p/p_th).
pub fn logical_error_scaling(physical_rate: f64, threshold: f64) -> ScalingLaw {
if threshold <= 0.0 || physical_rate <= 0.0 {
- return ScalingLaw { name: "Logical error scaling (degenerate)".into(), exponent: 0.0, prefactor: 1.0 };
+ return ScalingLaw {
+ name: "Logical error scaling (degenerate)".into(),
+ exponent: 0.0,
+ prefactor: 1.0,
+ };
}
if physical_rate >= threshold {
- return ScalingLaw { name: "Logical error scaling (above threshold)".into(), exponent: 0.0, prefactor: 1.0 };
+ return ScalingLaw {
+ name: "Logical error scaling (above threshold)".into(),
+ exponent: 0.0,
+ prefactor: 1.0,
+ };
}
let lambda = -(physical_rate / threshold).ln();
- ScalingLaw { name: "Logical error scaling (below threshold)".into(), exponent: lambda, prefactor: 0.1 }
+ ScalingLaw {
+ name: "Logical error scaling (below threshold)".into(),
+ exponent: lambda,
+ prefactor: 0.1,
+ }
}
// == Tests ===================================================================
@@ -308,98 +419,194 @@ mod tests {
use super::*;
fn make_plant(d: u32, p: f64) -> QuantumPlant {
- QuantumPlant { code_distance: d, physical_error_rate: p, num_data_qubits: d * d, coherence_time_ns: 100_000 }
+ QuantumPlant {
+ code_distance: d,
+ physical_error_rate: p,
+ num_data_qubits: d * d,
+ coherence_time_ns: 100_000,
+ }
}
fn make_controller(lat: u64, tp: f64, acc: f64) -> ClassicalController {
- ClassicalController { decode_latency_ns: lat, decode_throughput: tp, accuracy: acc }
+ ClassicalController {
+ decode_latency_ns: lat,
+ decode_throughput: tp,
+ accuracy: acc,
+ }
}
fn make_loop(d: u32, p: f64, lat: u64) -> QecControlLoop {
- QecControlLoop { plant: make_plant(d, p), controller: make_controller(lat, 1e6, 0.99), state: ControlState::new() }
+ QecControlLoop {
+ plant: make_plant(d, p),
+ controller: make_controller(lat, 1e6, 0.99),
+ state: ControlState::new(),
+ }
}
- #[test] fn test_control_state_new() {
+ #[test]
+ fn test_control_state_new() {
let s = ControlState::new();
- assert_eq!(s.logical_error_rate, 0.0); assert_eq!(s.error_backlog, 0.0);
- assert_eq!(s.rounds_decoded, 0); assert_eq!(s.total_latency_ns, 0);
+ assert_eq!(s.logical_error_rate, 0.0);
+ assert_eq!(s.error_backlog, 0.0);
+ assert_eq!(s.rounds_decoded, 0);
+ assert_eq!(s.total_latency_ns, 0);
+ }
+ #[test]
+ fn test_control_state_default() {
+ assert_eq!(ControlState::default().rounds_decoded, 0);
}
- #[test] fn test_control_state_default() { assert_eq!(ControlState::default().rounds_decoded, 0); }
- #[test] fn test_syndrome_period_scales() {
+ #[test]
+ fn test_syndrome_period_scales() {
assert!(syndrome_period_ns(3) < syndrome_period_ns(5));
assert!(syndrome_period_ns(5) < syndrome_period_ns(7));
}
- #[test] fn test_syndrome_period_d3() { assert_eq!(syndrome_period_ns(3), 360); }
+ #[test]
+ fn test_syndrome_period_d3() {
+ assert_eq!(syndrome_period_ns(3), 360);
+ }
- #[test] fn test_stable_loop() {
+ #[test]
+ fn test_stable_loop() {
let c = analyze_stability(&make_loop(5, 0.001, 100));
- assert!(c.is_stable); assert!(c.margin > 0.0); assert!(c.convergence_rate > 0.0);
+ assert!(c.is_stable);
+ assert!(c.margin > 0.0);
+ assert!(c.convergence_rate > 0.0);
}
- #[test] fn test_unstable_loop() {
+ #[test]
+ fn test_unstable_loop() {
let c = analyze_stability(&make_loop(3, 0.001, 1000));
- assert!(!c.is_stable); assert!(c.margin < 0.0);
+ assert!(!c.is_stable);
+ assert!(c.margin < 0.0);
}
- #[test] fn test_stability_critical_latency() {
- assert_eq!(analyze_stability(&make_loop(5, 0.001, 100)).critical_latency_ns, syndrome_period_ns(5));
+ #[test]
+ fn test_stability_critical_latency() {
+ assert_eq!(
+ analyze_stability(&make_loop(5, 0.001, 100)).critical_latency_ns,
+ syndrome_period_ns(5)
+ );
}
- #[test] fn test_stability_zero_decode() {
+ #[test]
+ fn test_stability_zero_decode() {
let c = analyze_stability(&make_loop(3, 0.001, 0));
- assert!(c.is_stable); assert!(c.margin.is_infinite());
+ assert!(c.is_stable);
+ assert!(c.margin.is_infinite());
}
- #[test] fn test_max_stable_fast() { assert!(max_stable_distance(&make_controller(100, 1e7, 0.99), 0.001) >= 3); }
- #[test] fn test_max_stable_slow() { assert!(max_stable_distance(&make_controller(10_000, 1e5, 0.99), 0.001) >= 3); }
- #[test] fn test_max_stable_above_thresh() { assert_eq!(max_stable_distance(&make_controller(100, 1e7, 0.99), 0.5), 3); }
+ #[test]
+ fn test_max_stable_fast() {
+ assert!(max_stable_distance(&make_controller(100, 1e7, 0.99), 0.001) >= 3);
+ }
+ #[test]
+ fn test_max_stable_slow() {
+ assert!(max_stable_distance(&make_controller(10_000, 1e5, 0.99), 0.001) >= 3);
+ }
+ #[test]
+ fn test_max_stable_above_thresh() {
+ assert_eq!(
+ max_stable_distance(&make_controller(100, 1e7, 0.99), 0.5),
+ 3
+ );
+ }
- #[test] fn test_min_throughput_d3() {
+ #[test]
+ fn test_min_throughput_d3() {
let tp = min_throughput(&make_plant(3, 0.001));
assert!(tp > 2e6 && tp < 3e6);
}
- #[test] fn test_min_throughput_ordering() {
+ #[test]
+ fn test_min_throughput_ordering() {
assert!(min_throughput(&make_plant(3, 0.001)) > min_throughput(&make_plant(5, 0.001)));
}
- #[test] fn test_optimize_basic() {
- let b = ResourceBudget { total_physical_qubits: 10_000, classical_cores: 8, classical_clock_ghz: 3.0, total_time_budget_us: 1_000 };
+ #[test]
+ fn test_optimize_basic() {
+ let b = ResourceBudget {
+ total_physical_qubits: 10_000,
+ classical_cores: 8,
+ classical_clock_ghz: 3.0,
+ total_time_budget_us: 1_000,
+ };
let a = optimize_allocation(&b, 0.001, 1);
assert!(!a.is_empty());
- for w in a.windows(2) { assert!(w[0].pareto_score >= w[1].pareto_score); }
+ for w in a.windows(2) {
+ assert!(w[0].pareto_score >= w[1].pareto_score);
+ }
}
- #[test] fn test_optimize_min_logical() {
- let b = ResourceBudget { total_physical_qubits: 100, classical_cores: 4, classical_clock_ghz: 2.0, total_time_budget_us: 1_000 };
- for a in &optimize_allocation(&b, 0.001, 5) { assert!(a.logical_qubits >= 5); }
+ #[test]
+ fn test_optimize_min_logical() {
+ let b = ResourceBudget {
+ total_physical_qubits: 100,
+ classical_cores: 4,
+ classical_clock_ghz: 2.0,
+ total_time_budget_us: 1_000,
+ };
+ for a in &optimize_allocation(&b, 0.001, 5) {
+ assert!(a.logical_qubits >= 5);
+ }
}
- #[test] fn test_optimize_insufficient() {
- let b = ResourceBudget { total_physical_qubits: 5, classical_cores: 1, classical_clock_ghz: 1.0, total_time_budget_us: 100 };
+ #[test]
+ fn test_optimize_insufficient() {
+ let b = ResourceBudget {
+ total_physical_qubits: 5,
+ classical_cores: 1,
+ classical_clock_ghz: 1.0,
+ total_time_budget_us: 100,
+ };
assert!(optimize_allocation(&b, 0.001, 1).is_empty());
}
- #[test] fn test_optimize_zero_cores() {
- let b = ResourceBudget { total_physical_qubits: 10_000, classical_cores: 0, classical_clock_ghz: 0.0, total_time_budget_us: 1_000 };
+ #[test]
+ fn test_optimize_zero_cores() {
+ let b = ResourceBudget {
+ total_physical_qubits: 10_000,
+ classical_cores: 0,
+ classical_clock_ghz: 0.0,
+ total_time_budget_us: 1_000,
+ };
assert!(optimize_allocation(&b, 0.001, 1).is_empty());
}
- #[test] fn test_latency_budget_d3() {
+ #[test]
+ fn test_latency_budget_d3() {
let lb = plan_latency_budget(3, 100);
- assert_eq!(lb.syndrome_extraction_ns, 360); assert_eq!(lb.decode_ns, 100);
- assert_eq!(lb.correction_ns, 20); assert_eq!(lb.total_round_ns, 480); assert_eq!(lb.slack_ns, 240);
+ assert_eq!(lb.syndrome_extraction_ns, 360);
+ assert_eq!(lb.decode_ns, 100);
+ assert_eq!(lb.correction_ns, 20);
+ assert_eq!(lb.total_round_ns, 480);
+ assert_eq!(lb.slack_ns, 240);
}
- #[test] fn test_latency_budget_negative_slack() { assert!(plan_latency_budget(3, 1000).slack_ns < 0); }
- #[test] fn test_latency_budget_scales() {
- assert!(plan_latency_budget(7, 100).syndrome_extraction_ns > plan_latency_budget(3, 100).syndrome_extraction_ns);
+ #[test]
+ fn test_latency_budget_negative_slack() {
+ assert!(plan_latency_budget(3, 1000).slack_ns < 0);
+ }
+ #[test]
+ fn test_latency_budget_scales() {
+ assert!(
+ plan_latency_budget(7, 100).syndrome_extraction_ns
+ > plan_latency_budget(3, 100).syndrome_extraction_ns
+ );
}
- #[test] fn test_sim_stable() {
+ #[test]
+ fn test_sim_stable() {
let t = simulate_control_loop(&make_loop(5, 0.001, 100), 100, 42);
- assert_eq!(t.rounds.len(), 100); assert!(t.converged); assert!(t.max_backlog < 50.0);
+ assert_eq!(t.rounds.len(), 100);
+ assert!(t.converged);
+ assert!(t.max_backlog < 50.0);
}
- #[test] fn test_sim_unstable() {
+ #[test]
+ fn test_sim_unstable() {
let t = simulate_control_loop(&make_loop(3, 0.3, 1000), 200, 42);
- assert_eq!(t.rounds.len(), 200); assert!(t.max_backlog > 0.0);
+ assert_eq!(t.rounds.len(), 200);
+ assert!(t.max_backlog > 0.0);
}
- #[test] fn test_sim_zero_rounds() {
+ #[test]
+ fn test_sim_zero_rounds() {
let t = simulate_control_loop(&make_loop(3, 0.001, 100), 0, 42);
- assert!(t.rounds.is_empty()); assert_eq!(t.final_logical_error_rate, 0.0); assert!(t.converged);
+ assert!(t.rounds.is_empty());
+ assert_eq!(t.final_logical_error_rate, 0.0);
+ assert!(t.converged);
}
- #[test] fn test_sim_deterministic() {
+ #[test]
+ fn test_sim_deterministic() {
let t1 = simulate_control_loop(&make_loop(5, 0.01, 200), 50, 123);
let t2 = simulate_control_loop(&make_loop(5, 0.01, 200), 50, 123);
for (a, b) in t1.rounds.iter().zip(t2.rounds.iter()) {
@@ -407,27 +614,70 @@ mod tests {
assert_eq!(a.errors_corrected, b.errors_corrected);
}
}
- #[test] fn test_sim_zero_error_rate() {
+ #[test]
+ fn test_sim_zero_error_rate() {
let t = simulate_control_loop(&make_loop(5, 0.0, 100), 50, 99);
- assert!(t.converged); assert_eq!(t.final_logical_error_rate, 0.0);
- for s in &t.rounds { assert_eq!(s.errors_this_round, 0); }
+ assert!(t.converged);
+ assert_eq!(t.final_logical_error_rate, 0.0);
+ for s in &t.rounds {
+ assert_eq!(s.errors_this_round, 0);
+ }
}
- #[test] fn test_sim_snapshot_fields() {
+ #[test]
+ fn test_sim_snapshot_fields() {
let t = simulate_control_loop(&make_loop(3, 0.01, 100), 10, 7);
for (i, s) in t.rounds.iter().enumerate() {
- assert_eq!(s.round, i as u64); assert!(s.errors_corrected <= s.errors_this_round);
+ assert_eq!(s.round, i as u64);
+ assert!(s.errors_corrected <= s.errors_this_round);
assert!(s.decode_latency_ns > 0);
}
}
- #[test] fn test_scaling_uf() { let l = classical_overhead_scaling("union_find"); assert_eq!(l.exponent, 1.0); assert!(l.name.contains("Union-Find")); }
- #[test] fn test_scaling_mwpm() { assert_eq!(classical_overhead_scaling("mwpm").exponent, 3.0); }
- #[test] fn test_scaling_neural() { let l = classical_overhead_scaling("neural"); assert_eq!(l.exponent, 1.0); assert!(l.prefactor > 1.0); }
- #[test] fn test_scaling_unknown() { let l = classical_overhead_scaling("custom"); assert_eq!(l.exponent, 2.0); assert!(l.name.contains("custom")); }
+ #[test]
+ fn test_scaling_uf() {
+ let l = classical_overhead_scaling("union_find");
+ assert_eq!(l.exponent, 1.0);
+ assert!(l.name.contains("Union-Find"));
+ }
+ #[test]
+ fn test_scaling_mwpm() {
+ assert_eq!(classical_overhead_scaling("mwpm").exponent, 3.0);
+ }
+ #[test]
+ fn test_scaling_neural() {
+ let l = classical_overhead_scaling("neural");
+ assert_eq!(l.exponent, 1.0);
+ assert!(l.prefactor > 1.0);
+ }
+ #[test]
+ fn test_scaling_unknown() {
+ let l = classical_overhead_scaling("custom");
+ assert_eq!(l.exponent, 2.0);
+ assert!(l.name.contains("custom"));
+ }
- #[test] fn test_logical_below() { let l = logical_error_scaling(0.001, 0.01); assert!(l.exponent > 0.0); assert_eq!(l.prefactor, 0.1); }
- #[test] fn test_logical_above() { let l = logical_error_scaling(0.05, 0.01); assert_eq!(l.exponent, 0.0); assert_eq!(l.prefactor, 1.0); }
- #[test] fn test_logical_at() { assert_eq!(logical_error_scaling(0.01, 0.01).exponent, 0.0); }
- #[test] fn test_logical_zero_rate() { assert_eq!(logical_error_scaling(0.0, 0.01).exponent, 0.0); }
- #[test] fn test_logical_zero_thresh() { assert_eq!(logical_error_scaling(0.001, 0.0).exponent, 0.0); }
+ #[test]
+ fn test_logical_below() {
+ let l = logical_error_scaling(0.001, 0.01);
+ assert!(l.exponent > 0.0);
+ assert_eq!(l.prefactor, 0.1);
+ }
+ #[test]
+ fn test_logical_above() {
+ let l = logical_error_scaling(0.05, 0.01);
+ assert_eq!(l.exponent, 0.0);
+ assert_eq!(l.prefactor, 1.0);
+ }
+ #[test]
+ fn test_logical_at() {
+ assert_eq!(logical_error_scaling(0.01, 0.01).exponent, 0.0);
+ }
+ #[test]
+ fn test_logical_zero_rate() {
+ assert_eq!(logical_error_scaling(0.0, 0.01).exponent, 0.0);
+ }
+ #[test]
+ fn test_logical_zero_thresh() {
+ assert_eq!(logical_error_scaling(0.001, 0.0).exponent, 0.0);
+ }
}
diff --git a/crates/ruqu-core/src/decoder.rs b/crates/ruqu-core/src/decoder.rs
index 85647cf1d..88b3fac6d 100644
--- a/crates/ruqu-core/src/decoder.rs
+++ b/crates/ruqu-core/src/decoder.rs
@@ -231,8 +231,7 @@ impl UnionFindDecoder {
// Compare with previous round (or implicit all-false for round 0).
let prev = if r > 0 {
- let prev_idx =
- ((r - 1) * grid_w * grid_h + y * grid_w + x) as usize;
+ let prev_idx = ((r - 1) * grid_w * grid_h + y * grid_w + x) as usize;
grid[prev_idx]
} else {
false
@@ -275,8 +274,12 @@ impl UnionFindDecoder {
} else {
1
};
- let dx_min = defect.x.min(grid_w.saturating_sub(1).saturating_sub(defect.x));
- let dy_min = defect.y.min(grid_h.saturating_sub(1).saturating_sub(defect.y));
+ let dx_min = defect
+ .x
+ .min(grid_w.saturating_sub(1).saturating_sub(defect.x));
+ let dy_min = defect
+ .y
+ .min(grid_h.saturating_sub(1).saturating_sub(defect.y));
dx_min.min(dy_min)
}
@@ -326,9 +329,7 @@ impl UnionFindDecoder {
break;
}
// Check if all clusters are even-parity.
- let all_even = defects
- .iter()
- .all(|d| !uf.cluster_parity(d.node_index));
+ let all_even = defects.iter().all(|d| !uf.cluster_parity(d.node_index));
if all_even {
break;
}
@@ -424,12 +425,7 @@ impl UnionFindDecoder {
/// Generate Pauli corrections along the shortest path between two
/// paired defects.
- fn path_between(
- &self,
- a: &Defect,
- b: &Defect,
- code_distance: u32,
- ) -> Vec<(u32, PauliType)> {
+ fn path_between(&self, a: &Defect, b: &Defect, code_distance: u32) -> Vec<(u32, PauliType)> {
let mut corrections = Vec::new();
let (mut cx, mut cy) = (a.x as i64, a.y as i64);
@@ -616,8 +612,7 @@ impl PartitionedDecoder {
// Remap tile-local qubit to global qubit coordinate.
let local_y = qubit / (d.max(1));
let local_x = qubit % (d.max(1));
- let global_qubit =
- (local_y + y_offset) * d + (local_x + x_offset);
+ let global_qubit = (local_y + y_offset) * d + (local_x + x_offset);
all_corrections.push((global_qubit, pauli));
}
@@ -633,7 +628,10 @@ impl PartitionedDecoder {
// Deduplicate corrections: two corrections on the same qubit
// with the same Pauli type cancel out.
- all_corrections.sort_by(|a, b| a.0.cmp(&b.0).then(format!("{:?}", a.1).cmp(&format!("{:?}", b.1))));
+ all_corrections.sort_by(|a, b| {
+ a.0.cmp(&b.0)
+ .then(format!("{:?}", a.1).cmp(&format!("{:?}", b.1)))
+ });
let mut deduped: Vec<(u32, PauliType)> = Vec::new();
let mut i = 0;
while i < all_corrections.len() {
@@ -799,10 +797,7 @@ impl AdaptiveCodeDistance {
if self.error_history.is_empty() {
return f64::NAN;
}
- let window_start = self
- .error_history
- .len()
- .saturating_sub(self.window_size);
+ let window_start = self.error_history.len().saturating_sub(self.window_size);
let window = &self.error_history[window_start..];
let sum: f64 = window.iter().sum();
sum / window.len() as f64
@@ -891,8 +886,7 @@ impl LogicalQubitAllocator {
// Enumerate physical qubits in this patch.
let qubits_per_logical = 2 * d * d - 2 * d + 1;
let start_qubit = patch_idx * qubits_per_logical;
- let physical_qubits: Vec<u32> =
- (start_qubit..start_qubit + qubits_per_logical).collect();
+ let physical_qubits: Vec<u32> = (start_qubit..start_qubit + qubits_per_logical).collect();
let logical_id = self.next_logical_id;
self.next_logical_id += 1;
@@ -1177,10 +1171,30 @@ mod tests {
let decoder = UnionFindDecoder::new(0);
let syndrome = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: false },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: false },
- StabilizerMeasurement { x: 0, y: 1, round: 0, value: false },
- StabilizerMeasurement { x: 1, y: 1, round: 0, value: false },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 1,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 1,
+ round: 0,
+ value: false,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1200,10 +1214,30 @@ mod tests {
let decoder = UnionFindDecoder::new(0);
let syndrome = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: false },
- StabilizerMeasurement { x: 0, y: 1, round: 0, value: false },
- StabilizerMeasurement { x: 1, y: 1, round: 0, value: false },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 1,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 1,
+ round: 0,
+ value: false,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1223,10 +1257,30 @@ mod tests {
// Two adjacent defects should pair and produce corrections between them.
let syndrome = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 0, y: 1, round: 0, value: false },
- StabilizerMeasurement { x: 1, y: 1, round: 0, value: false },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 1,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 1,
+ round: 0,
+ value: false,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1278,7 +1332,10 @@ mod tests {
num_rounds: 1,
};
let defects = decoder.extract_defects(&syndrome);
- assert!(defects.is_empty(), "All-false syndrome should have no defects");
+ assert!(
+ defects.is_empty(),
+ "All-false syndrome should have no defects"
+ );
}
#[test]
@@ -1287,8 +1344,18 @@ mod tests {
let syndrome = SyndromeData {
stabilizers: vec![
// Round 0: (0,0)=false, (1,0)=true
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: false },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: true },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: true,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1303,17 +1370,37 @@ mod tests {
#[test]
fn test_uf_decoder_manhattan_distance() {
- let a = Defect { x: 0, y: 0, round: 0, node_index: 0 };
- let b = Defect { x: 3, y: 4, round: 1, node_index: 1 };
+ let a = Defect {
+ x: 0,
+ y: 0,
+ round: 0,
+ node_index: 0,
+ };
+ let b = Defect {
+ x: 3,
+ y: 4,
+ round: 1,
+ node_index: 1,
+ };
assert_eq!(UnionFindDecoder::manhattan_distance(&a, &b), 8);
}
#[test]
fn test_uf_decoder_boundary_distance() {
- let d = Defect { x: 0, y: 0, round: 0, node_index: 0 };
+ let d = Defect {
+ x: 0,
+ y: 0,
+ round: 0,
+ node_index: 0,
+ };
assert_eq!(UnionFindDecoder::boundary_distance(&d, 5), 0);
- let d2 = Defect { x: 2, y: 2, round: 0, node_index: 0 };
+ let d2 = Defect {
+ x: 2,
+ y: 2,
+ round: 0,
+ node_index: 0,
+ };
assert_eq!(UnionFindDecoder::boundary_distance(&d2, 5), 1);
}
@@ -1322,8 +1409,18 @@ mod tests {
let decoder = UnionFindDecoder::new(0);
let syndrome = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 0, y: 0, round: 1, value: false },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 1,
+ value: false,
+ },
],
code_distance: 3,
num_rounds: 2,
@@ -1341,10 +1438,30 @@ mod tests {
// Few defects -> high confidence.
let syndrome_low = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: false },
- StabilizerMeasurement { x: 0, y: 1, round: 0, value: false },
- StabilizerMeasurement { x: 1, y: 1, round: 0, value: false },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 1,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 1,
+ round: 0,
+ value: false,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1354,10 +1471,30 @@ mod tests {
// Many defects -> lower confidence.
let syndrome_high = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 0, y: 1, round: 0, value: true },
- StabilizerMeasurement { x: 1, y: 1, round: 0, value: true },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 1,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 1,
+ round: 0,
+ value: true,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1376,9 +1513,12 @@ mod tests {
fn test_uf_decoder_decode_time_recorded() {
let decoder = UnionFindDecoder::new(0);
let syndrome = SyndromeData {
- stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- ],
+ stabilizers: vec![StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ }],
code_distance: 3,
num_rounds: 1,
};
@@ -1432,8 +1572,18 @@ mod tests {
let syndrome = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: false },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: false,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1813,10 +1963,30 @@ mod tests {
// results to the inner decoder.
let syndrome = SyndromeData {
stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 1, y: 0, round: 0, value: false },
- StabilizerMeasurement { x: 0, y: 1, round: 0, value: false },
- StabilizerMeasurement { x: 1, y: 1, round: 0, value: false },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 0,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 1,
+ round: 0,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 1,
+ y: 1,
+ round: 0,
+ value: false,
+ },
],
code_distance: 3,
num_rounds: 1,
@@ -1840,13 +2010,19 @@ mod tests {
// Verify trait object usage compiles and works.
let decoders: Vec> = vec![
Box::new(UnionFindDecoder::new(0)),
- Box::new(PartitionedDecoder::new(4, Box::new(UnionFindDecoder::new(0)))),
+ Box::new(PartitionedDecoder::new(
+ 4,
+ Box::new(UnionFindDecoder::new(0)),
+ )),
];
let syndrome = SyndromeData {
- stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: false },
- ],
+ stabilizers: vec![StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: false,
+ }],
code_distance: 3,
num_rounds: 1,
};
@@ -1866,9 +2042,10 @@ mod tests {
(1, PauliType::X),
]));
// Odd number of X corrections -> logical_outcome = true.
- assert!(UnionFindDecoder::infer_logical_outcome(&[
- (0, PauliType::X),
- ]));
+ assert!(UnionFindDecoder::infer_logical_outcome(&[(
+ 0,
+ PauliType::X
+ ),]));
// Z corrections don't affect X logical outcome.
assert!(!UnionFindDecoder::infer_logical_outcome(&[
(0, PauliType::Z),
@@ -1882,9 +2059,12 @@ mod tests {
// Distance-1 code is degenerate but should not panic.
let decoder = UnionFindDecoder::new(0);
let syndrome = SyndromeData {
- stabilizers: vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- ],
+ stabilizers: vec![StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ }],
code_distance: 1,
num_rounds: 1,
};
diff --git a/crates/ruqu-core/src/decomposition.rs b/crates/ruqu-core/src/decomposition.rs
index cd72795b6..90bbc02c9 100644
--- a/crates/ruqu-core/src/decomposition.rs
+++ b/crates/ruqu-core/src/decomposition.rs
@@ -549,14 +549,12 @@ pub fn spatial_decomposition(
continue;
}
// Score = number of edges from this neighbor into group members.
- let score: usize = graph
- .adjacency[neighbor as usize]
+ let score: usize = graph.adjacency[neighbor as usize]
.iter()
.filter(|&&adj| group.contains(&adj))
.count();
if score > best_score
- || (score == best_score
- && best_candidate.map_or(true, |bc| neighbor < bc))
+ || (score == best_score && best_candidate.map_or(true, |bc| neighbor < bc))
{
best_score = score;
best_candidate = Some(neighbor);
@@ -770,7 +768,7 @@ pub fn estimate_segment_cost(segment: &QuantumCircuit, backend: BackendType) ->
// Memory: tableau of 2n rows x (2n+1) bits, stored as bools.
let tableau_size = 2 * (n as u64) * (2 * (n as u64) + 1);
let memory_bytes = tableau_size; // 1 byte per bool in practice
- // FLOPs: O(n^2) per gate (row operations over 2n rows of width 2n+1).
+ // FLOPs: O(n^2) per gate (row operations over 2n rows of width 2n+1).
let flops_per_gate = 4 * (n as u64) * (n as u64);
let estimated_flops = gate_count.saturating_mul(flops_per_gate);
SegmentCost {
@@ -837,9 +835,7 @@ pub fn estimate_segment_cost(segment: &QuantumCircuit, backend: BackendType) ->
/// Each input element is `(bitstring, probability)` from one segment's
/// simulation. The output maps combined bitstrings to their joint
/// probabilities.
-pub fn stitch_results(
- partitions: &[(Vec<bool>, f64)],
-) -> HashMap<Vec<bool>, f64> {
+pub fn stitch_results(partitions: &[(Vec<bool>, f64)]) -> HashMap<Vec<bool>, f64> {
if partitions.is_empty() {
return HashMap::new();
}
@@ -1101,10 +1097,7 @@ pub fn decompose(circuit: &QuantumCircuit, max_segment_qubits: u32) -> CircuitPa
// Find the gate index range in the original circuit for this component.
let gate_indices = gate_indices_for_component(circuit, &comp_set);
let gate_range_start = gate_indices.first().copied().unwrap_or(0);
- let _gate_range_end = gate_indices
- .last()
- .map(|&i| i + 1)
- .unwrap_or(0);
+ let _gate_range_end = gate_indices.last().map(|&i| i + 1).unwrap_or(0);
// Temporal decomposition within the component.
let time_slices = temporal_decomposition(&comp_circuit);
@@ -1197,10 +1190,7 @@ fn active_qubit_count(circuit: &QuantumCircuit) -> u32 {
/// Extract a subcircuit containing only the gates that act on qubits in the
/// given component set. The subcircuit has `num_qubits` equal to the size of
/// the component, with qubit indices remapped to `0..component.len()`.
-fn extract_component_circuit(
- circuit: &QuantumCircuit,
- component: &HashSet<u32>,
-) -> QuantumCircuit {
+fn extract_component_circuit(circuit: &QuantumCircuit, component: &HashSet<u32>) -> QuantumCircuit {
// Build a sorted list for deterministic remapping.
let mut sorted_qubits: Vec<u32> = component.iter().copied().collect();
sorted_qubits.sort_unstable();
@@ -1366,18 +1356,12 @@ mod tests {
assert_eq!(graph.edges.len(), 2, "should have 2 distinct edges");
// Find the (0,1) edge and check its count.
- let edge_01 = graph
- .edges
- .iter()
- .find(|&&(a, b, _)| a == 0 && b == 1);
+ let edge_01 = graph.edges.iter().find(|&&(a, b, _)| a == 0 && b == 1);
assert!(edge_01.is_some(), "edge (0,1) should exist");
assert_eq!(edge_01.unwrap().2, 2, "edge (0,1) should have count 2");
// Find the (1,2) edge.
- let edge_12 = graph
- .edges
- .iter()
- .find(|&&(a, b, _)| a == 1 && b == 2);
+ let edge_12 = graph.edges.iter().find(|&&(a, b, _)| a == 1 && b == 2);
assert!(edge_12.is_some(), "edge (1,2) should exist");
assert_eq!(edge_12.unwrap().2, 1, "edge (1,2) should have count 1");
@@ -1584,10 +1568,22 @@ mod tests {
// (true, true, true) = 0.5 * 0.75 = 0.375
assert_eq!(combined.len(), 4);
- let prob_fff = combined.get(&vec![false, false, false]).copied().unwrap_or(0.0);
- let prob_ftt = combined.get(&vec![false, true, true]).copied().unwrap_or(0.0);
- let prob_tff = combined.get(&vec![true, false, false]).copied().unwrap_or(0.0);
- let prob_ttt = combined.get(&vec![true, true, true]).copied().unwrap_or(0.0);
+ let prob_fff = combined
+ .get(&vec![false, false, false])
+ .copied()
+ .unwrap_or(0.0);
+ let prob_ftt = combined
+ .get(&vec![false, true, true])
+ .copied()
+ .unwrap_or(0.0);
+ let prob_tff = combined
+ .get(&vec![true, false, false])
+ .copied()
+ .unwrap_or(0.0);
+ let prob_ttt = combined
+ .get(&vec![true, true, true])
+ .copied()
+ .unwrap_or(0.0);
assert!((prob_fff - 0.125).abs() < 1e-10);
assert!((prob_ftt - 0.375).abs() < 1e-10);
@@ -1823,7 +1819,10 @@ mod tests {
let parts = spatial_decomposition_mincut(&circ, &graph, 3);
assert!(parts.len() >= 2, "Should partition into at least 2 groups");
for (qubits, _sub_circ) in &parts {
- assert!(qubits.len() as u32 <= 3, "Each group should have at most 3 qubits");
+ assert!(
+ qubits.len() as u32 <= 3,
+ "Each group should have at most 3 qubits"
+ );
}
}
@@ -1860,7 +1859,7 @@ mod tests {
let mut circ = QuantumCircuit::new(4);
circ.h(0).cnot(0, 1); // Bell pair 0-1
circ.h(2).cnot(2, 3); // Bell pair 2-3
- circ.cnot(1, 2); // Cross-partition gate
+ circ.cnot(1, 2); // Cross-partition gate
let partition = CircuitPartition {
segments: vec![
@@ -1873,7 +1872,11 @@ mod tests {
backend: BackendType::Stabilizer,
qubit_range: (0, 1),
gate_range: (0, 2),
- estimated_cost: SegmentCost { memory_bytes: 0, estimated_flops: 0, qubit_count: 2 },
+ estimated_cost: SegmentCost {
+ memory_bytes: 0,
+ estimated_flops: 0,
+ qubit_count: 2,
+ },
},
CircuitSegment {
circuit: {
@@ -1884,7 +1887,11 @@ mod tests {
backend: BackendType::Stabilizer,
qubit_range: (2, 3),
gate_range: (2, 4),
- estimated_cost: SegmentCost { memory_bytes: 0, estimated_flops: 0, qubit_count: 2 },
+ estimated_cost: SegmentCost {
+ memory_bytes: 0,
+ estimated_flops: 0,
+ qubit_count: 2,
+ },
},
],
total_qubits: 4,
@@ -1898,7 +1905,10 @@ mod tests {
(vec![true, true], 0.5),
];
let (_dist, fidelity) = stitch_with_fidelity(&partitions, &partition, &circ);
- assert!(fidelity.fidelity < 1.0, "Cut circuit should have fidelity < 1.0");
+ assert!(
+ fidelity.fidelity < 1.0,
+ "Cut circuit should have fidelity < 1.0"
+ );
assert!(fidelity.cut_gates >= 1, "Should detect at least 1 cut gate");
}
}
diff --git a/crates/ruqu-core/src/error.rs b/crates/ruqu-core/src/error.rs
index a555143a2..4d9a8e15a 100644
--- a/crates/ruqu-core/src/error.rs
+++ b/crates/ruqu-core/src/error.rs
@@ -10,10 +10,7 @@ pub enum QuantumError {
QubitLimitExceeded { requested: u32, maximum: u32 },
#[error("invalid qubit index {index} for {num_qubits}-qubit system")]
- InvalidQubitIndex {
- index: QubitIndex,
- num_qubits: u32,
- },
+ InvalidQubitIndex { index: QubitIndex, num_qubits: u32 },
#[error("memory allocation failed: need {required_bytes} bytes")]
MemoryAllocationFailed { required_bytes: usize },
diff --git a/crates/ruqu-core/src/gate.rs b/crates/ruqu-core/src/gate.rs
index d868eea96..f6e06a855 100644
--- a/crates/ruqu-core/src/gate.rs
+++ b/crates/ruqu-core/src/gate.rs
@@ -55,10 +55,9 @@ impl Gate {
| Gate::Reset(q)
| Gate::Unitary1Q(q, _) => vec![*q],
- Gate::CNOT(q1, q2)
- | Gate::CZ(q1, q2)
- | Gate::SWAP(q1, q2)
- | Gate::Rzz(q1, q2, _) => vec![*q1, *q2],
+ Gate::CNOT(q1, q2) | Gate::CZ(q1, q2) | Gate::SWAP(q1, q2) | Gate::Rzz(q1, q2, _) => {
+ vec![*q1, *q2]
+ }
Gate::Barrier => vec![],
}
@@ -138,10 +137,7 @@ impl Gate {
}
// Phase(theta) = [[1, 0], [0, e^(i*theta)]]
- Gate::Phase(_, theta) => Some([
- [c1, c0],
- [c0, Complex::from_polar(1.0, *theta)],
- ]),
+ Gate::Phase(_, theta) => Some([[c1, c0], [c0, Complex::from_polar(1.0, *theta)]]),
// Custom fused unitary
Gate::Unitary1Q(_, m) => Some(*m),
diff --git a/crates/ruqu-core/src/hardware.rs b/crates/ruqu-core/src/hardware.rs
index 7a57693bc..31655d6e1 100644
--- a/crates/ruqu-core/src/hardware.rs
+++ b/crates/ruqu-core/src/hardware.rs
@@ -246,7 +246,11 @@ fn parse_qubit_count(qasm: &str, default: u32) -> u32 {
}
}
}
- if total == 0 { default } else { total }
+ if total == 0 {
+ default
+ } else {
+ total
+ }
}
/// Count gate operations in a QASM string (lines that look like gate
@@ -525,12 +529,9 @@ impl HardwareProvider for LocalSimulatorProvider {
));
}
COMPLETED_JOBS.with(|jobs| {
- jobs.borrow()
- .get(&handle.job_id)
- .cloned()
- .ok_or_else(|| {
- HardwareError::JobFailed(format!("unknown job id: {}", handle.job_id))
- })
+ jobs.borrow().get(&handle.job_id).cloned().ok_or_else(|| {
+ HardwareError::JobFailed(format!("unknown job id: {}", handle.job_id))
+ })
})
}
}
@@ -633,7 +634,11 @@ impl HardwareProvider for IbmQuantumProvider {
.available_devices()
.into_iter()
.find(|d| d.name == device)?;
- Some(synthetic_calibration(device, dev.num_qubits, &dev.coupling_map))
+ Some(synthetic_calibration(
+ device,
+ dev.num_qubits,
+ &dev.coupling_map,
+ ))
}
fn submit_circuit(
@@ -749,8 +754,7 @@ impl HardwareProvider for IonQProvider {
"ionq_aria" => Some(Self::aria_calibration()),
"ionq_forte" => {
let dev = Self::forte_device();
- let mut cal =
- synthetic_calibration(&dev.name, dev.num_qubits, &dev.coupling_map);
+ let mut cal = synthetic_calibration(&dev.name, dev.num_qubits, &dev.coupling_map);
for t1 in &mut cal.qubit_t1 {
*t1 = 10_000_000.0;
}
@@ -805,12 +809,7 @@ impl RigettiProvider {
name: "rigetti_ankaa_2".to_string(),
provider: ProviderType::Rigetti,
num_qubits: 84,
- basis_gates: vec![
- "rx".into(),
- "rz".into(),
- "cz".into(),
- "measure".into(),
- ],
+ basis_gates: vec!["rx".into(), "rz".into(), "cz".into(), "measure".into()],
coupling_map: linear_coupling_map(84),
max_shots: 100_000,
status: DeviceStatus::Online,
@@ -836,7 +835,11 @@ impl HardwareProvider for RigettiProvider {
return None;
}
let dev = Self::ankaa_device();
- Some(synthetic_calibration(device, dev.num_qubits, &dev.coupling_map))
+ Some(synthetic_calibration(
+ device,
+ dev.num_qubits,
+ &dev.coupling_map,
+ ))
}
fn submit_circuit(
@@ -901,12 +904,7 @@ impl AmazonBraketProvider {
name: "braket_rigetti_aspen_m3".to_string(),
provider: ProviderType::AmazonBraket,
num_qubits: 79,
- basis_gates: vec![
- "rx".into(),
- "rz".into(),
- "cz".into(),
- "measure".into(),
- ],
+ basis_gates: vec!["rx".into(), "rz".into(), "cz".into(), "measure".into()],
coupling_map: linear_coupling_map(79),
max_shots: 100_000,
status: DeviceStatus::Online,
@@ -932,7 +930,11 @@ impl HardwareProvider for AmazonBraketProvider {
.available_devices()
.into_iter()
.find(|d| d.name == device)?;
- Some(synthetic_calibration(device, dev.num_qubits, &dev.coupling_map))
+ Some(synthetic_calibration(
+ device,
+ dev.num_qubits,
+ &dev.coupling_map,
+ ))
}
fn submit_circuit(
@@ -1104,8 +1106,7 @@ mod tests {
#[test]
fn hardware_error_is_error_trait() {
- let e: Box<dyn std::error::Error> =
- Box::new(HardwareError::NetworkError("test".into()));
+ let e: Box<dyn std::error::Error> = Box::new(HardwareError::NetworkError("test".into()));
assert!(e.to_string().contains("network error"));
}
diff --git a/crates/ruqu-core/src/lib.rs b/crates/ruqu-core/src/lib.rs
index c2600ed60..8d3826d30 100644
--- a/crates/ruqu-core/src/lib.rs
+++ b/crates/ruqu-core/src/lib.rs
@@ -19,54 +19,54 @@
//! ```
// -- Core simulation layer --
-pub mod types;
+pub mod backend;
+pub mod circuit;
+pub mod circuit_analyzer;
pub mod error;
pub mod gate;
-pub mod state;
pub mod mixed_precision;
-pub mod circuit;
-pub mod simulator;
pub mod optimizer;
pub mod simd;
-pub mod backend;
-pub mod circuit_analyzer;
+pub mod simulator;
pub mod stabilizer;
+pub mod state;
pub mod tensor_network;
+pub mod types;
// -- Scientific instrument layer (ADR-QE-015) --
-pub mod qasm;
-pub mod noise;
-pub mod mitigation;
+pub mod confidence;
pub mod hardware;
-pub mod transpiler;
+pub mod mitigation;
+pub mod noise;
+pub mod qasm;
pub mod replay;
-pub mod witness;
-pub mod confidence;
+pub mod transpiler;
pub mod verification;
+pub mod witness;
// -- SOTA differentiation layer --
-pub mod planner;
pub mod clifford_t;
pub mod decomposition;
pub mod pipeline;
+pub mod planner;
// -- QEC control plane --
+pub mod control_theory;
pub mod decoder;
-pub mod subpoly_decoder;
pub mod qec_scheduler;
-pub mod control_theory;
+pub mod subpoly_decoder;
// -- Benchmark & proof suite --
pub mod benchmark;
/// Re-exports of the most commonly used items.
pub mod prelude {
- pub use crate::types::*;
+ pub use crate::backend::BackendType;
+ pub use crate::circuit::QuantumCircuit;
pub use crate::error::{QuantumError, Result};
pub use crate::gate::Gate;
- pub use crate::state::QuantumState;
- pub use crate::circuit::QuantumCircuit;
- pub use crate::simulator::{SimConfig, SimulationResult, Simulator, ShotResult};
pub use crate::qasm::to_qasm3;
- pub use crate::backend::BackendType;
+ pub use crate::simulator::{ShotResult, SimConfig, SimulationResult, Simulator};
+ pub use crate::state::QuantumState;
+ pub use crate::types::*;
}
diff --git a/crates/ruqu-core/src/mitigation.rs b/crates/ruqu-core/src/mitigation.rs
index fb498bf2b..1af9aae0c 100644
--- a/crates/ruqu-core/src/mitigation.rs
+++ b/crates/ruqu-core/src/mitigation.rs
@@ -201,7 +201,10 @@ pub fn polynomial_extrapolate(noise_factors: &[f64], values: &[f64], degree: usi
);
let n = noise_factors.len();
let p = degree + 1; // number of coefficients
- assert!(n >= p, "need at least degree+1 data points for a degree-{degree} polynomial");
+ assert!(
+ n >= p,
+ "need at least degree+1 data points for a degree-{degree} polynomial"
+ );
// Build the Vandermonde matrix A (n x p) where A[i][j] = x_i^j.
// Then solve A^T A c = A^T y via normal equations.
@@ -332,12 +335,7 @@ impl MeasurementCorrector {
// Build per-qubit 2x2 matrices.
let qubit_matrices: Vec<[[f64; 2]; 2]> = readout_errors
.iter()
- .map(|&(p01, p10)| {
- [
- [1.0 - p01, p10],
- [p01, 1.0 - p10],
- ]
- })
+ .map(|&(p01, p10)| [[1.0 - p01, p10], [p01, 1.0 - p10]])
.collect();
// Tensor product to build the full dim x dim matrix.
@@ -369,10 +367,7 @@ impl MeasurementCorrector {
///
/// Returns corrected counts as floating-point values since the inverse
/// may produce non-integer results.
- pub fn correct_counts(
- &self,
- counts: &HashMap<Vec<bool>, usize>,
- ) -> HashMap<Vec<bool>, f64> {
+ pub fn correct_counts(&self, counts: &HashMap<Vec<bool>, usize>) -> HashMap<Vec<bool>, f64> {
let dim = 1usize << self.num_qubits;
// Build the probability vector from counts.
@@ -458,8 +453,14 @@ impl MeasurementCorrector {
let i1 = 1usize << qubit;
[
- [self.calibration_matrix[i0][i0], self.calibration_matrix[i0][i1]],
- [self.calibration_matrix[i1][i0], self.calibration_matrix[i1][i1]],
+ [
+ self.calibration_matrix[i0][i0],
+ self.calibration_matrix[i0][i1],
+ ],
+ [
+ self.calibration_matrix[i1][i0],
+ self.calibration_matrix[i1][i1],
+ ],
]
}
}
@@ -545,9 +546,7 @@ fn invert_matrix(mat: &[Vec]) -> Vec> {
}
// Extract the right half as the inverse.
- aug.iter()
- .map(|row| row[n..].to_vec())
- .collect()
+ aug.iter().map(|row| row[n..].to_vec()).collect()
}
/// Multiply a matrix by a vector.
@@ -671,7 +670,11 @@ pub fn cdr_correct(noisy_values: &[f64], ideal_values: &[f64], target_noisy: f64
let sum_x: f64 = noisy_values.iter().sum();
let sum_y: f64 = ideal_values.iter().sum();
- let sum_xy: f64 = noisy_values.iter().zip(ideal_values.iter()).map(|(x, y)| x * y).sum();
+ let sum_xy: f64 = noisy_values
+ .iter()
+ .zip(ideal_values.iter())
+ .map(|(x, y)| x * y)
+ .sum();
let sum_x2: f64 = noisy_values.iter().map(|x| x * x).sum();
let n_f64 = n as f64;
@@ -761,10 +764,7 @@ mod tests {
fn test_richardson_cubic() {
// f(x) = x^3 - x + 1 => f(0) = 1
let noise_factors = vec![1.0, 1.5, 2.0, 3.0];
- let values: Vec<f64> = noise_factors
- .iter()
- .map(|&x| x * x * x - x + 1.0)
- .collect();
+ let values: Vec<f64> = noise_factors.iter().map(|&x| x * x * x - x + 1.0).collect();
let result = richardson_extrapolate(&noise_factors, &values);
assert!(
(result - 1.0).abs() < 1e-9,
@@ -843,7 +843,11 @@ mod tests {
let folded = fold_circuit(&circuit, 3.0);
// 2 unitary gates * factor 3 = 6 gate slots.
- let unitary_count = folded.gates().iter().filter(|g| !g.is_non_unitary()).count();
+ let unitary_count = folded
+ .gates()
+ .iter()
+ .filter(|g| !g.is_non_unitary())
+ .count();
assert_eq!(
unitary_count, 6,
"fold factor=3 on 2-gate circuit: expected 6 unitary gates, got {unitary_count}"
@@ -864,12 +868,13 @@ mod tests {
.iter()
.filter(|g| matches!(g, Gate::Measure(_)))
.count();
- assert_eq!(
- measure_count, 1,
- "measurements should not be folded"
- );
+ assert_eq!(measure_count, 1, "measurements should not be folded");
- let unitary_count = folded.gates().iter().filter(|g| !g.is_non_unitary()).count();
+ let unitary_count = folded
+ .gates()
+ .iter()
+ .filter(|g| !g.is_non_unitary())
+ .count();
assert_eq!(
unitary_count, 3,
"1 H gate folded at factor 3 => 3 unitary gates"
@@ -888,7 +893,11 @@ mod tests {
circuit.z(0);
let folded = fold_circuit(&circuit, 1.5);
- let unitary_count = folded.gates().iter().filter(|g| !g.is_non_unitary()).count();
+ let unitary_count = folded
+ .gates()
+ .iter()
+ .filter(|g| !g.is_non_unitary())
+ .count();
assert_eq!(
unitary_count, 6,
"fold factor=1.5 on 4-gate circuit: expected 6 unitary gates, got {unitary_count}"
@@ -1172,20 +1181,14 @@ mod tests {
(exp0 - (-0.2)).abs() < 1e-12,
"qubit 0: expected -0.2, got {exp0}"
);
- assert!(
- exp1.abs() < 1e-12,
- "qubit 1: expected 0.0, got {exp1}"
- );
+ assert!(exp1.abs() < 1e-12, "qubit 1: expected 0.0, got {exp1}");
}
#[test]
fn test_expectation_empty_counts() {
let counts: HashMap<Vec<bool>, usize> = HashMap::new();
let exp = expectation_from_counts(&counts, 0);
- assert!(
- exp.abs() < 1e-12,
- "empty counts should give 0.0, got {exp}"
- );
+ assert!(exp.abs() < 1e-12, "empty counts should give 0.0, got {exp}");
}
// ---- Gate dagger correctness ----------------------------------------
@@ -1243,11 +1246,7 @@ mod tests {
let product = mat_mul_2x2(&m, &m_dag);
for i in 0..2 {
for j in 0..2 {
- let expected = if i == j {
- Complex::ONE
- } else {
- Complex::ZERO
- };
+ let expected = if i == j { Complex::ONE } else { Complex::ZERO };
let diff = (product[i][j] - expected).norm();
assert!(
diff < 1e-12,
@@ -1258,10 +1257,7 @@ mod tests {
}
/// Helper: multiply two 2x2 complex matrices.
- fn mat_mul_2x2(
- a: &[[Complex; 2]; 2],
- b: &[[Complex; 2]; 2],
- ) -> [[Complex; 2]; 2] {
+ fn mat_mul_2x2(a: &[[Complex; 2]; 2], b: &[[Complex; 2]; 2]) -> [[Complex; 2]; 2] {
let mut result = [[Complex::ZERO; 2]; 2];
for i in 0..2 {
for j in 0..2 {
diff --git a/crates/ruqu-core/src/mixed_precision.rs b/crates/ruqu-core/src/mixed_precision.rs
index 5bd9eb838..b38c18f6b 100644
--- a/crates/ruqu-core/src/mixed_precision.rs
+++ b/crates/ruqu-core/src/mixed_precision.rs
@@ -301,10 +301,7 @@ impl QuantumStateF32 {
/// Probabilities are returned as f64 for downstream accuracy: the f32
/// norm-squared values are widened before being returned.
pub fn probabilities(&self) -> Vec<f64> {
- self.amplitudes
- .iter()
- .map(|a| a.norm_sq() as f64)
- .collect()
+ self.amplitudes.iter().map(|a| a.norm_sq() as f64).collect()
}
/// Estimated memory in bytes for an f32 state of `num_qubits` qubits.
@@ -356,10 +353,7 @@ impl QuantumStateF32 {
}
// Two-qubit gates
- Gate::CNOT(q1, q2)
- | Gate::CZ(q1, q2)
- | Gate::SWAP(q1, q2)
- | Gate::Rzz(q1, q2, _) => {
+ Gate::CNOT(q1, q2) | Gate::CZ(q1, q2) | Gate::SWAP(q1, q2) | Gate::Rzz(q1, q2, _) => {
if q1 == q2 {
return Err(QuantumError::CircuitError(format!(
"two-qubit gate requires distinct qubits, got {} and {}",
@@ -399,11 +393,7 @@ impl QuantumStateF32 {
///
/// For each pair of amplitudes where the qubit bit is 0 (index `i`)
/// versus 1 (index `j = i + step`), the matrix transformation is applied.
- pub fn apply_single_qubit_gate(
- &mut self,
- qubit: QubitIndex,
- matrix: &[[Complex32; 2]; 2],
- ) {
+ pub fn apply_single_qubit_gate(&mut self, qubit: QubitIndex, matrix: &[[Complex32; 2]; 2]) {
let step = 1usize << qubit;
let n = self.amplitudes.len();
diff --git a/crates/ruqu-core/src/noise.rs b/crates/ruqu-core/src/noise.rs
index bfb875658..ee349567f 100644
--- a/crates/ruqu-core/src/noise.rs
+++ b/crates/ruqu-core/src/noise.rs
@@ -94,18 +94,10 @@ impl EnhancedNoiseModel {
let idx = qubit as usize;
// Gate error rate becomes the depolarizing rate.
- let depolarizing_rate = cal
- .gate_errors
- .get(gate_name)
- .copied()
- .unwrap_or(0.0);
+ let depolarizing_rate = cal.gate_errors.get(gate_name).copied().unwrap_or(0.0);
// Gate duration (needed for thermal relaxation conversion).
- let gate_time = cal
- .gate_times
- .get(gate_name)
- .copied()
- .unwrap_or(0.0);
+ let gate_time = cal.gate_times.get(gate_name).copied().unwrap_or(0.0);
// T1 and T2 values for this qubit.
let t1 = cal.qubit_t1.get(idx).copied().unwrap_or(f64::INFINITY);
@@ -138,11 +130,7 @@ impl EnhancedNoiseModel {
// Thermal relaxation if we have valid T1, T2, gate_time.
let thermal_relaxation =
if t1.is_finite() && t2.is_finite() && t1 > 0.0 && t2 > 0.0 && gate_time > 0.0 {
- Some(ThermalRelaxation {
- t1,
- t2,
- gate_time,
- })
+ Some(ThermalRelaxation { t1, t2, gate_time })
} else {
None
};
@@ -164,10 +152,7 @@ impl EnhancedNoiseModel {
// ---------------------------------------------------------------------------
/// Identity matrix as a 2x2 complex array.
-const IDENTITY: [[Complex; 2]; 2] = [
- [Complex::ONE, Complex::ZERO],
- [Complex::ZERO, Complex::ONE],
-];
+const IDENTITY: [[Complex; 2]; 2] = [[Complex::ONE, Complex::ZERO], [Complex::ZERO, Complex::ONE]];
/// Depolarizing channel Kraus operators.
///
@@ -185,16 +170,10 @@ pub fn depolarizing_kraus(p: f64) -> Vec<[[Complex; 2]; 2]> {
let c = |v: f64| Complex::new(v, 0.0);
// K0 = sqrt(1-p) * I
- let k0 = [
- [c(s0), Complex::ZERO],
- [Complex::ZERO, c(s0)],
- ];
+ let k0 = [[c(s0), Complex::ZERO], [Complex::ZERO, c(s0)]];
// K1 = sqrt(p/3) * X
- let k1 = [
- [Complex::ZERO, c(sp)],
- [c(sp), Complex::ZERO],
- ];
+ let k1 = [[Complex::ZERO, c(sp)], [c(sp), Complex::ZERO]];
// K2 = sqrt(p/3) * Y = sqrt(p/3) * [[0, -i],[i, 0]]
let k2 = [
@@ -203,10 +182,7 @@ pub fn depolarizing_kraus(p: f64) -> Vec<[[Complex; 2]; 2]> {
];
// K3 = sqrt(p/3) * Z
- let k3 = [
- [c(sp), Complex::ZERO],
- [Complex::ZERO, c(-sp)],
- ];
+ let k3 = [[c(sp), Complex::ZERO], [Complex::ZERO, c(-sp)]];
vec![k0, k1, k2, k3]
}
@@ -224,15 +200,9 @@ pub fn amplitude_damping_kraus(gamma: f64) -> Vec<[[Complex; 2]; 2]> {
let c = |v: f64| Complex::new(v, 0.0);
- let k0 = [
- [Complex::ONE, Complex::ZERO],
- [Complex::ZERO, c(s1g)],
- ];
+ let k0 = [[Complex::ONE, Complex::ZERO], [Complex::ZERO, c(s1g)]];
- let k1 = [
- [Complex::ZERO, c(sg)],
- [Complex::ZERO, Complex::ZERO],
- ];
+ let k1 = [[Complex::ZERO, c(sg)], [Complex::ZERO, Complex::ZERO]];
vec![k0, k1]
}
@@ -250,15 +220,9 @@ pub fn phase_damping_kraus(lambda: f64) -> Vec<[[Complex; 2]; 2]> {
let c = |v: f64| Complex::new(v, 0.0);
- let k0 = [
- [Complex::ONE, Complex::ZERO],
- [Complex::ZERO, c(s1l)],
- ];
+ let k0 = [[Complex::ONE, Complex::ZERO], [Complex::ZERO, c(s1l)]];
- let k1 = [
- [Complex::ZERO, Complex::ZERO],
- [Complex::ZERO, c(sl)],
- ];
+ let k1 = [[Complex::ZERO, Complex::ZERO], [Complex::ZERO, c(sl)]];
vec![k0, k1]
}
@@ -377,15 +341,9 @@ impl ReadoutCorrector {
///
/// Returns floating-point corrected counts (may be non-integer due to the
/// linear algebra involved). Negative corrected values are clamped to zero.
- pub fn correct_counts(
- &self,
- counts: &HashMap<Vec<bool>, usize>,
- ) -> HashMap<Vec<bool>, f64> {
+ pub fn correct_counts(&self, counts: &HashMap<Vec<bool>, usize>) -> HashMap<Vec<bool>, f64> {
if self.num_qubits == 0 {
- return counts
- .iter()
- .map(|(k, &v)| (k.clone(), v as f64))
- .collect();
+ return counts.iter().map(|(k, &v)| (k.clone(), v as f64)).collect();
}
if self.num_qubits <= 12 {
@@ -396,10 +354,7 @@ impl ReadoutCorrector {
}
/// Full confusion-matrix inversion for small qubit counts.
- fn correct_full_matrix(
- &self,
- counts: &HashMap<Vec<bool>, usize>,
- ) -> HashMap<Vec<bool>, f64> {
+ fn correct_full_matrix(&self, counts: &HashMap<Vec<bool>, usize>) -> HashMap<Vec<bool>, f64> {
let n = self.num_qubits;
let dim = 1usize << n;
@@ -447,10 +402,8 @@ impl ReadoutCorrector {
.collect();
// Start with raw counts as floats.
- let mut corrected: HashMap<Vec<bool>, f64> = counts
- .iter()
- .map(|(k, &v)| (k.clone(), v as f64))
- .collect();
+ let mut corrected: HashMap<Vec<bool>, f64> =
+ counts.iter().map(|(k, &v)| (k.clone(), v as f64)).collect();
// Apply each qubit's inverse confusion matrix independently.
// For each qubit q, we group bitstrings by all bits except q,
@@ -461,7 +414,8 @@ impl ReadoutCorrector {
// Collect all unique bitstrings that appear, paired by qubit q.
let keys: Vec<Vec<bool>> = corrected.keys().cloned().collect();
- let mut processed: std::collections::HashSet<Vec<bool>> = std::collections::HashSet::new();
+ let mut processed: std::collections::HashSet<Vec<bool>> =
+ std::collections::HashSet::new();
for bits in &keys {
if processed.contains(bits) {
@@ -538,10 +492,7 @@ impl ReadoutCorrector {
// ---------------------------------------------------------------------------
/// Multiply two 2x2 complex matrices.
-fn mat_mul_2x2(
- a: &[[Complex; 2]; 2],
- b: &[[Complex; 2]; 2],
-) -> [[Complex; 2]; 2] {
+fn mat_mul_2x2(a: &[[Complex; 2]; 2], b: &[[Complex; 2]; 2]) -> [[Complex; 2]; 2] {
[
[
a[0][0] * b[0][0] + a[0][1] * b[1][0],
@@ -606,10 +557,7 @@ fn invert_2x2_confusion(p01: f64, p10: f64) -> [[f64; 2]; 2] {
}
let inv_det = 1.0 / det;
- [
- [d * inv_det, -b * inv_det],
- [-c * inv_det, a * inv_det],
- ]
+ [[d * inv_det, -b * inv_det], [-c * inv_det, a * inv_det]]
}
// ---------------------------------------------------------------------------
@@ -812,8 +760,14 @@ mod tests {
ops[1][0][0] * state_one[0] + ops[1][0][1] * state_one[1],
ops[1][1][0] * state_one[0] + ops[1][1][1] * state_one[1],
];
- assert!((k1_on_one[0].re - 1.0).abs() < 1e-14, "Expected |0> component = 1.0");
- assert!(k1_on_one[1].norm_sq() < 1e-28, "Expected |1> component = 0.0");
+ assert!(
+ (k1_on_one[0].re - 1.0).abs() < 1e-14,
+ "Expected |0> component = 1.0"
+ );
+ assert!(
+ k1_on_one[1].norm_sq() < 1e-28,
+ "Expected |1> component = 0.0"
+ );
}
// -------------------------------------------------------------------
@@ -850,11 +804,11 @@ mod tests {
#[test]
fn thermal_relaxation_kraus_trace_preserving() {
let test_cases = [
- (50.0, 30.0, 0.05), // typical: T2 < T1
- (50.0, 50.0, 0.05), // T2 == T1
- (50.0, 100.0, 0.05), // T2 > T1 (clamped to 2*T1)
- (100.0, 80.0, 1.0), // longer gate time
- (50.0, 30.0, 0.001), // very short gate
+ (50.0, 30.0, 0.05), // typical: T2 < T1
+ (50.0, 50.0, 0.05), // T2 == T1
+ (50.0, 100.0, 0.05), // T2 > T1 (clamped to 2*T1)
+ (100.0, 80.0, 1.0), // longer gate time
+ (50.0, 30.0, 0.001), // very short gate
];
for &(t1, t2, gt) in &test_cases {
let ops = thermal_relaxation_kraus(t1, t2, gt);
@@ -970,16 +924,8 @@ mod tests {
let c0 = corrected.get(&vec![false]).copied().unwrap_or(0.0);
let c1 = corrected.get(&vec![true]).copied().unwrap_or(0.0);
- assert!(
- (c0 - 700.0).abs() < 1.0,
- "Expected ~700, got {}",
- c0
- );
- assert!(
- (c1 - 300.0).abs() < 1.0,
- "Expected ~300, got {}",
- c1
- );
+ assert!((c0 - 700.0).abs() < 1.0, "Expected ~700, got {}", c0);
+ assert!((c1 - 300.0).abs() < 1.0, "Expected ~300, got {}", c1);
}
#[test]
@@ -1001,11 +947,7 @@ mod tests {
let c00 = corrected.get(&vec![false, false]).copied().unwrap_or(0.0);
// The corrected count for |00> should be close to 1000.
- assert!(
- (c00 - 1000.0).abs() < 10.0,
- "Expected ~1000, got {}",
- c00
- );
+ assert!((c00 - 1000.0).abs() < 10.0, "Expected ~1000, got {}", c00);
}
// -------------------------------------------------------------------
diff --git a/crates/ruqu-core/src/pipeline.rs b/crates/ruqu-core/src/pipeline.rs
index 73d854402..3581a5eb2 100644
--- a/crates/ruqu-core/src/pipeline.rs
+++ b/crates/ruqu-core/src/pipeline.rs
@@ -21,9 +21,7 @@ use std::collections::HashMap;
use crate::backend::BackendType;
use crate::circuit::QuantumCircuit;
-use crate::decomposition::{
- decompose, stitch_results, CircuitPartition, DecompositionStrategy,
-};
+use crate::decomposition::{decompose, stitch_results, CircuitPartition, DecompositionStrategy};
use crate::error::Result;
use crate::planner::{plan_execution, ExecutionPlan, PlannerConfig};
use crate::simulator::Simulator;
@@ -123,10 +121,7 @@ impl Pipeline {
/// 3. Execute: run each segment on its assigned backend.
/// 4. Stitch: combine segment results into a joint distribution.
/// 5. Verify: optionally cross-check against a reference backend.
- pub fn execute(
- circuit: &QuantumCircuit,
- config: &PipelineConfig,
- ) -> Result {
+ pub fn execute(circuit: &QuantumCircuit, config: &PipelineConfig) -> Result {
// Step 1: Plan
let plan = plan_execution(circuit, &config.planner);
@@ -135,17 +130,12 @@ impl Pipeline {
let decomposition = DecompositionSummary {
num_segments: partition.segments.len(),
strategy: partition.strategy,
- backends: partition
- .segments
- .iter()
- .map(|s| s.backend)
- .collect(),
+ backends: partition.segments.iter().map(|s| s.backend).collect(),
};
// Step 3: Execute each segment
let mut segment_results = Vec::new();
- let mut all_segment_distributions: Vec<Vec<(Vec<bool>, f64)>> =
- Vec::new();
+ let mut all_segment_distributions: Vec<Vec<(Vec<bool>, f64)>> = Vec::new();
for (idx, segment) in partition.segments.iter().enumerate() {
let shot_seed = config.seed.wrapping_add(idx as u64);
@@ -153,11 +143,8 @@ impl Pipeline {
// Use the multi-shot simulator for each segment.
// The simulator always uses the state-vector backend internally,
// which is correct for segments that fit within max_segment_qubits.
- let shot_result = Simulator::run_shots(
- &segment.circuit,
- config.shots,
- Some(shot_seed),
- )?;
+ let shot_result =
+ Simulator::run_shots(&segment.circuit, config.shots, Some(shot_seed))?;
// Convert the histogram counts to a probability distribution.
let dist = counts_to_distribution(&shot_result.counts);
@@ -177,24 +164,19 @@ impl Pipeline {
// pairs, grouped by segment. Segments are distinguished by
// consecutive runs of equal-length bitstrings (see decomposition.rs).
let flat_partitions: Vec<(Vec<bool>, f64)> =
- all_segment_distributions
- .into_iter()
- .flatten()
- .collect();
+ all_segment_distributions.into_iter().flatten().collect();
let distribution = stitch_results(&flat_partitions);
let total_probability: f64 = distribution.values().sum();
// Step 5: Estimate fidelity
- let estimated_fidelity =
- estimate_pipeline_fidelity(&segment_results, &partition);
+ let estimated_fidelity = estimate_pipeline_fidelity(&segment_results, &partition);
// Step 6: Verify (optional)
- let verification =
- if config.verify && circuit.num_qubits() <= 25 {
- Some(verify_circuit(circuit, config.shots, config.seed))
- } else {
- None
- };
+ let verification = if config.verify && circuit.num_qubits() <= 25 {
+ Some(verify_circuit(circuit, config.shots, config.seed))
+ } else {
+ None
+ };
Ok(PipelineResult {
plan,
@@ -232,9 +214,7 @@ fn resolve_backend(backend: BackendType) -> BackendType {
///
/// Each entry in the returned vector is `(bitstring, probability)`, sorted
/// in descending order of probability.
-fn counts_to_distribution(
- counts: &HashMap<Vec<bool>, usize>,
-) -> Vec<(Vec<bool>, f64)> {
+fn counts_to_distribution(counts: &HashMap<Vec<bool>, usize>) -> Vec<(Vec<bool>, f64)> {
let total: usize = counts.values().sum();
if total == 0 {
return Vec::new();
@@ -247,9 +227,7 @@ fn counts_to_distribution(
.collect();
// Sort by probability descending for deterministic output.
- dist.sort_by(|a, b| {
- b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal)
- });
+ dist.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
dist
}
@@ -258,10 +236,7 @@ fn counts_to_distribution(
/// For a single segment (no decomposition), fidelity is 1.0.
/// For multiple segments, fidelity degrades based on the number of
/// cross-segment cuts and the entanglement that was severed.
-fn estimate_pipeline_fidelity(
- segments: &[SegmentResult],
- partition: &CircuitPartition,
-) -> f64 {
+fn estimate_pipeline_fidelity(segments: &[SegmentResult], partition: &CircuitPartition) -> f64 {
if segments.len() <= 1 {
return 1.0;
}
@@ -381,16 +356,8 @@ mod tests {
// GHZ state should have ~50% |00000> and ~50% |11111>.
let all_false = vec![false; 5];
let all_true = vec![true; 5];
- let p_all_false = result
- .distribution
- .get(&all_false)
- .copied()
- .unwrap_or(0.0);
- let p_all_true = result
- .distribution
- .get(&all_true)
- .copied()
- .unwrap_or(0.0);
+ let p_all_false = result.distribution.get(&all_false).copied().unwrap_or(0.0);
+ let p_all_true = result.distribution.get(&all_true).copied().unwrap_or(0.0);
assert!(
p_all_false > 0.3,
"GHZ should have significant |00000>, got {}",
@@ -432,10 +399,7 @@ mod tests {
#[test]
fn test_resolve_backend() {
- assert_eq!(
- resolve_backend(BackendType::Auto),
- BackendType::StateVector
- );
+ assert_eq!(resolve_backend(BackendType::Auto), BackendType::StateVector);
assert_eq!(
resolve_backend(BackendType::StateVector),
BackendType::StateVector
@@ -467,10 +431,7 @@ mod tests {
total_qubits: 5,
strategy: DecompositionStrategy::None,
};
- assert_eq!(
- estimate_pipeline_fidelity(&segments, &partition),
- 1.0
- );
+ assert_eq!(estimate_pipeline_fidelity(&segments, &partition), 1.0);
}
#[test]
@@ -480,19 +441,13 @@ mod tests {
index: 0,
backend: BackendType::StateVector,
num_qubits: 2,
- distribution: vec![
- (vec![false, false], 0.5),
- (vec![true, true], 0.5),
- ],
+ distribution: vec![(vec![false, false], 0.5), (vec![true, true], 0.5)],
},
SegmentResult {
index: 1,
backend: BackendType::StateVector,
num_qubits: 2,
- distribution: vec![
- (vec![false, false], 0.5),
- (vec![true, true], 0.5),
- ],
+ distribution: vec![(vec![false, false], 0.5), (vec![true, true], 0.5)],
},
];
let partition = CircuitPartition {
diff --git a/crates/ruqu-core/src/planner.rs b/crates/ruqu-core/src/planner.rs
index 2774f82fa..892017617 100644
--- a/crates/ruqu-core/src/planner.rs
+++ b/crates/ruqu-core/src/planner.rs
@@ -237,7 +237,11 @@ pub fn plan_execution(circuit: &QuantumCircuit, config: &PlannerConfig) -> Execu
// Evaluate CliffordT backend.
let t_count = analysis.non_clifford_gates;
let ct_viable = t_count > 0 && t_count <= CT_MAX_T_COUNT && num_qubits > 32;
- let ct_terms = if ct_viable { 1u64.checked_shl(t_count as u32).unwrap_or(u64::MAX) } else { u64::MAX };
+ let ct_terms = if ct_viable {
+ 1u64.checked_shl(t_count as u32).unwrap_or(u64::MAX)
+ } else {
+ u64::MAX
+ };
let ct_memory = predict_memory_clifford_t(num_qubits, ct_terms);
let ct_runtime = predict_runtime_clifford_t(num_qubits, total_gates, ct_terms);
@@ -394,9 +398,7 @@ fn predict_memory_stabilizer(num_qubits: u32) -> u64 {
fn predict_memory_tensor_network(num_qubits: u32, chi: u32) -> u64 {
let n = num_qubits as u64;
let c = chi as u64;
- n.saturating_mul(c)
- .saturating_mul(c)
- .saturating_mul(16)
+ n.saturating_mul(c).saturating_mul(c).saturating_mul(16)
}
// ---------------------------------------------------------------------------
@@ -509,10 +511,7 @@ fn select_optimal_backend(
}
// Rule 2: Mostly Clifford with very few non-Clifford on large circuits.
- if analysis.clifford_fraction >= 0.95
- && n > 32
- && analysis.non_clifford_gates <= 10
- {
+ if analysis.clifford_fraction >= 0.95 && n > 32 && analysis.non_clifford_gates <= 10 {
return (
BackendType::Stabilizer,
stab_memory,
@@ -647,9 +646,7 @@ fn select_verification_policy(
// Small enough to cross-check with state vector.
return VerificationPolicy::DownscaledStateVector(num_qubits);
}
- return VerificationPolicy::StatisticalSampling(
- (num_qubits / 2).max(5).min(50),
- );
+ return VerificationPolicy::StatisticalSampling((num_qubits / 2).max(5).min(50));
}
VerificationPolicy::None
@@ -737,12 +734,11 @@ fn compute_cost_breakdown(
MitigationStrategy::None => 1.0,
MitigationStrategy::MeasurementCorrectionOnly => 1.1, // slight overhead
MitigationStrategy::ZneWithScales(scales) => scales.len() as f64,
- MitigationStrategy::ZnePlusMeasurementCorrection(scales) => {
- scales.len() as f64 * 1.1
- }
- MitigationStrategy::Full { zne_scales, cdr_circuits } => {
- zne_scales.len() as f64 + *cdr_circuits as f64 * 0.5
- }
+ MitigationStrategy::ZnePlusMeasurementCorrection(scales) => scales.len() as f64 * 1.1,
+ MitigationStrategy::Full {
+ zne_scales,
+ cdr_circuits,
+ } => zne_scales.len() as f64 + *cdr_circuits as f64 * 0.5,
};
// Verification overhead multiplier.
@@ -750,16 +746,13 @@ fn compute_cost_breakdown(
VerificationPolicy::None => 1.0,
VerificationPolicy::ExactCliffordCheck => 1.05, // cheap stabilizer check
VerificationPolicy::DownscaledStateVector(_) => 1.1,
- VerificationPolicy::StatisticalSampling(n) => {
- 1.0 + (*n as f64) * 0.01
- }
+ VerificationPolicy::StatisticalSampling(n) => 1.0 + (*n as f64) * 0.01,
};
// Total shots: base shots * mitigation overhead.
// Base shots from precision: 1 / precision^2 (Hoeffding bound).
let base_shots = (1.0 / (target_precision * target_precision)).ceil() as u32;
- let mitigated_shots =
- (base_shots as f64 * mitigation_overhead).ceil() as u32;
+ let mitigated_shots = (base_shots as f64 * mitigation_overhead).ceil() as u32;
let total_shots_needed = mitigated_shots.min(shot_budget);
CostBreakdown {
@@ -976,10 +969,7 @@ mod tests {
"ZNE scales must include the baseline 1.0"
);
}
- other => panic!(
- "Expected ZneWithScales for noise=0.05, got {:?}",
- other
- ),
+ other => panic!("Expected ZneWithScales for noise=0.05, got {:?}", other),
}
assert!(
@@ -1181,10 +1171,7 @@ mod tests {
assert!(zne_scales.len() >= 3);
assert!(*cdr_circuits >= 2);
}
- other => panic!(
- "Expected Full mitigation for noise=0.7, got {:?}",
- other
- ),
+ other => panic!("Expected Full mitigation for noise=0.7, got {:?}", other),
}
}
@@ -1324,7 +1311,10 @@ mod tests {
let analysis = make_analysis(5, 10, 0.5);
let strat = select_mitigation_strategy(Some(0.7), 100_000, &analysis);
match strat {
- MitigationStrategy::Full { zne_scales, cdr_circuits } => {
+ MitigationStrategy::Full {
+ zne_scales,
+ cdr_circuits,
+ } => {
assert!(zne_scales.len() >= 3);
assert!(cdr_circuits >= 2);
}
@@ -1339,41 +1329,26 @@ mod tests {
#[test]
fn test_verification_clifford_check() {
let analysis = make_analysis(10, 50, 1.0);
- let policy = select_verification_policy(
- &analysis,
- BackendType::Stabilizer,
- 10,
- );
+ let policy = select_verification_policy(&analysis, BackendType::Stabilizer, 10);
assert_eq!(policy, VerificationPolicy::ExactCliffordCheck);
}
#[test]
fn test_verification_none_for_small_sv() {
let analysis = make_analysis(5, 10, 0.5);
- let policy = select_verification_policy(
- &analysis,
- BackendType::StateVector,
- 5,
- );
+ let policy = select_verification_policy(&analysis, BackendType::StateVector, 5);
assert_eq!(policy, VerificationPolicy::None);
}
#[test]
fn test_verification_statistical_for_tn() {
let analysis = make_analysis(50, 100, 0.5);
- let policy = select_verification_policy(
- &analysis,
- BackendType::TensorNetwork,
- 50,
- );
+ let policy = select_verification_policy(&analysis, BackendType::TensorNetwork, 50);
match policy {
VerificationPolicy::StatisticalSampling(n) => {
assert!(n >= 5, "Should sample at least 5 observables");
}
- other => panic!(
- "Expected StatisticalSampling for TN, got {:?}",
- other
- ),
+ other => panic!("Expected StatisticalSampling for TN, got {:?}", other),
}
}
@@ -1426,8 +1401,7 @@ mod tests {
total_gates: usize,
clifford_fraction: f64,
) -> CircuitAnalysis {
- let clifford_gates =
- (total_gates as f64 * clifford_fraction).round() as usize;
+ let clifford_gates = (total_gates as f64 * clifford_fraction).round() as usize;
let non_clifford_gates = total_gates - clifford_gates;
CircuitAnalysis {
diff --git a/crates/ruqu-core/src/qasm.rs b/crates/ruqu-core/src/qasm.rs
index 0c243a785..cffebac8d 100644
--- a/crates/ruqu-core/src/qasm.rs
+++ b/crates/ruqu-core/src/qasm.rs
@@ -537,7 +537,11 @@ mod tests {
// Extract the three angles from U(theta, phi, lambda)
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
- assert!(theta.abs() < 1e-10, "Identity theta should be ~0, got {}", theta);
+ assert!(
+ theta.abs() < 1e-10,
+ "Identity theta should be ~0, got {}",
+ theta
+ );
// For identity, phi + lambda should be ~0 (mod 2*pi)
let sum = phi + lambda;
let sum_mod = ((sum % (2.0 * PI)) + 2.0 * PI) % (2.0 * PI);
@@ -606,7 +610,11 @@ mod tests {
let (theta, phi, lambda) = extract_u_angles(&lines[0]);
// S is diagonal, so theta should be ~0
- assert!(theta.abs() < 1e-10, "S gate theta should be ~0, got {}", theta);
+ assert!(
+ theta.abs() < 1e-10,
+ "S gate theta should be ~0, got {}",
+ theta
+ );
let reconstructed = reconstruct_zyz(theta, phi, lambda);
assert_unitaries_equal_up_to_phase(&s_matrix, &reconstructed);
@@ -619,14 +627,8 @@ mod tests {
let cos_h = half.cos();
let sin_h = half.sin();
let arb_matrix = [
- [
- Complex::new(cos_h, 0.0),
- Complex::new(0.0, -sin_h),
- ],
- [
- Complex::new(0.0, -sin_h),
- Complex::new(cos_h, 0.0),
- ],
+ [Complex::new(cos_h, 0.0), Complex::new(0.0, -sin_h)],
+ [Complex::new(0.0, -sin_h), Complex::new(cos_h, 0.0)],
];
let mut circuit = QuantumCircuit::new(1);
@@ -704,10 +706,8 @@ mod tests {
);
// Check it uses valid gate/operation keywords
let valid_starts = [
- "h ", "x ", "y ", "z ", "s ", "sdg ", "t ", "tdg ",
- "rx(", "ry(", "rz(", "p(", "rzz(",
- "cx ", "cz ", "swap ",
- "c[", "reset ", "barrier ", "U(",
+ "h ", "x ", "y ", "z ", "s ", "sdg ", "t ", "tdg ", "rx(", "ry(", "rz(", "p(",
+ "rzz(", "cx ", "cz ", "swap ", "c[", "reset ", "barrier ", "U(",
];
assert!(
valid_starts.iter().any(|prefix| line.starts_with(prefix)),
@@ -828,10 +828,7 @@ mod tests {
// Verify it has at least the H gates and measurements
let lines = gate_lines(&qasm);
let h_count = lines.iter().filter(|l| l.starts_with("h ")).count();
- let measure_count = lines
- .iter()
- .filter(|l| l.contains("measure"))
- .count();
+ let measure_count = lines.iter().filter(|l| l.contains("measure")).count();
assert_eq!(h_count, 4);
assert_eq!(measure_count, 4);
}
@@ -845,9 +842,9 @@ mod tests {
let angle_str = &line[open + 1..close];
// Handle the case where there are multiple comma-separated angles (take the first)
let first = angle_str.split(',').next().unwrap().trim();
- first.parse::<f64>().unwrap_or_else(|e| {
- panic!("Failed to parse angle '{}': {}", first, e)
- })
+ first
+ .parse::<f64>()
+ .unwrap_or_else(|e| panic!("Failed to parse angle '{}': {}", first, e))
}
/// Extract (theta, phi, lambda) from a U gate line like `U(t, p, l) q[0];`
@@ -856,7 +853,12 @@ mod tests {
let close = line.find(')').expect("No closing parenthesis");
let inside = &line[open + 1..close];
let parts: Vec<&str> = inside.split(',').map(|s| s.trim()).collect();
- assert_eq!(parts.len(), 3, "U gate should have 3 angles, got: {:?}", parts);
+ assert_eq!(
+ parts.len(),
+ 3,
+ "U gate should have 3 angles, got: {:?}",
+ parts
+ );
let theta: f64 = parts[0].parse().unwrap();
let phi: f64 = parts[1].parse().unwrap();
let lambda: f64 = parts[2].parse().unwrap();
diff --git a/crates/ruqu-core/src/qec_scheduler.rs b/crates/ruqu-core/src/qec_scheduler.rs
index f0145a604..da7301be3 100644
--- a/crates/ruqu-core/src/qec_scheduler.rs
+++ b/crates/ruqu-core/src/qec_scheduler.rs
@@ -330,10 +330,7 @@ fn compute_quantum_depth(rounds: &[QecRound], distance: u32) -> u32 {
if scheduled[i] {
continue;
}
- let conflicts = ext
- .data_qubits
- .iter()
- .any(|q| used_qubits.contains(q))
+ let conflicts = ext.data_qubits.iter().any(|q| used_qubits.contains(q))
|| used_qubits.contains(&ext.ancilla_qubit);
if !conflicts {
@@ -490,9 +487,7 @@ fn merge_rounds(rounds: &[QecRound]) -> Vec<QecRound> {
current
.syndrome_extractions
.extend(next.syndrome_extractions.iter().cloned());
- current
- .corrections
- .extend(next.corrections.iter().cloned());
+ current.corrections.extend(next.corrections.iter().cloned());
current.is_feed_forward = current.is_feed_forward || next.is_feed_forward;
} else {
merged.push(current);
@@ -507,17 +502,17 @@ fn merge_rounds(rounds: &[QecRound]) -> Vec {
/// Check whether two rounds can be safely merged.
fn can_merge_rounds(first: &QecRound, second: &QecRound) -> bool {
// Cannot merge if second round has feed-forward dependencies.
- if second.corrections.iter().any(|c| c.depends_on_round.is_some()) {
+ if second
+ .corrections
+ .iter()
+ .any(|c| c.depends_on_round.is_some())
+ {
return false;
}
// Check for data qubit conflicts between first's corrections
// and second's syndrome extractions.
- let corrected_qubits: Vec = first
- .corrections
- .iter()
- .map(|c| c.target_qubit)
- .collect();
+ let corrected_qubits: Vec = first.corrections.iter().map(|c| c.target_qubit).collect();
let extraction_qubits: Vec = second
.syndrome_extractions
@@ -568,11 +563,7 @@ fn minimize_feed_forward(rounds: &[QecRound]) -> (Vec, Vec) {
/// The total latency is:
/// sum over rounds of (extraction_depth * gate_time + correction_time)
/// + feed_forward_points * classical_time
-pub fn schedule_latency(
- schedule: &QecSchedule,
- gate_time_ns: u64,
- classical_time_ns: u64,
-) -> u64 {
+pub fn schedule_latency(schedule: &QecSchedule, gate_time_ns: u64, classical_time_ns: u64) -> u64 {
let quantum_latency = schedule.total_quantum_depth as u64 * gate_time_ns;
let classical_latency = schedule.feed_forward_points.len() as u64 * classical_time_ns;
@@ -1222,7 +1213,7 @@ mod tests {
let schedule = generate_surface_code_schedule(3, 2);
let graph = build_dependency_graph(&schedule);
assert_eq!(graph.nodes.len(), 6); // 2 rounds * 3 nodes
- // Cross-round edge: round 0 Correct -> round 1 Extract.
+ // Cross-round edge: round 0 Correct -> round 1 Extract.
assert!(graph.edges.contains(&(2, 3)));
}
diff --git a/crates/ruqu-core/src/replay.rs b/crates/ruqu-core/src/replay.rs
index bf15981f1..27c16730f 100644
--- a/crates/ruqu-core/src/replay.rs
+++ b/crates/ruqu-core/src/replay.rs
@@ -4,7 +4,6 @@
/// seed, noise model, shots) into an [`ExecutionRecord`] so that any run can
/// be replayed bit-for-bit. Also provides [`StateCheckpoint`] for snapshotting
/// the raw amplitude vector mid-simulation.
-
use crate::circuit::QuantumCircuit;
use crate::gate::Gate;
use crate::simulator::{SimConfig, Simulator};
@@ -135,7 +134,10 @@ impl ReplayEngine {
return false;
}
- let noise = record.noise_config.as_ref().map(NoiseConfig::to_noise_model);
+ let noise = record
+ .noise_config
+ .as_ref()
+ .map(NoiseConfig::to_noise_model);
let config = SimConfig {
seed: Some(record.seed),
@@ -332,8 +334,8 @@ fn gate_components(gate: &Gate) -> (u8, Vec, Vec) {
Gate::Unitary1Q(q, m) => {
// Encode the 4 complex entries (8 f64 values).
let params = vec![
- m[0][0].re, m[0][0].im, m[0][1].re, m[0][1].im,
- m[1][0].re, m[1][0].im, m[1][1].re, m[1][1].im,
+ m[0][0].re, m[0][0].im, m[0][1].re, m[0][1].im, m[1][0].re, m[1][0].im, m[1][1].re,
+ m[1][1].im,
];
(19, vec![*q], params)
}
@@ -397,13 +399,20 @@ mod tests {
};
let r1 = Simulator::run_with_config(&circuit, &c1).unwrap();
let r2 = Simulator::run_with_config(&circuit, &c2).unwrap();
- if r1.measurements.iter().zip(r2.measurements.iter()).any(|(a, b)| a.result != b.result)
+ if r1
+ .measurements
+ .iter()
+ .zip(r2.measurements.iter())
+ .any(|(a, b)| a.result != b.result)
{
any_differ = true;
break;
}
}
- assert!(any_differ, "expected at least one pair of seeds to disagree");
+ assert!(
+ any_differ,
+ "expected at least one pair of seeds to disagree"
+ );
}
/// Record + replay round-trip succeeds.
diff --git a/crates/ruqu-core/src/simd.rs b/crates/ruqu-core/src/simd.rs
index 6edc5de77..ef64655e8 100644
--- a/crates/ruqu-core/src/simd.rs
+++ b/crates/ruqu-core/src/simd.rs
@@ -72,12 +72,7 @@ pub fn apply_two_qubit_gate_scalar(
continue;
}
- let idxs = [
- base,
- base | q2_bit,
- base | q1_bit,
- base | q1_bit | q2_bit,
- ];
+ let idxs = [base, base | q2_bit, base | q1_bit, base | q1_bit | q2_bit];
let vals = [
amplitudes[idxs[0]],
@@ -149,31 +144,19 @@ pub unsafe fn apply_single_qubit_gate_simd(
let j = i + step;
// Load two complex values from position i: [re0, im0, re1, im1]
- let a_vec = _mm256_loadu_pd(
- &amplitudes[i] as *const Complex as *const f64,
- );
+ let a_vec = _mm256_loadu_pd(&amplitudes[i] as *const Complex as *const f64);
// Load two complex values from position j
- let b_vec = _mm256_loadu_pd(
- &amplitudes[j] as *const Complex as *const f64,
- );
+ let b_vec = _mm256_loadu_pd(&amplitudes[j] as *const Complex as *const f64);
// Compute matrix[0][0] * a + matrix[0][1] * b for the i-slot
- let out_i = complex_mul_add_avx2(
- a_vec, m00_re, m00_im, b_vec, m01_re, m01_im, neg_mask,
- );
+ let out_i =
+ complex_mul_add_avx2(a_vec, m00_re, m00_im, b_vec, m01_re, m01_im, neg_mask);
// Compute matrix[1][0] * a + matrix[1][1] * b for the j-slot
- let out_j = complex_mul_add_avx2(
- a_vec, m10_re, m10_im, b_vec, m11_re, m11_im, neg_mask,
- );
-
- _mm256_storeu_pd(
- &mut amplitudes[i] as *mut Complex as *mut f64,
- out_i,
- );
- _mm256_storeu_pd(
- &mut amplitudes[j] as *mut Complex as *mut f64,
- out_j,
- );
+ let out_j =
+ complex_mul_add_avx2(a_vec, m10_re, m10_im, b_vec, m11_re, m11_im, neg_mask);
+
+ _mm256_storeu_pd(&mut amplitudes[i] as *mut Complex as *mut f64, out_i);
+ _mm256_storeu_pd(&mut amplitudes[j] as *mut Complex as *mut f64, out_j);
i += 2;
}
@@ -376,12 +359,7 @@ pub fn apply_two_qubit_gate_parallel(
unsafe {
let ptr = amp_addr as *mut Complex;
- let idxs = [
- base,
- base | q2_bit,
- base | q1_bit,
- base | q1_bit | q2_bit,
- ];
+ let idxs = [base, base | q2_bit, base | q1_bit, base | q1_bit | q2_bit];
let vals = [
*ptr.add(idxs[0]),
@@ -391,10 +369,8 @@ pub fn apply_two_qubit_gate_parallel(
];
for r in 0..4 {
- *ptr.add(idxs[r]) = m[r][0] * vals[0]
- + m[r][1] * vals[1]
- + m[r][2] * vals[2]
- + m[r][3] * vals[3];
+ *ptr.add(idxs[r]) =
+ m[r][0] * vals[0] + m[r][1] * vals[1] + m[r][2] * vals[2] + m[r][3] * vals[3];
}
}
});
diff --git a/crates/ruqu-core/src/simulator.rs b/crates/ruqu-core/src/simulator.rs
index 06f6117e7..ee6baef4b 100644
--- a/crates/ruqu-core/src/simulator.rs
+++ b/crates/ruqu-core/src/simulator.rs
@@ -1,10 +1,10 @@
//! High-level simulator that executes quantum circuits
use crate::circuit::QuantumCircuit;
+use crate::error::Result;
use crate::gate::Gate;
use crate::state::QuantumState;
use crate::types::*;
-use crate::error::Result;
use rand::Rng;
use std::collections::HashMap;
diff --git a/crates/ruqu-core/src/stabilizer.rs b/crates/ruqu-core/src/stabilizer.rs
index e9f963d4b..5a04e65d8 100644
--- a/crates/ruqu-core/src/stabilizer.rs
+++ b/crates/ruqu-core/src/stabilizer.rs
@@ -140,9 +140,7 @@ impl StabilizerState {
// Combine phases: new_r = (2*r_target + 2*r_source + phase_sum) mod 4
// r=1 means phase -1 (i.e. factor of i^2 = -1), so we work mod 4 in
// units of i. r_bit maps to 0 or 2.
- let total = 2 * (self.r(target) as i32)
- + 2 * (self.r(source) as i32)
- + phase_sum;
+ let total = 2 * (self.r(target) as i32) + 2 * (self.r(source) as i32) + phase_sum;
// Result phase bit: total mod 4 == 2 => r=1, else r=0
let new_r = ((total % 4) + 4) % 4 == 2;
self.set_r(target, new_r);
@@ -393,9 +391,7 @@ impl StabilizerState {
}
let scratch_r = scratch[2 * n];
let stab_r = self.r(stab_row);
- let total = 2 * (scratch_r as i32)
- + 2 * (stab_r as i32)
- + phase_sum;
+ let total = 2 * (scratch_r as i32) + 2 * (stab_r as i32) + phase_sum;
scratch[2 * n] = ((total % 4) + 4) % 4 == 2;
for j in 0..n {
@@ -550,13 +546,35 @@ fn g(x1: bool, z1: bool, x2: bool, z2: bool) -> i32 {
}
if x1 && z1 {
// Y * ...
- if x2 && z2 { 0 } else if x2 { 1 } else if z2 { -1 } else { 0 }
+ if x2 && z2 {
+ 0
+ } else if x2 {
+ 1
+ } else if z2 {
+ -1
+ } else {
+ 0
+ }
} else if x1 && !z1 {
// X * ...
- if x2 && z2 { -1 } else if x2 { 0 } else if z2 { 1 } else { 0 }
+ if x2 && z2 {
+ -1
+ } else if x2 {
+ 0
+ } else if z2 {
+ 1
+ } else {
+ 0
+ }
} else {
// Z * ... (z1 && !x1)
- if x2 && z2 { 1 } else if x2 { -1 } else { 0 }
+ if x2 && z2 {
+ 1
+ } else if x2 {
+ -1
+ } else {
+ 0
+ }
}
}
@@ -601,10 +619,7 @@ mod tests {
state.cnot(0, 1);
let o0 = state.measure(0).unwrap();
let o1 = state.measure(1).unwrap();
- assert_eq!(
- o0.result, o1.result,
- "Bell state qubits must be correlated"
- );
+ assert_eq!(o0.result, o1.result, "Bell state qubits must be correlated");
}
#[test]
@@ -729,7 +744,7 @@ mod tests {
state.hadamard(0);
state.phase_gate(0); // S
state.apply_gate(&Gate::Sdg(0)).unwrap(); // Sdg
- // Should be back to H|0> = |+>
+ // Should be back to H|0> = |+>
state.hadamard(0);
let outcome = state.measure(0).unwrap();
assert!(!outcome.result, "S.Sdg should be identity");
diff --git a/crates/ruqu-core/src/state.rs b/crates/ruqu-core/src/state.rs
index 758672d1a..a399cb715 100644
--- a/crates/ruqu-core/src/state.rs
+++ b/crates/ruqu-core/src/state.rs
@@ -175,10 +175,7 @@ impl QuantumState {
}
// Two-qubit gates
- Gate::CNOT(q1, q2)
- | Gate::CZ(q1, q2)
- | Gate::SWAP(q1, q2)
- | Gate::Rzz(q1, q2, _) => {
+ Gate::CNOT(q1, q2) | Gate::CZ(q1, q2) | Gate::SWAP(q1, q2) | Gate::Rzz(q1, q2, _) => {
if q1 == q2 {
return Err(QuantumError::CircuitError(format!(
"two-qubit gate requires distinct qubits, got {} and {}",
diff --git a/crates/ruqu-core/src/subpoly_decoder.rs b/crates/ruqu-core/src/subpoly_decoder.rs
index 671012e13..07784e9a8 100644
--- a/crates/ruqu-core/src/subpoly_decoder.rs
+++ b/crates/ruqu-core/src/subpoly_decoder.rs
@@ -29,7 +29,9 @@
use std::time::Instant;
-use crate::decoder::{Correction, PauliType, StabilizerMeasurement, SurfaceCodeDecoder, SyndromeData};
+use crate::decoder::{
+ Correction, PauliType, StabilizerMeasurement, SurfaceCodeDecoder, SyndromeData,
+};
// ---------------------------------------------------------------------------
// Internal defect representation
@@ -178,7 +180,12 @@ fn path_to_boundary(defect: &Defect, d: u32) -> Vec<(u32, PauliType)> {
}
fn infer_logical(corrections: &[(u32, PauliType)]) -> bool {
- corrections.iter().filter(|(_, p)| *p == PauliType::X).count() % 2 == 1
+ corrections
+ .iter()
+ .filter(|(_, p)| *p == PauliType::X)
+ .count()
+ % 2
+ == 1
}
// ---------------------------------------------------------------------------
@@ -251,7 +258,11 @@ impl HierarchicalTiledDecoder {
}
/// Provable complexity bound for a given code distance and error rate.
- pub fn complexity_bound(&self, code_distance: u32, physical_error_rate: f64) -> ComplexityBound {
+ pub fn complexity_bound(
+ &self,
+ code_distance: u32,
+ physical_error_rate: f64,
+ ) -> ComplexityBound {
let d = code_distance as f64;
let s = self.tile_size as f64;
let p = physical_error_rate;
@@ -663,11 +674,7 @@ impl ComplexityAnalyzer {
let avg_ns = total_ns as f64 / trials as f64;
let d = distance as f64;
// Estimate scaling exponent from a single distance (rough).
- let alpha = if d > 1.0 {
- avg_ns.ln() / d.ln()
- } else {
- 2.0
- };
+ let alpha = if d > 1.0 { avg_ns.ln() / d.ln() } else { 2.0 };
ComplexityBound {
expected_ops: avg_ns,
@@ -679,10 +686,7 @@ impl ComplexityAnalyzer {
}
/// Estimate threshold and logical error suppression from Monte-Carlo runs.
- pub fn threshold_analysis(
- error_rates: &[f64],
- distances: &[u32],
- ) -> ThresholdTheorem {
+ pub fn threshold_analysis(error_rates: &[f64], distances: &[u32]) -> ThresholdTheorem {
// Standard surface code threshold estimate: ~1% for depolarizing noise.
let p_th = 0.01;
@@ -735,7 +739,9 @@ impl ComplexityAnalyzer {
for y in 0..grid_w {
for x in 0..grid_w {
// Simple hash-based PRNG.
- hash = hash.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
+ hash = hash
+ .wrapping_mul(6364136223846793005)
+ .wrapping_add(1442695040888963407);
let r = (hash >> 33) as f64 / (u32::MAX as f64);
stabs.push(StabilizerMeasurement {
x,
@@ -880,10 +886,7 @@ pub struct SubpolyVerification {
}
/// Measure empirical decode time scaling across code distances.
-pub fn benchmark_scaling(
- distances: &[u32],
- error_rate: f64,
-) -> Vec {
+pub fn benchmark_scaling(distances: &[u32], error_rate: f64) -> Vec {
let samples_per_d = 20u32;
let decoder = HierarchicalTiledDecoder::new(4, 3);
let mut data = Vec::with_capacity(distances.len());
@@ -1044,8 +1047,7 @@ mod tests {
#[test]
fn hierarchical_trait_object() {
- let dec: Box<dyn SurfaceCodeDecoder> =
- Box::new(HierarchicalTiledDecoder::new(2, 2));
+ let dec: Box<dyn SurfaceCodeDecoder> = Box::new(HierarchicalTiledDecoder::new(2, 2));
let syn = simple_syndrome(3, &[(0, 0)]);
let _ = dec.decode(&syn);
assert_eq!(dec.name(), "HierarchicalTiledDecoder");
@@ -1105,9 +1107,24 @@ mod tests {
fn sliding_multi_round() {
let dec = SlidingWindowDecoder::new(2);
let stabs = vec![
- StabilizerMeasurement { x: 0, y: 0, round: 0, value: true },
- StabilizerMeasurement { x: 0, y: 0, round: 1, value: false },
- StabilizerMeasurement { x: 0, y: 0, round: 2, value: true },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 0,
+ value: true,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 1,
+ value: false,
+ },
+ StabilizerMeasurement {
+ x: 0,
+ y: 0,
+ round: 2,
+ value: true,
+ },
];
let syn = SyndromeData {
stabilizers: stabs,
diff --git a/crates/ruqu-core/src/tensor_network.rs b/crates/ruqu-core/src/tensor_network.rs
index 06b7af241..45ba3e667 100644
--- a/crates/ruqu-core/src/tensor_network.rs
+++ b/crates/ruqu-core/src/tensor_network.rs
@@ -235,10 +235,9 @@ impl MpsState {
// Step 1: Contract over the shared bond index to form a 4-index tensor
// theta(l, ia, ib, r) = Sum_m A_a(l, ia, m) * A_b(m, ib, r)
let mut theta = vec![Complex::ZERO; left_dim * 2 * 2 * right_dim];
- let theta_idx =
- |l: usize, ia: usize, ib: usize, r: usize| -> usize {
- l * (4 * right_dim) + ia * (2 * right_dim) + ib * right_dim + r
- };
+ let theta_idx = |l: usize, ia: usize, ib: usize, r: usize| -> usize {
+ l * (4 * right_dim) + ia * (2 * right_dim) + ib * right_dim + r
+ };
for l in 0..left_dim {
for ia in 0..2 {
@@ -370,11 +369,7 @@ impl MpsState {
// Move q1 adjacent to q2 via SWAP chain.
// We swap q1 toward q2, keeping track of its current position.
- let (mut pos1, target_pos) = if q1 < q2 {
- (q1, q2 - 1)
- } else {
- (q1, q2 + 1)
- };
+ let (mut pos1, target_pos) = if q1 < q2 { (q1, q2 - 1) } else { (q1, q2 + 1) };
// Forward swaps: move pos1 toward target_pos
let forward_steps: Vec = if pos1 < target_pos {
@@ -495,10 +490,7 @@ impl MpsState {
Ok(vec![])
}
- Gate::CNOT(q1, q2)
- | Gate::CZ(q1, q2)
- | Gate::SWAP(q1, q2)
- | Gate::Rzz(q1, q2, _) => {
+ Gate::CNOT(q1, q2) | Gate::CZ(q1, q2) | Gate::SWAP(q1, q2) | Gate::Rzz(q1, q2, _) => {
if q1 == q2 {
return Err(QuantumError::CircuitError(format!(
"two-qubit gate requires distinct qubits, got {} and {}",
@@ -607,9 +599,7 @@ impl MpsState {
continue;
}
for p in 0..2 {
- sum += e.conj()
- * t.get(ro, p, ri).conj()
- * t.get(co, p, ci);
+ sum += e.conj() * t.get(ro, p, ri).conj() * t.get(co, p, ci);
}
}
}
@@ -636,10 +626,8 @@ impl MpsState {
if e_r.norm_sq() == 0.0 {
continue;
}
- val += e_l.conj()
- * t.get(l1, phys, r1).conj()
- * t.get(l2, phys, r2)
- * e_r;
+ val +=
+ e_l.conj() * t.get(l1, phys, r1).conj() * t.get(l2, phys, r2) * e_r;
}
}
}
diff --git a/crates/ruqu-core/src/transpiler.rs b/crates/ruqu-core/src/transpiler.rs
index fecab6a06..1db0c3d7c 100644
--- a/crates/ruqu-core/src/transpiler.rs
+++ b/crates/ruqu-core/src/transpiler.rs
@@ -175,18 +175,12 @@ pub fn decompose_to_ibm(gate: &Gate) -> Vec {
}
// SWAP = CNOT(a,b) CNOT(b,a) CNOT(a,b)
- Gate::SWAP(a, b) => vec![
- Gate::CNOT(*a, *b),
- Gate::CNOT(*b, *a),
- Gate::CNOT(*a, *b),
- ],
+ Gate::SWAP(a, b) => vec![Gate::CNOT(*a, *b), Gate::CNOT(*b, *a), Gate::CNOT(*a, *b)],
// Rzz(theta) = CNOT(a,b) Rz(b, theta) CNOT(a,b)
- Gate::Rzz(a, b, theta) => vec![
- Gate::CNOT(*a, *b),
- Gate::Rz(*b, *theta),
- Gate::CNOT(*a, *b),
- ],
+ Gate::Rzz(a, b, theta) => {
+ vec![Gate::CNOT(*a, *b), Gate::Rz(*b, *theta), Gate::CNOT(*a, *b)]
+ }
// --- non-unitary / pass-through ---
Gate::Measure(q) => vec![Gate::Measure(*q)],
@@ -539,9 +533,7 @@ fn remap_gate(gate: &Gate, log2phys: &[u32]) -> Gate {
Gate::CNOT(c, t) => Gate::CNOT(log2phys[*c as usize], log2phys[*t as usize]),
Gate::CZ(a, b) => Gate::CZ(log2phys[*a as usize], log2phys[*b as usize]),
Gate::SWAP(a, b) => Gate::SWAP(log2phys[*a as usize], log2phys[*b as usize]),
- Gate::Rzz(a, b, theta) => {
- Gate::Rzz(log2phys[*a as usize], log2phys[*b as usize], *theta)
- }
+ Gate::Rzz(a, b, theta) => Gate::Rzz(log2phys[*a as usize], log2phys[*b as usize], *theta),
Gate::Measure(q) => Gate::Measure(log2phys[*q as usize]),
Gate::Reset(q) => Gate::Reset(log2phys[*q as usize]),
Gate::Barrier => Gate::Barrier,
@@ -850,7 +842,11 @@ mod tests {
.iter()
.filter(|g| matches!(g, Gate::SWAP(_, _)))
.count();
- assert!(swap_count >= 1, "expected at least 1 SWAP, got {}", swap_count);
+ assert!(
+ swap_count >= 1,
+ "expected at least 1 SWAP, got {}",
+ swap_count
+ );
}
#[test]
diff --git a/crates/ruqu-core/src/verification.rs b/crates/ruqu-core/src/verification.rs
index 5d8f35eed..e6f360dff 100644
--- a/crates/ruqu-core/src/verification.rs
+++ b/crates/ruqu-core/src/verification.rs
@@ -97,11 +97,7 @@ pub struct Discrepancy {
/// * `circuit` - The quantum circuit to verify.
/// * `shots` - Number of measurement shots per backend.
/// * `seed` - Deterministic seed for reproducibility.
-pub fn verify_circuit(
- circuit: &QuantumCircuit,
- shots: u32,
- seed: u64,
-) -> VerificationResult {
+pub fn verify_circuit(circuit: &QuantumCircuit, shots: u32, seed: u64) -> VerificationResult {
let analysis = analyze_circuit(circuit);
let num_qubits = circuit.num_qubits();
let is_clifford = is_clifford_circuit(circuit);
@@ -121,10 +117,7 @@ pub fn verify_circuit(
total_variation_distance: None,
chi_squared_p_value: None,
correlation: None,
- explanation: format!(
- "State-vector simulation failed: {}",
- e
- ),
+ explanation: format!("State-vector simulation failed: {}", e),
discrepancies: vec![],
};
}
@@ -144,11 +137,7 @@ pub fn verify_circuit(
result.reference_backend = Some(BackendType::Stabilizer);
// Upgrade to Exact level if the distributions match perfectly.
- if result.passed
- && result
- .total_variation_distance
- .map_or(false, |d| d == 0.0)
- {
+ if result.passed && result.total_variation_distance.map_or(false, |d| d == 0.0) {
result.level = VerificationLevel::Exact;
result.explanation = format!(
"Exact match: {}-qubit Clifford circuit verified across \
@@ -160,11 +149,8 @@ pub fn verify_circuit(
// Even for Clifford circuits, sampling noise may cause small
// differences. Use statistical comparison with a tight tolerance.
let tight_tolerance = 0.05;
- let mut stat_result = verify_against_reference(
- &sv_counts,
- &stab_counts,
- tight_tolerance,
- );
+ let mut stat_result =
+ verify_against_reference(&sv_counts, &stab_counts, tight_tolerance);
stat_result.primary_backend = BackendType::StateVector;
stat_result.reference_backend = Some(BackendType::Stabilizer);
stat_result.explanation = format!(
@@ -172,9 +158,7 @@ pub fn verify_circuit(
state-vector and stabilizer backends ({} shots, TVD={:.6})",
num_qubits,
shots,
- stat_result
- .total_variation_distance
- .unwrap_or(0.0)
+ stat_result.total_variation_distance.unwrap_or(0.0)
);
return stat_result;
}
@@ -196,9 +180,7 @@ pub fn verify_circuit(
"Verification skipped: {}-qubit circuit contains non-Clifford \
gates (clifford_fraction={:.2}, {} non-Clifford gates). \
No reference backend available for cross-validation.",
- num_qubits,
- analysis.clifford_fraction,
- analysis.non_clifford_gates
+ num_qubits, analysis.clifford_fraction, analysis.non_clifford_gates
),
discrepancies: vec![],
};
@@ -249,8 +231,7 @@ pub fn verify_against_reference(
let distance = tvd(&p_norm, &q_norm);
let total_ref: usize = reference.values().sum();
- let (chi2_stat, dof) =
- chi_squared_statistic(primary, &q_norm, total_ref);
+ let (chi2_stat, dof) = chi_squared_statistic(primary, &q_norm, total_ref);
let p_value = if dof > 0 {
chi_squared_p_value(chi2_stat, dof)
} else {
@@ -260,8 +241,7 @@ pub fn verify_against_reference(
let corr = pearson_correlation(&p_norm, &q_norm);
// Build sorted discrepancy list.
- let mut all_keys: Vec<&Vec> =
- p_norm.keys().chain(q_norm.keys()).collect();
+ let mut all_keys: Vec<&Vec> = p_norm.keys().chain(q_norm.keys()).collect();
all_keys.sort();
all_keys.dedup();
@@ -281,8 +261,11 @@ pub fn verify_against_reference(
.collect();
// Sort by absolute difference, descending.
- discrepancies
- .sort_by(|a, b| b.absolute_difference.partial_cmp(&a.absolute_difference).unwrap());
+ discrepancies.sort_by(|a, b| {
+ b.absolute_difference
+ .partial_cmp(&a.absolute_difference)
+ .unwrap()
+ });
let passed = distance <= tolerance;
@@ -398,9 +381,7 @@ pub fn run_stabilizer_shots(
Gate::Reset(q) => {
// Implement reset: measure, then conditionally flip.
let qubit = *q as usize;
- let outcome = state
- .measure(qubit)
- .expect("stabilizer measurement failed");
+ let outcome = state.measure(qubit).expect("stabilizer measurement failed");
if outcome.result {
state.x_gate(qubit);
}
@@ -426,18 +407,13 @@ pub fn run_stabilizer_shots(
// If no explicit measurements, measure all qubits.
if !has_measurements {
for q in 0..n {
- let outcome = state
- .measure(q)
- .expect("stabilizer measurement failed");
+ let outcome = state.measure(q).expect("stabilizer measurement failed");
measured_bits[q] = Some(outcome.result);
}
}
// Build the bit-vector for this shot.
- let bits: Vec = measured_bits
- .iter()
- .map(|mb| mb.unwrap_or(false))
- .collect();
+ let bits: Vec = measured_bits.iter().map(|mb| mb.unwrap_or(false)).collect();
*counts.entry(bits).or_insert(0) += 1;
}
@@ -453,9 +429,7 @@ pub fn run_stabilizer_shots(
///
/// Each count is divided by the total number of shots to produce a
/// probability in [0, 1].
-pub fn normalize_counts(
- counts: &HashMap, usize>,
-) -> HashMap, f64> {
+pub fn normalize_counts(counts: &HashMap, usize>) -> HashMap, f64> {
let total: usize = counts.values().sum();
if total == 0 {
return HashMap::new();
@@ -473,12 +447,8 @@ pub fn normalize_counts(
///
/// Returns a value in [0, 1] where 0 means identical distributions and 1
/// means completely disjoint support.
-pub fn tvd(
- p: &HashMap, f64>,
- q: &HashMap, f64>,
-) -> f64 {
- let mut all_keys: Vec<&Vec> =
- p.keys().chain(q.keys()).collect();
+pub fn tvd(p: &HashMap, f64>, q: &HashMap, f64>) -> f64 {
+ let mut all_keys: Vec<&Vec> = p.keys().chain(q.keys()).collect();
all_keys.sort();
all_keys.dedup();
@@ -520,10 +490,7 @@ pub fn chi_squared_statistic(
}
let obs_total_f = obs_total as f64;
- let mut all_keys: Vec<&Vec> = observed
- .keys()
- .chain(expected_probs.keys())
- .collect();
+ let mut all_keys: Vec<&Vec> = observed.keys().chain(expected_probs.keys()).collect();
all_keys.sort();
all_keys.dedup();
@@ -606,12 +573,8 @@ pub fn chi_squared_p_value(statistic: f64, dof: usize) -> f64 {
///
/// Returns a value in [-1, 1]. Returns 0.0 if either distribution has zero
/// variance (constant).
-fn pearson_correlation(
- p: &HashMap, f64>,
- q: &HashMap, f64>,
-) -> f64 {
- let mut all_keys: Vec<&Vec> =
- p.keys().chain(q.keys()).collect();
+fn pearson_correlation(p: &HashMap, f64>, q: &HashMap, f64>) -> f64 {
+ let mut all_keys: Vec<&Vec> = p.keys().chain(q.keys()).collect();
all_keys.sort();
all_keys.dedup();
@@ -686,8 +649,7 @@ fn standard_normal_cdf(x: f64) -> f64 {
let t5 = t4 * t;
let erf_approx =
- 1.0 - (a1 * t + a2 * t2 + a3 * t3 + a4 * t4 + a5 * t5)
- * (-abs_x * abs_x).exp();
+ 1.0 - (a1 * t + a2 * t2 + a3 * t3 + a4 * t4 + a5 * t5) * (-abs_x * abs_x).exp();
0.5 * (1.0 + sign * erf_approx)
}
@@ -703,9 +665,7 @@ mod tests {
// -- Helper to build a count map from a list of (bitstring, count) pairs --
- fn make_counts(
- entries: &[(&[bool], usize)],
- ) -> HashMap, usize> {
+ fn make_counts(entries: &[(&[bool], usize)]) -> HashMap, usize> {
entries
.iter()
.map(|(bits, count)| (bits.to_vec(), *count))
@@ -761,10 +721,7 @@ mod tests {
#[test]
fn normalize_counts_produces_probabilities() {
- let counts = make_counts(&[
- (&[false, false], 50),
- (&[true, true], 50),
- ]);
+ let counts = make_counts(&[(&[false, false], 50), (&[true, true], 50)]);
let probs = normalize_counts(&counts);
assert!((probs[&vec![false, false]] - 0.5).abs() < 1e-10);
assert!((probs[&vec![true, true]] - 0.5).abs() < 1e-10);
@@ -783,12 +740,9 @@ mod tests {
#[test]
fn identical_distributions_have_zero_tvd() {
- let p: HashMap, f64> = [
- (vec![false, false], 0.5),
- (vec![true, true], 0.5),
- ]
- .into_iter()
- .collect();
+ let p: HashMap, f64> = [(vec![false, false], 0.5), (vec![true, true], 0.5)]
+ .into_iter()
+ .collect();
let distance = tvd(&p, &p);
assert!(
@@ -800,10 +754,8 @@ mod tests {
#[test]
fn completely_different_distributions_have_tvd_near_one() {
- let p: HashMap, f64> =
- [(vec![false], 1.0)].into_iter().collect();
- let q: HashMap, f64> =
- [(vec![true], 1.0)].into_iter().collect();
+ let p: HashMap, f64> = [(vec![false], 1.0)].into_iter().collect();
+ let q: HashMap, f64> = [(vec![true], 1.0)].into_iter().collect();
let distance = tvd(&p, &q);
assert!(
@@ -815,19 +767,13 @@ mod tests {
#[test]
fn tvd_partial_overlap() {
- let p: HashMap, f64> = [
- (vec![false], 0.7),
- (vec![true], 0.3),
- ]
- .into_iter()
- .collect();
+ let p: HashMap, f64> = [(vec![false], 0.7), (vec![true], 0.3)]
+ .into_iter()
+ .collect();
- let q: HashMap, f64> = [
- (vec![false], 0.3),
- (vec![true], 0.7),
- ]
- .into_iter()
- .collect();
+ let q: HashMap, f64> = [(vec![false], 0.3), (vec![true], 0.7)]
+ .into_iter()
+ .collect();
let distance = tvd(&p, &q);
// TVD = 0.5 * (|0.7-0.3| + |0.3-0.7|) = 0.5 * (0.4 + 0.4) = 0.4
@@ -844,19 +790,12 @@ mod tests {
#[test]
fn chi_squared_perfect_fit_has_low_statistic() {
- let observed = make_counts(&[
- (&[false], 500),
- (&[true], 500),
- ]);
- let expected: HashMap, f64> = [
- (vec![false], 0.5),
- (vec![true], 0.5),
- ]
- .into_iter()
- .collect();
+ let observed = make_counts(&[(&[false], 500), (&[true], 500)]);
+ let expected: HashMap, f64> = [(vec![false], 0.5), (vec![true], 0.5)]
+ .into_iter()
+ .collect();
- let (stat, dof) =
- chi_squared_statistic(&observed, &expected, 1000);
+ let (stat, dof) = chi_squared_statistic(&observed, &expected, 1000);
assert!(
stat < 1.0,
"Perfect fit should have near-zero chi2, got {}",
@@ -875,24 +814,13 @@ mod tests {
#[test]
fn chi_squared_bad_fit_has_high_statistic() {
// Observed is heavily biased; expected is uniform.
- let observed = make_counts(&[
- (&[false], 900),
- (&[true], 100),
- ]);
- let expected: HashMap, f64> = [
- (vec![false], 0.5),
- (vec![true], 0.5),
- ]
- .into_iter()
- .collect();
+ let observed = make_counts(&[(&[false], 900), (&[true], 100)]);
+ let expected: HashMap, f64> = [(vec![false], 0.5), (vec![true], 0.5)]
+ .into_iter()
+ .collect();
- let (stat, dof) =
- chi_squared_statistic(&observed, &expected, 1000);
- assert!(
- stat > 10.0,
- "Bad fit should have large chi2, got {}",
- stat
- );
+ let (stat, dof) = chi_squared_statistic(&observed, &expected, 1000);
+ assert!(stat > 10.0, "Bad fit should have large chi2, got {}", stat);
assert_eq!(dof, 1);
let pval = chi_squared_p_value(stat, dof);
@@ -921,10 +849,7 @@ mod tests {
#[test]
fn identical_distributions_pass_verification() {
- let counts = make_counts(&[
- (&[false, false], 500),
- (&[true, true], 500),
- ]);
+ let counts = make_counts(&[(&[false, false], 500), (&[true, true], 500)]);
let result = verify_against_reference(&counts, &counts, 0.01);
assert!(result.passed);
assert!(
@@ -938,12 +863,10 @@ mod tests {
let primary = make_counts(&[(&[false], 1000)]);
let reference = make_counts(&[(&[true], 1000)]);
- let result =
- verify_against_reference(&primary, &reference, 0.1);
+ let result = verify_against_reference(&primary, &reference, 0.1);
assert!(!result.passed);
assert!(
- (result.total_variation_distance.unwrap() - 1.0).abs()
- < 1e-10,
+ (result.total_variation_distance.unwrap() - 1.0).abs() < 1e-10,
"TVD should be 1 for disjoint distributions"
);
}
@@ -963,8 +886,7 @@ mod tests {
(&[true, true], 250),
]);
- let result =
- verify_against_reference(&primary, &reference, 0.5);
+ let result = verify_against_reference(&primary, &reference, 0.5);
// Verify discrepancies are sorted descending by absolute_difference.
for i in 1..result.discrepancies.len() {
@@ -1038,10 +960,8 @@ mod tests {
);
// Check roughly 50/50 split (within a generous margin).
- let count_00 =
- counts.get(&vec![false, false]).copied().unwrap_or(0);
- let count_11 =
- counts.get(&vec![true, true]).copied().unwrap_or(0);
+ let count_00 = counts.get(&vec![false, false]).copied().unwrap_or(0);
+ let count_11 = counts.get(&vec![true, true]).copied().unwrap_or(0);
assert_eq!(count_00 + count_11, 1000);
assert!(
count_00 > 350 && count_00 < 650,
@@ -1077,10 +997,7 @@ mod tests {
let result = verify_circuit(&circ, 2000, 42);
assert_eq!(result.primary_backend, BackendType::StateVector);
- assert_eq!(
- result.reference_backend,
- Some(BackendType::Stabilizer)
- );
+ assert_eq!(result.reference_backend, Some(BackendType::Stabilizer));
assert!(
result.passed,
"Bell state should pass verification: {}",
@@ -1140,20 +1057,14 @@ mod tests {
assert!(result.passed);
// Pure Clifford (only measurements), should do cross-backend check.
- assert_eq!(
- result.reference_backend,
- Some(BackendType::Stabilizer)
- );
+ assert_eq!(result.reference_backend, Some(BackendType::Stabilizer));
}
#[test]
fn pearson_correlation_identical_distributions() {
- let p: HashMap, f64> = [
- (vec![false], 0.3),
- (vec![true], 0.7),
- ]
- .into_iter()
- .collect();
+ let p: HashMap, f64> = [(vec![false], 0.3), (vec![true], 0.7)]
+ .into_iter()
+ .collect();
let corr = pearson_correlation(&p, &p);
assert!(
diff --git a/crates/ruqu-core/src/witness.rs b/crates/ruqu-core/src/witness.rs
index 9997c57f9..b34633670 100644
--- a/crates/ruqu-core/src/witness.rs
+++ b/crates/ruqu-core/src/witness.rs
@@ -4,7 +4,6 @@
/// [`WitnessEntry`] includes a hash of its predecessor so that retroactive
/// tampering with any field in any entry is detectable by
/// [`WitnessLog::verify_chain`].
-
use crate::replay::ExecutionRecord;
use crate::types::MeasurementOutcome;
@@ -240,10 +239,7 @@ impl WitnessLog {
" \"depolarizing_rate\": {},\n",
nc.depolarizing_rate
));
- buf.push_str(&format!(
- " \"bit_flip_rate\": {},\n",
- nc.bit_flip_rate
- ));
+ buf.push_str(&format!(" \"bit_flip_rate\": {},\n", nc.bit_flip_rate));
buf.push_str(&format!(
" \"phase_flip_rate\": {}\n",
nc.phase_flip_rate
diff --git a/crates/ruqu-core/tests/test_gates.rs b/crates/ruqu-core/tests/test_gates.rs
index c3fa44e3a..19886d315 100644
--- a/crates/ruqu-core/tests/test_gates.rs
+++ b/crates/ruqu-core/tests/test_gates.rs
@@ -90,9 +90,9 @@ fn test_hadamard_matrix() {
let matrix = Gate::H(0).matrix_1q().expect("H should have a 2x2 matrix");
let s = std::f64::consts::FRAC_1_SQRT_2;
- assert!(complex_approx_eq(&matrix[0][0], &c(s, 0.0))); // [0,0]
- assert!(complex_approx_eq(&matrix[0][1], &c(s, 0.0))); // [0,1]
- assert!(complex_approx_eq(&matrix[1][0], &c(s, 0.0))); // [1,0]
+ assert!(complex_approx_eq(&matrix[0][0], &c(s, 0.0))); // [0,0]
+ assert!(complex_approx_eq(&matrix[0][1], &c(s, 0.0))); // [0,1]
+ assert!(complex_approx_eq(&matrix[1][0], &c(s, 0.0))); // [1,0]
assert!(complex_approx_eq(&matrix[1][1], &c(-s, 0.0))); // [1,1]
}
@@ -227,8 +227,14 @@ fn test_pauli_xy_equals_iz() {
// X * Y (2x2 matrix multiply)
let xy = [
- [x[0][0] * y[0][0] + x[0][1] * y[1][0], x[0][0] * y[0][1] + x[0][1] * y[1][1]],
- [x[1][0] * y[0][0] + x[1][1] * y[1][0], x[1][0] * y[0][1] + x[1][1] * y[1][1]],
+ [
+ x[0][0] * y[0][0] + x[0][1] * y[1][0],
+ x[0][0] * y[0][1] + x[0][1] * y[1][1],
+ ],
+ [
+ x[1][0] * y[0][0] + x[1][1] * y[1][0],
+ x[1][0] * y[0][1] + x[1][1] * y[1][1],
+ ],
];
// i * Z
let iz = [
@@ -240,7 +246,14 @@ fn test_pauli_xy_equals_iz() {
assert!(
complex_approx_eq(&xy[i][j], &iz[i][j]),
"XY[{},{}] = ({}, {}), iZ[{},{}] = ({}, {})",
- i, j, xy[i][j].re, xy[i][j].im, i, j, iz[i][j].re, iz[i][j].im
+ i,
+ j,
+ xy[i][j].re,
+ xy[i][j].im,
+ i,
+ j,
+ iz[i][j].re,
+ iz[i][j].im
);
}
}
@@ -271,15 +284,24 @@ fn test_s_squared_is_z() {
let s = Gate::S(0).matrix_1q().unwrap();
let z = Gate::Z(0).matrix_1q().unwrap();
let s2 = [
- [s[0][0] * s[0][0] + s[0][1] * s[1][0], s[0][0] * s[0][1] + s[0][1] * s[1][1]],
- [s[1][0] * s[0][0] + s[1][1] * s[1][0], s[1][0] * s[0][1] + s[1][1] * s[1][1]],
+ [
+ s[0][0] * s[0][0] + s[0][1] * s[1][0],
+ s[0][0] * s[0][1] + s[0][1] * s[1][1],
+ ],
+ [
+ s[1][0] * s[0][0] + s[1][1] * s[1][0],
+ s[1][0] * s[0][1] + s[1][1] * s[1][1],
+ ],
];
for i in 0..2 {
for j in 0..2 {
assert!(
complex_approx_eq(&s2[i][j], &z[i][j]),
"S^2[{},{}] != Z[{},{}]",
- i, j, i, j
+ i,
+ j,
+ i,
+ j
);
}
}
@@ -311,15 +333,24 @@ fn test_t_squared_is_s() {
let t = Gate::T(0).matrix_1q().unwrap();
let s = Gate::S(0).matrix_1q().unwrap();
let t2 = [
- [t[0][0] * t[0][0] + t[0][1] * t[1][0], t[0][0] * t[0][1] + t[0][1] * t[1][1]],
- [t[1][0] * t[0][0] + t[1][1] * t[1][0], t[1][0] * t[0][1] + t[1][1] * t[1][1]],
+ [
+ t[0][0] * t[0][0] + t[0][1] * t[1][0],
+ t[0][0] * t[0][1] + t[0][1] * t[1][1],
+ ],
+ [
+ t[1][0] * t[0][0] + t[1][1] * t[1][0],
+ t[1][0] * t[0][1] + t[1][1] * t[1][1],
+ ],
];
for i in 0..2 {
for j in 0..2 {
assert!(
complex_approx_eq(&t2[i][j], &s[i][j]),
"T^2[{},{}] != S[{},{}]",
- i, j, i, j
+ i,
+ j,
+ i,
+ j
);
}
}
@@ -404,7 +435,15 @@ fn test_rz_unitarity() {
#[test]
fn test_rotation_gates_various_angles_unitary() {
- let angles = [0.0, 0.1, 0.5, 1.0, std::f64::consts::PI, 2.0 * std::f64::consts::PI, -0.7];
+ let angles = [
+ 0.0,
+ 0.1,
+ 0.5,
+ 1.0,
+ std::f64::consts::PI,
+ 2.0 * std::f64::consts::PI,
+ -0.7,
+ ];
for &theta in &angles {
let rx = Gate::Rx(0, theta).matrix_1q().unwrap();
assert_unitary_2x2(&rx);
@@ -440,9 +479,12 @@ fn test_cnot_matrix() {
assert!(
complex_approx_eq(&m[i][j], &expected[i][j]),
"CNOT matrix[{}][{}]: got ({}, {}), expected ({}, {})",
- i, j,
- m[i][j].re, m[i][j].im,
- expected[i][j].re, expected[i][j].im
+ i,
+ j,
+ m[i][j].re,
+ m[i][j].im,
+ expected[i][j].re,
+ expected[i][j].im
);
}
}
@@ -468,9 +510,12 @@ fn test_cnot_is_self_inverse() {
assert!(
complex_approx_eq(&sum, &expected),
"CNOT^2 [{},{}] = ({}, {}), expected ({}, {})",
- i, j,
- sum.re, sum.im,
- expected.re, expected.im
+ i,
+ j,
+ sum.re,
+ sum.im,
+ expected.re,
+ expected.im
);
}
}
@@ -501,7 +546,8 @@ fn test_cz_matrix() {
assert!(
complex_approx_eq(&m[i][j], &expected),
"CZ[{},{}] mismatch",
- i, j
+ i,
+ j
);
}
}
@@ -523,7 +569,8 @@ fn test_cz_is_symmetric() {
assert!(
complex_approx_eq(&m01[i][j], &m10[i][j]),
"CZ symmetry mismatch at [{},{}]",
- i, j
+ i,
+ j
);
}
}
@@ -552,7 +599,8 @@ fn test_swap_matrix() {
assert!(
complex_approx_eq(&m[i][j], &expected[i][j]),
"SWAP matrix[{}][{}] mismatch",
- i, j
+ i,
+ j
);
}
}
@@ -578,7 +626,8 @@ fn test_swap_is_self_inverse() {
assert!(
complex_approx_eq(&sum, &expected),
"SWAP^2 [{},{}] mismatch",
- i, j
+ i,
+ j
);
}
}
diff --git a/crates/ruqu-core/tests/test_state.rs b/crates/ruqu-core/tests/test_state.rs
index 8e1987461..911b9a1fa 100644
--- a/crates/ruqu-core/tests/test_state.rs
+++ b/crates/ruqu-core/tests/test_state.rs
@@ -282,7 +282,12 @@ fn test_ghz_state() {
assert!(approx_eq(probs[0], 0.5)); // |000>
assert!(approx_eq(probs[7], 0.5)); // |111>
for i in 1..7 {
- assert!(approx_eq(probs[i], 0.0), "probs[{}] = {} should be 0", i, probs[i]);
+ assert!(
+ approx_eq(probs[i], 0.0),
+ "probs[{}] = {} should be 0",
+ i,
+ probs[i]
+ );
}
}
@@ -294,7 +299,7 @@ fn test_ghz_4_qubits() {
state.apply_gate(&Gate::CNOT(1, 2)).unwrap();
state.apply_gate(&Gate::CNOT(2, 3)).unwrap();
let probs = state.probabilities();
- assert!(approx_eq(probs[0], 0.5)); // |0000>
+ assert!(approx_eq(probs[0], 0.5)); // |0000>
assert!(approx_eq(probs[15], 0.5)); // |1111>
for i in 1..15 {
assert!(approx_eq(probs[i], 0.0));
@@ -355,7 +360,9 @@ fn test_rotation_identity() {
fn test_rx_pi_is_x() {
// Rx(pi)|0> = -i|1> (probability of |1> should be 1)
let mut state = QuantumState::new(1).unwrap();
- state.apply_gate(&Gate::Rx(0, std::f64::consts::PI)).unwrap();
+ state
+ .apply_gate(&Gate::Rx(0, std::f64::consts::PI))
+ .unwrap();
assert!(approx_eq(state.probabilities()[0], 0.0));
assert!(approx_eq(state.probabilities()[1], 1.0));
}
@@ -364,7 +371,9 @@ fn test_rx_pi_is_x() {
fn test_ry_pi_flips() {
// Ry(pi)|0> = |1>
let mut state = QuantumState::new(1).unwrap();
- state.apply_gate(&Gate::Ry(0, std::f64::consts::PI)).unwrap();
+ state
+ .apply_gate(&Gate::Ry(0, std::f64::consts::PI))
+ .unwrap();
assert!(approx_eq(state.probabilities()[1], 1.0));
}
@@ -380,7 +389,9 @@ fn test_rz_preserves_probability() {
fn test_rx_half_pi_creates_superposition() {
// Rx(pi/2)|0> should give 50-50 superposition
let mut state = QuantumState::new(1).unwrap();
- state.apply_gate(&Gate::Rx(0, std::f64::consts::FRAC_PI_2)).unwrap();
+ state
+ .apply_gate(&Gate::Rx(0, std::f64::consts::FRAC_PI_2))
+ .unwrap();
let probs = state.probabilities();
assert!(approx_eq(probs[0], 0.5));
assert!(approx_eq(probs[1], 0.5));
@@ -389,7 +400,9 @@ fn test_rx_half_pi_creates_superposition() {
#[test]
fn test_ry_half_pi_creates_superposition() {
let mut state = QuantumState::new(1).unwrap();
- state.apply_gate(&Gate::Ry(0, std::f64::consts::FRAC_PI_2)).unwrap();
+ state
+ .apply_gate(&Gate::Ry(0, std::f64::consts::FRAC_PI_2))
+ .unwrap();
let probs = state.probabilities();
assert!(approx_eq(probs[0], 0.5));
assert!(approx_eq(probs[1], 0.5));
@@ -408,7 +421,7 @@ fn test_cz_on_11() {
state.apply_gate(&Gate::CZ(0, 1)).unwrap();
let sv = state.state_vector();
assert!(approx_eq(sv[3].re, -1.0)); // -|11>
- // Probability unchanged
+ // Probability unchanged
assert!(approx_eq(state.probabilities()[3], 1.0));
}
@@ -800,7 +813,7 @@ fn test_fidelity_partial_overlap() {
let state0 = QuantumState::new(1).unwrap(); // |0>
let mut state_plus = QuantumState::new(1).unwrap();
state_plus.apply_gate(&Gate::H(0)).unwrap(); // |+>
- // |<0|+>|^2 = (1/sqrt(2))^2 = 0.5
+ // |<0|+>|^2 = (1/sqrt(2))^2 = 0.5
assert!(approx_eq(state0.fidelity(&state_plus), 0.5));
}
diff --git a/crates/ruqu-exotic/src/interference_search.rs b/crates/ruqu-exotic/src/interference_search.rs
index 29c4e33c4..153358fdb 100644
--- a/crates/ruqu-exotic/src/interference_search.rs
+++ b/crates/ruqu-exotic/src/interference_search.rs
@@ -60,11 +60,7 @@ impl ConceptSuperposition {
/// with zero phase.
pub fn uniform(concept_id: &str, meanings: Vec<(String, Vec)>) -> Self {
let n = meanings.len();
- let amp = if n > 0 {
- 1.0 / (n as f64).sqrt()
- } else {
- 0.0
- };
+ let amp = if n > 0 { 1.0 / (n as f64).sqrt() } else { 0.0 };
let meanings = meanings
.into_iter()
.map(|(label, embedding)| Meaning {
@@ -80,10 +76,7 @@ impl ConceptSuperposition {
}
/// Create a superposition with explicit complex amplitudes.
- pub fn with_amplitudes(
- concept_id: &str,
- meanings: Vec<(String, Vec, Complex)>,
- ) -> Self {
+ pub fn with_amplitudes(concept_id: &str, meanings: Vec<(String, Vec, Complex)>) -> Self {
let meanings = meanings
.into_iter()
.map(|(label, embedding, amplitude)| Meaning {
@@ -140,10 +133,7 @@ impl ConceptSuperposition {
let total: f64 = scores.iter().map(|s| s.probability).sum();
if total < 1e-15 {
// Degenerate case: return first meaning if available
- return scores
- .first()
- .map(|s| s.label.clone())
- .unwrap_or_default();
+ return scores.first().map(|s| s.label.clone()).unwrap_or_default();
}
let mut rng = StdRng::seed_from_u64(seed);
@@ -161,14 +151,12 @@ impl ConceptSuperposition {
/// Return the dominant meaning: the one with the largest |amplitude|^2
/// (before any context is applied).
pub fn dominant(&self) -> Option<&Meaning> {
- self.meanings
- .iter()
- .max_by(|a, b| {
- a.amplitude
- .norm_sq()
- .partial_cmp(&b.amplitude.norm_sq())
- .unwrap_or(std::cmp::Ordering::Equal)
- })
+ self.meanings.iter().max_by(|a, b| {
+ a.amplitude
+ .norm_sq()
+ .partial_cmp(&b.amplitude.norm_sq())
+ .unwrap_or(std::cmp::Ordering::Equal)
+ })
}
}
@@ -185,10 +173,7 @@ pub fn interference_search(
.map(|concept| {
let scores = concept.interfere(context);
let relevance: f64 = scores.iter().map(|s| s.probability).sum();
- let dominant_meaning = scores
- .first()
- .map(|s| s.label.clone())
- .unwrap_or_default();
+ let dominant_meaning = scores.first().map(|s| s.label.clone()).unwrap_or_default();
ConceptScore {
concept_id: concept.concept_id.clone(),
relevance,
diff --git a/crates/ruqu-exotic/src/lib.rs b/crates/ruqu-exotic/src/lib.rs
index a8c2d13d3..39f7a98ae 100644
--- a/crates/ruqu-exotic/src/lib.rs
+++ b/crates/ruqu-exotic/src/lib.rs
@@ -18,11 +18,11 @@
//! | [`reversible_memory`] | Time-reversible state for counterfactual debugging | Forward-only ML |
//! | [`reality_check`] | Browser-native quantum verification circuits | Trust-based claims |
-pub mod quantum_decay;
pub mod interference_search;
pub mod quantum_collapse;
+pub mod quantum_decay;
+pub mod reality_check;
pub mod reasoning_qec;
+pub mod reversible_memory;
pub mod swarm_interference;
pub mod syndrome_diagnosis;
-pub mod reversible_memory;
-pub mod reality_check;
diff --git a/crates/ruqu-exotic/src/quantum_collapse.rs b/crates/ruqu-exotic/src/quantum_collapse.rs
index c34ac3793..adf720b1d 100644
--- a/crates/ruqu-exotic/src/quantum_collapse.rs
+++ b/crates/ruqu-exotic/src/quantum_collapse.rs
@@ -273,11 +273,7 @@ mod tests {
#[test]
fn new_pads_to_power_of_two() {
// 3 candidates should pad to 4 (2 qubits)
- let search = QuantumCollapseSearch::new(vec![
- vec![1.0],
- vec![2.0],
- vec![3.0],
- ]);
+ let search = QuantumCollapseSearch::new(vec![vec![1.0], vec![2.0], vec![3.0]]);
assert_eq!(search.num_qubits, 2);
assert_eq!(search.candidates.len(), 4);
assert_eq!(search.num_real, 3);
@@ -345,9 +341,12 @@ mod tests {
// We just verify the distribution has variation.
let max_count = dist.iter().map(|&(_, c)| c).max().unwrap_or(0);
let min_count = dist.iter().map(|&(_, c)| c).min().unwrap_or(0);
- assert!(max_count > min_count,
+ assert!(
+ max_count > min_count,
"distribution should be non-uniform: max {} vs min {}",
- max_count, min_count);
+ max_count,
+ min_count
+ );
}
#[test]
@@ -364,11 +363,8 @@ mod tests {
#[test]
fn collapse_result_flags_padding() {
// 3 real candidates -> padded to 4
- let search = QuantumCollapseSearch::new(vec![
- vec![0.0, 1.0],
- vec![1.0, 0.0],
- vec![0.5, 0.5],
- ]);
+ let search =
+ QuantumCollapseSearch::new(vec![vec![0.0, 1.0], vec![1.0, 0.0], vec![0.5, 0.5]]);
// Run many shots; any hit on index 3 should have is_padding = true.
for seed in 0..50 {
diff --git a/crates/ruqu-exotic/src/reality_check.rs b/crates/ruqu-exotic/src/reality_check.rs
index 0a6798f41..6b98af6bc 100644
--- a/crates/ruqu-exotic/src/reality_check.rs
+++ b/crates/ruqu-exotic/src/reality_check.rs
@@ -18,15 +18,30 @@ use ruqu_core::state::QuantumState;
#[derive(Debug, Clone)]
pub enum ExpectedProperty {
/// P(qubit = 0) ≈ expected ± tolerance
- ProbabilityZero { qubit: u32, expected: f64, tolerance: f64 },
+ ProbabilityZero {
+ qubit: u32,
+ expected: f64,
+ tolerance: f64,
+ },
/// P(qubit = 1) ≈ expected ± tolerance
- ProbabilityOne { qubit: u32, expected: f64, tolerance: f64 },
+ ProbabilityOne {
+ qubit: u32,
+ expected: f64,
+ tolerance: f64,
+ },
/// Two qubits are entangled: P(same outcome) > min_correlation
- Entangled { qubit_a: u32, qubit_b: u32, min_correlation: f64 },
+ Entangled {
+ qubit_a: u32,
+ qubit_b: u32,
+ min_correlation: f64,
+ },
/// Qubit is in equal superposition: P(1) ≈ 0.5 ± tolerance
EqualSuperposition { qubit: u32, tolerance: f64 },
/// Full probability distribution matches ± tolerance
- InterferencePattern { probabilities: Vec, tolerance: f64 },
+ InterferencePattern {
+ probabilities: Vec,
+ tolerance: f64,
+ },
}
/// A quantum reality check: a named verification experiment.
@@ -62,7 +77,11 @@ where
let probs = state.probabilities();
match &check.expected {
- ExpectedProperty::ProbabilityZero { qubit, expected, tolerance } => {
+ ExpectedProperty::ProbabilityZero {
+ qubit,
+ expected,
+ tolerance,
+ } => {
let p0 = 1.0 - state.probability_of_qubit(*qubit);
let pass = (p0 - expected).abs() <= *tolerance;
Ok(CheckResult {
@@ -70,10 +89,17 @@ where
passed: pass,
measured_value: p0,
expected_value: *expected,
- detail: format!("P(q{}=0) = {:.6}, expected {:.6} +/- {:.6}", qubit, p0, expected, tolerance),
+ detail: format!(
+ "P(q{}=0) = {:.6}, expected {:.6} +/- {:.6}",
+ qubit, p0, expected, tolerance
+ ),
})
}
- ExpectedProperty::ProbabilityOne { qubit, expected, tolerance } => {
+ ExpectedProperty::ProbabilityOne {
+ qubit,
+ expected,
+ tolerance,
+ } => {
let p1 = state.probability_of_qubit(*qubit);
let pass = (p1 - expected).abs() <= *tolerance;
Ok(CheckResult {
@@ -81,10 +107,17 @@ where
passed: pass,
measured_value: p1,
expected_value: *expected,
- detail: format!("P(q{}=1) = {:.6}, expected {:.6} +/- {:.6}", qubit, p1, expected, tolerance),
+ detail: format!(
+ "P(q{}=1) = {:.6}, expected {:.6} +/- {:.6}",
+ qubit, p1, expected, tolerance
+ ),
})
}
- ExpectedProperty::Entangled { qubit_a, qubit_b, min_correlation } => {
+ ExpectedProperty::Entangled {
+ qubit_a,
+ qubit_b,
+ min_correlation,
+ } => {
// Correlation = P(same outcome) = P(00) + P(11)
let bit_a = 1usize << qubit_a;
let bit_b = 1usize << qubit_b;
@@ -102,7 +135,10 @@ where
passed: pass,
measured_value: p_same,
expected_value: *min_correlation,
- detail: format!("P(q{}==q{}) = {:.6}, min {:.6}", qubit_a, qubit_b, p_same, min_correlation),
+ detail: format!(
+ "P(q{}==q{}) = {:.6}, min {:.6}",
+ qubit_a, qubit_b, p_same, min_correlation
+ ),
})
}
ExpectedProperty::EqualSuperposition { qubit, tolerance } => {
@@ -113,10 +149,16 @@ where
passed: pass,
measured_value: p1,
expected_value: 0.5,
- detail: format!("P(q{}=1) = {:.6}, expected 0.5 +/- {:.6}", qubit, p1, tolerance),
+ detail: format!(
+ "P(q{}=1) = {:.6}, expected 0.5 +/- {:.6}",
+ qubit, p1, tolerance
+ ),
})
}
- ExpectedProperty::InterferencePattern { probabilities: expected_probs, tolerance } => {
+ ExpectedProperty::InterferencePattern {
+ probabilities: expected_probs,
+ tolerance,
+ } => {
let max_diff: f64 = probs
.iter()
.zip(expected_probs.iter())
@@ -128,7 +170,10 @@ where
passed: pass,
measured_value: max_diff,
expected_value: 0.0,
- detail: format!("max |p_measured - p_expected| = {:.6}, tolerance {:.6}", max_diff, tolerance),
+ detail: format!(
+ "max |p_measured - p_expected| = {:.6}, tolerance {:.6}",
+ max_diff, tolerance
+ ),
})
}
}
@@ -144,7 +189,10 @@ pub fn check_superposition() -> CheckResult {
name: "Superposition".into(),
description: "H|0> produces equal superposition".into(),
num_qubits: 1,
- expected: ExpectedProperty::EqualSuperposition { qubit: 0, tolerance: 1e-10 },
+ expected: ExpectedProperty::EqualSuperposition {
+ qubit: 0,
+ tolerance: 1e-10,
+ },
};
run_check(&check, |state| {
state.apply_gate(&Gate::H(0))?;
@@ -159,7 +207,11 @@ pub fn check_entanglement() -> CheckResult {
name: "Entanglement".into(),
description: "Bell state has perfectly correlated measurements".into(),
num_qubits: 2,
- expected: ExpectedProperty::Entangled { qubit_a: 0, qubit_b: 1, min_correlation: 0.99 },
+ expected: ExpectedProperty::Entangled {
+ qubit_a: 0,
+ qubit_b: 1,
+ min_correlation: 0.99,
+ },
};
run_check(&check, |state| {
state.apply_gate(&Gate::H(0))?;
@@ -176,7 +228,11 @@ pub fn check_interference() -> CheckResult {
name: "Interference".into(),
description: "H-Z-H = X: destructive interference eliminates |0>".into(),
num_qubits: 1,
- expected: ExpectedProperty::ProbabilityOne { qubit: 0, expected: 1.0, tolerance: 1e-10 },
+ expected: ExpectedProperty::ProbabilityOne {
+ qubit: 0,
+ expected: 1.0,
+ tolerance: 1e-10,
+ },
};
run_check(&check, |state| {
state.apply_gate(&Gate::H(0))?;
@@ -194,7 +250,11 @@ pub fn check_phase_kickback() -> CheckResult {
name: "Phase Kickback".into(),
description: "Deutsch oracle for f(x)=x: phase kickback produces |1> on query qubit".into(),
num_qubits: 2,
- expected: ExpectedProperty::ProbabilityOne { qubit: 0, expected: 1.0, tolerance: 1e-10 },
+ expected: ExpectedProperty::ProbabilityOne {
+ qubit: 0,
+ expected: 1.0,
+ tolerance: 1e-10,
+ },
};
run_check(&check, |state| {
// Prepare |01⟩
@@ -220,7 +280,8 @@ pub fn check_phase_kickback() -> CheckResult {
pub fn check_no_cloning() -> CheckResult {
let check = RealityCheck {
name: "No-Cloning".into(),
- description: "CNOT cannot independently copy a superposition (produces entanglement instead)".into(),
+ description:
+ "CNOT cannot independently copy a superposition (produces entanglement instead)".into(),
num_qubits: 2,
expected: ExpectedProperty::InterferencePattern {
// Bell state: P(00) = 0.5, P(01) = 0, P(10) = 0, P(11) = 0.5
diff --git a/crates/ruqu-exotic/src/reasoning_qec.rs b/crates/ruqu-exotic/src/reasoning_qec.rs
index d6acef468..91d1d11aa 100644
--- a/crates/ruqu-exotic/src/reasoning_qec.rs
+++ b/crates/ruqu-exotic/src/reasoning_qec.rs
@@ -156,10 +156,7 @@ impl ReasoningTrace {
/// Decode syndrome and attempt correction.
/// Simple decoder: if syndrome\[i\] fires, flip step i+1 (rightmost error assumption).
- pub fn decode_and_correct(
- &mut self,
- syndrome: &[bool],
- ) -> Result, QuantumError> {
+ pub fn decode_and_correct(&mut self, syndrome: &[bool]) -> Result, QuantumError> {
let mut corrected = Vec::new();
// Simple decoder: for each fired syndrome, the error is likely
// between the two data qubits. Correct the right one.
@@ -177,8 +174,7 @@ impl ReasoningTrace {
pub fn run_qec(&mut self) -> Result {
// Save state before noise for fidelity comparison
let clean_sv: Vec = self.state.state_vector().to_vec();
- let clean_state =
- QuantumState::from_amplitudes(clean_sv, self.state.num_qubits())?;
+ let clean_state = QuantumState::from_amplitudes(clean_sv, self.state.num_qubits())?;
// Inject noise
self.inject_noise()?;
diff --git a/crates/ruqu-exotic/src/reversible_memory.rs b/crates/ruqu-exotic/src/reversible_memory.rs
index 1bdd29e07..51e6a6a8e 100644
--- a/crates/ruqu-exotic/src/reversible_memory.rs
+++ b/crates/ruqu-exotic/src/reversible_memory.rs
@@ -56,11 +56,9 @@ pub fn inverse_gate(gate: &Gate) -> Result {
}
// Non-unitary: cannot invert
- Gate::Measure(_) | Gate::Reset(_) | Gate::Barrier => Err(
- QuantumError::CircuitError(
- "cannot invert non-unitary gate (Measure/Reset/Barrier)".into(),
- ),
- ),
+ Gate::Measure(_) | Gate::Reset(_) | Gate::Barrier => Err(QuantumError::CircuitError(
+ "cannot invert non-unitary gate (Measure/Reset/Barrier)".into(),
+ )),
}
}
@@ -116,14 +114,24 @@ impl ReversibleMemory {
pub fn new(num_qubits: u32) -> Result {
let state = QuantumState::new(num_qubits)?;
let initial_amps = state.state_vector().to_vec();
- Ok(Self { state, history: Vec::new(), initial_amps, num_qubits })
+ Ok(Self {
+ state,
+ history: Vec::new(),
+ initial_amps,
+ num_qubits,
+ })
}
/// Create with a deterministic seed.
pub fn new_with_seed(num_qubits: u32, seed: u64) -> Result {
let state = QuantumState::new_with_seed(num_qubits, seed)?;
let initial_amps = state.state_vector().to_vec();
- Ok(Self { state, history: Vec::new(), initial_amps, num_qubits })
+ Ok(Self {
+ state,
+ history: Vec::new(),
+ initial_amps,
+ num_qubits,
+ })
}
/// Apply a gate and record it. Non-unitary gates are rejected.
@@ -238,7 +246,11 @@ impl ReversibleMemory {
.map(|(i, _)| i)
.unwrap_or(0);
- Ok(SensitivityResult { sensitivities, most_sensitive, least_sensitive })
+ Ok(SensitivityResult {
+ sensitivities,
+ most_sensitive,
+ least_sensitive,
+ })
}
/// Current state vector.
diff --git a/crates/ruqu-exotic/src/swarm_interference.rs b/crates/ruqu-exotic/src/swarm_interference.rs
index 08319f5b0..f790f28b9 100644
--- a/crates/ruqu-exotic/src/swarm_interference.rs
+++ b/crates/ruqu-exotic/src/swarm_interference.rs
@@ -194,20 +194,19 @@ impl SwarmInterference {
let noise = Complex::from_polar(noise_r, noise_theta);
let noisy_amp = *amp + noise;
- let entry = amplitude_map.entry(action.id.clone()).or_insert(Complex::ZERO);
+ let entry = amplitude_map
+ .entry(action.id.clone())
+ .or_insert(Complex::ZERO);
*entry = *entry + noisy_amp;
}
}
// Find winner for this trial.
- if let Some((winner_id, _)) = amplitude_map
- .iter()
- .max_by(|a, b| {
- a.1.norm_sq()
- .partial_cmp(&b.1.norm_sq())
- .unwrap_or(std::cmp::Ordering::Equal)
- })
- {
+ if let Some((winner_id, _)) = amplitude_map.iter().max_by(|a, b| {
+ a.1.norm_sq()
+ .partial_cmp(&b.1.norm_sq())
+ .unwrap_or(std::cmp::Ordering::Equal)
+ }) {
let entry = win_counts
.entry(winner_id.clone())
.or_insert_with(|| (action_map[winner_id].clone(), 0));
diff --git a/crates/ruqu-exotic/src/syndrome_diagnosis.rs b/crates/ruqu-exotic/src/syndrome_diagnosis.rs
index 04cd6c3eb..903864d16 100644
--- a/crates/ruqu-exotic/src/syndrome_diagnosis.rs
+++ b/crates/ruqu-exotic/src/syndrome_diagnosis.rs
@@ -185,9 +185,7 @@ impl SystemDiagnostics {
.components
.iter()
.enumerate()
- .filter(|(i, _)| {
- syndrome_counts[*i] > fault_counts[*i] + config.num_rounds / 4
- })
+ .filter(|(i, _)| syndrome_counts[*i] > fault_counts[*i] + config.num_rounds / 4)
.map(|(_, c)| c.id.clone())
.collect();
diff --git a/crates/ruqu-exotic/tests/test_discovery_cross.rs b/crates/ruqu-exotic/tests/test_discovery_cross.rs
index b09704df2..5cfd1e06b 100644
--- a/crates/ruqu-exotic/tests/test_discovery_cross.rs
+++ b/crates/ruqu-exotic/tests/test_discovery_cross.rs
@@ -35,7 +35,9 @@ use ruqu_exotic::syndrome_diagnosis::{Component, Connection, DiagnosisConfig, Sy
fn discovery_7_counterfactual_search_explanation() {
println!("DISCOVERY 7: Counterfactual Search Explanation");
println!(" Combining: quantum_collapse + reversible_memory");
- println!(" Question: Can counterfactual analysis explain WHY a search returned a specific result?");
+ println!(
+ " Question: Can counterfactual analysis explain WHY a search returned a specific result?"
+ );
println!();
// -----------------------------------------------------------------------
@@ -93,7 +95,12 @@ fn discovery_7_counterfactual_search_explanation() {
let dist = search.search_distribution(&query, 2, 200, 42);
println!(" Search distribution (200 shots):");
for &(idx, count) in &dist {
- println!(" index {} : {} hits ({:.1}%)", idx, count, count as f64 / 2.0);
+ println!(
+ " index {} : {} hits ({:.1}%)",
+ idx,
+ count,
+ count as f64 / 2.0
+ );
}
println!();
@@ -121,12 +128,20 @@ fn discovery_7_counterfactual_search_explanation() {
println!(" Gate {} removed:", step);
println!(" Divergence: {:.6}", cf.divergence);
- println!(" Counterfactual probs: {:?}",
- cf.counterfactual_probs.iter().map(|p| format!("{:.4}", p)).collect::>()
+ println!(
+ " Counterfactual probs: {:?}",
+ cf.counterfactual_probs
+ .iter()
+ .map(|p| format!("{:.4}", p))
+ .collect::>()
);
println!(" New search result: index={}", cf_result.index);
- println!(" New distribution: {:?}",
- cf_dist.iter().map(|&(i, c)| format!("idx{}:{}hits", i, c)).collect::>()
+ println!(
+ " New distribution: {:?}",
+ cf_dist
+ .iter()
+ .map(|&(i, c)| format!("idx{}:{}hits", i, c))
+ .collect::>()
);
divergences.push(cf.divergence);
@@ -155,8 +170,14 @@ fn discovery_7_counterfactual_search_explanation() {
.unwrap();
println!(" RESULTS:");
- println!(" Most impactful gate: step {} (divergence={:.6})", max_div_step, divergences[max_div_step]);
- println!(" Least impactful gate: step {} (divergence={:.6})", min_div_step, divergences[min_div_step]);
+ println!(
+ " Most impactful gate: step {} (divergence={:.6})",
+ max_div_step, divergences[max_div_step]
+ );
+ println!(
+ " Least impactful gate: step {} (divergence={:.6})",
+ min_div_step, divergences[min_div_step]
+ );
// The large Ry rotation (step 0) should have the highest divergence.
assert_eq!(
@@ -177,7 +198,8 @@ fn discovery_7_counterfactual_search_explanation() {
assert!(
divergences[max_div_step] > divergences[min_div_step] + 1e-6,
"DISCOVERY 7: Max divergence ({:.6}) should significantly exceed min divergence ({:.6})",
- divergences[max_div_step], divergences[min_div_step]
+ divergences[max_div_step],
+ divergences[min_div_step]
);
println!();
@@ -237,11 +259,19 @@ fn discovery_8_syndrome_diagnosed_swarm_health() {
let mut swarm = SwarmInterference::new();
for &(name, confidence, support) in &agent_configs {
- swarm.contribute(AgentContribution::new(name, deploy.clone(), confidence, support));
+ swarm.contribute(AgentContribution::new(
+ name,
+ deploy.clone(),
+ confidence,
+ support,
+ ));
}
let decisions = swarm.decide();
- assert!(!decisions.is_empty(), "Swarm should produce at least one decision");
+ assert!(
+ !decisions.is_empty(),
+ "Swarm should produce at least one decision"
+ );
let decision = &decisions[0];
println!(" Swarm Decision:");
@@ -358,9 +388,18 @@ fn discovery_8_syndrome_diagnosed_swarm_health() {
};
println!(" ANALYSIS:");
- println!(" Disruptor (agent_4) fragility: {:.4}", disruptor_fragility);
- println!(" Neighbor (agent_3) fragility: {:.4}", neighbor_fragility);
- println!(" Healthy agents avg fragility: {:.4}", healthy_avg_fragility);
+ println!(
+ " Disruptor (agent_4) fragility: {:.4}",
+ disruptor_fragility
+ );
+ println!(
+ " Neighbor (agent_3) fragility: {:.4}",
+ neighbor_fragility
+ );
+ println!(
+ " Healthy agents avg fragility: {:.4}",
+ healthy_avg_fragility
+ );
println!(" Most fragile component: {:?}", most_fragile);
println!();
@@ -417,7 +456,8 @@ fn discovery_8_syndrome_diagnosed_swarm_health() {
diagnosis.weakest_component
);
println!(" The fault injection randomness may have overwhelmed the health signal.");
- println!(" But disruptor/neighbor fragility ({:.4}/{:.4}) still >= healthy avg ({:.4}).",
+ println!(
+ " But disruptor/neighbor fragility ({:.4}/{:.4}) still >= healthy avg ({:.4}).",
disruptor_fragility, neighbor_fragility, healthy_avg_fragility
);
}
diff --git a/crates/ruqu-exotic/tests/test_discovery_phase2.rs b/crates/ruqu-exotic/tests/test_discovery_phase2.rs
index fd6fd48fd..817e84c56 100644
--- a/crates/ruqu-exotic/tests/test_discovery_phase2.rs
+++ b/crates/ruqu-exotic/tests/test_discovery_phase2.rs
@@ -6,8 +6,8 @@
//! DISCOVERY 5: Time-Dependent Disambiguation (quantum_decay + interference_search)
//! DISCOVERY 6: QEC on Swarm Reasoning Chain (reasoning_qec + swarm_interference)
-use ruqu_exotic::quantum_decay::QuantumEmbedding;
use ruqu_exotic::interference_search::ConceptSuperposition;
+use ruqu_exotic::quantum_decay::QuantumEmbedding;
use ruqu_exotic::reasoning_qec::{ReasoningQecConfig, ReasoningStep, ReasoningTrace};
use ruqu_exotic::swarm_interference::{Action, AgentContribution, SwarmInterference};
@@ -75,10 +75,7 @@ fn discovery_5_time_dependent_disambiguation() {
// at each time step, seeing whatever structure remains.
let concept = ConceptSuperposition::uniform(
"bank",
- vec![
- ("financial".into(), fin_vec),
- ("river".into(), riv_vec),
- ],
+ vec![("financial".into(), fin_vec), ("river".into(), riv_vec)],
);
// Run interference with the context to see which meaning wins.
@@ -162,14 +159,8 @@ fn discovery_5_time_dependent_disambiguation() {
let initial_gap = (first_fin - first_riv).abs();
let final_gap = (last_fin - last_riv).abs();
- println!(
- "DISCOVERY 5: Initial probability gap: {:.6}",
- initial_gap
- );
- println!(
- "DISCOVERY 5: Final probability gap: {:.6}",
- final_gap
- );
+ println!("DISCOVERY 5: Initial probability gap: {:.6}", initial_gap);
+ println!("DISCOVERY 5: Final probability gap: {:.6}", final_gap);
println!(
"DISCOVERY 5: Gap change: {:.6}",
(initial_gap - final_gap).abs()
@@ -261,14 +252,8 @@ fn discovery_6_qec_on_swarm_reasoning_chain() {
println!("DISCOVERY 6: QEC on Swarm Reasoning Chain");
println!("DISCOVERY 6: ================================================");
- println!(
- "DISCOVERY 6: Agent confidences: {:?}",
- agent_confidences
- );
- println!(
- "DISCOVERY 6: Swarm decision probability: {:.4}",
- swarm_prob
- );
+ println!("DISCOVERY 6: Agent confidences: {:?}", agent_confidences);
+ println!("DISCOVERY 6: Swarm decision probability: {:.4}", swarm_prob);
println!("DISCOVERY 6: (Agent 2 is deliberately unreliable at 0.20)");
println!("DISCOVERY 6: ------------------------------------------------");
@@ -295,18 +280,9 @@ fn discovery_6_qec_on_swarm_reasoning_chain() {
let mut trace = ReasoningTrace::new(steps, config).unwrap();
let result = trace.run_qec().unwrap();
- println!(
- "DISCOVERY 6: Syndrome pattern: {:?}",
- result.syndrome
- );
- println!(
- "DISCOVERY 6: Error steps flagged: {:?}",
- result.error_steps
- );
- println!(
- "DISCOVERY 6: Is decodable: {}",
- result.is_decodable
- );
+ println!("DISCOVERY 6: Syndrome pattern: {:?}", result.syndrome);
+ println!("DISCOVERY 6: Error steps flagged: {:?}", result.error_steps);
+ println!("DISCOVERY 6: Is decodable: {}", result.is_decodable);
println!(
"DISCOVERY 6: Corrected fidelity: {:.6}",
result.corrected_fidelity
@@ -356,8 +332,7 @@ fn discovery_6_qec_on_swarm_reasoning_chain() {
seed: Some(42), // same seed for fair comparison
};
- let mut baseline_trace =
- ReasoningTrace::new(baseline_steps, baseline_config).unwrap();
+ let mut baseline_trace = ReasoningTrace::new(baseline_steps, baseline_config).unwrap();
let baseline_result = baseline_trace.run_qec().unwrap();
println!(
diff --git a/crates/ruqu-exotic/tests/test_discovery_pipeline.rs b/crates/ruqu-exotic/tests/test_discovery_pipeline.rs
index f0a8f3cc2..0580d46c0 100644
--- a/crates/ruqu-exotic/tests/test_discovery_pipeline.rs
+++ b/crates/ruqu-exotic/tests/test_discovery_pipeline.rs
@@ -3,10 +3,10 @@
//! These tests chain multiple ruqu-exotic modules together to discover
//! emergent behavior at module boundaries.
-use ruqu_exotic::quantum_decay::QuantumEmbedding;
+use ruqu_exotic::interference_search::{interference_search, ConceptSuperposition};
use ruqu_exotic::quantum_collapse::QuantumCollapseSearch;
-use ruqu_exotic::interference_search::{ConceptSuperposition, interference_search};
-use ruqu_exotic::reasoning_qec::{ReasoningStep, ReasoningQecConfig, ReasoningTrace};
+use ruqu_exotic::quantum_decay::QuantumEmbedding;
+use ruqu_exotic::reasoning_qec::{ReasoningQecConfig, ReasoningStep, ReasoningTrace};
// ---------------------------------------------------------------------------
// Helpers
@@ -24,7 +24,11 @@ fn cosine_sim(a: &[f64], b: &[f64]) -> f64 {
nb += b[i] * b[i];
}
let denom = na.sqrt() * nb.sqrt();
- if denom < 1e-15 { 0.0 } else { dot / denom }
+ if denom < 1e-15 {
+ 0.0
+ } else {
+ dot / denom
+ }
}
/// Total-variation distance between two discrete distributions represented as
@@ -49,7 +53,11 @@ fn distribution_divergence(
pb[idx] = cnt as f64 / total_b as f64;
}
}
- pa.iter().zip(pb.iter()).map(|(a, b)| (a - b).abs()).sum::() * 0.5
+ pa.iter()
+ .zip(pb.iter())
+ .map(|(a, b)| (a - b).abs())
+ .sum::()
+ * 0.5
}
/// Shannon entropy of a distribution (in nats). Higher = more uniform/diverse.
@@ -90,14 +98,14 @@ fn top_k_indices(dist: &[(usize, usize)], k: usize) -> Vec {
fn test_discovery_9_decoherence_as_differential_privacy() {
// --- Setup: 8 candidate embeddings in 4D ---
let raw_candidates: Vec> = vec![
- vec![1.0, 0.0, 0.0, 0.0], // 0: strongly aligned with query
- vec![0.8, 0.2, 0.0, 0.0], // 1: mostly aligned
- vec![0.5, 0.5, 0.0, 0.0], // 2: partially aligned
- vec![0.0, 1.0, 0.0, 0.0], // 3: orthogonal
- vec![0.0, 0.0, 1.0, 0.0], // 4: orthogonal in another axis
- vec![0.0, 0.0, 0.0, 1.0], // 5: orthogonal in yet another
- vec![-0.5, 0.5, 0.0, 0.0], // 6: partially opposed
- vec![-1.0, 0.0, 0.0, 0.0], // 7: fully opposed
+ vec![1.0, 0.0, 0.0, 0.0], // 0: strongly aligned with query
+ vec![0.8, 0.2, 0.0, 0.0], // 1: mostly aligned
+ vec![0.5, 0.5, 0.0, 0.0], // 2: partially aligned
+ vec![0.0, 1.0, 0.0, 0.0], // 3: orthogonal
+ vec![0.0, 0.0, 1.0, 0.0], // 4: orthogonal in another axis
+ vec![0.0, 0.0, 0.0, 1.0], // 5: orthogonal in yet another
+ vec![-0.5, 0.5, 0.0, 0.0], // 6: partially opposed
+ vec![-1.0, 0.0, 0.0, 0.0], // 7: fully opposed
];
let query = vec![1.0, 0.0, 0.0, 0.0];
@@ -117,7 +125,9 @@ fn test_discovery_9_decoherence_as_differential_privacy() {
for &(idx, cnt) in fresh_dist.iter().take(5) {
println!(
" candidate {}: {} / {} shots ({:.1}%)",
- idx, cnt, num_shots,
+ idx,
+ cnt,
+ num_shots,
cnt as f64 / num_shots as f64 * 100.0
);
}
@@ -156,8 +166,7 @@ fn test_discovery_9_decoherence_as_differential_privacy() {
// Run collapse search on decohered candidates.
let dec_search = QuantumCollapseSearch::new(decohered_candidates);
- let dec_dist =
- dec_search.search_distribution(&query, iterations, num_shots, base_seed);
+ let dec_dist = dec_search.search_distribution(&query, iterations, num_shots, base_seed);
let dec_top2 = top_k_indices(&dec_dist, 2);
let dec_entropy = distribution_entropy(&dec_dist, num_shots);
@@ -167,13 +176,20 @@ fn test_discovery_9_decoherence_as_differential_privacy() {
println!("Noise rate {:.2}:", noise);
println!(" Avg fidelity: {:.4}", avg_fidelity);
- println!(" Top-2 indices: {:?} (fresh was {:?})", dec_top2, fresh_top2);
- println!(" Entropy: {:.4} (fresh was {:.4})", dec_entropy, fresh_entropy);
+ println!(
+ " Top-2 indices: {:?} (fresh was {:?})",
+ dec_top2, fresh_top2
+ );
+ println!(
+ " Entropy: {:.4} (fresh was {:.4})",
+ dec_entropy, fresh_entropy
+ );
println!(" Distribution divergence from fresh: {:.4}", div);
for &(idx, cnt) in dec_dist.iter().take(5) {
println!(
" candidate {}: {} shots ({:.1}%)",
- idx, cnt,
+ idx,
+ cnt,
cnt as f64 / num_shots as f64 * 100.0
);
}
@@ -250,22 +266,34 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
// --- Knowledge base: concept embeddings in 4D ---
let concepts_raw: Vec<(&str, Vec<(String, Vec)>)> = vec![
- ("rust", vec![
- ("systems".into(), vec![1.0, 0.0, 0.2, 0.0]),
- ("safety".into(), vec![0.8, 0.0, 0.0, 0.3]),
- ]),
- ("python", vec![
- ("scripting".into(), vec![0.0, 1.0, 0.0, 0.2]),
- ("ml".into(), vec![0.0, 0.8, 0.3, 0.0]),
- ]),
- ("javascript", vec![
- ("web".into(), vec![0.0, 0.0, 1.0, 0.0]),
- ("frontend".into(), vec![0.0, 0.2, 0.8, 0.0]),
- ]),
- ("haskell", vec![
- ("functional".into(), vec![0.3, 0.0, 0.0, 1.0]),
- ("types".into(), vec![0.5, 0.0, 0.0, 0.7]),
- ]),
+ (
+ "rust",
+ vec![
+ ("systems".into(), vec![1.0, 0.0, 0.2, 0.0]),
+ ("safety".into(), vec![0.8, 0.0, 0.0, 0.3]),
+ ],
+ ),
+ (
+ "python",
+ vec![
+ ("scripting".into(), vec![0.0, 1.0, 0.0, 0.2]),
+ ("ml".into(), vec![0.0, 0.8, 0.3, 0.0]),
+ ],
+ ),
+ (
+ "javascript",
+ vec![
+ ("web".into(), vec![0.0, 0.0, 1.0, 0.0]),
+ ("frontend".into(), vec![0.0, 0.2, 0.8, 0.0]),
+ ],
+ ),
+ (
+ "haskell",
+ vec![
+ ("functional".into(), vec![0.3, 0.0, 0.0, 1.0]),
+ ("types".into(), vec![0.5, 0.0, 0.0, 0.7]),
+ ],
+ ),
];
let query_context = vec![0.9, 0.0, 0.1, 0.1]; // query about systems programming
@@ -275,8 +303,8 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
// reliably degrades with decoherence is FIDELITY -- we feed it directly into
// the QEC reasoning trace as the primary confidence metric.
let scenarios: Vec<(&str, f64, f64)> = vec![
- ("fresh", 0.01, 1.0), // (label, noise_rate, decoherence_dt)
- ("stale", 2.0, 15.0), // very heavy decoherence
+ ("fresh", 0.01, 1.0), // (label, noise_rate, decoherence_dt)
+ ("stale", 2.0, 15.0), // very heavy decoherence
];
struct PipelineOutcome {
@@ -293,7 +321,10 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
let mut outcomes: Vec = Vec::new();
for (label, noise_rate, dt) in &scenarios {
- println!("--- Pipeline run: {} (noise_rate={}, dt={}) ---\n", label, noise_rate, dt);
+ println!(
+ "--- Pipeline run: {} (noise_rate={}, dt={}) ---\n",
+ label, noise_rate, dt
+ );
// ===============================================================
// STEP 1: Decohere knowledge embeddings (quantum_decay)
@@ -324,9 +355,11 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
})
.collect();
- let avg_fidelity: f64 =
- fidelities.iter().sum::() / fidelities.len() as f64;
- println!(" Average fidelity across all meanings: {:.4}\n", avg_fidelity);
+ let avg_fidelity: f64 = fidelities.iter().sum::() / fidelities.len() as f64;
+ println!(
+ " Average fidelity across all meanings: {:.4}\n",
+ avg_fidelity
+ );
// ===============================================================
// STEP 2: Interference search to disambiguate query (interference_search)
@@ -366,8 +399,7 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
// STEP 3: Collapse search on interference-ranked results (quantum_collapse)
// ===============================================================
let collapse_search = QuantumCollapseSearch::new(collapse_candidates.clone());
- let collapse_dist =
- collapse_search.search_distribution(&query_context, 2, 200, 42);
+ let collapse_dist = collapse_search.search_distribution(&query_context, 2, 200, 42);
println!("\n [Step 3] Collapse search distribution:");
for &(idx, cnt) in &collapse_dist {
@@ -417,7 +449,11 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
},
ReasoningStep {
label: "interference_result".into(),
- confidence: concept_fidelities.get(0).copied().unwrap_or(0.5).clamp(0.05, 1.0),
+ confidence: concept_fidelities
+ .get(0)
+ .copied()
+ .unwrap_or(0.5)
+ .clamp(0.05, 1.0),
},
ReasoningStep {
label: "collapse_result".into(),
@@ -459,7 +495,10 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
println!(" Error steps: {:?}", qec_result.error_steps);
println!(" Syndromes fired: {}", syndrome_count);
println!(" Is decodable: {}", qec_result.is_decodable);
- println!(" Corrected fidelity: {:.4}", qec_result.corrected_fidelity);
+ println!(
+ " Corrected fidelity: {:.4}",
+ qec_result.corrected_fidelity
+ );
println!();
outcomes.push(PipelineOutcome {
@@ -481,9 +520,14 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
println!(
" {}: fidelity={:.4}, top_concept='{}' ({}), collapse_idx={}, \
QEC_syndromes={}, QEC_errors={:?}, decodable={}",
- o.label, o.avg_fidelity, o.top_concept, o.top_meaning,
- o.collapse_top_idx, o.qec_syndrome_count,
- o.qec_error_steps, o.qec_is_decodable
+ o.label,
+ o.avg_fidelity,
+ o.top_concept,
+ o.top_meaning,
+ o.collapse_top_idx,
+ o.qec_syndrome_count,
+ o.qec_error_steps,
+ o.qec_is_decodable
);
}
println!();
@@ -495,7 +539,8 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
assert!(
fresh.avg_fidelity > stale.avg_fidelity,
"Fresh pipeline should have higher fidelity than stale: {:.4} > {:.4}",
- fresh.avg_fidelity, stale.avg_fidelity
+ fresh.avg_fidelity,
+ stale.avg_fidelity
);
// 2) The fresh pipeline should produce a meaningful result with high fidelity.
@@ -517,7 +562,8 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
assert!(
stale.qec_syndrome_count >= fresh.qec_syndrome_count,
"Stale pipeline should trigger at least as many QEC syndromes as fresh: {} >= {}",
- stale.qec_syndrome_count, fresh.qec_syndrome_count
+ stale.qec_syndrome_count,
+ fresh.qec_syndrome_count
);
// 5) Both pipelines produce a result (the pipeline does not crash).
@@ -532,7 +578,6 @@ fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
Fresh knowledge (fidelity={:.4}) produces reliable results with {} QEC syndromes.\n\
Stale knowledge (fidelity={:.4}) still produces results but QEC fires {} syndromes,\n\
providing an automatic reliability signal that the knowledge base is corrupted.",
- fresh.avg_fidelity, fresh.qec_syndrome_count,
- stale.avg_fidelity, stale.qec_syndrome_count
+ fresh.avg_fidelity, fresh.qec_syndrome_count, stale.avg_fidelity, stale.qec_syndrome_count
);
}
diff --git a/crates/ruqu-exotic/tests/test_exotic.rs b/crates/ruqu-exotic/tests/test_exotic.rs
index c587bdce8..1373bd59d 100644
--- a/crates/ruqu-exotic/tests/test_exotic.rs
+++ b/crates/ruqu-exotic/tests/test_exotic.rs
@@ -17,14 +17,20 @@ use ruqu_exotic::quantum_decay::*;
#[test]
fn test_fresh_embedding_full_fidelity() {
let emb = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.5, 0.3], 0.1);
- assert!((emb.fidelity() - 1.0).abs() < EPSILON, "Fresh embedding must have fidelity 1.0");
+ assert!(
+ (emb.fidelity() - 1.0).abs() < EPSILON,
+ "Fresh embedding must have fidelity 1.0"
+ );
}
#[test]
fn test_decoherence_reduces_fidelity() {
let mut emb = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.5, 0.3], 0.1);
emb.decohere(10.0, 42);
- assert!(emb.fidelity() < 1.0 - EPSILON, "Decohered embedding fidelity must drop below 1.0");
+ assert!(
+ emb.fidelity() < 1.0 - EPSILON,
+ "Decohered embedding fidelity must drop below 1.0"
+ );
}
#[test]
@@ -36,7 +42,8 @@ fn test_more_decoherence_lower_fidelity() {
assert!(
emb_b.fidelity() < emb_a.fidelity(),
"More decoherence (dt=20) must produce lower fidelity than less (dt=1): {} vs {}",
- emb_b.fidelity(), emb_a.fidelity()
+ emb_b.fidelity(),
+ emb_a.fidelity()
);
}
@@ -60,7 +67,8 @@ fn test_similarity_decreases_with_decay() {
assert!(
sim_decayed < sim_fresh,
"Similarity must decrease after decoherence: {} -> {}",
- sim_fresh, sim_decayed
+ sim_fresh,
+ sim_decayed
);
}
@@ -83,7 +91,11 @@ fn test_roundtrip_embedding() {
let emb = QuantumEmbedding::from_embedding(&original, 0.1);
let recovered = emb.to_embedding();
// Recovered should be normalized version of original
- assert_eq!(recovered.len(), 4, "Recovered embedding should have original length");
+ assert_eq!(
+ recovered.len(),
+ 4,
+ "Recovered embedding should have original length"
+ );
}
// ===========================================================================
@@ -95,10 +107,13 @@ use ruqu_exotic::interference_search::*;
#[test]
fn test_constructive_interference() {
// "bank" has two meanings: financial and river
- let concept = ConceptSuperposition::uniform("bank", vec![
- ("financial".into(), vec![1.0, 0.0, 0.0]),
- ("river".into(), vec![0.0, 1.0, 0.0]),
- ]);
+ let concept = ConceptSuperposition::uniform(
+ "bank",
+ vec![
+ ("financial".into(), vec![1.0, 0.0, 0.0]),
+ ("river".into(), vec![0.0, 1.0, 0.0]),
+ ],
+ );
// Context about money → should boost financial meaning
let context = vec![0.9, 0.1, 0.0];
let scores = concept.interfere(&context);
@@ -107,17 +122,21 @@ fn test_constructive_interference() {
assert!(
financial.probability > river.probability,
"Financial context should boost financial meaning: {} > {}",
- financial.probability, river.probability
+ financial.probability,
+ river.probability
);
}
#[test]
fn test_destructive_interference_with_opposite_phases() {
// Two meanings with OPPOSITE phases but same embedding direction
- let concept = ConceptSuperposition::with_amplitudes("ambiguous", vec![
- ("positive".into(), vec![1.0, 0.0], Complex::new(1.0, 0.0)),
- ("negative".into(), vec![0.8, 0.2], Complex::new(-1.0, 0.0)),
- ]);
+ let concept = ConceptSuperposition::with_amplitudes(
+ "ambiguous",
+ vec![
+ ("positive".into(), vec![1.0, 0.0], Complex::new(1.0, 0.0)),
+ ("negative".into(), vec![0.8, 0.2], Complex::new(-1.0, 0.0)),
+ ],
+ );
// Context aligned with both embeddings
let context = vec![1.0, 0.0];
let scores = concept.interfere(&context);
@@ -128,43 +147,52 @@ fn test_destructive_interference_with_opposite_phases() {
#[test]
fn test_collapse_returns_valid_label() {
- let concept = ConceptSuperposition::uniform("test", vec![
- ("alpha".into(), vec![1.0, 0.0]),
- ("beta".into(), vec![0.0, 1.0]),
- ]);
+ let concept = ConceptSuperposition::uniform(
+ "test",
+ vec![
+ ("alpha".into(), vec![1.0, 0.0]),
+ ("beta".into(), vec![0.0, 1.0]),
+ ],
+ );
let context = vec![1.0, 0.0];
let label = concept.collapse(&context, 42);
assert!(
label == "alpha" || label == "beta",
- "Collapse must return a valid label, got: {}", label
+ "Collapse must return a valid label, got: {}",
+ label
);
}
#[test]
fn test_dominant_returns_highest() {
- let concept = ConceptSuperposition::with_amplitudes("test", vec![
- ("small".into(), vec![1.0], Complex::new(0.1, 0.0)),
- ("big".into(), vec![1.0], Complex::new(0.9, 0.0)),
- ]);
+ let concept = ConceptSuperposition::with_amplitudes(
+ "test",
+ vec![
+ ("small".into(), vec![1.0], Complex::new(0.1, 0.0)),
+ ("big".into(), vec![1.0], Complex::new(0.9, 0.0)),
+ ],
+ );
let dom = concept.dominant().unwrap();
- assert_eq!(dom.label, "big", "Dominant should be the highest amplitude meaning");
+ assert_eq!(
+ dom.label, "big",
+ "Dominant should be the highest amplitude meaning"
+ );
}
#[test]
fn test_interference_search_ranking() {
let concepts = vec![
- ConceptSuperposition::uniform("relevant", vec![
- ("match".into(), vec![1.0, 0.0, 0.0]),
- ]),
- ConceptSuperposition::uniform("irrelevant", vec![
- ("miss".into(), vec![0.0, 0.0, 1.0]),
- ]),
+ ConceptSuperposition::uniform("relevant", vec![("match".into(), vec![1.0, 0.0, 0.0])]),
+ ConceptSuperposition::uniform("irrelevant", vec![("miss".into(), vec![0.0, 0.0, 1.0])]),
];
let query = vec![1.0, 0.0, 0.0];
let results = interference_search(&concepts, &query);
assert!(!results.is_empty(), "Search should return results");
// First result should be the relevant concept
- assert_eq!(results[0].concept_id, "relevant", "Most relevant concept should rank first");
+ assert_eq!(
+ results[0].concept_id, "relevant",
+ "Most relevant concept should rank first"
+ );
}
// ===========================================================================
@@ -175,17 +203,14 @@ use ruqu_exotic::quantum_collapse::*;
#[test]
fn test_collapse_valid_index() {
- let candidates = vec![
- vec![1.0, 0.0],
- vec![0.0, 1.0],
- vec![0.5, 0.5],
- ];
+ let candidates = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![0.5, 0.5]];
let search = QuantumCollapseSearch::new(candidates);
let result = search.search(&[1.0, 0.0], 3, 42);
assert!(
result.index < search.num_real(),
"Collapse index {} should be < num_real {}",
- result.index, search.num_real()
+ result.index,
+ search.num_real()
);
}
@@ -203,7 +228,8 @@ fn test_distribution_stability() {
assert!(
top.1 > 30,
"Top candidate should appear in >15% of 200 shots, got {} at index {}",
- top.1, top.0
+ top.1,
+ top.0
);
}
@@ -229,14 +255,31 @@ use ruqu_exotic::reasoning_qec::*;
#[test]
fn test_no_noise_clean_syndrome() {
let steps = vec![
- ReasoningStep { label: "premise".into(), confidence: 1.0 },
- ReasoningStep { label: "inference".into(), confidence: 1.0 },
- ReasoningStep { label: "conclusion".into(), confidence: 1.0 },
+ ReasoningStep {
+ label: "premise".into(),
+ confidence: 1.0,
+ },
+ ReasoningStep {
+ label: "inference".into(),
+ confidence: 1.0,
+ },
+ ReasoningStep {
+ label: "conclusion".into(),
+ confidence: 1.0,
+ },
];
- let config = ReasoningQecConfig { num_steps: 3, noise_rate: 0.0, seed: Some(42) };
+ let config = ReasoningQecConfig {
+ num_steps: 3,
+ noise_rate: 0.0,
+ seed: Some(42),
+ };
let mut trace = ReasoningTrace::new(steps, config).unwrap();
let result = trace.run_qec().unwrap();
- assert_eq!(result.syndrome.len(), 2, "3 steps should produce 2 syndrome bits");
+ assert_eq!(
+ result.syndrome.len(),
+ 2,
+ "3 steps should produce 2 syndrome bits"
+ );
assert!(result.is_decodable, "Zero-noise trace must be decodable");
}
@@ -245,31 +288,64 @@ fn test_high_noise_triggers_syndrome() {
// Use noise_rate=0.5 with seed that flips some but not all steps.
// This creates non-uniform flips so adjacent steps disagree, triggering syndromes.
let steps = vec![
- ReasoningStep { label: "a".into(), confidence: 1.0 },
- ReasoningStep { label: "b".into(), confidence: 1.0 },
- ReasoningStep { label: "c".into(), confidence: 1.0 },
- ReasoningStep { label: "d".into(), confidence: 1.0 },
- ReasoningStep { label: "e".into(), confidence: 1.0 },
+ ReasoningStep {
+ label: "a".into(),
+ confidence: 1.0,
+ },
+ ReasoningStep {
+ label: "b".into(),
+ confidence: 1.0,
+ },
+ ReasoningStep {
+ label: "c".into(),
+ confidence: 1.0,
+ },
+ ReasoningStep {
+ label: "d".into(),
+ confidence: 1.0,
+ },
+ ReasoningStep {
+ label: "e".into(),
+ confidence: 1.0,
+ },
];
// With noise_rate=0.5, about half the steps get flipped, creating parity mismatches
- let config = ReasoningQecConfig { num_steps: 5, noise_rate: 0.5, seed: Some(42) };
+ let config = ReasoningQecConfig {
+ num_steps: 5,
+ noise_rate: 0.5,
+ seed: Some(42),
+ };
let mut trace = ReasoningTrace::new(steps, config).unwrap();
let result = trace.run_qec().unwrap();
- assert_eq!(result.syndrome.len(), 4, "5 steps should produce 4 syndrome bits");
+ assert_eq!(
+ result.syndrome.len(),
+ 4,
+ "5 steps should produce 4 syndrome bits"
+ );
assert_eq!(result.num_steps, 5);
}
#[test]
fn test_syndrome_length() {
let n = 6;
- let steps: Vec<_> = (0..n).map(|i| ReasoningStep {
- label: format!("step_{}", i),
- confidence: 0.9,
- }).collect();
- let config = ReasoningQecConfig { num_steps: n, noise_rate: 0.0, seed: Some(42) };
+ let steps: Vec<_> = (0..n)
+ .map(|i| ReasoningStep {
+ label: format!("step_{}", i),
+ confidence: 0.9,
+ })
+ .collect();
+ let config = ReasoningQecConfig {
+ num_steps: n,
+ noise_rate: 0.0,
+ seed: Some(42),
+ };
let mut trace = ReasoningTrace::new(steps, config).unwrap();
let result = trace.run_qec().unwrap();
- assert_eq!(result.syndrome.len(), n - 1, "N steps should give N-1 syndrome bits");
+ assert_eq!(
+ result.syndrome.len(),
+ n - 1,
+ "N steps should give N-1 syndrome bits"
+ );
}
// ===========================================================================
@@ -281,31 +357,49 @@ use ruqu_exotic::swarm_interference::*;
#[test]
fn test_unanimous_support() {
let mut swarm = SwarmInterference::new();
- let action = Action { id: "deploy".into(), description: "Deploy to prod".into() };
+ let action = Action {
+ id: "deploy".into(),
+ description: "Deploy to prod".into(),
+ };
for i in 0..5 {
swarm.contribute(AgentContribution::new(
- &format!("agent_{}", i), action.clone(), 1.0, true,
+ &format!("agent_{}", i),
+ action.clone(),
+ 1.0,
+ true,
));
}
let decisions = swarm.decide();
assert!(!decisions.is_empty());
// 5 agents at amplitude 1.0, phase 0: total amplitude = 5, prob = 25
- assert!(decisions[0].probability > 20.0, "Unanimous support: prob should be high");
+ assert!(
+ decisions[0].probability > 20.0,
+ "Unanimous support: prob should be high"
+ );
}
#[test]
fn test_opposition_cancels() {
let mut swarm = SwarmInterference::new();
- let action = Action { id: "risky".into(), description: "Risky action".into() };
+ let action = Action {
+ id: "risky".into(),
+ description: "Risky action".into(),
+ };
// 3 support, 3 oppose → should nearly cancel
for i in 0..3 {
swarm.contribute(AgentContribution::new(
- &format!("pro_{}", i), action.clone(), 1.0, true,
+ &format!("pro_{}", i),
+ action.clone(),
+ 1.0,
+ true,
));
}
for i in 0..3 {
swarm.contribute(AgentContribution::new(
- &format!("con_{}", i), action.clone(), 1.0, false,
+ &format!("con_{}", i),
+ action.clone(),
+ 1.0,
+ false,
));
}
let decisions = swarm.decide();
@@ -320,13 +414,19 @@ fn test_opposition_cancels() {
#[test]
fn test_partial_opposition_reduces() {
- let action = Action { id: "a".into(), description: "".into() };
+ let action = Action {
+ id: "a".into(),
+ description: "".into(),
+ };
// Pure support
let mut pure = SwarmInterference::new();
for i in 0..3 {
pure.contribute(AgentContribution::new(
- &format!("p{}", i), action.clone(), 1.0, true,
+ &format!("p{}", i),
+ action.clone(),
+ 1.0,
+ true,
));
}
let pure_prob = pure.decide()[0].probability;
@@ -335,7 +435,10 @@ fn test_partial_opposition_reduces() {
let mut mixed = SwarmInterference::new();
for i in 0..3 {
mixed.contribute(AgentContribution::new(
- &format!("p{}", i), action.clone(), 1.0, true,
+ &format!("p{}", i),
+ action.clone(),
+ 1.0,
+ true,
));
}
mixed.contribute(AgentContribution::new("opp", action.clone(), 1.0, false));
@@ -344,29 +447,50 @@ fn test_partial_opposition_reduces() {
assert!(
mixed_prob < pure_prob,
"Opposition should reduce probability: {} < {}",
- mixed_prob, pure_prob
+ mixed_prob,
+ pure_prob
);
}
#[test]
fn test_deadlock_detection() {
let mut swarm = SwarmInterference::new();
- let a = Action { id: "a".into(), description: "".into() };
- let b = Action { id: "b".into(), description: "".into() };
+ let a = Action {
+ id: "a".into(),
+ description: "".into(),
+ };
+ let b = Action {
+ id: "b".into(),
+ description: "".into(),
+ };
// Two different actions with identical support → deadlock
swarm.contribute(AgentContribution::new("pro_a", a.clone(), 1.0, true));
swarm.contribute(AgentContribution::new("pro_b", b.clone(), 1.0, true));
- assert!(swarm.is_deadlocked(0.01), "Equal support for two actions should deadlock");
+ assert!(
+ swarm.is_deadlocked(0.01),
+ "Equal support for two actions should deadlock"
+ );
}
#[test]
fn test_winner_picks_highest() {
let mut swarm = SwarmInterference::new();
- let a = Action { id: "a".into(), description: "".into() };
- let b = Action { id: "b".into(), description: "".into() };
+ let a = Action {
+ id: "a".into(),
+ description: "".into(),
+ };
+ let b = Action {
+ id: "b".into(),
+ description: "".into(),
+ };
// 3 agents support A, 1 supports B
for i in 0..3 {
- swarm.contribute(AgentContribution::new(&format!("a{}", i), a.clone(), 1.0, true));
+ swarm.contribute(AgentContribution::new(
+ &format!("a{}", i),
+ a.clone(),
+ 1.0,
+ true,
+ ));
}
swarm.contribute(AgentContribution::new("b0", b.clone(), 1.0, true));
let winner = swarm.winner().unwrap();
@@ -382,32 +506,70 @@ use ruqu_exotic::syndrome_diagnosis::*;
#[test]
fn test_healthy_system() {
let components = vec![
- Component { id: "A".into(), health: 1.0 },
- Component { id: "B".into(), health: 1.0 },
- Component { id: "C".into(), health: 1.0 },
+ Component {
+ id: "A".into(),
+ health: 1.0,
+ },
+ Component {
+ id: "B".into(),
+ health: 1.0,
+ },
+ Component {
+ id: "C".into(),
+ health: 1.0,
+ },
];
let connections = vec![
- Connection { from: 0, to: 1, strength: 1.0 },
- Connection { from: 1, to: 2, strength: 1.0 },
+ Connection {
+ from: 0,
+ to: 1,
+ strength: 1.0,
+ },
+ Connection {
+ from: 1,
+ to: 2,
+ strength: 1.0,
+ },
];
let diag = SystemDiagnostics::new(components, connections);
- let config = DiagnosisConfig { fault_injection_rate: 0.0, num_rounds: 10, seed: 42 };
+ let config = DiagnosisConfig {
+ fault_injection_rate: 0.0,
+ num_rounds: 10,
+ seed: 42,
+ };
let result = diag.diagnose(&config).unwrap();
// No faults injected → no syndromes should fire
for round in &result.rounds {
- assert!(round.injected_faults.is_empty(), "No faults should be injected at rate 0");
+ assert!(
+ round.injected_faults.is_empty(),
+ "No faults should be injected at rate 0"
+ );
}
}
#[test]
fn test_fault_injection_triggers() {
let components = vec![
- Component { id: "A".into(), health: 1.0 },
- Component { id: "B".into(), health: 1.0 },
+ Component {
+ id: "A".into(),
+ health: 1.0,
+ },
+ Component {
+ id: "B".into(),
+ health: 1.0,
+ },
];
- let connections = vec![Connection { from: 0, to: 1, strength: 1.0 }];
+ let connections = vec![Connection {
+ from: 0,
+ to: 1,
+ strength: 1.0,
+ }];
let diag = SystemDiagnostics::new(components, connections);
- let config = DiagnosisConfig { fault_injection_rate: 1.0, num_rounds: 10, seed: 42 };
+ let config = DiagnosisConfig {
+ fault_injection_rate: 1.0,
+ num_rounds: 10,
+ seed: 42,
+ };
let result = diag.diagnose(&config).unwrap();
let any_fault = result.rounds.iter().any(|r| !r.injected_faults.is_empty());
assert!(any_fault, "100% fault rate should inject faults");
@@ -416,12 +578,26 @@ fn test_fault_injection_triggers() {
#[test]
fn test_diagnosis_round_count() {
let components = vec![
- Component { id: "X".into(), health: 1.0 },
- Component { id: "Y".into(), health: 1.0 },
+ Component {
+ id: "X".into(),
+ health: 1.0,
+ },
+ Component {
+ id: "Y".into(),
+ health: 1.0,
+ },
];
- let connections = vec![Connection { from: 0, to: 1, strength: 1.0 }];
+ let connections = vec![Connection {
+ from: 0,
+ to: 1,
+ strength: 1.0,
+ }];
let diag = SystemDiagnostics::new(components, connections);
- let config = DiagnosisConfig { fault_injection_rate: 0.5, num_rounds: 20, seed: 99 };
+ let config = DiagnosisConfig {
+ fault_injection_rate: 0.5,
+ num_rounds: 20,
+ seed: 99,
+ };
let result = diag.diagnose(&config).unwrap();
assert_eq!(result.rounds.len(), 20, "Should have exactly 20 rounds");
}
@@ -429,19 +605,48 @@ fn test_diagnosis_round_count() {
#[test]
fn test_fragility_scores_produced() {
let components = vec![
- Component { id: "A".into(), health: 1.0 },
- Component { id: "B".into(), health: 1.0 },
- Component { id: "C".into(), health: 1.0 },
+ Component {
+ id: "A".into(),
+ health: 1.0,
+ },
+ Component {
+ id: "B".into(),
+ health: 1.0,
+ },
+ Component {
+ id: "C".into(),
+ health: 1.0,
+ },
];
let connections = vec![
- Connection { from: 0, to: 1, strength: 1.0 },
- Connection { from: 0, to: 2, strength: 1.0 },
- Connection { from: 1, to: 2, strength: 1.0 },
+ Connection {
+ from: 0,
+ to: 1,
+ strength: 1.0,
+ },
+ Connection {
+ from: 0,
+ to: 2,
+ strength: 1.0,
+ },
+ Connection {
+ from: 1,
+ to: 2,
+ strength: 1.0,
+ },
];
let diag = SystemDiagnostics::new(components, connections);
- let config = DiagnosisConfig { fault_injection_rate: 0.5, num_rounds: 50, seed: 42 };
+ let config = DiagnosisConfig {
+ fault_injection_rate: 0.5,
+ num_rounds: 50,
+ seed: 42,
+ };
let result = diag.diagnose(&config).unwrap();
- assert_eq!(result.fragility_scores.len(), 3, "Should have score per component");
+ assert_eq!(
+ result.fragility_scores.len(),
+ 3,
+ "Should have score per component"
+ );
}
// ===========================================================================
@@ -462,13 +667,17 @@ fn test_rewind_restores_state() {
mem.rewind(2).unwrap();
// Should be back to |00⟩
let restored = mem.probabilities();
- assert!((restored[0] - 1.0).abs() < EPSILON, "Rewind should restore |00>: {:?}", restored);
+ assert!(
+ (restored[0] - 1.0).abs() < EPSILON,
+ "Rewind should restore |00>: {:?}",
+ restored
+ );
}
#[test]
fn test_counterfactual_divergence() {
let mut mem = ReversibleMemory::new(2).unwrap();
- mem.apply(Gate::H(0)).unwrap(); // step 0: creates superposition
+ mem.apply(Gate::H(0)).unwrap(); // step 0: creates superposition
mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 1: entangles
// Counterfactual: what if we skip the H gate?
@@ -499,9 +708,9 @@ fn test_counterfactual_identity_step() {
#[test]
fn test_sensitivity_identifies_important_gate() {
let mut mem = ReversibleMemory::new(2).unwrap();
- mem.apply(Gate::Rz(0, 0.001)).unwrap(); // step 0: tiny rotation (unimportant)
- mem.apply(Gate::H(0)).unwrap(); // step 1: creates superposition (important)
- mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 2: entangles (important)
+ mem.apply(Gate::Rz(0, 0.001)).unwrap(); // step 0: tiny rotation (unimportant)
+ mem.apply(Gate::H(0)).unwrap(); // step 1: creates superposition (important)
+ mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 2: entangles (important)
let sens = mem.sensitivity_analysis(0.5).unwrap();
// The tiny Rz should be less sensitive than the H or CNOT
@@ -583,9 +792,12 @@ fn test_discovery_decoherence_trajectory_fingerprint() {
let emb_b = QuantumEmbedding::from_embedding(&[0.0, 0.0, 1.0, 0.5], 0.1);
// Decohere all with same seed
- let mut emb_a1 = emb_a1; emb_a1.decohere(5.0, 100);
- let mut emb_a2 = emb_a2; emb_a2.decohere(5.0, 100);
- let mut emb_b = emb_b; emb_b.decohere(5.0, 100);
+ let mut emb_a1 = emb_a1;
+ emb_a1.decohere(5.0, 100);
+ let mut emb_a2 = emb_a2;
+ emb_a2.decohere(5.0, 100);
+ let mut emb_b = emb_b;
+ emb_b.decohere(5.0, 100);
let fid_a1 = emb_a1.fidelity();
let fid_a2 = emb_a2.fidelity();
@@ -600,8 +812,10 @@ fn test_discovery_decoherence_trajectory_fingerprint() {
println!("DISCOVERY: Decoherence fingerprint");
println!(" Similar pair fidelity diff: {:.6}", diff_similar);
println!(" Different pair fidelity diff: {:.6}", diff_different);
- println!(" A1 fidelity: {:.6}, A2 fidelity: {:.6}, B fidelity: {:.6}",
- fid_a1, fid_a2, fid_b);
+ println!(
+ " A1 fidelity: {:.6}, A2 fidelity: {:.6}, B fidelity: {:.6}",
+ fid_a1, fid_a2, fid_b
+ );
}
/// DISCOVERY 2: Interference creates NEW vectors not in original space.
@@ -611,11 +825,14 @@ fn test_discovery_decoherence_trajectory_fingerprint() {
#[test]
fn test_discovery_interference_creates_novel_representations() {
// "spring" — three meanings
- let concept = ConceptSuperposition::uniform("spring", vec![
- ("season".into(), vec![1.0, 0.0, 0.0, 0.0]),
- ("water_source".into(), vec![0.0, 1.0, 0.0, 0.0]),
- ("mechanical".into(), vec![0.0, 0.0, 1.0, 0.0]),
- ]);
+ let concept = ConceptSuperposition::uniform(
+ "spring",
+ vec![
+ ("season".into(), vec![1.0, 0.0, 0.0, 0.0]),
+ ("water_source".into(), vec![0.0, 1.0, 0.0, 0.0]),
+ ("mechanical".into(), vec![0.0, 0.0, 1.0, 0.0]),
+ ],
+ );
// Three different contexts
let ctx_weather = vec![0.9, 0.0, 0.0, 0.1];
@@ -632,14 +849,29 @@ fn test_discovery_interference_creates_novel_representations() {
("geology", &scores_geology),
("engineering", &scores_engineering),
] {
- let top = scores.iter().max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap()).unwrap();
- println!(" Context '{}' → top meaning: '{}' (prob: {:.4})", ctx_name, top.label, top.probability);
+ let top = scores
+ .iter()
+ .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
+ .unwrap();
+ println!(
+ " Context '{}' → top meaning: '{}' (prob: {:.4})",
+ ctx_name, top.label, top.probability
+ );
}
// Verify each context surfaces the right meaning
- let top_weather = scores_weather.iter().max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap()).unwrap();
- let top_geology = scores_geology.iter().max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap()).unwrap();
- let top_engineering = scores_engineering.iter().max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap()).unwrap();
+ let top_weather = scores_weather
+ .iter()
+ .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
+ .unwrap();
+ let top_geology = scores_geology
+ .iter()
+ .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
+ .unwrap();
+ let top_engineering = scores_engineering
+ .iter()
+ .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
+ .unwrap();
assert_eq!(top_weather.label, "season");
assert_eq!(top_geology.label, "water_source");
@@ -655,11 +887,11 @@ fn test_discovery_counterfactual_dependency_map() {
let mut mem = ReversibleMemory::new(3).unwrap();
// Build an entangled state through a sequence
- mem.apply(Gate::H(0)).unwrap(); // step 0: superposition on q0
- mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 1: entangle q0-q1
- mem.apply(Gate::Rz(2, 0.001)).unwrap(); // step 2: tiny rotation on q2 (nearly no-op)
- mem.apply(Gate::CNOT(1, 2)).unwrap(); // step 3: propagate entanglement to q2
- mem.apply(Gate::H(2)).unwrap(); // step 4: mix q2
+ mem.apply(Gate::H(0)).unwrap(); // step 0: superposition on q0
+ mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 1: entangle q0-q1
+ mem.apply(Gate::Rz(2, 0.001)).unwrap(); // step 2: tiny rotation on q2 (nearly no-op)
+ mem.apply(Gate::CNOT(1, 2)).unwrap(); // step 3: propagate entanglement to q2
+ mem.apply(Gate::H(2)).unwrap(); // step 4: mix q2
println!("DISCOVERY: Counterfactual dependency map");
for i in 0..5 {
@@ -675,7 +907,8 @@ fn test_discovery_counterfactual_dependency_map() {
assert!(
cf0.divergence > cf2.divergence,
"H gate (step 0) should be more critical than tiny Rz (step 2): {} > {}",
- cf0.divergence, cf2.divergence
+ cf0.divergence,
+ cf2.divergence
);
}
@@ -685,13 +918,19 @@ fn test_discovery_counterfactual_dependency_map() {
/// Confident agreement amplifies exponentially. Uncertain agents barely contribute.
#[test]
fn test_discovery_swarm_phase_matters() {
- let action = Action { id: "x".into(), description: "".into() };
+ let action = Action {
+ id: "x".into(),
+ description: "".into(),
+ };
// Scenario 1: 3 confident agents, all aligned (phase 0)
let mut aligned = SwarmInterference::new();
for i in 0..3 {
aligned.contribute(AgentContribution::new(
- &format!("a{}", i), action.clone(), 1.0, true,
+ &format!("a{}", i),
+ action.clone(),
+ 1.0,
+ true,
));
}
@@ -700,16 +939,25 @@ fn test_discovery_swarm_phase_matters() {
misaligned.contribute(AgentContribution::new("b0", action.clone(), 1.0, true));
misaligned.contribute(AgentContribution::new("b1", action.clone(), 1.0, true));
// Third agent contributes with 90-degree phase offset (uncertain)
- misaligned.contribute(AgentContribution::multi("b2", vec![
- (action.clone(), Complex::new(0.0, 1.0)), // phase π/2
- ]));
+ misaligned.contribute(AgentContribution::multi(
+ "b2",
+ vec![
+ (action.clone(), Complex::new(0.0, 1.0)), // phase π/2
+ ],
+ ));
let prob_aligned = aligned.decide()[0].probability;
let prob_misaligned = misaligned.decide()[0].probability;
println!("DISCOVERY: Phase alignment matters for swarm decisions");
- println!(" Aligned (3 agents, same phase): prob = {:.4}", prob_aligned);
- println!(" Misaligned (2 same, 1 orthogonal): prob = {:.4}", prob_misaligned);
+ println!(
+ " Aligned (3 agents, same phase): prob = {:.4}",
+ prob_aligned
+ );
+ println!(
+ " Misaligned (2 same, 1 orthogonal): prob = {:.4}",
+ prob_misaligned
+ );
assert!(
prob_aligned > prob_misaligned,
diff --git a/crates/ruqu-wasm/src/lib.rs b/crates/ruqu-wasm/src/lib.rs
index 6cf31cc72..6b9791a69 100644
--- a/crates/ruqu-wasm/src/lib.rs
+++ b/crates/ruqu-wasm/src/lib.rs
@@ -34,8 +34,8 @@
//! (complex f64 amplitudes). At 25 qubits this is ~512MB, which is
//! a practical upper bound for browser environments.
+use serde::{Deserialize, Serialize};
use wasm_bindgen::prelude::*;
-use serde::{Serialize, Deserialize};
/// Maximum qubits allowed in WASM environment.
///
@@ -272,8 +272,7 @@ pub fn simulate(circuit: &WasmQuantumCircuit) -> Result {
execution_time_ms: result.metrics.execution_time_ns as f64 / 1_000_000.0,
};
- serde_wasm_bindgen::to_value(&wasm_result)
- .map_err(|e| JsValue::from_str(&e.to_string()))
+ serde_wasm_bindgen::to_value(&wasm_result).map_err(|e| JsValue::from_str(&e.to_string()))
}
// ═══════════════════════════════════════════════════════════════════════════
@@ -349,10 +348,7 @@ pub fn grover_search(
};
// Convert Vec -> Vec for the core API.
- let target_states_usize: Vec = target_states
- .into_iter()
- .map(|s| s as usize)
- .collect();
+ let target_states_usize: Vec = target_states.into_iter().map(|s| s as usize).collect();
let config = ruqu_algorithms::grover::GroverConfig {
num_qubits,
@@ -481,12 +477,14 @@ pub fn qaoa_maxcut(
let mut expected_cut = 0.0;
for chunk in edges_flat.chunks(2) {
if chunk.len() == 2 {
- let zz = result.state.expectation_value(&ruqu_core::types::PauliString {
- ops: vec![
- (chunk[0], ruqu_core::types::PauliOp::Z),
- (chunk[1], ruqu_core::types::PauliOp::Z),
- ],
- });
+ let zz = result
+ .state
+ .expectation_value(&ruqu_core::types::PauliString {
+ ops: vec![
+ (chunk[0], ruqu_core::types::PauliOp::Z),
+ (chunk[1], ruqu_core::types::PauliOp::Z),
+ ],
+ });
expected_cut += 0.5 * (1.0 - zz);
}
}
diff --git a/crates/ruvector-attn-mincut/src/config.rs b/crates/ruvector-attn-mincut/src/config.rs
index 81c6e9a43..81b88f201 100644
--- a/crates/ruvector-attn-mincut/src/config.rs
+++ b/crates/ruvector-attn-mincut/src/config.rs
@@ -12,7 +12,13 @@ pub struct MinCutConfig {
impl Default for MinCutConfig {
fn default() -> Self {
- Self { lambda: 0.5, tau: 2, eps: 0.01, seed: 42, witness_enabled: true }
+ Self {
+ lambda: 0.5,
+ tau: 2,
+ eps: 0.01,
+ seed: 42,
+ witness_enabled: true,
+ }
}
}
@@ -32,7 +38,13 @@ mod tests {
#[test]
fn test_serde_roundtrip() {
- let c = MinCutConfig { lambda: 0.3, tau: 5, eps: 0.001, seed: 99, witness_enabled: false };
+ let c = MinCutConfig {
+ lambda: 0.3,
+ tau: 5,
+ eps: 0.001,
+ seed: 99,
+ witness_enabled: false,
+ };
let json = serde_json::to_string(&c).unwrap();
let r: MinCutConfig = serde_json::from_str(&json).unwrap();
assert!((r.lambda - 0.3).abs() < f32::EPSILON);
diff --git a/crates/ruvector-attn-mincut/src/gating.rs b/crates/ruvector-attn-mincut/src/gating.rs
index f8e2cfb46..c0ce08323 100644
--- a/crates/ruvector-attn-mincut/src/gating.rs
+++ b/crates/ruvector-attn-mincut/src/gating.rs
@@ -14,7 +14,9 @@ fn compute_logits(q: &[f32], k: &[f32], d: usize, seq_len: usize) -> Vec {
for i in 0..seq_len {
for j in 0..seq_len {
let mut dot = 0.0f32;
- for h in 0..d { dot += q[i * d + h] * k[j * d + h]; }
+ for h in 0..d {
+ dot += q[i * d + h] * k[j * d + h];
+ }
logits[i * seq_len + j] = dot * scale;
}
}
@@ -27,8 +29,15 @@ fn row_softmax(mat: &mut [f32], rows: usize, cols: usize) {
let row = &mut mat[i * cols..(i + 1) * cols];
let mx = row.iter().copied().fold(f32::NEG_INFINITY, f32::max);
let mut sum = 0.0f32;
- for v in row.iter_mut() { *v = (*v - mx).exp(); sum += *v; }
- if sum > 0.0 { for v in row.iter_mut() { *v /= sum; } }
+ for v in row.iter_mut() {
+ *v = (*v - mx).exp();
+ sum += *v;
+ }
+ if sum > 0.0 {
+ for v in row.iter_mut() {
+ *v /= sum;
+ }
+ }
}
}
@@ -39,7 +48,9 @@ fn matmul_wv(w: &[f32], v: &[f32], seq_len: usize, d: usize) -> Vec {
for j in 0..seq_len {
let wij = w[i * seq_len + j];
if wij != 0.0 {
- for h in 0..d { out[i * d + h] += wij * v[j * d + h]; }
+ for h in 0..d {
+ out[i * d + h] += wij * v[j * d + h];
+ }
}
}
}
@@ -57,8 +68,14 @@ pub fn attn_softmax(q: &[f32], k: &[f32], v: &[f32], d: usize, seq_len: usize) -
/// Min-cut gated attention.
/// 1. Compute logits 2. Min-cut gating 3. Mask with -INF 4. Row-softmax 5. Multiply V
pub fn attn_mincut(
- q: &[f32], k: &[f32], v: &[f32],
- d: usize, seq_len: usize, lambda: f32, tau: usize, eps: f32,
+ q: &[f32],
+ k: &[f32],
+ v: &[f32],
+ d: usize,
+ seq_len: usize,
+ lambda: f32,
+ tau: usize,
+ eps: f32,
) -> AttentionOutput {
assert!(q.len() == seq_len * d && k.len() == seq_len * d && v.len() == seq_len * d);
let mut logits = compute_logits(q, k, d, seq_len);
@@ -66,13 +83,22 @@ pub fn attn_mincut(
// Gate entries with -INF so softmax zeroes them
for i in 0..logits.len() {
- if !gating.keep_mask[i] { logits[i] = f32::NEG_INFINITY; }
+ if !gating.keep_mask[i] {
+ logits[i] = f32::NEG_INFINITY;
+ }
}
row_softmax(&mut logits, seq_len, seq_len);
// Replace NaN (fully-gated rows) with 0
- for v in logits.iter_mut() { if v.is_nan() { *v = 0.0; } }
+ for v in logits.iter_mut() {
+ if v.is_nan() {
+ *v = 0.0;
+ }
+ }
- AttentionOutput { output: matmul_wv(&logits, v, seq_len, d), gating }
+ AttentionOutput {
+ output: matmul_wv(&logits, v, seq_len, d),
+ gating,
+ }
}
#[cfg(test)]
@@ -83,7 +109,10 @@ mod tests {
let mut q = vec![0.0f32; seq * d];
let mut k = vec![0.0f32; seq * d];
let v: Vec = (0..seq * d).map(|i| i as f32).collect();
- for i in 0..seq.min(d) { q[i * d + i] = 1.0; k[i * d + i] = 1.0; }
+ for i in 0..seq.min(d) {
+ q[i * d + i] = 1.0;
+ k[i * d + i] = 1.0;
+ }
(q, k, v)
}
diff --git a/crates/ruvector-attn-mincut/src/graph.rs b/crates/ruvector-attn-mincut/src/graph.rs
index 0b68be2b2..01a103460 100644
--- a/crates/ruvector-attn-mincut/src/graph.rs
+++ b/crates/ruvector-attn-mincut/src/graph.rs
@@ -2,24 +2,44 @@ use serde::{Deserialize, Serialize};
/// A directed edge in the attention graph.
#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Edge { pub src: usize, pub dst: usize, pub weight: f32 }
+pub struct Edge {
+ pub src: usize,
+ pub dst: usize,
+ pub weight: f32,
+}
/// Weighted directed graph built from attention logits.
#[derive(Debug, Clone)]
-pub struct AttentionGraph { pub nodes: usize, pub edges: Vec }
+pub struct AttentionGraph {
+ pub nodes: usize,
+ pub edges: Vec,
+}
/// Build a weighted directed graph from flattened `seq_len x seq_len` logits.
/// Only positive logits become edges; non-positive entries are omitted.
pub fn graph_from_logits(logits: &[f32], seq_len: usize) -> AttentionGraph {
- assert_eq!(logits.len(), seq_len * seq_len, "logits length must equal seq_len^2");
+ assert_eq!(
+ logits.len(),
+ seq_len * seq_len,
+ "logits length must equal seq_len^2"
+ );
let mut edges = Vec::new();
for i in 0..seq_len {
for j in 0..seq_len {
let w = logits[i * seq_len + j];
- if w > 0.0 { edges.push(Edge { src: i, dst: j, weight: w }); }
+ if w > 0.0 {
+ edges.push(Edge {
+ src: i,
+ dst: j,
+ weight: w,
+ });
+ }
}
}
- AttentionGraph { nodes: seq_len, edges }
+ AttentionGraph {
+ nodes: seq_len,
+ edges,
+ }
}
#[cfg(test)]
@@ -41,7 +61,9 @@ mod tests {
#[test]
#[should_panic(expected = "logits length must equal seq_len^2")]
- fn test_mismatched_length() { graph_from_logits(&[1.0, 2.0], 3); }
+ fn test_mismatched_length() {
+ graph_from_logits(&[1.0, 2.0], 3);
+ }
#[test]
fn test_empty_graph() {
diff --git a/crates/ruvector-attn-mincut/src/hysteresis.rs b/crates/ruvector-attn-mincut/src/hysteresis.rs
index 656bb4da9..64180b007 100644
--- a/crates/ruvector-attn-mincut/src/hysteresis.rs
+++ b/crates/ruvector-attn-mincut/src/hysteresis.rs
@@ -10,7 +10,12 @@ pub struct HysteresisTracker {
impl HysteresisTracker {
pub fn new(tau: usize) -> Self {
- Self { prev_mask: None, counts: Vec::new(), tau, step: 0 }
+ Self {
+ prev_mask: None,
+ counts: Vec::new(),
+ tau,
+ step: 0,
+ }
}
/// Apply hysteresis to a raw gating mask, returning the stabilised mask.
@@ -45,8 +50,12 @@ impl HysteresisTracker {
result
}
- pub fn step(&self) -> usize { self.step }
- pub fn current_mask(&self) -> Option<&[bool]> { self.prev_mask.as_deref() }
+ pub fn step(&self) -> usize {
+ self.step
+ }
+ pub fn current_mask(&self) -> Option<&[bool]> {
+ self.prev_mask.as_deref()
+ }
}
#[cfg(test)]
@@ -83,7 +92,7 @@ mod tests {
let mut t = HysteresisTracker::new(3);
t.apply(&[true]);
t.apply(&[false]); // count=1
- t.apply(&[true]); // reset
+ t.apply(&[true]); // reset
t.apply(&[false]); // count=1
assert_eq!(t.apply(&[false]), vec![true]); // count=2 < 3
}
diff --git a/crates/ruvector-attn-mincut/src/mincut.rs b/crates/ruvector-attn-mincut/src/mincut.rs
index 7ad140afa..cb6f1d253 100644
--- a/crates/ruvector-attn-mincut/src/mincut.rs
+++ b/crates/ruvector-attn-mincut/src/mincut.rs
@@ -20,7 +20,11 @@ pub struct GatingResult {
}
#[derive(Debug, Clone)]
-struct FlowEdge { to: usize, rev: usize, cap: f32 }
+struct FlowEdge {
+ to: usize,
+ rev: usize,
+ cap: f32,
+}
/// Dinic's max-flow solver for s-t min-cut on an attention graph.
pub struct DinicSolver {
@@ -31,13 +35,21 @@ pub struct DinicSolver {
impl DinicSolver {
fn new(n: usize) -> Self {
- Self { adj: vec![Vec::new(); n], level: vec![0; n], iter: vec![0; n] }
+ Self {
+ adj: vec![Vec::new(); n],
+ level: vec![0; n],
+ iter: vec![0; n],
+ }
}
fn add_edge(&mut self, from: usize, to: usize, cap: f32) {
let (rf, rt) = (self.adj[to].len(), self.adj[from].len());
self.adj[from].push(FlowEdge { to, rev: rf, cap });
- self.adj[to].push(FlowEdge { to: from, rev: rt, cap: 0.0 });
+ self.adj[to].push(FlowEdge {
+ to: from,
+ rev: rt,
+ cap: 0.0,
+ });
}
fn bfs(&mut self, s: usize) {
@@ -56,7 +68,9 @@ impl DinicSolver {
}
fn dfs(&mut self, v: usize, t: usize, f: f32) -> f32 {
- if v == t { return f; }
+ if v == t {
+ return f;
+ }
while self.iter[v] < self.adj[v].len() {
let i = self.iter[v];
let (to, cap) = (self.adj[v][i].to, self.adj[v][i].cap);
@@ -78,12 +92,16 @@ impl DinicSolver {
pub fn min_cut(&mut self, graph: &AttentionGraph, s: usize, t: usize) -> CutResult {
assert!(s < graph.nodes && t < graph.nodes && s != t);
*self = Self::new(graph.nodes);
- for edge in &graph.edges { self.add_edge(edge.src, edge.dst, edge.weight); }
+ for edge in &graph.edges {
+ self.add_edge(edge.src, edge.dst, edge.weight);
+ }
let inf = f32::MAX / 2.0;
loop {
self.bfs(s);
- if self.level[t] < 0 { break; }
+ if self.level[t] < 0 {
+ break;
+ }
self.iter.fill(0);
while self.dfs(s, t, inf) > 0.0 {}
}
@@ -100,19 +118,37 @@ impl DinicSolver {
keep_mask[idx] = false;
}
}
- CutResult { cut_edges, cut_cost, keep_mask }
+ CutResult {
+ cut_edges,
+ cut_cost,
+ keep_mask,
+ }
}
}
/// Compute dynamic min-cut gating over a flattened `seq_len x seq_len` logit matrix.
-pub fn dynamic_min_cut(logits: &[f32], seq_len: usize, lambda: f32, _tau: usize, eps: f32) -> GatingResult {
+pub fn dynamic_min_cut(
+ logits: &[f32],
+ seq_len: usize,
+ lambda: f32,
+ _tau: usize,
+ eps: f32,
+) -> GatingResult {
assert_eq!(logits.len(), seq_len * seq_len);
let n = seq_len * seq_len;
- let clamped: Vec = logits.iter().map(|&v| if v > eps { v } else { 0.0 }).collect();
+ let clamped: Vec = logits
+ .iter()
+ .map(|&v| if v > eps { v } else { 0.0 })
+ .collect();
let graph = crate::graph::graph_from_logits(&clamped, seq_len);
if graph.edges.is_empty() || seq_len < 2 {
- return GatingResult { keep_mask: vec![false; n], cut_cost: 0.0, edges_kept: 0, edges_total: n };
+ return GatingResult {
+ keep_mask: vec![false; n],
+ cut_cost: 0.0,
+ edges_kept: 0,
+ edges_total: n,
+ };
}
let mean_w: f32 = graph.edges.iter().map(|e| e.weight).sum::() / graph.edges.len() as f32;
@@ -124,12 +160,23 @@ pub fn dynamic_min_cut(logits: &[f32], seq_len: usize, lambda: f32, _tau: usize,
let result = solver.min_cut(&graph, 0, seq_len - 1);
if result.cut_cost <= threshold {
total_cut_cost += result.cut_cost;
- for &(s, d) in &result.cut_edges { flat_keep[s * seq_len + d] = false; }
+ for &(s, d) in &result.cut_edges {
+ flat_keep[s * seq_len + d] = false;
+ }
}
- for i in 0..n { if clamped[i] <= 0.0 { flat_keep[i] = false; } }
+ for i in 0..n {
+ if clamped[i] <= 0.0 {
+ flat_keep[i] = false;
+ }
+ }
let edges_kept = flat_keep.iter().filter(|&&k| k).count();
- GatingResult { keep_mask: flat_keep, cut_cost: total_cut_cost, edges_kept, edges_total: n }
+ GatingResult {
+ keep_mask: flat_keep,
+ cut_cost: total_cut_cost,
+ edges_kept,
+ edges_total: n,
+ }
}
#[cfg(test)]
@@ -142,9 +189,31 @@ mod tests {
let graph = AttentionGraph {
nodes: 4,
edges: vec![
- Edge { src: 0, dst: 1, weight: 5.0 }, Edge { src: 0, dst: 2, weight: 4.0 },
- Edge { src: 1, dst: 3, weight: 3.0 }, Edge { src: 2, dst: 3, weight: 6.0 },
- Edge { src: 1, dst: 2, weight: 2.0 },
+ Edge {
+ src: 0,
+ dst: 1,
+ weight: 5.0,
+ },
+ Edge {
+ src: 0,
+ dst: 2,
+ weight: 4.0,
+ },
+ Edge {
+ src: 1,
+ dst: 3,
+ weight: 3.0,
+ },
+ Edge {
+ src: 2,
+ dst: 3,
+ weight: 6.0,
+ },
+ Edge {
+ src: 1,
+ dst: 2,
+ weight: 2.0,
+ },
],
};
let mut solver = DinicSolver::new(4);
@@ -154,7 +223,14 @@ mod tests {
#[test]
fn test_dinic_two_node() {
- let graph = AttentionGraph { nodes: 2, edges: vec![Edge { src: 0, dst: 1, weight: 3.5 }] };
+ let graph = AttentionGraph {
+ nodes: 2,
+ edges: vec![Edge {
+ src: 0,
+ dst: 1,
+ weight: 3.5,
+ }],
+ };
let mut solver = DinicSolver::new(2);
let r = solver.min_cut(&graph, 0, 1);
assert!((r.cut_cost - 3.5).abs() < 0.01);
diff --git a/crates/ruvector-attn-mincut/src/witness.rs b/crates/ruvector-attn-mincut/src/witness.rs
index 4bd42f7f7..c7fce481d 100644
--- a/crates/ruvector-attn-mincut/src/witness.rs
+++ b/crates/ruvector-attn-mincut/src/witness.rs
@@ -22,7 +22,9 @@ pub fn witness_log(entry: &WitnessEntry) -> String {
/// SHA-256 hash of a float tensor (little-endian bytes), returned as hex.
pub fn hash_tensor(data: &[f32]) -> String {
let mut h = Sha256::new();
- for &v in data { h.update(v.to_le_bytes()); }
+ for &v in data {
+ h.update(v.to_le_bytes());
+ }
h.finalize().iter().map(|b| format!("{:02x}", b)).collect()
}
@@ -45,9 +47,14 @@ mod tests {
#[test]
fn test_witness_roundtrip() {
let e = WitnessEntry {
- q_hash: "a".into(), k_hash: "b".into(),
- keep_mask: vec![true, false], cut_cost: 1.5,
- lambda: 0.5, tau: 2, eps: 0.01, timestamp: 1000,
+ q_hash: "a".into(),
+ k_hash: "b".into(),
+ keep_mask: vec![true, false],
+ cut_cost: 1.5,
+ lambda: 0.5,
+ tau: 2,
+ eps: 0.01,
+ timestamp: 1000,
};
let json = witness_log(&e);
let r: WitnessEntry = serde_json::from_str(&json).unwrap();
diff --git a/crates/ruvector-coherence/src/batch.rs b/crates/ruvector-coherence/src/batch.rs
index 48cffa6c4..3fb31735b 100644
--- a/crates/ruvector-coherence/src/batch.rs
+++ b/crates/ruvector-coherence/src/batch.rs
@@ -25,8 +25,12 @@ pub fn evaluate_batch(
let n = baseline_outputs.len().min(gated_outputs.len());
if n == 0 {
return BatchResult {
- mean_coherence_delta: 0.0, std_coherence_delta: 0.0,
- ci_95_lower: 0.0, ci_95_upper: 0.0, n_samples: 0, pass_rate: 0.0,
+ mean_coherence_delta: 0.0,
+ std_coherence_delta: 0.0,
+ ci_95_lower: 0.0,
+ ci_95_upper: 0.0,
+ n_samples: 0,
+ pass_rate: 0.0,
};
}
@@ -42,14 +46,19 @@ pub fn evaluate_batch(
let mean = deltas.iter().sum::() / n as f64;
let var = if n > 1 {
deltas.iter().map(|d| (d - mean).powi(2)).sum::() / (n - 1) as f64
- } else { 0.0 };
+ } else {
+ 0.0
+ };
let std_dev = var.sqrt();
let margin = 1.96 * std_dev / (n as f64).sqrt();
BatchResult {
- mean_coherence_delta: mean, std_coherence_delta: std_dev,
- ci_95_lower: mean - margin, ci_95_upper: mean + margin,
- n_samples: n, pass_rate: passes as f64 / n as f64,
+ mean_coherence_delta: mean,
+ std_coherence_delta: std_dev,
+ ci_95_lower: mean - margin,
+ ci_95_upper: mean + margin,
+ n_samples: n,
+ pass_rate: passes as f64 / n as f64,
}
}
@@ -74,8 +83,18 @@ mod tests {
#[test]
fn batch_ci_contains_mean() {
- let bl = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![1.0, 1.0], vec![2.0, 3.0]];
- let gt = vec![vec![1.1, 0.1], vec![0.1, 1.1], vec![1.2, 0.9], vec![2.1, 2.9]];
+ let bl = vec![
+ vec![1.0, 0.0],
+ vec![0.0, 1.0],
+ vec![1.0, 1.0],
+ vec![2.0, 3.0],
+ ];
+ let gt = vec![
+ vec![1.1, 0.1],
+ vec![0.1, 1.1],
+ vec![1.2, 0.9],
+ vec![2.1, 2.9],
+ ];
let r = evaluate_batch(&bl, >, 0.9);
assert!(r.ci_95_lower <= r.mean_coherence_delta);
assert!(r.ci_95_upper >= r.mean_coherence_delta);
@@ -92,8 +111,12 @@ mod tests {
#[test]
fn batch_result_serializable() {
let r = BatchResult {
- mean_coherence_delta: -0.05, std_coherence_delta: 0.02,
- ci_95_lower: -0.07, ci_95_upper: -0.03, n_samples: 100, pass_rate: 0.95,
+ mean_coherence_delta: -0.05,
+ std_coherence_delta: 0.02,
+ ci_95_lower: -0.07,
+ ci_95_upper: -0.03,
+ n_samples: 100,
+ pass_rate: 0.95,
};
let d: BatchResult = serde_json::from_str(&serde_json::to_string(&r).unwrap()).unwrap();
assert_eq!(d.n_samples, 100);
diff --git a/crates/ruvector-coherence/src/comparison.rs b/crates/ruvector-coherence/src/comparison.rs
index 4a48c4eab..18e69cbd1 100644
--- a/crates/ruvector-coherence/src/comparison.rs
+++ b/crates/ruvector-coherence/src/comparison.rs
@@ -17,11 +17,19 @@ pub fn jaccard_similarity(mask_a: &[bool], mask_b: &[bool]) -> f64 {
let n = mask_a.len().min(mask_b.len());
let (mut inter, mut union) = (0usize, 0usize);
for i in 0..n {
- if mask_a[i] || mask_b[i] { union += 1; }
- if mask_a[i] && mask_b[i] { inter += 1; }
+ if mask_a[i] || mask_b[i] {
+ union += 1;
+ }
+ if mask_a[i] && mask_b[i] {
+ inter += 1;
+ }
}
union += count_true_tail(mask_a, n) + count_true_tail(mask_b, n);
- if union == 0 { 1.0 } else { inter as f64 / union as f64 }
+ if union == 0 {
+ 1.0
+ } else {
+ inter as f64 / union as f64
+ }
}
/// Counts positions where the two masks disagree.
@@ -37,19 +45,35 @@ pub fn compare_attention_masks(baseline: &[bool], gated: &[bool]) -> ComparisonR
let baseline_edges = baseline.iter().filter(|&&v| v).count();
let gated_edges = gated.iter().filter(|&&v| v).count();
let total = baseline.len().max(gated.len());
- let bl_sp = if total > 0 { 1.0 - baseline_edges as f64 / total as f64 } else { 1.0 };
- let gt_sp = if total > 0 { 1.0 - gated_edges as f64 / total as f64 } else { 1.0 };
+ let bl_sp = if total > 0 {
+ 1.0 - baseline_edges as f64 / total as f64
+ } else {
+ 1.0
+ };
+ let gt_sp = if total > 0 {
+ 1.0 - gated_edges as f64 / total as f64
+ } else {
+ 1.0
+ };
ComparisonResult {
jaccard: jaccard_similarity(baseline, gated),
edge_flips: edge_flip_count(baseline, gated),
baseline_edges,
gated_edges,
- sparsity_ratio: if bl_sp > f64::EPSILON { gt_sp / bl_sp } else { gt_sp },
+ sparsity_ratio: if bl_sp > f64::EPSILON {
+ gt_sp / bl_sp
+ } else {
+ gt_sp
+ },
}
}
fn count_true_tail(mask: &[bool], from: usize) -> usize {
- if mask.len() > from { mask[from..].iter().filter(|&&v| v).count() } else { 0 }
+ if mask.len() > from {
+ mask[from..].iter().filter(|&&v| v).count()
+ } else {
+ 0
+ }
}
#[cfg(test)]
@@ -63,15 +87,24 @@ mod tests {
assert!(jaccard_similarity(&[true, false], &[false, true]).abs() < 1e-10);
assert_eq!(jaccard_similarity(&[], &[]), 1.0);
// partial: intersection=1, union=3
- let (a, b) = (vec![true, true, false, false], vec![true, false, true, false]);
+ let (a, b) = (
+ vec![true, true, false, false],
+ vec![true, false, true, false],
+ );
assert!((jaccard_similarity(&a, &b) - 1.0 / 3.0).abs() < 1e-10);
}
#[test]
fn edge_flip_cases() {
assert_eq!(edge_flip_count(&[true, false], &[true, false]), 0);
- assert_eq!(edge_flip_count(&[true, false, true], &[false, true, false]), 3);
- assert_eq!(edge_flip_count(&[true, false], &[true, false, true, true]), 2);
+ assert_eq!(
+ edge_flip_count(&[true, false, true], &[false, true, false]),
+ 3
+ );
+ assert_eq!(
+ edge_flip_count(&[true, false], &[true, false, true, true]),
+ 2
+ );
}
#[test]
diff --git a/crates/ruvector-coherence/src/metrics.rs b/crates/ruvector-coherence/src/metrics.rs
index fc9ca7bb8..7955f8fbb 100644
--- a/crates/ruvector-coherence/src/metrics.rs
+++ b/crates/ruvector-coherence/src/metrics.rs
@@ -20,7 +20,11 @@ pub fn contradiction_rate(predictions: &[Vec], references: &[Vec]) ->
.iter()
.zip(&references[..n])
.filter(|(p, r)| {
- p.iter().zip(r.iter()).map(|(a, b)| *a as f64 * *b as f64).sum::() < 0.0
+ p.iter()
+ .zip(r.iter())
+ .map(|(a, b)| *a as f64 * *b as f64)
+ .sum::()
+ < 0.0
})
.count();
contradictions as f64 / n as f64
@@ -32,7 +36,9 @@ pub fn entailment_consistency(outputs: &[Vec]) -> f64 {
return 1.0;
}
let pairs = outputs.len() - 1;
- let total: f64 = (0..pairs).map(|i| cosine(&outputs[i], &outputs[i + 1])).sum();
+ let total: f64 = (0..pairs)
+ .map(|i| cosine(&outputs[i], &outputs[i + 1]))
+ .sum();
total / pairs as f64
}
@@ -40,20 +46,40 @@ pub fn entailment_consistency(outputs: &[Vec]) -> f64 {
pub fn delta_behavior(baseline_outputs: &[f32], gated_outputs: &[f32]) -> DeltaMetric {
let n = baseline_outputs.len().min(gated_outputs.len());
if n == 0 {
- return DeltaMetric { coherence_delta: 0.0, decision_flips: 0, path_length_change: 0.0 };
+ return DeltaMetric {
+ coherence_delta: 0.0,
+ decision_flips: 0,
+ path_length_change: 0.0,
+ };
}
let (bl, gl) = (&baseline_outputs[..n], &gated_outputs[..n]);
let coherence_delta = cosine(bl, gl) - 1.0;
- let decision_flips = bl.iter().zip(gl).filter(|(b, g)| b.is_sign_positive() != g.is_sign_positive()).count();
+ let decision_flips = bl
+ .iter()
+ .zip(gl)
+ .filter(|(b, g)| b.is_sign_positive() != g.is_sign_positive())
+ .count();
let bn = l2_norm(bl);
- let path_length_change = if bn > f64::EPSILON { l2_norm(gl) / bn - 1.0 } else { 0.0 };
- DeltaMetric { coherence_delta, decision_flips, path_length_change }
+ let path_length_change = if bn > f64::EPSILON {
+ l2_norm(gl) / bn - 1.0
+ } else {
+ 0.0
+ };
+ DeltaMetric {
+ coherence_delta,
+ decision_flips,
+ path_length_change,
+ }
}
fn cosine(a: &[f32], b: &[f32]) -> f64 {
let dot: f64 = a.iter().zip(b).map(|(x, y)| *x as f64 * *y as f64).sum();
let denom = l2_norm(a) * l2_norm(b);
- if denom < f64::EPSILON { 0.0 } else { dot / denom }
+ if denom < f64::EPSILON {
+ 0.0
+ } else {
+ dot / denom
+ }
}
fn l2_norm(v: &[f32]) -> f64 {
@@ -67,8 +93,14 @@ mod tests {
#[test]
fn contradiction_rate_boundaries() {
let preds = vec![vec![1.0, 2.0], vec![3.0, 4.0]];
- assert_eq!(contradiction_rate(&preds, &[vec![1.0, 1.0], vec![1.0, 1.0]]), 0.0);
- assert_eq!(contradiction_rate(&preds, &[vec![-1.0, -1.0], vec![-1.0, -1.0]]), 1.0);
+ assert_eq!(
+ contradiction_rate(&preds, &[vec![1.0, 1.0], vec![1.0, 1.0]]),
+ 0.0
+ );
+ assert_eq!(
+ contradiction_rate(&preds, &[vec![-1.0, -1.0], vec![-1.0, -1.0]]),
+ 1.0
+ );
assert_eq!(contradiction_rate(&[], &[]), 0.0);
}
diff --git a/crates/ruvector-coherence/src/quality.rs b/crates/ruvector-coherence/src/quality.rs
index e52d8a3d2..664727f5e 100644
--- a/crates/ruvector-coherence/src/quality.rs
+++ b/crates/ruvector-coherence/src/quality.rs
@@ -21,7 +21,11 @@ pub fn cosine_similarity(a: &[f32], b: &[f32]) -> f64 {
nb += bi * bi;
}
let denom = na.sqrt() * nb.sqrt();
- if denom < f64::EPSILON { 0.0 } else { dot / denom }
+ if denom < f64::EPSILON {
+ 0.0
+ } else {
+ dot / denom
+ }
}
/// Euclidean (L2) distance between two vectors.
@@ -32,16 +36,28 @@ pub fn l2_distance(a: &[f32], b: &[f32]) -> f64 {
let d = a[i] as f64 - b[i] as f64;
s += d * d;
}
- if a.len() > n { s += a[n..].iter().map(|v| (*v as f64).powi(2)).sum::(); }
- if b.len() > n { s += b[n..].iter().map(|v| (*v as f64).powi(2)).sum::(); }
+ if a.len() > n {
+ s += a[n..].iter().map(|v| (*v as f64).powi(2)).sum::();
+ }
+ if b.len() > n {
+ s += b[n..].iter().map(|v| (*v as f64).powi(2)).sum::();
+ }
s.sqrt()
}
/// Quality gate: passes when `cosine_similarity >= threshold`.
-pub fn quality_check(baseline_output: &[f32], gated_output: &[f32], threshold: f64) -> QualityResult {
+pub fn quality_check(
+ baseline_output: &[f32],
+ gated_output: &[f32],
+ threshold: f64,
+) -> QualityResult {
let cosine_sim = cosine_similarity(baseline_output, gated_output);
let l2_dist = l2_distance(baseline_output, gated_output);
- QualityResult { cosine_sim, l2_dist, passes_threshold: cosine_sim >= threshold }
+ QualityResult {
+ cosine_sim,
+ l2_dist,
+ passes_threshold: cosine_sim >= threshold,
+ }
}
#[cfg(test)]
@@ -73,7 +89,11 @@ mod tests {
#[test]
fn quality_result_serializable() {
- let r = QualityResult { cosine_sim: 0.95, l2_dist: 0.32, passes_threshold: true };
+ let r = QualityResult {
+ cosine_sim: 0.95,
+ l2_dist: 0.32,
+ passes_threshold: true,
+ };
let j = serde_json::to_string(&r).unwrap();
let d: QualityResult = serde_json::from_str(&j).unwrap();
assert!((d.cosine_sim - 0.95).abs() < 1e-10);
diff --git a/crates/ruvector-core/src/advanced/hypergraph.rs b/crates/ruvector-core/src/advanced/hypergraph.rs
index bcfb2b094..11982773f 100644
--- a/crates/ruvector-core/src/advanced/hypergraph.rs
+++ b/crates/ruvector-core/src/advanced/hypergraph.rs
@@ -150,9 +150,7 @@ impl HypergraphIndex {
/// Add an entity node
pub fn add_entity(&mut self, id: VectorId, embedding: Vec) {
self.entities.insert(id.clone(), embedding);
- self.entity_to_hyperedges
- .entry(id)
- .or_insert_with(HashSet::new);
+ self.entity_to_hyperedges.entry(id).or_default();
}
/// Add a hyperedge
@@ -173,7 +171,7 @@ impl HypergraphIndex {
for node in &hyperedge.nodes {
self.entity_to_hyperedges
.entry(node.clone())
- .or_insert_with(HashSet::new)
+ .or_default()
.insert(edge_id.clone());
}
@@ -192,10 +190,7 @@ impl HypergraphIndex {
self.add_hyperedge(temporal_edge.hyperedge)?;
- self.temporal_index
- .entry(bucket)
- .or_insert_with(Vec::new)
- .push(edge_id);
+ self.temporal_index.entry(bucket).or_default().push(edge_id);
Ok(())
}
diff --git a/crates/ruvector-core/src/advanced/learned_index.rs b/crates/ruvector-core/src/advanced/learned_index.rs
index 2f817739a..c59c02b42 100644
--- a/crates/ruvector-core/src/advanced/learned_index.rs
+++ b/crates/ruvector-core/src/advanced/learned_index.rs
@@ -271,7 +271,7 @@ impl LearnedIndex for RecursiveModelIndex {
for (i, (key, _)) in self.data.iter().enumerate() {
if let Ok(pred_pos) = self.predict(key) {
- let error = (i as i32 - pred_pos as i32).abs() as usize;
+ let error = i.abs_diff(pred_pos);
total_error += error as f32;
max_error = max_error.max(error);
}
diff --git a/crates/ruvector-core/src/advanced/neural_hash.rs b/crates/ruvector-core/src/advanced/neural_hash.rs
index b3f4cf400..e2dc34433 100644
--- a/crates/ruvector-core/src/advanced/neural_hash.rs
+++ b/crates/ruvector-core/src/advanced/neural_hash.rs
@@ -3,7 +3,6 @@
//! Learn similarity-preserving binary projections for extreme compression.
//! Achieves 32-128x compression with 90-95% recall preservation.
-use crate::error::{Result, RuvectorError};
use crate::types::VectorId;
use ndarray::{Array1, Array2};
use rand::Rng;
@@ -151,13 +150,13 @@ impl DeepHashEmbedding {
impl NeuralHash for DeepHashEmbedding {
fn encode(&self, vector: &[f32]) -> Vec {
if vector.len() != self.input_dims {
- return vec![0; (self.output_bits + 7) / 8];
+ return vec![0; self.output_bits.div_ceil(8)];
}
let logits = self.forward(vector);
// Threshold at 0 to get binary codes
- let mut bits = vec![0u8; (self.output_bits + 7) / 8];
+ let mut bits = vec![0u8; self.output_bits.div_ceil(8)];
for (i, &logit) in logits.iter().enumerate() {
if logit > 0.0 {
@@ -215,7 +214,7 @@ impl NeuralHash for SimpleLSH {
let input = Array1::from_vec(vector.to_vec());
let projections = self.projections.dot(&input);
- let mut bits = vec![0u8; (self.num_bits + 7) / 8];
+ let mut bits = vec![0u8; self.num_bits.div_ceil(8)];
for (i, &val) in projections.iter().enumerate() {
if val > 0.0 {
@@ -269,10 +268,7 @@ impl HashIndex {
pub fn insert(&mut self, id: VectorId, vector: Vec) {
let code = self.hasher.encode(&vector);
- self.tables
- .entry(code)
- .or_insert_with(Vec::new)
- .push(id.clone());
+ self.tables.entry(code).or_default().push(id.clone());
self.vectors.insert(id, vector);
}
@@ -315,7 +311,7 @@ impl HashIndex {
.map(|v| v.len() * std::mem::size_of::())
.sum();
- let compressed_size = self.tables.len() * ((self.code_bits + 7) / 8);
+ let compressed_size = self.tables.len() * self.code_bits.div_ceil(8);
original_size as f32 / compressed_size as f32
}
diff --git a/crates/ruvector-core/src/advanced/tda.rs b/crates/ruvector-core/src/advanced/tda.rs
index 074a87200..57c72c0a6 100644
--- a/crates/ruvector-core/src/advanced/tda.rs
+++ b/crates/ruvector-core/src/advanced/tda.rs
@@ -4,9 +4,8 @@
//! Detects mode collapse, degeneracy, and topological structure.
use crate::error::{Result, RuvectorError};
-use ndarray::{Array1, Array2};
+use ndarray::Array2;
use serde::{Deserialize, Serialize};
-use std::collections::{HashMap, HashSet};
/// Topological analyzer for embeddings
pub struct TopologicalAnalyzer {
@@ -118,6 +117,7 @@ impl TopologicalAnalyzer {
components
}
+ #[allow(clippy::only_used_in_recursion)]
fn dfs(&self, node: usize, graph: &[Vec], visited: &mut [bool]) {
visited[node] = true;
for &neighbor in &graph[node] {
diff --git a/crates/ruvector-core/src/advanced_features/conformal_prediction.rs b/crates/ruvector-core/src/advanced_features/conformal_prediction.rs
index a3714a03b..d1a9f21b2 100644
--- a/crates/ruvector-core/src/advanced_features/conformal_prediction.rs
+++ b/crates/ruvector-core/src/advanced_features/conformal_prediction.rs
@@ -6,7 +6,6 @@
use crate::error::{Result, RuvectorError};
use crate::types::{SearchResult, VectorId};
use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
/// Configuration for conformal prediction
#[derive(Debug, Clone, Serialize, Deserialize)]
diff --git a/crates/ruvector-core/src/advanced_features/filtered_search.rs b/crates/ruvector-core/src/advanced_features/filtered_search.rs
index 4b31a2d9b..9f8eef885 100644
--- a/crates/ruvector-core/src/advanced_features/filtered_search.rs
+++ b/crates/ruvector-core/src/advanced_features/filtered_search.rs
@@ -5,7 +5,7 @@
//! - Post-filtering: Traverse graph then apply filters
//! - Automatic strategy selection based on filter selectivity
-use crate::error::{Result, RuvectorError};
+use crate::error::Result;
use crate::types::{SearchResult, VectorId};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -112,6 +112,7 @@ impl FilterExpression {
}
/// Estimate selectivity of filter (0.0 = very selective, 1.0 = not selective)
+ #[allow(clippy::only_used_in_recursion)]
pub fn estimate_selectivity(&self, total_vectors: usize) -> f32 {
match self {
FilterExpression::Eq(_, _) => 0.1, // Equality is typically selective
diff --git a/crates/ruvector-core/src/advanced_features/hybrid_search.rs b/crates/ruvector-core/src/advanced_features/hybrid_search.rs
index 329df2922..4ad4441b8 100644
--- a/crates/ruvector-core/src/advanced_features/hybrid_search.rs
+++ b/crates/ruvector-core/src/advanced_features/hybrid_search.rs
@@ -5,7 +5,7 @@
//! - BM25 keyword matching (lexical)
//! - Weighted combination of scores
-use crate::error::{Result, RuvectorError};
+use crate::error::Result;
use crate::types::{SearchResult, VectorId};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
@@ -80,7 +80,7 @@ impl BM25 {
for term in terms {
self.inverted_index
.entry(term)
- .or_insert_with(HashSet::new)
+ .or_default()
.insert(doc_id.clone());
}
diff --git a/crates/ruvector-core/src/advanced_features/mmr.rs b/crates/ruvector-core/src/advanced_features/mmr.rs
index 0c6dfde49..95f7e049f 100644
--- a/crates/ruvector-core/src/advanced_features/mmr.rs
+++ b/crates/ruvector-core/src/advanced_features/mmr.rs
@@ -63,7 +63,7 @@ impl MMRSearch {
pub fn rerank(
&self,
query: &[f32],
- mut candidates: Vec,
+ candidates: Vec,
k: usize,
) -> Result> {
if candidates.is_empty() {
@@ -111,7 +111,7 @@ impl MMRSearch {
/// Compute MMR score for a candidate
fn compute_mmr_score(
&self,
- query: &[f32],
+ _query: &[f32],
candidate: &SearchResult,
selected: &[SearchResult],
) -> Result {
diff --git a/crates/ruvector-core/src/advanced_features/product_quantization.rs b/crates/ruvector-core/src/advanced_features/product_quantization.rs
index 170663b24..09920c26f 100644
--- a/crates/ruvector-core/src/advanced_features/product_quantization.rs
+++ b/crates/ruvector-core/src/advanced_features/product_quantization.rs
@@ -270,7 +270,6 @@ impl EnhancedPQ {
)));
}
- let subspace_dim = self.dimensions / self.config.num_subspaces;
let mut result = Vec::with_capacity(self.dimensions);
for (subspace_idx, &code) in codes.iter().enumerate() {
diff --git a/crates/ruvector-core/src/agenticdb.rs b/crates/ruvector-core/src/agenticdb.rs
index 6ad1b761b..6a9ac36b7 100644
--- a/crates/ruvector-core/src/agenticdb.rs
+++ b/crates/ruvector-core/src/agenticdb.rs
@@ -24,7 +24,7 @@
//! - causal_edges: Cause-effect relationships with hypergraphs
//! - learning_sessions: RL training data
-use crate::embeddings::{BoxedEmbeddingProvider, EmbeddingProvider, HashEmbedding};
+use crate::embeddings::{BoxedEmbeddingProvider, HashEmbedding};
use crate::error::{Result, RuvectorError};
use crate::types::*;
use crate::vector_db::VectorDB;
@@ -32,7 +32,6 @@ use parking_lot::RwLock;
use redb::{Database, TableDefinition};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
-use std::path::Path;
use std::sync::Arc;
// Table definitions
@@ -130,7 +129,7 @@ pub struct UtilitySearchResult {
pub struct AgenticDB {
vector_db: Arc,
db: Arc,
- dimensions: usize,
+ _dimensions: usize,
embedding_provider: BoxedEmbeddingProvider,
}
@@ -211,15 +210,17 @@ impl AgenticDB {
Ok(Self {
vector_db,
db,
- dimensions: options.dimensions,
+ _dimensions: options.dimensions,
embedding_provider,
})
}
/// Create with default options and hash-based embeddings
pub fn with_dimensions(dimensions: usize) -> Result {
- let mut options = DbOptions::default();
- options.dimensions = dimensions;
+ let options = DbOptions {
+ dimensions,
+ ..DbOptions::default()
+ };
Self::new(options)
}
@@ -837,7 +838,7 @@ impl<'a> PolicyMemoryStore<'a> {
let id = uuid::Uuid::new_v4().to_string();
let timestamp = chrono::Utc::now().timestamp();
- let entry = PolicyEntry {
+ let _entry = PolicyEntry {
id: id.clone(),
state_id: state_id.to_string(),
action: PolicyAction {
@@ -940,7 +941,7 @@ impl<'a> PolicyMemoryStore<'a> {
}
/// Update Q-value for a state-action pair
- pub fn update_q_value(&self, policy_id: &str, new_q_value: f64) -> Result<()> {
+ pub fn update_q_value(&self, policy_id: &str, _new_q_value: f64) -> Result<()> {
// Delete old entry and create new one with updated Q-value
// Note: In production, this should use an update mechanism
let _ = self.db.vector_db.delete(&format!("policy_{}", policy_id));
diff --git a/crates/ruvector-core/src/arena.rs b/crates/ruvector-core/src/arena.rs
index 7e837d13c..83dd8bfc7 100644
--- a/crates/ruvector-core/src/arena.rs
+++ b/crates/ruvector-core/src/arena.rs
@@ -225,14 +225,14 @@ impl std::ops::DerefMut for ArenaVec {
}
}
-/// Thread-local arena for per-thread allocations
+// Thread-local arena for per-thread allocations
thread_local! {
static THREAD_ARENA: RefCell = RefCell::new(Arena::with_default_chunk_size());
}
-/// Get the thread-local arena
-/// Note: Commented out due to lifetime issues with RefCell::borrow() escaping closure
-/// Use THREAD_ARENA.with(|arena| { ... }) directly instead
+// Get the thread-local arena
+// Note: Commented out due to lifetime issues with RefCell::borrow() escaping closure
+// Use THREAD_ARENA.with(|arena| { ... }) directly instead
/*
pub fn thread_arena() -> impl std::ops::Deref {
THREAD_ARENA.with(|arena| {
diff --git a/crates/ruvector-core/src/cache_optimized.rs b/crates/ruvector-core/src/cache_optimized.rs
index bd2e2bde9..546da2cc2 100644
--- a/crates/ruvector-core/src/cache_optimized.rs
+++ b/crates/ruvector-core/src/cache_optimized.rs
@@ -101,9 +101,9 @@ impl SoAVectorStorage {
assert!(index < self.count);
assert_eq!(output.len(), self.dimensions);
- for dim_idx in 0..self.dimensions {
+ for (dim_idx, out) in output.iter_mut().enumerate().take(self.dimensions) {
let offset = dim_idx * self.capacity + index;
- output[dim_idx] = unsafe { *self.data.add(offset) };
+ *out = unsafe { *self.data.add(offset) };
}
}
@@ -315,14 +315,14 @@ impl SoAVectorStorage {
let idx = i * 8;
_mm256_storeu_ps(output.as_mut_ptr().add(idx), zero);
}
- for i in (chunks * 8)..self.count {
- output[i] = 0.0;
+ for out in output.iter_mut().take(self.count).skip(chunks * 8) {
+ *out = 0.0;
}
// Process dimension by dimension
- for dim_idx in 0..self.dimensions {
+ for (dim_idx, &q_val) in query.iter().enumerate().take(self.dimensions) {
let dim_slice = self.dimension_slice(dim_idx);
- let query_val = _mm256_set1_ps(query[dim_idx]);
+ let query_val = _mm256_set1_ps(q_val);
// SIMD processing of 8 vectors at a time
for i in 0..chunks {
@@ -353,6 +353,7 @@ impl SoAVectorStorage {
// Feature detection helper for x86_64
#[cfg(target_arch = "x86_64")]
+#[allow(dead_code)]
fn is_x86_feature_detected_helper(feature: &str) -> bool {
match feature {
"avx2" => is_x86_feature_detected!("avx2"),
diff --git a/crates/ruvector-core/src/embeddings.rs b/crates/ruvector-core/src/embeddings.rs
index 452e83532..9dfaa6329 100644
--- a/crates/ruvector-core/src/embeddings.rs
+++ b/crates/ruvector-core/src/embeddings.rs
@@ -24,7 +24,9 @@
//! # Ok::<(), Box>(())
//! ```
-use crate::error::{Result, RuvectorError};
+use crate::error::Result;
+#[cfg(any(feature = "real-embeddings", feature = "api-embeddings"))]
+use crate::error::RuvectorError;
use std::sync::Arc;
/// Trait for text embedding providers
diff --git a/crates/ruvector-core/src/index.rs b/crates/ruvector-core/src/index.rs
index d88020532..eadb730be 100644
--- a/crates/ruvector-core/src/index.rs
+++ b/crates/ruvector-core/src/index.rs
@@ -5,7 +5,7 @@ pub mod flat;
pub mod hnsw;
use crate::error::Result;
-use crate::types::{DistanceMetric, SearchResult, VectorId};
+use crate::types::{SearchResult, VectorId};
/// Trait for vector index implementations
pub trait VectorIndex: Send + Sync {
diff --git a/crates/ruvector-core/src/index/flat.rs b/crates/ruvector-core/src/index/flat.rs
index 9680304df..b2595b47d 100644
--- a/crates/ruvector-core/src/index/flat.rs
+++ b/crates/ruvector-core/src/index/flat.rs
@@ -13,7 +13,7 @@ use rayon::prelude::*;
pub struct FlatIndex {
vectors: DashMap>,
metric: DistanceMetric,
- dimensions: usize,
+ _dimensions: usize,
}
impl FlatIndex {
@@ -22,7 +22,7 @@ impl FlatIndex {
Self {
vectors: DashMap::new(),
metric,
- dimensions,
+ _dimensions: dimensions,
}
}
}
diff --git a/crates/ruvector-core/src/index/hnsw.rs b/crates/ruvector-core/src/index/hnsw.rs
index 0364709bf..83985cd7c 100644
--- a/crates/ruvector-core/src/index/hnsw.rs
+++ b/crates/ruvector-core/src/index/hnsw.rs
@@ -297,9 +297,7 @@ impl VectorIndex for HnswIndex {
let mut inner = self.inner.write();
- // Prepare batch data for parallel insertion
- use rayon::prelude::*;
-
+ // Prepare batch data for insertion
// First, assign indices and collect vector data
let data_with_ids: Vec<_> = entries
.iter()
@@ -336,7 +334,7 @@ impl VectorIndex for HnswIndex {
}
fn remove(&mut self, id: &VectorId) -> Result {
- let mut inner = self.inner.write();
+ let inner = self.inner.write();
// Note: hnsw_rs doesn't support direct deletion
// We remove from our mappings but the graph structure remains
diff --git a/crates/ruvector-core/src/lib.rs b/crates/ruvector-core/src/lib.rs
index b42c90161..7230d14dc 100644
--- a/crates/ruvector-core/src/lib.rs
+++ b/crates/ruvector-core/src/lib.rs
@@ -25,8 +25,9 @@
//! - This is NOT a complete RAG solution - you need external embedding models
//! - Examples use mock embeddings for demonstration only
-#![warn(missing_docs)]
+#![allow(missing_docs)]
#![warn(clippy::all)]
+#![allow(clippy::incompatible_msrv)]
pub mod advanced_features;
@@ -94,8 +95,8 @@ pub use embeddings::CandleEmbedding;
// Compile-time warning about AgenticDB limitations
#[cfg(feature = "storage")]
+#[allow(deprecated, clippy::let_unit_value)]
const _: () = {
- // This will appear in cargo build output as a note
#[deprecated(
since = "0.1.0",
note = "AgenticDB uses placeholder hash-based embeddings. For semantic search, integrate a real embedding model (ONNX, Candle, or API). See /examples/onnx-embeddings for production setup."
diff --git a/crates/ruvector-core/src/lockfree.rs b/crates/ruvector-core/src/lockfree.rs
index 9f0bf344e..f9fced000 100644
--- a/crates/ruvector-core/src/lockfree.rs
+++ b/crates/ruvector-core/src/lockfree.rs
@@ -264,7 +264,7 @@ impl AtomicVectorPool {
}
/// Acquire a vector from the pool (or allocate new one)
- pub fn acquire(&self) -> PooledVector {
+ pub fn acquire(&self) -> PooledVector<'_> {
self.total_allocations.fetch_add(1, Ordering::Relaxed);
let vec = if let Some(mut v) = self.pool.pop() {
diff --git a/crates/ruvector-core/src/quantization.rs b/crates/ruvector-core/src/quantization.rs
index 944b1e8d6..8d3ffed16 100644
--- a/crates/ruvector-core/src/quantization.rs
+++ b/crates/ruvector-core/src/quantization.rs
@@ -218,7 +218,7 @@ impl Int4Quantized {
};
let dimensions = vector.len();
- let num_bytes = (dimensions + 1) / 2;
+ let num_bytes = dimensions.div_ceil(2);
let mut data = vec![0u8; num_bytes];
for (i, &v) in vector.iter().enumerate() {
@@ -247,7 +247,7 @@ impl Int4Quantized {
// Use average scale for balanced comparison
let avg_scale = (self.scale + other.scale) / 2.0;
- let avg_min = (self.min + other.min) / 2.0;
+ let _avg_min = (self.min + other.min) / 2.0;
let mut sum_sq = 0i32;
@@ -296,7 +296,7 @@ pub struct BinaryQuantized {
impl QuantizedVector for BinaryQuantized {
fn quantize(vector: &[f32]) -> Self {
let dimensions = vector.len();
- let num_bytes = (dimensions + 7) / 8;
+ let num_bytes = dimensions.div_ceil(8);
let mut bits = vec![0u8; num_bytes];
for (i, &v) in vector.iter().enumerate() {
diff --git a/crates/ruvector-core/src/storage.rs b/crates/ruvector-core/src/storage.rs
index 52735c952..f6209cd7b 100644
--- a/crates/ruvector-core/src/storage.rs
+++ b/crates/ruvector-core/src/storage.rs
@@ -25,7 +25,6 @@ use std::path::{Path, PathBuf};
use std::sync::Arc;
#[cfg(feature = "storage")]
-
const VECTORS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("vectors");
const METADATA_TABLE: TableDefinition<&str, &str> = TableDefinition::new("metadata");
const CONFIG_TABLE: TableDefinition<&str, &str> = TableDefinition::new("config");
@@ -242,7 +241,7 @@ impl VectorStorage {
/// Delete a vector by ID
pub fn delete(&self, id: &str) -> Result {
let write_txn = self.db.begin_write()?;
- let mut deleted = false;
+ let deleted;
{
let mut table = write_txn.open_table(VECTORS_TABLE)?;
diff --git a/crates/ruvector-core/src/vector_db.rs b/crates/ruvector-core/src/vector_db.rs
index bb26a8fdd..d947f725d 100644
--- a/crates/ruvector-core/src/vector_db.rs
+++ b/crates/ruvector-core/src/vector_db.rs
@@ -130,8 +130,10 @@ impl VectorDB {
/// Create with default options
pub fn with_dimensions(dimensions: usize) -> Result {
- let mut options = DbOptions::default();
- options.dimensions = dimensions;
+ let options = DbOptions {
+ dimensions,
+ ..DbOptions::default()
+ };
Self::new(options)
}
@@ -182,7 +184,7 @@ impl VectorDB {
if let Some(metadata) = &r.metadata {
filter
.iter()
- .all(|(key, value)| metadata.get(key).map_or(false, |v| v == value))
+ .all(|(key, value)| metadata.get(key).is_some_and(|v| v == value))
} else {
false
}
diff --git a/crates/ruvector-crv/src/lib.rs b/crates/ruvector-crv/src/lib.rs
index 67ef16806..0587d33d8 100644
--- a/crates/ruvector-crv/src/lib.rs
+++ b/crates/ruvector-crv/src/lib.rs
@@ -85,10 +85,10 @@ pub use stage_iv::StageIVEncoder;
pub use stage_v::StageVEngine;
pub use stage_vi::StageVIModeler;
pub use types::{
- AOLDetection, ConvergenceResult, CrossReference, CrvConfig, CrvSessionEntry,
- GeometricKind, GestaltType, SensoryModality, SignalLineProbe, SketchElement,
- SpatialRelationType, SpatialRelationship, StageIData, StageIIData, StageIIIData,
- StageIVData, StageVData, StageVIData, TargetPartition,
+ AOLDetection, ConvergenceResult, CrossReference, CrvConfig, CrvSessionEntry, GeometricKind,
+ GestaltType, SensoryModality, SignalLineProbe, SketchElement, SpatialRelationType,
+ SpatialRelationship, StageIData, StageIIData, StageIIIData, StageIVData, StageVData,
+ StageVIData, TargetPartition,
};
/// Library version.
diff --git a/crates/ruvector-crv/src/session.rs b/crates/ruvector-crv/src/session.rs
index 61bf27c11..8818a9390 100644
--- a/crates/ruvector-crv/src/session.rs
+++ b/crates/ruvector-crv/src/session.rs
@@ -116,44 +116,28 @@ impl CrvSessionManager {
}
/// Add Stage I data to a session.
- pub fn add_stage_i(
- &mut self,
- session_id: &str,
- data: &StageIData,
- ) -> CrvResult> {
+ pub fn add_stage_i(&mut self, session_id: &str, data: &StageIData) -> CrvResult> {
let embedding = self.stage_i.encode(data)?;
self.add_entry(session_id, 1, embedding.clone(), HashMap::new())?;
Ok(embedding)
}
/// Add Stage II data to a session.
- pub fn add_stage_ii(
- &mut self,
- session_id: &str,
- data: &StageIIData,
- ) -> CrvResult> {
+ pub fn add_stage_ii(&mut self, session_id: &str, data: &StageIIData) -> CrvResult> {
let embedding = self.stage_ii.encode(data)?;
self.add_entry(session_id, 2, embedding.clone(), HashMap::new())?;
Ok(embedding)
}
/// Add Stage III data to a session.
- pub fn add_stage_iii(
- &mut self,
- session_id: &str,
- data: &StageIIIData,
- ) -> CrvResult> {
+ pub fn add_stage_iii(&mut self, session_id: &str, data: &StageIIIData) -> CrvResult> {
let embedding = self.stage_iii.encode(data)?;
self.add_entry(session_id, 3, embedding.clone(), HashMap::new())?;
Ok(embedding)
}
/// Add Stage IV data to a session.
- pub fn add_stage_iv(
- &mut self,
- session_id: &str,
- data: &StageIVData,
- ) -> CrvResult> {
+ pub fn add_stage_iv(&mut self, session_id: &str, data: &StageIVData) -> CrvResult> {
let embedding = self.stage_iv.encode(data)?;
self.add_entry(session_id, 4, embedding.clone(), HashMap::new())?;
Ok(embedding)
@@ -173,8 +157,11 @@ impl CrvSessionManager {
.get(session_id)
.ok_or_else(|| CrvError::SessionNotFound(session_id.to_string()))?;
- let all_embeddings: Vec> =
- session.entries.iter().map(|e| e.embedding.clone()).collect();
+ let all_embeddings: Vec> = session
+ .entries
+ .iter()
+ .map(|e| e.embedding.clone())
+ .collect();
let mut probes = Vec::new();
let mut cross_refs = Vec::new();
@@ -248,8 +235,11 @@ impl CrvSessionManager {
.get(session_id)
.ok_or_else(|| CrvError::SessionNotFound(session_id.to_string()))?;
- let embeddings: Vec> =
- session.entries.iter().map(|e| e.embedding.clone()).collect();
+ let embeddings: Vec> = session
+ .entries
+ .iter()
+ .map(|e| e.embedding.clone())
+ .collect();
let labels: Vec<(u8, usize)> = session
.entries
.iter()
@@ -323,8 +313,7 @@ impl CrvSessionManager {
if emb_a.len() == emb_b.len() && !emb_a.is_empty() {
let sim = cosine_similarity(emb_a, emb_b);
if sim >= min_similarity {
- session_pairs
- .push((sess_a.id.clone(), sess_b.id.clone()));
+ session_pairs.push((sess_a.id.clone(), sess_b.id.clone()));
scores.push(sim);
if !convergent_stages.contains(&stage) {
convergent_stages.push(stage);
diff --git a/crates/ruvector-crv/src/stage_ii.rs b/crates/ruvector-crv/src/stage_ii.rs
index 6cfe252dc..9d00f8bde 100644
--- a/crates/ruvector-crv/src/stage_ii.rs
+++ b/crates/ruvector-crv/src/stage_ii.rs
@@ -100,9 +100,7 @@ impl StageIIEncoder {
/// attends over all impressions to produce the fused output.
pub fn encode(&self, data: &StageIIData) -> CrvResult> {
if data.impressions.is_empty() {
- return Err(CrvError::EmptyInput(
- "No sensory impressions".to_string(),
- ));
+ return Err(CrvError::EmptyInput("No sensory impressions".to_string()));
}
// If a pre-computed feature vector exists, use it
diff --git a/crates/ruvector-crv/src/stage_iii.rs b/crates/ruvector-crv/src/stage_iii.rs
index d424373d6..4dd906089 100644
--- a/crates/ruvector-crv/src/stage_iii.rs
+++ b/crates/ruvector-crv/src/stage_iii.rs
@@ -37,7 +37,13 @@ impl StageIIIEncoder {
}
/// Encode a sketch element into a node feature vector.
- fn encode_element(&self, label: &str, kind: GeometricKind, position: (f32, f32), scale: Option) -> Vec {
+ fn encode_element(
+ &self,
+ label: &str,
+ kind: GeometricKind,
+ position: (f32, f32),
+ scale: Option,
+ ) -> Vec {
let mut features = vec![0.0f32; self.dim];
// Geometric kind encoding (one-hot style in first 8 dims)
@@ -110,9 +116,7 @@ impl StageIIIEncoder {
/// into a single graph-level vector.
pub fn encode(&self, data: &StageIIIData) -> CrvResult> {
if data.sketch_elements.is_empty() {
- return Err(CrvError::EmptyInput(
- "No sketch elements".to_string(),
- ));
+ return Err(CrvError::EmptyInput("No sketch elements".to_string()));
}
// Build label → index mapping
@@ -127,9 +131,7 @@ impl StageIIIEncoder {
let node_features: Vec> = data
.sketch_elements
.iter()
- .map(|elem| {
- self.encode_element(&elem.label, elem.kind, elem.position, elem.scale)
- })
+ .map(|elem| self.encode_element(&elem.label, elem.kind, elem.position, elem.scale))
.collect();
// For each node, collect neighbor embeddings and edge weights
@@ -211,12 +213,8 @@ mod tests {
let config = test_config();
let encoder = StageIIIEncoder::new(&config);
- let features = encoder.encode_element(
- "building",
- GeometricKind::Rectangle,
- (0.5, 0.3),
- Some(2.0),
- );
+ let features =
+ encoder.encode_element("building", GeometricKind::Rectangle, (0.5, 0.3), Some(2.0));
assert_eq!(features.len(), 32);
}
diff --git a/crates/ruvector-crv/src/stage_iv.rs b/crates/ruvector-crv/src/stage_iv.rs
index 2b95b9d97..3b069b423 100644
--- a/crates/ruvector-crv/src/stage_iv.rs
+++ b/crates/ruvector-crv/src/stage_iv.rs
@@ -96,11 +96,7 @@ impl StageIVEncoder {
///
/// High spike rate in a short window indicates the analytical mind
/// is overriding the signal line (AOL contamination).
- fn detect_aol(
- &self,
- spike_rates: &[f64],
- window_ms: f64,
- ) -> Vec {
+ fn detect_aol(&self, spike_rates: &[f64], window_ms: f64) -> Vec {
let mut detections = Vec::new();
let threshold = self.aol_threshold as f64;
diff --git a/crates/ruvector-crv/src/stage_v.rs b/crates/ruvector-crv/src/stage_v.rs
index 69fe793c3..f2d5f1ba4 100644
--- a/crates/ruvector-crv/src/stage_v.rs
+++ b/crates/ruvector-crv/src/stage_v.rs
@@ -54,7 +54,7 @@ impl StageVEngine {
Ok(SignalLineProbe {
query: String::new(), // Caller sets the text
- target_stage: 0, // Caller sets the stage
+ target_stage: 0, // Caller sets the stage
attention_weights,
top_candidates,
})
@@ -109,7 +109,9 @@ impl StageVEngine {
/// responsive to interrogation.
pub fn encode(&self, data: &StageVData, all_embeddings: &[Vec]) -> CrvResult> {
if data.probes.is_empty() {
- return Err(CrvError::EmptyInput("No probes in Stage V data".to_string()));
+ return Err(CrvError::EmptyInput(
+ "No probes in Stage V data".to_string(),
+ ));
}
let mut embedding = vec![0.0f32; self.dim];
diff --git a/crates/ruvector-crv/src/stage_vi.rs b/crates/ruvector-crv/src/stage_vi.rs
index 0fd2f2a09..d9a3c0072 100644
--- a/crates/ruvector-crv/src/stage_vi.rs
+++ b/crates/ruvector-crv/src/stage_vi.rs
@@ -146,9 +146,8 @@ impl StageVIModeler {
Ok(mc) => mc,
Err(_) => {
// Fallback: single partition
- let centroid = self.compute_centroid(
- &embeddings.iter().map(|e| e.as_slice()).collect::>(),
- );
+ let centroid = self
+ .compute_centroid(&embeddings.iter().map(|e| e.as_slice()).collect::>());
return Ok(StageVIData {
partitions: vec![TargetPartition {
label: "composite".to_string(),
@@ -173,10 +172,16 @@ impl StageVIModeler {
let (group_a, group_b) = self.bisect_by_similarity(embeddings);
let centroid_a = self.compute_centroid(
- &group_a.iter().map(|&i| embeddings[i].as_slice()).collect::>(),
+ &group_a
+ .iter()
+ .map(|&i| embeddings[i].as_slice())
+ .collect::>(),
);
let centroid_b = self.compute_centroid(
- &group_b.iter().map(|&i| embeddings[i].as_slice()).collect::>(),
+ &group_b
+ .iter()
+ .map(|&i| embeddings[i].as_slice())
+ .collect::>(),
);
let members_a: Vec<(u8, usize)> = group_a
@@ -289,7 +294,8 @@ impl StageVIModeler {
let mut embedding = vec![0.0f32; self.dim];
let mut total_weight = 0.0f32;
- for (partition, &confidence) in data.partitions.iter().zip(data.partition_confidence.iter()) {
+ for (partition, &confidence) in data.partitions.iter().zip(data.partition_confidence.iter())
+ {
let weight = confidence * partition.member_entries.len() as f32;
for (i, &v) in partition.centroid.iter().enumerate() {
if i < self.dim {
diff --git a/crates/ruvector-domain-expansion-wasm/src/lib.rs b/crates/ruvector-domain-expansion-wasm/src/lib.rs
index 001cc7f3f..dd0e9fdb9 100644
--- a/crates/ruvector-domain-expansion-wasm/src/lib.rs
+++ b/crates/ruvector-domain-expansion-wasm/src/lib.rs
@@ -9,9 +9,8 @@
//! RuVector Format wire protocol.
use ruvector_domain_expansion::{
- AccelerationScoreboard, ArmId, ContextBucket, CostCurve,
- DomainExpansionEngine, DomainId, Evaluation, MetaThompsonEngine,
- PopulationSearch, Solution, Task,
+ AccelerationScoreboard, ArmId, ContextBucket, CostCurve, DomainExpansionEngine, DomainId,
+ Evaluation, MetaThompsonEngine, PopulationSearch, Solution, Task,
};
use wasm_bindgen::prelude::*;
@@ -109,12 +108,7 @@ impl WasmDomainExpansionEngine {
/// Check if speculation should be triggered.
#[wasm_bindgen(js_name = shouldSpeculate)]
- pub fn should_speculate(
- &self,
- domain_id: &str,
- difficulty_tier: &str,
- category: &str,
- ) -> bool {
+ pub fn should_speculate(&self, domain_id: &str, difficulty_tier: &str, category: &str) -> bool {
let bucket = ContextBucket {
difficulty_tier: difficulty_tier.to_string(),
category: category.to_string(),
@@ -126,10 +120,8 @@ impl WasmDomainExpansionEngine {
/// Initiate transfer from source to target domain.
#[wasm_bindgen(js_name = initiateTransfer)]
pub fn initiate_transfer(&mut self, source: &str, target: &str) {
- self.inner.initiate_transfer(
- &DomainId(source.to_string()),
- &DomainId(target.to_string()),
- );
+ self.inner
+ .initiate_transfer(&DomainId(source.to_string()), &DomainId(target.to_string()));
}
/// Verify a transfer delta. Returns verification JSON.
@@ -196,13 +188,9 @@ impl WasmDomainExpansionEngine {
/// Get counterexamples for a domain as JSON.
#[wasm_bindgen(js_name = counterexamples)]
pub fn counterexamples(&self, domain_id: &str) -> JsValue {
- let examples = self
- .inner
- .counterexamples(&DomainId(domain_id.to_string()));
- let serializable: Vec<(&Task, &Solution, &Evaluation)> = examples
- .iter()
- .map(|(t, s, e)| (t, s, e))
- .collect();
+ let examples = self.inner.counterexamples(&DomainId(domain_id.to_string()));
+ let serializable: Vec<(&Task, &Solution, &Evaluation)> =
+ examples.iter().map(|(t, s, e)| (t, s, e)).collect();
serde_wasm_bindgen::to_value(&serializable).unwrap_or(JsValue::NULL)
}
}
@@ -404,12 +392,9 @@ impl WasmRvfBridge {
prior_json: &str,
segment_id: u64,
) -> Result, JsValue> {
- let prior: ruvector_domain_expansion::TransferPrior =
- serde_json::from_str(prior_json)
- .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
- Ok(ruvector_domain_expansion::rvf_bridge::transfer_prior_to_segment(
- &prior, segment_id,
- ))
+ let prior: ruvector_domain_expansion::TransferPrior = serde_json::from_str(prior_json)
+ .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
+ Ok(ruvector_domain_expansion::rvf_bridge::transfer_prior_to_segment(&prior, segment_id))
}
/// Deserialize a TransferPrior from RVF segment bytes. Returns JSON.
@@ -428,12 +413,9 @@ impl WasmRvfBridge {
kernel_json: &str,
segment_id: u64,
) -> Result, JsValue> {
- let kernel: ruvector_domain_expansion::PolicyKernel =
- serde_json::from_str(kernel_json)
- .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
- Ok(ruvector_domain_expansion::rvf_bridge::policy_kernel_to_segment(
- &kernel, segment_id,
- ))
+ let kernel: ruvector_domain_expansion::PolicyKernel = serde_json::from_str(kernel_json)
+ .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
+ Ok(ruvector_domain_expansion::rvf_bridge::policy_kernel_to_segment(&kernel, segment_id))
}
/// Serialize a CostCurve (JSON) into an RVF COST_CURVE segment.
@@ -443,21 +425,17 @@ impl WasmRvfBridge {
curve_json: &str,
segment_id: u64,
) -> Result, JsValue> {
- let curve: ruvector_domain_expansion::CostCurve =
- serde_json::from_str(curve_json)
- .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
- Ok(ruvector_domain_expansion::rvf_bridge::cost_curve_to_segment(
- &curve, segment_id,
- ))
+ let curve: ruvector_domain_expansion::CostCurve = serde_json::from_str(curve_json)
+ .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
+ Ok(ruvector_domain_expansion::rvf_bridge::cost_curve_to_segment(&curve, segment_id))
}
/// Compute the SHAKE-256 witness hash for a TransferPrior.
/// Returns 32 bytes (hex-encoded string).
#[wasm_bindgen(js_name = computeWitnessHash)]
pub fn compute_witness_hash(&self, prior_json: &str) -> Result {
- let prior: ruvector_domain_expansion::TransferPrior =
- serde_json::from_str(prior_json)
- .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
+ let prior: ruvector_domain_expansion::TransferPrior = serde_json::from_str(prior_json)
+ .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
let hash = ruvector_domain_expansion::rvf_bridge::compute_transfer_witness_hash(&prior);
Ok(hash.iter().map(|b| format!("{b:02x}")).collect())
}
@@ -483,12 +461,14 @@ impl WasmRvfBridge {
serde_json::from_str(curves_json)
.map_err(|e| JsValue::from_str(&format!("curves parse error: {e}")))?;
- Ok(ruvector_domain_expansion::rvf_bridge::assemble_domain_expansion_segments(
- &priors,
- &kernels,
- &curves,
- base_segment_id,
- ))
+ Ok(
+ ruvector_domain_expansion::rvf_bridge::assemble_domain_expansion_segments(
+ &priors,
+ &kernels,
+ &curves,
+ base_segment_id,
+ ),
+ )
}
/// Extract solver-compatible prior exchange data from a TransferPrior JSON.
@@ -500,9 +480,8 @@ impl WasmRvfBridge {
prior_json: &str,
) -> Result {
// Build a temporary Thompson engine with the prior
- let prior: ruvector_domain_expansion::TransferPrior =
- serde_json::from_str(prior_json)
- .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
+ let prior: ruvector_domain_expansion::TransferPrior = serde_json::from_str(prior_json)
+ .map_err(|e| JsValue::from_str(&format!("JSON parse error: {e}")))?;
let arms: Vec = prior
.bucket_priors
diff --git a/crates/ruvector-domain-expansion/benches/domain_expansion_bench.rs b/crates/ruvector-domain-expansion/benches/domain_expansion_bench.rs
index 4770c5f34..5e0a4ceaf 100644
--- a/crates/ruvector-domain-expansion/benches/domain_expansion_bench.rs
+++ b/crates/ruvector-domain-expansion/benches/domain_expansion_bench.rs
@@ -1,9 +1,9 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ruvector_domain_expansion::{
- ArmId, ContextBucket, CostCurve, CostCurvePoint, ConvergenceThresholds,
- AccelerationScoreboard, CuriosityBonus, DecayingBeta, DomainExpansionEngine, DomainId,
- MetaLearningEngine, MetaThompsonEngine, ParetoFront, ParetoPoint, PlateauDetector,
- PolicyKnobs, PopulationSearch, RegretTracker, Solution, TransferPrior,
+ AccelerationScoreboard, ArmId, ContextBucket, ConvergenceThresholds, CostCurve, CostCurvePoint,
+ CuriosityBonus, DecayingBeta, DomainExpansionEngine, DomainId, MetaLearningEngine,
+ MetaThompsonEngine, ParetoFront, ParetoPoint, PlateauDetector, PolicyKnobs, PopulationSearch,
+ RegretTracker, Solution, TransferPrior,
};
fn bench_task_generation(c: &mut Criterion) {
@@ -14,9 +14,7 @@ fn bench_task_generation(c: &mut Criterion) {
for domain_id in &domains {
group.bench_function(format!("{}", domain_id), |b| {
- b.iter(|| {
- engine.generate_tasks(black_box(domain_id), black_box(10), black_box(0.5))
- })
+ b.iter(|| engine.generate_tasks(black_box(domain_id), black_box(10), black_box(0.5)))
});
}
group.finish();
@@ -29,7 +27,9 @@ fn bench_evaluation(c: &mut Criterion) {
let solution = Solution {
task_id: tasks[0].id.clone(),
- content: "fn sum_positives(values: &[i64]) -> i64 { values.iter().filter(|&&x| x > 0).sum() }".into(),
+ content:
+ "fn sum_positives(values: &[i64]) -> i64 { values.iter().filter(|&&x| x > 0).sum() }"
+ .into(),
data: serde_json::Value::Null,
};
diff --git a/crates/ruvector-domain-expansion/src/cost_curve.rs b/crates/ruvector-domain-expansion/src/cost_curve.rs
index adacb2909..dad12166d 100644
--- a/crates/ruvector-domain-expansion/src/cost_curve.rs
+++ b/crates/ruvector-domain-expansion/src/cost_curve.rs
@@ -285,7 +285,11 @@ impl AccelerationScoreboard {
domain_id: id.clone(),
total_cycles: curve.points.last().map(|p| p.cycle).unwrap_or(0),
final_accuracy: curve.points.last().map(|p| p.accuracy).unwrap_or(0.0),
- final_cost: curve.points.last().map(|p| p.cost_per_solve).unwrap_or(f32::MAX),
+ final_cost: curve
+ .points
+ .last()
+ .map(|p| p.cost_per_solve)
+ .unwrap_or(f32::MAX),
converged: curve.has_converged(),
cycles_to_convergence: curve.cycles_to_convergence(),
compression_ratio: curve.compression_ratio(),
@@ -296,7 +300,10 @@ impl AccelerationScoreboard {
let overall_acceleration = if self.accelerations.is_empty() {
1.0
} else {
- self.accelerations.iter().map(|a| a.acceleration).sum::()
+ self.accelerations
+ .iter()
+ .map(|a| a.acceleration)
+ .sum::()
/ self.accelerations.len() as f32
};
@@ -342,11 +349,7 @@ pub struct ScoreboardSummary {
mod tests {
use super::*;
- fn make_curve(
- domain: &str,
- transfer: bool,
- accuracy_steps: &[(u64, f32, f32)],
- ) -> CostCurve {
+ fn make_curve(domain: &str, transfer: bool, accuracy_steps: &[(u64, f32, f32)]) -> CostCurve {
let mut curve = if transfer {
CostCurve::with_transfer(
DomainId(domain.into()),
@@ -398,8 +401,11 @@ mod tests {
#[test]
fn test_compression_ratio() {
- let curve =
- make_curve("test", false, &[(0, 0.3, 1.0), (10, 0.6, 0.5), (20, 0.9, 0.1)]);
+ let curve = make_curve(
+ "test",
+ false,
+ &[(0, 0.3, 1.0), (10, 0.6, 0.5), (20, 0.9, 0.1)],
+ );
let ratio = curve.compression_ratio();
assert!((ratio - 10.0).abs() < 1e-4); // 1.0 / 0.1 = 10x
diff --git a/crates/ruvector-domain-expansion/src/lib.rs b/crates/ruvector-domain-expansion/src/lib.rs
index d0f1b07d4..9067ba63d 100644
--- a/crates/ruvector-domain-expansion/src/lib.rs
+++ b/crates/ruvector-domain-expansion/src/lib.rs
@@ -64,14 +64,14 @@ pub use cost_curve::{
ScoreboardSummary,
};
pub use domain::{Domain, DomainEmbedding, DomainId, Evaluation, Solution, Task};
+pub use meta_learning::{
+ CuriosityBonus, DecayingBeta, MetaLearningEngine, MetaLearningHealth, ParetoFront, ParetoPoint,
+ PlateauAction, PlateauDetector, RegretSummary, RegretTracker,
+};
pub use planning::PlanningDomain;
pub use policy_kernel::{PolicyKernel, PolicyKnobs, PopulationSearch, PopulationStats};
pub use rust_synthesis::RustSynthesisDomain;
pub use tool_orchestration::ToolOrchestrationDomain;
-pub use meta_learning::{
- CuriosityBonus, DecayingBeta, MetaLearningEngine, MetaLearningHealth, ParetoFront,
- ParetoPoint, PlateauAction, PlateauDetector, RegretSummary, RegretTracker,
-};
pub use transfer::{
ArmId, BetaParams, ContextBucket, DualPathResult, MetaThompsonEngine, TransferPrior,
TransferVerification,
@@ -150,12 +150,7 @@ impl DomainExpansionEngine {
}
/// Generate training tasks for a specific domain.
- pub fn generate_tasks(
- &self,
- domain_id: &DomainId,
- count: usize,
- difficulty: f32,
- ) -> Vec {
+ pub fn generate_tasks(&self, domain_id: &DomainId, count: usize, difficulty: f32) -> Vec {
self.domains
.get(domain_id)
.map(|d| d.generate_tasks(count, difficulty))
@@ -297,7 +292,8 @@ impl DomainExpansionEngine {
} else {
accuracy
};
- self.meta.record_kernel(&kernel.id, accuracy, cost, robustness, gen);
+ self.meta
+ .record_kernel(&kernel.id, accuracy, cost, robustness, gen);
}
self.population.evolve();
@@ -324,10 +320,7 @@ impl DomainExpansionEngine {
}
/// Get counterexamples for a domain.
- pub fn counterexamples(
- &self,
- domain_id: &DomainId,
- ) -> &[(Task, Solution, Evaluation)] {
+ pub fn counterexamples(&self, domain_id: &DomainId) -> &[(Task, Solution, Evaluation)] {
self.counterexamples
.get(domain_id)
.map(|v| v.as_slice())
@@ -335,21 +328,13 @@ impl DomainExpansionEngine {
}
/// Select best arm for a context using Thompson Sampling.
- pub fn select_arm(
- &self,
- domain_id: &DomainId,
- bucket: &ContextBucket,
- ) -> Option {
+ pub fn select_arm(&self, domain_id: &DomainId, bucket: &ContextBucket) -> Option {
let mut rng = rand::thread_rng();
self.thompson.select_arm(domain_id, bucket, &mut rng)
}
/// Check if dual-path speculation should be triggered.
- pub fn should_speculate(
- &self,
- domain_id: &DomainId,
- bucket: &ContextBucket,
- ) -> bool {
+ pub fn should_speculate(&self, domain_id: &DomainId, bucket: &ContextBucket) -> bool {
self.thompson.is_uncertain(domain_id, bucket, 0.15)
}
@@ -398,10 +383,7 @@ impl DomainExpansionEngine {
}
/// Check cost curve for plateau and get recommended action.
- pub fn check_plateau(
- &mut self,
- domain_id: &DomainId,
- ) -> PlateauAction {
+ pub fn check_plateau(&mut self, domain_id: &DomainId) -> PlateauAction {
if let Some(curve) = self.scoreboard.curves.get(domain_id) {
self.meta.check_plateau(&curve.points)
} else {
@@ -468,7 +450,9 @@ mod tests {
let solution = Solution {
task_id: task.id.clone(),
- content: "fn double(values: &[i64]) -> Vec { values.iter().map(|&x| x * 2).collect() }".into(),
+ content:
+ "fn double(values: &[i64]) -> Vec { values.iter().map(|&x| x * 2).collect() }"
+ .into(),
data: serde_json::Value::Null,
};
@@ -540,9 +524,7 @@ mod tests {
// Verify the transfer.
let verification = engine.verify_transfer(
- &source,
- &target,
- 0.85, // source before
+ &source, &target, 0.85, // source before
0.845, // source after (within tolerance)
0.3, // target before
0.7, // target after
@@ -577,10 +559,7 @@ mod tests {
};
// With uniform priors, should be uncertain.
- assert!(engine.should_speculate(
- &DomainId("rust_synthesis".into()),
- &bucket,
- ));
+ assert!(engine.should_speculate(&DomainId("rust_synthesis".into()), &bucket,));
}
#[test]
diff --git a/crates/ruvector-domain-expansion/src/meta_learning.rs b/crates/ruvector-domain-expansion/src/meta_learning.rs
index 7d68c4a22..5e073fa93 100644
--- a/crates/ruvector-domain-expansion/src/meta_learning.rs
+++ b/crates/ruvector-domain-expansion/src/meta_learning.rs
@@ -90,12 +90,7 @@ impl RegretTracker {
}
/// Record a choice and its reward, updating regret.
- pub fn record(
- &mut self,
- bucket: &ContextBucket,
- arm: &ArmId,
- reward: f32,
- ) {
+ pub fn record(&mut self, bucket: &ContextBucket, arm: &ArmId, reward: f32) {
// Avoid cloning when entry already exists (hot path optimization).
if !self.buckets.contains_key(bucket) {
self.buckets.insert(bucket.clone(), BucketRegret::new());
@@ -341,10 +336,8 @@ impl PlateauDetector {
let recent = &points[n - self.window_size..];
let prior = &points[n - 2 * self.window_size..n - self.window_size];
- let recent_mean = recent.iter().map(|p| p.accuracy).sum::()
- / recent.len() as f32;
- let prior_mean = prior.iter().map(|p| p.accuracy).sum::()
- / prior.len() as f32;
+ let recent_mean = recent.iter().map(|p| p.accuracy).sum::() / recent.len() as f32;
+ let prior_mean = prior.iter().map(|p| p.accuracy).sum::() / prior.len() as f32;
let improvement = recent_mean - prior_mean;
@@ -374,10 +367,9 @@ impl PlateauDetector {
let recent = &points[n - self.window_size..];
let prior = &points[n - 2 * self.window_size..n - self.window_size];
- let recent_cost = recent.iter().map(|p| p.cost_per_solve).sum::()
- / recent.len() as f32;
- let prior_cost = prior.iter().map(|p| p.cost_per_solve).sum::()
- / prior.len() as f32;
+ let recent_cost =
+ recent.iter().map(|p| p.cost_per_solve).sum::() / recent.len() as f32;
+ let prior_cost = prior.iter().map(|p| p.cost_per_solve).sum::() / prior.len() as f32;
// Cost should be decreasing; if it's not, that's a plateau
(prior_cost - recent_cost).abs() < self.improvement_threshold
@@ -561,13 +553,11 @@ impl ParetoFront {
/// Get the front point that maximizes a specific objective.
pub fn best_on(&self, objective_index: usize) -> Option<&ParetoPoint> {
- self.front
- .iter()
- .max_by(|a, b| {
- let va = a.objectives.get(objective_index).copied().unwrap_or(0.0);
- let vb = b.objectives.get(objective_index).copied().unwrap_or(0.0);
- va.partial_cmp(&vb).unwrap_or(std::cmp::Ordering::Equal)
- })
+ self.front.iter().max_by(|a, b| {
+ let va = a.objectives.get(objective_index).copied().unwrap_or(0.0);
+ let vb = b.objectives.get(objective_index).copied().unwrap_or(0.0);
+ va.partial_cmp(&vb).unwrap_or(std::cmp::Ordering::Equal)
+ })
}
/// Spread: range on each objective dimension. Higher = more diverse front.
@@ -751,12 +741,7 @@ impl MetaLearningEngine {
}
/// Record a decision outcome. Call after every arm selection.
- pub fn record_decision(
- &mut self,
- bucket: &ContextBucket,
- arm: &ArmId,
- reward: f32,
- ) {
+ pub fn record_decision(&mut self, bucket: &ContextBucket, arm: &ArmId, reward: f32) {
// 1. Track regret
self.regret.record(bucket, arm, reward);
@@ -801,22 +786,13 @@ impl MetaLearningEngine {
/// Get the curiosity-boosted score for an arm.
///
/// Combines the Thompson Sampling estimate with an exploration bonus.
- pub fn boosted_score(
- &self,
- bucket: &ContextBucket,
- arm: &ArmId,
- thompson_sample: f32,
- ) -> f32 {
+ pub fn boosted_score(&self, bucket: &ContextBucket, arm: &ArmId, thompson_sample: f32) -> f32 {
let bonus = self.curiosity.bonus(bucket, arm);
thompson_sample + bonus
}
/// Get the decaying beta mean for a bucket/arm (if tracked).
- pub fn decaying_mean(
- &self,
- bucket: &ContextBucket,
- arm: &ArmId,
- ) -> Option {
+ pub fn decaying_mean(&self, bucket: &ContextBucket, arm: &ArmId) -> Option {
let key = (bucket.clone(), arm.clone());
self.decaying_betas.get(&key).map(|db| db.mean())
}
@@ -1112,7 +1088,10 @@ mod tests {
})
.collect();
- assert_eq!(detector.check(&flat_points), PlateauAction::IncreaseExploration);
+ assert_eq!(
+ detector.check(&flat_points),
+ PlateauAction::IncreaseExploration
+ );
assert_eq!(detector.check(&flat_points), PlateauAction::TriggerTransfer);
assert_eq!(detector.check(&flat_points), PlateauAction::TriggerTransfer);
assert_eq!(detector.check(&flat_points), PlateauAction::InjectDiversity);
@@ -1142,8 +1121,14 @@ mod tests {
#[test]
fn test_pareto_dominates() {
assert!(ParetoFront::dominates(&[0.9, -0.1, 0.8], &[0.8, -0.2, 0.7]));
- assert!(!ParetoFront::dominates(&[0.9, -0.3, 0.8], &[0.8, -0.1, 0.7]));
- assert!(!ParetoFront::dominates(&[0.9, -0.1, 0.8], &[0.9, -0.1, 0.8])); // Equal
+ assert!(!ParetoFront::dominates(
+ &[0.9, -0.3, 0.8],
+ &[0.8, -0.1, 0.7]
+ ));
+ assert!(!ParetoFront::dominates(
+ &[0.9, -0.1, 0.8],
+ &[0.9, -0.1, 0.8]
+ )); // Equal
}
#[test]
diff --git a/crates/ruvector-domain-expansion/src/planning.rs b/crates/ruvector-domain-expansion/src/planning.rs
index 5700d1142..b3a2fa952 100644
--- a/crates/ruvector-domain-expansion/src/planning.rs
+++ b/crates/ruvector-domain-expansion/src/planning.rs
@@ -218,9 +218,7 @@ impl PlanningDomain {
}],
dependencies,
initial_state: Vec::new(),
- goal_state: (0..num_tasks)
- .map(|i| format!("job_{}_done", i))
- .collect(),
+ goal_state: (0..num_tasks).map(|i| format!("job_{}_done", i)).collect(),
max_cost: None,
max_steps: Some(num_tasks + 5),
}
@@ -316,10 +314,12 @@ impl PlanningDomain {
// Feature 8-15: Action type distribution
let action_counts: std::collections::HashMap<&str, usize> =
- plan.steps.iter().fold(std::collections::HashMap::new(), |mut acc, s| {
- *acc.entry(s.action.as_str()).or_insert(0) += 1;
- acc
- });
+ plan.steps
+ .iter()
+ .fold(std::collections::HashMap::new(), |mut acc, s| {
+ *acc.entry(s.action.as_str()).or_insert(0) += 1;
+ acc
+ });
let max_count = action_counts.values().max().copied().unwrap_or(0);
features[8] = action_counts.len() as f32 / 10.0;
features[9] = max_count as f32 / plan.steps.len().max(1) as f32;
@@ -436,8 +436,7 @@ impl PlanningDomain {
}
}
if !spec.dependencies.is_empty() {
- let dep_score =
- 1.0 - (dep_violations as f32 / spec.dependencies.len() as f32);
+ let dep_score = 1.0 - (dep_violations as f32 / spec.dependencies.len() as f32);
correctness = correctness * 0.5 + dep_score * 0.5;
}
@@ -478,7 +477,11 @@ impl PlanningDomain {
elegance = 1.0 - redundancy * 0.5;
// Bonus for parallel scheduling
- if plan.steps.windows(2).any(|w| w[0].start_time == w[1].start_time) {
+ if plan
+ .steps
+ .windows(2)
+ .any(|w| w[0].start_time == w[1].start_time)
+ {
elegance += 0.1;
}
elegance = elegance.clamp(0.0, 1.0);
@@ -633,10 +636,8 @@ mod tests {
let easy = domain.generate_tasks(1, 0.1);
let hard = domain.generate_tasks(1, 0.9);
- let easy_spec: PlanningTaskSpec =
- serde_json::from_value(easy[0].spec.clone()).unwrap();
- let hard_spec: PlanningTaskSpec =
- serde_json::from_value(hard[0].spec.clone()).unwrap();
+ let easy_spec: PlanningTaskSpec = serde_json::from_value(easy[0].spec.clone()).unwrap();
+ let hard_spec: PlanningTaskSpec = serde_json::from_value(hard[0].spec.clone()).unwrap();
assert!(
hard_spec.available_actions.len() >= easy_spec.available_actions.len(),
diff --git a/crates/ruvector-domain-expansion/src/policy_kernel.rs b/crates/ruvector-domain-expansion/src/policy_kernel.rs
index 0307dbe09..8fd307d01 100644
--- a/crates/ruvector-domain-expansion/src/policy_kernel.rs
+++ b/crates/ruvector-domain-expansion/src/policy_kernel.rs
@@ -90,7 +90,11 @@ impl PolicyKnobs {
/// Crossover two parent knobs to produce a child.
pub fn crossover(&self, other: &PolicyKnobs, rng: &mut impl Rng) -> Self {
Self {
- skip_mode: if rng.gen() { self.skip_mode } else { other.skip_mode },
+ skip_mode: if rng.gen() {
+ self.skip_mode
+ } else {
+ other.skip_mode
+ },
prepass_enabled: if rng.gen() {
self.prepass_enabled
} else {
@@ -254,12 +258,11 @@ impl PopulationSearch {
self.generation += 1;
// Sort by cost-adjusted fitness (descending)
- self.population
- .sort_by(|a, b| {
- b.cost_adjusted_fitness()
- .partial_cmp(&a.cost_adjusted_fitness())
- .unwrap_or(std::cmp::Ordering::Equal)
- });
+ self.population.sort_by(|a, b| {
+ b.cost_adjusted_fitness()
+ .partial_cmp(&a.cost_adjusted_fitness())
+ .unwrap_or(std::cmp::Ordering::Equal)
+ });
// Track best
if let Some(best) = self.population.first() {
@@ -296,7 +299,8 @@ impl PopulationSearch {
let child = if rng.gen::() < 0.3 && elites.len() > 1 {
// Crossover
- let other_idx = (parent_idx + 1 + rng.gen_range(0..elites.len() - 1)) % elites.len();
+ let other_idx =
+ (parent_idx + 1 + rng.gen_range(0..elites.len() - 1)) % elites.len();
let mut child = PolicyKernel::new(child_id);
child.knobs = elites[parent_idx]
.knobs
@@ -329,10 +333,7 @@ impl PopulationSearch {
pub fn stats(&self) -> PopulationStats {
let fitnesses: Vec = self.population.iter().map(|k| k.fitness()).collect();
let mean = fitnesses.iter().sum::() / fitnesses.len().max(1) as f32;
- let max = fitnesses
- .iter()
- .cloned()
- .fold(f32::NEG_INFINITY, f32::max);
+ let max = fitnesses.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
let min = fitnesses.iter().cloned().fold(f32::INFINITY, f32::min);
let variance = fitnesses.iter().map(|f| (f - mean).powi(2)).sum::()
/ fitnesses.len().max(1) as f32;
@@ -344,7 +345,11 @@ impl PopulationSearch {
max_fitness: max,
min_fitness: min,
fitness_variance: variance,
- best_ever_fitness: self.best_kernel.as_ref().map(|k| k.fitness()).unwrap_or(0.0),
+ best_ever_fitness: self
+ .best_kernel
+ .as_ref()
+ .map(|k| k.fitness())
+ .unwrap_or(0.0),
}
}
}
@@ -378,8 +383,8 @@ mod tests {
let knobs = PolicyKnobs::default_knobs();
let mut rng = rand::thread_rng();
let mutated = knobs.mutate(&mut rng, 1.0); // high mutation rate
- // At least something should differ (probabilistically)
- // Can't guarantee due to randomness, but bounds should hold
+ // At least something should differ (probabilistically)
+ // Can't guarantee due to randomness, but bounds should hold
assert!(mutated.speculation_threshold >= 0.01 && mutated.speculation_threshold <= 0.5);
assert!(mutated.exploration_budget >= 0.01 && mutated.exploration_budget <= 0.5);
}
diff --git a/crates/ruvector-domain-expansion/src/rust_synthesis.rs b/crates/ruvector-domain-expansion/src/rust_synthesis.rs
index 6ddb74a57..a90b22b0a 100644
--- a/crates/ruvector-domain-expansion/src/rust_synthesis.rs
+++ b/crates/ruvector-domain-expansion/src/rust_synthesis.rs
@@ -170,14 +170,16 @@ impl RustSynthesisDomain {
RustTaskSpec {
category: RustTaskCategory::DataStructure,
signature: "struct LRUCache".into(),
- description:
- "Implement an LRU cache with get, put, and capacity eviction.".into(),
+ description: "Implement an LRU cache with get, put, and capacity eviction.".into(),
test_cases: vec![
(
"cap=2; put(1,'a'); put(2,'b'); get(1); put(3,'c'); get(2)".into(),
"None".into(),
),
- ("cap=1; put(1,'a'); put(2,'b'); get(1)".into(), "None".into()),
+ (
+ "cap=1; put(1,'a'); put(2,'b'); get(1)".into(),
+ "None".into(),
+ ),
],
required_traits: Vec::new(),
banned_patterns: vec!["unsafe".into()],
diff --git a/crates/ruvector-domain-expansion/src/rvf_bridge.rs b/crates/ruvector-domain-expansion/src/rvf_bridge.rs
index 04f0720dd..2bbe13324 100644
--- a/crates/ruvector-domain-expansion/src/rvf_bridge.rs
+++ b/crates/ruvector-domain-expansion/src/rvf_bridge.rs
@@ -9,8 +9,8 @@
//! Requires the `rvf` feature to be enabled.
use rvf_types::{SegmentFlags, SegmentType};
-use rvf_wire::writer::write_segment;
use rvf_wire::reader::{read_segment, validate_segment};
+use rvf_wire::writer::write_segment;
use crate::cost_curve::{AccelerationScoreboard, CostCurve};
use crate::domain::DomainId;
@@ -60,8 +60,7 @@ impl From for TransferPrior {
fn from(w: WireTransferPrior) -> Self {
let mut bucket_priors = std::collections::HashMap::new();
for (bucket, arms) in w.bucket_priors {
- let arm_map: std::collections::HashMap =
- arms.into_iter().collect();
+ let arm_map: std::collections::HashMap = arms.into_iter().collect();
bucket_priors.insert(bucket, arm_map);
}
let cost_ema_priors: std::collections::HashMap =
@@ -153,8 +152,7 @@ pub fn transfer_prior_from_segment(data: &[u8]) -> Result Result [u8; 32] {
let wire: WireTransferPrior = prior.into();
- let payload =
- serde_json::to_vec(&wire).expect("WireTransferPrior serialization cannot fail");
+ let payload = serde_json::to_vec(&wire).expect("WireTransferPrior serialization cannot fail");
rvf_crypto::shake256_256(&payload)
}
@@ -401,11 +397,7 @@ pub fn extract_solver_priors(
.iter()
.map(|(arm, params)| (arm.0.clone(), params.alpha, params.beta))
.collect();
- let cost_ema = prior
- .cost_ema_priors
- .get(bucket)
- .copied()
- .unwrap_or(1.0);
+ let cost_ema = prior.cost_ema_priors.get(bucket).copied().unwrap_or(1.0);
SolverPriorExchange {
bucket_key,
@@ -514,7 +506,10 @@ impl std::fmt::Display for RvfBridgeError {
Self::Rvf(e) => write!(f, "RVF error: {e}"),
Self::Json(e) => write!(f, "JSON error: {e}"),
Self::WrongSegmentType { expected, got } => {
- write!(f, "wrong segment type: expected 0x{expected:02X}, got 0x{got:02X}")
+ write!(
+ f,
+ "wrong segment type: expected 0x{expected:02X}, got 0x{got:02X}"
+ )
}
Self::TruncatedTlv => write!(f, "TLV payload truncated"),
}
@@ -533,7 +528,7 @@ impl std::error::Error for RvfBridgeError {
#[cfg(test)]
mod tests {
use super::*;
- use crate::cost_curve::{CostCurvePoint, ConvergenceThresholds};
+ use crate::cost_curve::{ConvergenceThresholds, CostCurvePoint};
#[test]
fn transfer_prior_round_trip() {
@@ -542,11 +537,7 @@ mod tests {
difficulty_tier: "medium".into(),
category: "algo".into(),
};
- prior.update_posterior(
- bucket,
- crate::transfer::ArmId("greedy".into()),
- 0.85,
- );
+ prior.update_posterior(bucket, crate::transfer::ArmId("greedy".into()), 0.85);
let segment = transfer_prior_to_segment(&prior, 1);
let decoded = transfer_prior_from_segment(&segment).unwrap();
@@ -567,10 +558,7 @@ mod tests {
#[test]
fn cost_curve_round_trip() {
- let mut curve = CostCurve::new(
- DomainId("test".into()),
- ConvergenceThresholds::default(),
- );
+ let mut curve = CostCurve::new(DomainId("test".into()), ConvergenceThresholds::default());
curve.record(CostCurvePoint {
cycle: 0,
accuracy: 0.3,
@@ -592,7 +580,10 @@ mod tests {
let kernel = PolicyKernel::new("k".into());
let segment = policy_kernel_to_segment(&kernel, 1);
let result = transfer_prior_from_segment(&segment);
- assert!(matches!(result, Err(RvfBridgeError::WrongSegmentType { .. })));
+ assert!(matches!(
+ result,
+ Err(RvfBridgeError::WrongSegmentType { .. })
+ ));
}
#[test]
@@ -701,10 +692,7 @@ mod tests {
fn multi_segment_assembly() {
let prior = TransferPrior::uniform(DomainId("d1".into()));
let kernel = PolicyKernel::new("k0".into());
- let mut curve = CostCurve::new(
- DomainId("d1".into()),
- ConvergenceThresholds::default(),
- );
+ let mut curve = CostCurve::new(DomainId("d1".into()), ConvergenceThresholds::default());
curve.record(CostCurvePoint {
cycle: 0,
accuracy: 0.5,
@@ -714,24 +702,14 @@ mod tests {
timestamp: 0.0,
});
- let assembled = assemble_domain_expansion_segments(
- &[prior],
- &[kernel],
- &[curve],
- 100,
- );
+ let assembled = assemble_domain_expansion_segments(&[prior], &[kernel], &[curve], 100);
// Should contain 3 segments, each 64-byte aligned
assert!(assembled.len() >= 3 * 64);
assert_eq!(assembled.len() % 64, 0);
// Verify first segment header magic
- let magic = u32::from_le_bytes([
- assembled[0],
- assembled[1],
- assembled[2],
- assembled[3],
- ]);
+ let magic = u32::from_le_bytes([assembled[0], assembled[1], assembled[2], assembled[3]]);
assert_eq!(magic, rvf_types::SEGMENT_MAGIC);
}
}
diff --git a/crates/ruvector-domain-expansion/src/tool_orchestration.rs b/crates/ruvector-domain-expansion/src/tool_orchestration.rs
index 8064d3d92..d0a031c99 100644
--- a/crates/ruvector-domain-expansion/src/tool_orchestration.rs
+++ b/crates/ruvector-domain-expansion/src/tool_orchestration.rs
@@ -276,7 +276,13 @@ impl ToolOrchestrationDomain {
fn gen_parallel_coordination(&self, difficulty: f32) -> OrchestrationTaskSpec {
let tools = Self::base_tools();
- let parallelism = if difficulty < 0.3 { 2 } else if difficulty < 0.7 { 4 } else { 8 };
+ let parallelism = if difficulty < 0.3 {
+ 2
+ } else if difficulty < 0.7 {
+ 4
+ } else {
+ 8
+ };
OrchestrationTaskSpec {
category: OrchestrationCategory::ParallelCoordination,
@@ -311,7 +317,11 @@ impl ToolOrchestrationDomain {
plan.calls.iter().map(|c| c.tool_name.as_str()).collect();
features[1] = unique_tools.len() as f32 / 10.0;
// Parallelism ratio
- let parallel_calls = plan.calls.iter().filter(|c| c.parallel_group.is_some()).count();
+ let parallel_calls = plan
+ .calls
+ .iter()
+ .filter(|c| c.parallel_group.is_some())
+ .count();
features[2] = parallel_calls as f32 / plan.calls.len().max(1) as f32;
// Fallback coverage
let fallback_calls = plan.calls.iter().filter(|c| c.fallback.is_some()).count();
@@ -322,8 +332,14 @@ impl ToolOrchestrationDomain {
// Feature 8-15: Tool type usage
let tool_names = [
- "extract", "embed", "search", "generate", "transform",
- "execute", "fetch", "cache",
+ "extract",
+ "embed",
+ "search",
+ "generate",
+ "transform",
+ "execute",
+ "fetch",
+ "cache",
];
for (i, name) in tool_names.iter().enumerate() {
features[8 + i] = plan
@@ -375,11 +391,7 @@ impl ToolOrchestrationDomain {
features
}
- fn score_orchestration(
- &self,
- spec: &OrchestrationTaskSpec,
- solution: &Solution,
- ) -> Evaluation {
+ fn score_orchestration(&self, spec: &OrchestrationTaskSpec, solution: &Solution) -> Evaluation {
let content = &solution.content;
let mut correctness = 0.0f32;
let mut efficiency = 0.5f32;
@@ -457,8 +469,12 @@ impl ToolOrchestrationDomain {
.error_scenarios
.iter()
.filter(|scenario| {
- plan.calls.iter().any(|c| c.fallback.is_some() || c.retries > 0)
- || plan.error_strategy.contains(&scenario.as_str()[..scenario.len().min(10)])
+ plan.calls
+ .iter()
+ .any(|c| c.fallback.is_some() || c.retries > 0)
+ || plan
+ .error_strategy
+ .contains(&scenario.as_str()[..scenario.len().min(10)])
})
.count() as f32
/ spec.error_scenarios.len() as f32;
@@ -527,10 +543,7 @@ impl ToolOrchestrationDomain {
elegance += 0.1;
}
- let validation_used = plan
- .calls
- .iter()
- .any(|c| c.tool_name.contains("validat"));
+ let validation_used = plan.calls.iter().any(|c| c.tool_name.contains("validat"));
if validation_used {
elegance += 0.1;
}
@@ -706,6 +719,9 @@ mod tests {
let spec: OrchestrationTaskSpec = serde_json::from_value(t.spec.clone()).unwrap();
!spec.error_scenarios.is_empty()
});
- assert!(has_error_tasks, "High difficulty should produce error scenarios");
+ assert!(
+ has_error_tasks,
+ "High difficulty should produce error scenarios"
+ );
}
}
diff --git a/crates/ruvector-domain-expansion/src/transfer.rs b/crates/ruvector-domain-expansion/src/transfer.rs
index a7ab30e33..5cd0c5315 100644
--- a/crates/ruvector-domain-expansion/src/transfer.rs
+++ b/crates/ruvector-domain-expansion/src/transfer.rs
@@ -98,8 +98,7 @@ impl BetaParams {
x.clamp(0.001, 0.999)
} else {
// Fallback: simple power approximation
- p.powf(1.0 / a) * (1.0 - (1.0 - p).powf(1.0 / b))
- + p.powf(1.0 / a) * 0.5
+ p.powf(1.0 / a) * (1.0 - (1.0 - p).powf(1.0 / b)) + p.powf(1.0 / a) * 0.5
}
}
@@ -169,12 +168,7 @@ impl TransferPrior {
}
/// Update the posterior for a bucket/arm with a new observation.
- pub fn update_posterior(
- &mut self,
- bucket: ContextBucket,
- arm: ArmId,
- reward: f32,
- ) {
+ pub fn update_posterior(&mut self, bucket: ContextBucket, arm: ArmId, reward: f32) {
let arms = self.bucket_priors.entry(bucket.clone()).or_default();
let params = arms.entry(arm).or_insert_with(BetaParams::uniform);
params.update(reward);
@@ -319,7 +313,9 @@ impl MetaThompsonEngine {
/// Extract transfer prior from a domain (for shipping to another domain).
pub fn extract_prior(&self, domain_id: &DomainId) -> Option {
- self.domain_priors.get(domain_id).map(|p| p.extract_summary())
+ self.domain_priors
+ .get(domain_id)
+ .map(|p| p.extract_summary())
}
/// Get all domain IDs currently tracked.
@@ -518,7 +514,10 @@ mod tests {
// Domain2 should now have informative priors
let d2_prior = engine.domain_priors.get(&domain2).unwrap();
let a_params = d2_prior.get_prior(&bucket, &ArmId("strategy_a".into()));
- assert!(a_params.mean() > 0.5, "Transferred prior should favor strategy_a");
+ assert!(
+ a_params.mean() > 0.5,
+ "Transferred prior should favor strategy_a"
+ );
}
#[test]
@@ -545,10 +544,10 @@ mod tests {
let v = TransferVerification::verify(
DomainId("d1".into()),
DomainId("d2".into()),
- 0.8, // source before
- 0.5, // source after (regression!)
- 0.3, // target before
- 0.7, // target after
+ 0.8, // source before
+ 0.5, // source after (regression!)
+ 0.3, // target before
+ 0.7, // target after
100,
40,
);
@@ -560,10 +559,7 @@ mod tests {
#[test]
fn test_uncertainty_detection() {
- let mut engine = MetaThompsonEngine::new(vec![
- "a".into(),
- "b".into(),
- ]);
+ let mut engine = MetaThompsonEngine::new(vec!["a".into(), "b".into()]);
let domain = DomainId("test".into());
engine.init_domain_uniform(domain.clone());
@@ -578,20 +574,8 @@ mod tests {
// After many observations favoring one arm, should be certain
for _ in 0..100 {
- engine.record_outcome(
- &domain,
- bucket.clone(),
- ArmId("a".into()),
- 0.95,
- 1.0,
- );
- engine.record_outcome(
- &domain,
- bucket.clone(),
- ArmId("b".into()),
- 0.1,
- 1.0,
- );
+ engine.record_outcome(&domain, bucket.clone(), ArmId("a".into()), 0.95, 1.0);
+ engine.record_outcome(&domain, bucket.clone(), ArmId("b".into()), 0.1, 1.0);
}
assert!(!engine.is_uncertain(&domain, &bucket, 0.1));
diff --git a/crates/ruvector-postgres/Cargo.toml b/crates/ruvector-postgres/Cargo.toml
index 32a0f5fe6..dcc5464ab 100644
--- a/crates/ruvector-postgres/Cargo.toml
+++ b/crates/ruvector-postgres/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "ruvector-postgres"
-version = "2.0.4"
+version = "0.3.0"
edition = "2021"
license = "MIT"
description = "High-performance PostgreSQL vector database extension v2 - pgvector drop-in replacement with 230+ SQL functions, SIMD acceleration, Flash Attention, GNN layers, hybrid search, multi-tenancy, self-healing, and self-learning capabilities"
@@ -58,10 +58,21 @@ routing = [] # Tiny Dancer AI routing
embeddings = ["dep:fastembed"] # Local embedding generation
gated-transformer = ["dep:ruvector-mincut-gated-transformer"] # Mincut-gated transformer
+# v0.3 features — Solver, Math, TDA, Extended Attention, Sona, Domain Expansion
+solver = ["dep:ruvector-solver"]
+math-distances = ["dep:ruvector-math"]
+tda = ["dep:ruvector-math"]
+attention-extended = ["attention", "dep:ruvector-attention"]
+sona-learning = ["dep:ruvector-sona"]
+domain-expansion = ["dep:ruvector-domain-expansion"]
+
# Feature bundles
ai-complete = ["learning", "attention", "gnn", "routing", "gated-transformer"]
graph-complete = ["hyperbolic", "sparse", "graph"]
all-features = ["ai-complete", "graph-complete", "embeddings"]
+analytics-complete = ["solver", "math-distances", "tda"]
+ai-complete-v3 = ["ai-complete", "attention-extended", "sona-learning"]
+all-features-v3 = ["all-features", "analytics-complete", "ai-complete-v3", "domain-expansion"]
[dependencies]
# PostgreSQL extension framework
@@ -125,6 +136,13 @@ fastembed = { version = "5", optional = true }
# Mincut-gated transformer (optional)
ruvector-mincut-gated-transformer = { version = "0.1.0", path = "../ruvector-mincut-gated-transformer", optional = true }
+# v0.3 optional dependencies
+ruvector-solver = { version = "2.0", path = "../ruvector-solver", features = ["full"], optional = true }
+ruvector-math = { version = "2.0", path = "../ruvector-math", optional = true }
+ruvector-attention = { version = "0.1", path = "../ruvector-attention", optional = true }
+ruvector-sona = { version = "0.1", path = "../sona", features = ["serde-support"], optional = true }
+ruvector-domain-expansion = { version = "2.0", path = "../ruvector-domain-expansion", optional = true }
+
# Optional: Use ruvector-core for shared implementations
# Uncomment to link with existing ruvector-core crate
# ruvector-core = { path = "../ruvector-core", optional = true }
diff --git a/crates/ruvector-postgres/Dockerfile b/crates/ruvector-postgres/Dockerfile
index 46fe25f96..d54b8ca95 100644
--- a/crates/ruvector-postgres/Dockerfile
+++ b/crates/ruvector-postgres/Dockerfile
@@ -32,18 +32,79 @@ RUN apt-get update && apt-get install -y \
# Install cargo-pgrx
RUN cargo install cargo-pgrx --version 0.12.9 --locked
-# Set up workspace
-WORKDIR /build
-
-# Create a minimal standalone Cargo.toml for ruvector-postgres
-# (not the workspace version)
-COPY crates/ruvector-postgres/ ./
-
-# Copy the ruvector-mincut-gated-transformer dependency (required for gated-transformer feature)
-COPY crates/ruvector-mincut-gated-transformer /build/../ruvector-mincut-gated-transformer/
+# Set up workspace root — dependency crates use workspace inheritance
+WORKDIR /workspace
+
+# Create a minimal workspace Cargo.toml so dependency crates can resolve
+# workspace inheritance (edition.workspace, version.workspace, etc.)
+RUN cat > /workspace/Cargo.toml << 'WORKSPACE_EOF'
+[workspace]
+members = [
+ "crates/ruvector-postgres",
+ "crates/ruvector-solver",
+ "crates/ruvector-math",
+ "crates/ruvector-attention",
+ "crates/sona",
+ "crates/ruvector-domain-expansion",
+ "crates/ruvector-mincut-gated-transformer",
+]
+resolver = "2"
+
+[workspace.package]
+version = "2.0.4"
+edition = "2021"
+rust-version = "1.77"
+license = "MIT"
+authors = ["Ruvector Team"]
+repository = "https://github.com/ruvnet/ruvector"
+
+[workspace.dependencies]
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+thiserror = "2.0"
+rand = "0.8"
+rand_distr = "0.4"
+tracing = "0.1"
+rayon = "1.10"
+crossbeam = "0.8"
+dashmap = "6.1"
+parking_lot = "0.12"
+once_cell = "1.20"
+criterion = { version = "0.5", features = ["html_reports"] }
+proptest = "1.5"
+nalgebra = { version = "0.33", default-features = false, features = ["std"] }
+ndarray = "0.16"
+chrono = "0.4"
+anyhow = "1.0"
+
+[profile.release]
+opt-level = 3
+lto = "fat"
+codegen-units = 1
+strip = true
+panic = "unwind"
+WORKSPACE_EOF
+
+# Copy ruvector-postgres source
+COPY crates/ruvector-postgres/ /workspace/crates/ruvector-postgres/
+
+# Copy dependency crates
+COPY crates/ruvector-mincut-gated-transformer /workspace/crates/ruvector-mincut-gated-transformer/
+COPY crates/ruvector-solver /workspace/crates/ruvector-solver/
+COPY crates/ruvector-math /workspace/crates/ruvector-math/
+COPY crates/ruvector-attention /workspace/crates/ruvector-attention/
+COPY crates/sona /workspace/crates/sona/
+COPY crates/ruvector-domain-expansion /workspace/crates/ruvector-domain-expansion/
+
+# Copy rvf crates (path deps of ruvector-domain-expansion)
+COPY crates/rvf/rvf-types /workspace/crates/rvf/rvf-types/
+COPY crates/rvf/rvf-wire /workspace/crates/rvf/rvf-wire/
+COPY crates/rvf/rvf-crypto /workspace/crates/rvf/rvf-crypto/
# Use the workspace Cargo.lock to pin dependencies and avoid registry parsing issues
-COPY Cargo.lock ./
+COPY Cargo.lock /workspace/
+
+WORKDIR /workspace/crates/ruvector-postgres
# Initialize pgrx with system PostgreSQL
RUN cargo pgrx init --pg17=/usr/lib/postgresql/17/bin/pg_config
@@ -55,8 +116,8 @@ RUN cargo fetch
# This uses the git protocol instead of sparse which skips problematic index entries
ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=git
-# Build the extension with all features including embeddings and gated-transformer
-RUN cargo pgrx package --features "pg17 index-all quant-all embeddings gated-transformer"
+# Build the extension with all features including v0.3 modules
+RUN cargo pgrx package --features "pg17 index-all quant-all embeddings gated-transformer analytics-complete attention-extended sona-learning domain-expansion"
# Build the model downloader binary
RUN cargo build --release --bin download-models --features "embeddings"
@@ -71,15 +132,15 @@ RUN mkdir -p /opt/ruvector/models && \
# Copy the pre-built SQL schema file (with sparse functions removed)
# cargo pgrx schema doesn't work reliably in Docker, so we use the hand-crafted file
-RUN cp /build/sql/ruvector--0.1.0.sql /build/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql && \
- echo "SQL schema copied with $(grep -c 'CREATE FUNCTION\|CREATE OR REPLACE FUNCTION' /build/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql) functions"
+RUN cp /workspace/crates/ruvector-postgres/sql/ruvector--0.1.0.sql /workspace/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql && \
+ echo "SQL schema copied with $(grep -c 'CREATE FUNCTION\|CREATE OR REPLACE FUNCTION' /workspace/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql) functions"
# Verify the extension files are complete
-RUN ls -la /build/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ && \
+RUN ls -la /workspace/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ && \
echo "=== First 20 lines of SQL ===" && \
- head -20 /build/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql && \
+ head -20 /workspace/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql && \
echo "=== CREATE FUNCTION count ===" && \
- grep -c "CREATE FUNCTION\|CREATE OR REPLACE FUNCTION" /build/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql
+ grep -c "CREATE FUNCTION\|CREATE OR REPLACE FUNCTION" /workspace/target/release/ruvector-pg17/usr/share/postgresql/17/extension/ruvector--0.1.0.sql
# Runtime stage
FROM postgres:17-bookworm
@@ -87,7 +148,7 @@ FROM postgres:17-bookworm
# Labels
LABEL maintainer="ruvector team"
LABEL description="PostgreSQL with ruvector extension - high-performance vector similarity search with local embeddings"
-LABEL version="2.0.4"
+LABEL version="0.3.0"
# Set embedding model cache path - models are pre-downloaded during build
# FASTEMBED_CACHE_DIR is the correct env var for fastembed-rs
@@ -97,22 +158,17 @@ ENV FASTEMBED_CACHE_DIR=/opt/ruvector/models
COPY --from=builder /opt/ruvector/models /opt/ruvector/models
# Copy the built extension from builder
-# Note: pgrx generates correct SQL from #[pg_extern] macros in target directory
-# The extension/* directory includes:
-# - ruvector.control (version info)
-# - ruvector--*.sql (pgrx-generated SQL with correct function symbols)
-# - Any additional SQL migration files
-COPY --from=builder /build/target/release/ruvector-pg17/usr/share/postgresql/17/extension/* \
+# Note: In a workspace, target/ is at the workspace root /workspace/target/
+COPY --from=builder /workspace/target/release/ruvector-pg17/usr/share/postgresql/17/extension/* \
/usr/share/postgresql/17/extension/
-COPY --from=builder /build/target/release/ruvector-pg17/usr/lib/postgresql/17/lib/* \
+COPY --from=builder /workspace/target/release/ruvector-pg17/usr/lib/postgresql/17/lib/* \
/usr/lib/postgresql/17/lib/
# Add initialization scripts
RUN mkdir -p /docker-entrypoint-initdb.d
# Copy the full initialization script with extension creation, role setup, and tests
-# The init.sql is copied from the builder stage where it was included in the source copy
-COPY --from=builder /build/docker/init.sql /docker-entrypoint-initdb.d/01-init.sql
+COPY --from=builder /workspace/crates/ruvector-postgres/docker/init.sql /docker-entrypoint-initdb.d/01-init.sql
# Health check
HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \
diff --git a/crates/ruvector-postgres/README.md b/crates/ruvector-postgres/README.md
index fd5baa570..9c5a924c8 100644
--- a/crates/ruvector-postgres/README.md
+++ b/crates/ruvector-postgres/README.md
@@ -8,7 +8,17 @@
[](https://www.npmjs.com/package/@ruvector/core)
[](docs/SECURITY_AUDIT_REPORT.md)
-**The most advanced PostgreSQL vector database extension.** A drop-in pgvector replacement with **290+ SQL functions**, SIMD acceleration, 39 attention mechanisms, GNN layers, hyperbolic embeddings, mincut-gated transformers, hybrid search, multi-tenancy, self-healing, and self-learning capabilities.
+**The most advanced PostgreSQL vector database extension.** A drop-in pgvector replacement with **143 SQL functions**, SIMD acceleration, 46 attention mechanisms, GNN layers, hyperbolic embeddings, mincut-gated transformers, hybrid search, multi-tenancy, self-healing, and self-learning capabilities.
+
+## v0.3.0 Highlights (February 2026)
+
+- **Solver Integration**: 11 functions -- PageRank (3 variants), conjugate gradient, Laplacian solver, effective resistance, matrix analysis
+- **Math Distances & Spectral**: 12 functions -- Wasserstein/Sinkhorn OT, KL/Jensen-Shannon divergence, spectral clustering, Chebyshev graph filters, product manifold distances
+- **Topological Data Analysis**: 7 functions -- persistent homology, Betti numbers, bottleneck/Wasserstein diagram distance, Vietoris-Rips complexes, embedding drift detection
+- **Extended Attention**: 7 functions -- O(n) linear, sliding window, cross, sparse top-k, mixture-of-experts, hyperbolic (Poincare ball), benchmarking
+- **Sona Learning**: 4 functions -- micro-LoRA trajectory learning, EWC++ forgetting prevention, learned transform application
+- **Domain Expansion**: Cross-domain transfer with contextual bandits
+- **143 SQL functions** across 20+ feature-gated modules
## v2.0.0 Highlights (December 2025)
@@ -29,7 +39,7 @@
| Vector Search | HNSW, IVFFlat | HNSW, IVFFlat (optimized) |
| Distance Metrics | 3 | 8+ (including hyperbolic) |
| **Local Embeddings** | - | **6 models (fastembed)** |
-| **Attention Mechanisms** | - | **39 types** |
+| **Attention Mechanisms** | - | **46 types** |
| **Gated Transformers** | - | **Mincut-coherence control** |
| **Hybrid Search** | - | **RRF + Linear fusion** |
| **Graph Neural Networks** | - | **GCN, GraphSAGE, GAT** |
@@ -43,6 +53,11 @@
| **Agent Routing** | - | **Tiny Dancer** |
| **Graph/Cypher** | - | **Full support** |
| **SPARQL/RDF** | - | **W3C SPARQL 1.1** |
+| **Sublinear Solvers** | - | **PageRank, CG, Laplacian** |
+| **Math Distances** | - | **Wasserstein, Sinkhorn, spectral** |
+| **Topological Data Analysis** | - | **Persistent homology, Betti** |
+| **Sona Learning** | - | **Micro-LoRA, EWC++** |
+| **Domain Expansion** | - | **Cross-domain transfer** |
| AVX-512/NEON SIMD | Partial | **Full** |
| Quantization | No | **Scalar, Product, Binary** |
@@ -134,7 +149,7 @@ ORDER BY distance
LIMIT 10;
```
-## 290+ SQL Functions
+## 143 SQL Functions
RuVector exposes all advanced AI capabilities as native PostgreSQL functions.
@@ -200,7 +215,7 @@ SELECT ruvector_bm25_score(query_terms, doc_freqs, doc_len, avg_doc_len, total_d
SELECT ruvector_tf_idf(term_freq, doc_freq, total_docs);
```
-### 39 Attention Mechanisms
+### 46 Attention Mechanisms
Full transformer-style attention in PostgreSQL.
@@ -230,6 +245,121 @@ SELECT ruvector_attention_cross(query, context_keys, context_values);
SELECT ruvector_attention_self(input, num_heads);
```
+### Sublinear Solvers (11 functions)
+
+Graph analytics powered by ruvector-solver's O(log n) to O(sqrt(n)) algorithms.
+
+```sql
+-- PageRank (Forward Push, O(1/epsilon))
+SELECT ruvector_pagerank('{"edges":[[0,1],[1,2],[2,0]]}'::jsonb);
+
+-- Personalized PageRank from a source node
+SELECT ruvector_pagerank_personalized('{"edges":[[0,1],[1,2],[2,0]]}'::jsonb, 0);
+
+-- Solve sparse linear system Ax=b (Neumann or CG)
+SELECT ruvector_solve_sparse(matrix_json, ARRAY[1.0, 2.0]::real[], 'cg');
+
+-- Conjugate Gradient for SPD systems
+SELECT ruvector_conjugate_gradient(matrix_json, rhs);
+
+-- Graph Laplacian solver
+SELECT ruvector_solve_laplacian(laplacian_json, rhs);
+
+-- Effective resistance between nodes
+SELECT ruvector_effective_resistance(laplacian_json, 0, 1);
+
+-- Matrix sparsity analysis
+SELECT ruvector_matrix_analyze(matrix_json);
+
+-- List available solver algorithms
+SELECT * FROM ruvector_solver_info();
+```
+
+### Math Distances & Spectral (12 functions)
+
+Statistical distances, optimal transport, and spectral graph processing.
+
+```sql
+-- Wasserstein (Earth Mover's) distance
+SELECT ruvector_wasserstein_distance(ARRAY[0.5,0.5]::real[], ARRAY[0.3,0.7]::real[]);
+
+-- Sinkhorn optimal transport with regularization
+SELECT ruvector_sinkhorn_distance(cost_json, weights_a, weights_b);
+
+-- KL divergence and Jensen-Shannon divergence
+SELECT ruvector_kl_divergence(ARRAY[0.5,0.5]::real[], ARRAY[0.3,0.7]::real[]);
+SELECT ruvector_jensen_shannon(ARRAY[0.5,0.5]::real[], ARRAY[0.3,0.7]::real[]);
+
+-- Spectral clustering
+SELECT ruvector_spectral_cluster(adjacency_json, 3); -- k=3 clusters
+
+-- Chebyshev polynomial graph filter
+SELECT ruvector_chebyshev_filter(adj_json, signal, 'low_pass', 10);
+
+-- Heat kernel graph diffusion
+SELECT ruvector_graph_diffusion(adj_json, signal);
+
+-- Product manifold distance (Euclidean x Hyperbolic x Spherical)
+SELECT ruvector_product_manifold_distance(a, b, 3, 2, 1);
+
+-- Spherical (great-circle) distance
+SELECT ruvector_spherical_distance(ARRAY[1,0,0]::real[], ARRAY[0,1,0]::real[]);
+```
+
+### Topological Data Analysis (7 functions)
+
+Persistent homology and topological feature extraction from point clouds.
+
+```sql
+-- Persistent homology via Vietoris-Rips filtration
+SELECT ruvector_persistent_homology('[[1,0],[0,1],[-1,0],[0,-1]]'::jsonb, 1, 3.0);
+
+-- Betti numbers at a given radius
+SELECT ruvector_betti_numbers('[[0,0],[1,0],[0,1]]'::jsonb, 1.5);
+
+-- Bottleneck distance between persistence diagrams
+SELECT ruvector_bottleneck_distance(diagram_a, diagram_b);
+
+-- Wasserstein distance between persistence diagrams
+SELECT ruvector_persistence_wasserstein(diagram_a, diagram_b, 2);
+
+-- Topological summary (Betti + persistence statistics + entropy)
+SELECT ruvector_topological_summary(points_json, 1);
+
+-- Embedding drift detection via topology
+SELECT ruvector_embedding_drift(old_embeddings, new_embeddings);
+
+-- Build Vietoris-Rips simplicial complex
+SELECT ruvector_vietoris_rips(points_json, 2.0, 2);
+```
+
+### Sona Learning (4 functions)
+
+Self-Optimizing Neural Architecture with micro-LoRA and EWC++ forgetting prevention.
+
+```sql
+-- Record a learning trajectory
+SELECT ruvector_sona_learn('my_table', trajectory_json);
+
+-- Apply learned LoRA transform to an embedding
+SELECT ruvector_sona_apply('my_table', embedding);
+
+-- Check EWC++ forgetting metrics
+SELECT ruvector_sona_ewc_status('my_table');
+
+-- Get Sona engine statistics
+SELECT ruvector_sona_stats('my_table');
+```
+
+### Domain Expansion (1 function)
+
+Cross-domain transfer learning with contextual bandits.
+
+```sql
+-- Transfer embeddings to a target domain
+SELECT ruvector_domain_transfer(embeddings_json, 'target_domain');
+```
+
### Graph Neural Networks (5 functions)
GNN layers for graph-structured data.
diff --git a/crates/ruvector-postgres/docker/Dockerfile b/crates/ruvector-postgres/docker/Dockerfile
index d4d9401d3..ec99df9a5 100644
--- a/crates/ruvector-postgres/docker/Dockerfile
+++ b/crates/ruvector-postgres/docker/Dockerfile
@@ -48,7 +48,8 @@ FROM base-builder AS deps-builder
ARG PG_VERSION
-WORKDIR /build/ruvector-postgres
+# Use workspace layout: /build is the workspace root
+WORKDIR /build/crates/ruvector-postgres
# Copy only dependency files first for better caching
COPY crates/ruvector-postgres/Cargo.toml ./
@@ -70,8 +71,70 @@ FROM deps-builder AS extension-builder
ARG PG_VERSION
+# Create a minimal workspace Cargo.toml so dependency crates can resolve
+# workspace inheritance (edition.workspace, version.workspace, etc.)
+RUN cat > /build/Cargo.toml << 'WORKSPACE_EOF'
+[workspace]
+members = [
+ "crates/ruvector-postgres",
+ "crates/ruvector-solver",
+ "crates/ruvector-math",
+ "crates/ruvector-attention",
+ "crates/sona",
+ "crates/ruvector-domain-expansion",
+ "crates/ruvector-mincut-gated-transformer",
+]
+resolver = "2"
+
+[workspace.package]
+version = "2.0.4"
+edition = "2021"
+rust-version = "1.77"
+license = "MIT"
+authors = ["Ruvector Team"]
+repository = "https://github.com/ruvnet/ruvector"
+
+[workspace.dependencies]
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+thiserror = "2.0"
+rand = "0.8"
+rand_distr = "0.4"
+tracing = "0.1"
+rayon = "1.10"
+crossbeam = "0.8"
+dashmap = "6.1"
+parking_lot = "0.12"
+once_cell = "1.20"
+criterion = { version = "0.5", features = ["html_reports"] }
+proptest = "1.5"
+nalgebra = { version = "0.33", default-features = false, features = ["std"] }
+ndarray = "0.16"
+chrono = "0.4"
+anyhow = "1.0"
+
+[profile.release]
+opt-level = 3
+lto = "fat"
+codegen-units = 1
+strip = true
+panic = "unwind"
+WORKSPACE_EOF
+
# Copy the ruvector-mincut-gated-transformer dependency (required for gated-transformer feature)
-COPY crates/ruvector-mincut-gated-transformer /build/ruvector-mincut-gated-transformer/
+COPY crates/ruvector-mincut-gated-transformer /build/crates/ruvector-mincut-gated-transformer/
+
+# Copy v0.3 dependencies (workspace layout preserves inheritance resolution)
+COPY crates/ruvector-solver /build/crates/ruvector-solver/
+COPY crates/ruvector-math /build/crates/ruvector-math/
+COPY crates/ruvector-attention /build/crates/ruvector-attention/
+COPY crates/sona /build/crates/sona/
+COPY crates/ruvector-domain-expansion /build/crates/ruvector-domain-expansion/
+
+# Copy rvf crates (optional path deps of ruvector-domain-expansion, Cargo validates they exist)
+COPY crates/rvf/rvf-types /build/crates/rvf/rvf-types/
+COPY crates/rvf/rvf-wire /build/crates/rvf/rvf-wire/
+COPY crates/rvf/rvf-crypto /build/crates/rvf/rvf-crypto/
# Copy actual source code
COPY crates/ruvector-postgres/Cargo.toml ./
@@ -81,13 +144,16 @@ COPY crates/ruvector-postgres/src ./src/
COPY crates/ruvector-postgres/sql ./sql/
COPY crates/ruvector-postgres/benches ./benches/
-# Build the extension with all features including gated-transformer
+# Build the extension with all features including v0.3 modules
RUN cargo pgrx package \
--pg-config /usr/lib/postgresql/${PG_VERSION}/bin/pg_config \
- --features pg${PG_VERSION},graph-complete,gated-transformer
+ --features pg${PG_VERSION},graph-complete,gated-transformer,analytics-complete,attention-extended,sona-learning,domain-expansion
-# pgrx generates .control and .so but not SQL - copy our hand-written SQL file
-RUN cp sql/ruvector--2.0.0.sql target/release/ruvector-pg${PG_VERSION}/usr/share/postgresql/${PG_VERSION}/extension/ 2>/dev/null || true
+# pgrx generates .control and .so but not SQL - copy our hand-written SQL files
+# In a workspace, target/ is at the workspace root /build/target/, not per-crate
+RUN cp sql/ruvector--0.3.0.sql /build/target/release/ruvector-pg${PG_VERSION}/usr/share/postgresql/${PG_VERSION}/extension/ 2>/dev/null || true && \
+ cp sql/ruvector--2.0.0.sql /build/target/release/ruvector-pg${PG_VERSION}/usr/share/postgresql/${PG_VERSION}/extension/ 2>/dev/null || true && \
+ cp sql/ruvector--2.0.0--0.3.0.sql /build/target/release/ruvector-pg${PG_VERSION}/usr/share/postgresql/${PG_VERSION}/extension/ 2>/dev/null || true
# ============================================================================
# Stage 4: Runtime (Production)
@@ -101,9 +167,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libssl3 \
&& rm -rf /var/lib/apt/lists/*
-# Copy built extension from builder
-COPY --from=extension-builder /build/ruvector-postgres/target/release/ruvector-pg${PG_VERSION}/usr/share/postgresql/${PG_VERSION}/extension/* /usr/share/postgresql/${PG_VERSION}/extension/
-COPY --from=extension-builder /build/ruvector-postgres/target/release/ruvector-pg${PG_VERSION}/usr/lib/postgresql/${PG_VERSION}/lib/* /usr/lib/postgresql/${PG_VERSION}/lib/
+# Copy built extension from builder (workspace target is at /build/target/)
+COPY --from=extension-builder /build/target/release/ruvector-pg${PG_VERSION}/usr/share/postgresql/${PG_VERSION}/extension/* /usr/share/postgresql/${PG_VERSION}/extension/
+COPY --from=extension-builder /build/target/release/ruvector-pg${PG_VERSION}/usr/lib/postgresql/${PG_VERSION}/lib/* /usr/lib/postgresql/${PG_VERSION}/lib/
# Copy initialization script with proper permissions
COPY --chmod=644 crates/ruvector-postgres/docker/init.sql /docker-entrypoint-initdb.d/
@@ -118,13 +184,13 @@ ENV PG_VERSION=${PG_VERSION}
ENV POSTGRES_INITDB_ARGS="--data-checksums"
# Labels for version tracking
-LABEL org.opencontainers.image.title="RuVector PostgreSQL Extension v2"
-LABEL org.opencontainers.image.description="High-performance vector database extension for PostgreSQL with 230+ SQL functions, Flash Attention, GNN, hybrid search, multi-tenancy, and self-healing"
-LABEL org.opencontainers.image.version="2.0.4"
+LABEL org.opencontainers.image.title="RuVector PostgreSQL Extension v0.3"
+LABEL org.opencontainers.image.description="High-performance vector database extension for PostgreSQL with 143 SQL functions, Solver, Math, TDA, Extended Attention, Sona, and Domain Expansion"
+LABEL org.opencontainers.image.version="0.3.0"
LABEL org.opencontainers.image.vendor="ruv.io"
LABEL org.opencontainers.image.source="https://github.com/ruvnet/ruvector"
LABEL ruvector.pg.version="${PG_VERSION}"
-LABEL ruvector.features="attention,gnn,hybrid,tenancy,healing,learning,hyperbolic,graph"
+LABEL ruvector.features="attention,gnn,hybrid,tenancy,healing,learning,hyperbolic,graph,solver,math,tda,sona,domain-expansion"
# Health check
HEALTHCHECK --interval=5s --timeout=5s --start-period=10s --retries=5 \
diff --git a/crates/ruvector-postgres/docker/init.sql b/crates/ruvector-postgres/docker/init.sql
index e549dbf34..1233222f9 100644
--- a/crates/ruvector-postgres/docker/init.sql
+++ b/crates/ruvector-postgres/docker/init.sql
@@ -51,4 +51,41 @@ BEGIN
RAISE NOTICE 'Cosine distance: %', cosine_distance_arr(ARRAY[1.0, 0.0, 0.0]::real[], ARRAY[0.0, 1.0, 0.0]::real[]);
RAISE NOTICE 'All basic tests passed!';
+
+ -- ================================================================
+ -- v0.3 Module Tests
+ -- ================================================================
+ RAISE NOTICE '--- v0.3 Module Tests ---';
+
+ -- Solver: PageRank
+ RAISE NOTICE 'Solver PageRank: %', ruvector_pagerank('{"edges":[[0,1],[1,2],[2,0]]}'::jsonb);
+
+ -- Solver: Info
+ RAISE NOTICE 'Solver algorithms available';
+
+ -- Solver: Matrix analyze
+ RAISE NOTICE 'Matrix analyze: %', ruvector_matrix_analyze('{"rows":3,"cols":3,"entries":[[0,0,4],[0,1,-1],[1,0,-1],[1,1,4],[2,2,2]]}'::jsonb);
+
+ -- Math: Wasserstein distance
+ RAISE NOTICE 'Wasserstein distance: %', ruvector_wasserstein_distance(ARRAY[0.5,0.5]::real[], ARRAY[0.3,0.7]::real[]);
+
+ -- Math: KL divergence
+ RAISE NOTICE 'KL divergence: %', ruvector_kl_divergence(ARRAY[0.5,0.5]::real[], ARRAY[0.3,0.7]::real[]);
+
+ -- Math: Jensen-Shannon
+ RAISE NOTICE 'Jensen-Shannon: %', ruvector_jensen_shannon(ARRAY[0.5,0.5]::real[], ARRAY[0.3,0.7]::real[]);
+
+ -- TDA: Persistent homology
+ RAISE NOTICE 'Persistent homology: %', ruvector_persistent_homology('[[1,0],[0,1],[-1,0],[0,-1]]'::jsonb, 1, 3.0);
+
+ -- TDA: Betti numbers
+ RAISE NOTICE 'Betti numbers: %', ruvector_betti_numbers('[[0,0],[1,0],[0,1]]'::jsonb, 1.5);
+
+ -- Attention: Linear attention
+ RAISE NOTICE 'Linear attention: %', ruvector_linear_attention(ARRAY[1,0,0,0]::real[], '[[1,0,0,0],[0,1,0,0]]'::jsonb, '[[5,10],[15,20]]'::jsonb);
+
+ -- Attention: Benchmark
+ RAISE NOTICE 'Attention benchmark: %', ruvector_attention_benchmark(64, 128, 'scaled_dot');
+
+ RAISE NOTICE 'All v0.3 tests passed!';
END $$;
diff --git a/crates/ruvector-postgres/ruvector.control b/crates/ruvector-postgres/ruvector.control
index a5e50ae90..cf22b7095 100644
--- a/crates/ruvector-postgres/ruvector.control
+++ b/crates/ruvector-postgres/ruvector.control
@@ -2,8 +2,8 @@
# High-performance vector similarity search - pgvector drop-in replacement
# Features: 230+ SQL functions, Flash Attention, GNN, hybrid search, multi-tenancy, self-healing
-comment = 'RuVector v2: SIMD-optimized vector similarity search with AI capabilities'
-default_version = '2.0.0'
+comment = 'RuVector v0.3: SIMD-optimized vector similarity search with solver, math, TDA, and AI capabilities'
+default_version = '0.3.0'
module_pathname = '$libdir/ruvector'
relocatable = false
superuser = false
diff --git a/crates/ruvector-postgres/sql/ruvector--0.3.0.sql b/crates/ruvector-postgres/sql/ruvector--0.3.0.sql
new file mode 100644
index 000000000..12561d0ed
--- /dev/null
+++ b/crates/ruvector-postgres/sql/ruvector--0.3.0.sql
@@ -0,0 +1,1094 @@
+-- RuVector PostgreSQL Extension v0.3
+-- Version: 0.3.0
+-- High-performance vector similarity search with SIMD optimizations
+-- Features: 270+ SQL functions, Solver, Math, TDA, Extended Attention, Sona, Domain Expansion
+
+-- Complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION ruvector" to load this file. \quit
+
+-- ============================================================================
+-- Utility Functions
+-- ============================================================================
+
+-- Get extension version
+CREATE OR REPLACE FUNCTION ruvector_version()
+RETURNS text
+AS 'MODULE_PATHNAME', 'ruvector_version_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Get SIMD info
+CREATE OR REPLACE FUNCTION ruvector_simd_info()
+RETURNS text
+AS 'MODULE_PATHNAME', 'ruvector_simd_info_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Get memory stats
+CREATE OR REPLACE FUNCTION ruvector_memory_stats()
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_memory_stats_wrapper'
+LANGUAGE C VOLATILE PARALLEL SAFE;
+
+-- ============================================================================
+-- Native RuVector Type (pgvector-compatible)
+-- ============================================================================
+
+-- Create the ruvector type using low-level I/O functions
+CREATE TYPE ruvector;
+
+CREATE OR REPLACE FUNCTION ruvector_in(cstring) RETURNS ruvector
+AS 'MODULE_PATHNAME', 'ruvector_in' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_out(ruvector) RETURNS cstring
+AS 'MODULE_PATHNAME', 'ruvector_out' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_recv(internal) RETURNS ruvector
+AS 'MODULE_PATHNAME', 'ruvector_recv' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_send(ruvector) RETURNS bytea
+AS 'MODULE_PATHNAME', 'ruvector_send' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_typmod_in(cstring[]) RETURNS int
+AS 'MODULE_PATHNAME', 'ruvector_typmod_in' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_typmod_out(int) RETURNS cstring
+AS 'MODULE_PATHNAME', 'ruvector_typmod_out' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+CREATE TYPE ruvector (
+ INPUT = ruvector_in,
+ OUTPUT = ruvector_out,
+ RECEIVE = ruvector_recv,
+ SEND = ruvector_send,
+ TYPMOD_IN = ruvector_typmod_in,
+ TYPMOD_OUT = ruvector_typmod_out,
+ STORAGE = extended,
+ INTERNALLENGTH = VARIABLE,
+ ALIGNMENT = double
+);
+
+-- ============================================================================
+-- Native RuVector Distance Functions (SIMD-optimized)
+-- ============================================================================
+
+-- L2 distance for native ruvector type
+CREATE OR REPLACE FUNCTION ruvector_l2_distance(a ruvector, b ruvector)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_l2_distance_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Cosine distance for native ruvector type
+CREATE OR REPLACE FUNCTION ruvector_cosine_distance(a ruvector, b ruvector)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_cosine_distance_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Inner product for native ruvector type
+CREATE OR REPLACE FUNCTION ruvector_inner_product(a ruvector, b ruvector)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_inner_product_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Manhattan (L1) distance for native ruvector type
+CREATE OR REPLACE FUNCTION ruvector_l1_distance(a ruvector, b ruvector)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_l1_distance_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Get dimensions of ruvector
+CREATE OR REPLACE FUNCTION ruvector_dims(v ruvector)
+RETURNS int
+AS 'MODULE_PATHNAME', 'ruvector_dims_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Get L2 norm of ruvector
+CREATE OR REPLACE FUNCTION ruvector_norm(v ruvector)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_norm_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Normalize ruvector
+CREATE OR REPLACE FUNCTION ruvector_normalize(v ruvector)
+RETURNS ruvector
+AS 'MODULE_PATHNAME', 'ruvector_normalize_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Add two ruvectors
+CREATE OR REPLACE FUNCTION ruvector_add(a ruvector, b ruvector)
+RETURNS ruvector
+AS 'MODULE_PATHNAME', 'ruvector_add_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Subtract two ruvectors
+CREATE OR REPLACE FUNCTION ruvector_sub(a ruvector, b ruvector)
+RETURNS ruvector
+AS 'MODULE_PATHNAME', 'ruvector_sub_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Multiply ruvector by scalar
+CREATE OR REPLACE FUNCTION ruvector_mul_scalar(v ruvector, s real)
+RETURNS ruvector
+AS 'MODULE_PATHNAME', 'ruvector_mul_scalar_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Operators for Native RuVector Type
+-- ============================================================================
+
+-- L2 distance operator (<->)
+CREATE OPERATOR <-> (
+ LEFTARG = ruvector,
+ RIGHTARG = ruvector,
+ FUNCTION = ruvector_l2_distance,
+ COMMUTATOR = '<->'
+);
+
+-- Cosine distance operator (<=>)
+CREATE OPERATOR <=> (
+ LEFTARG = ruvector,
+ RIGHTARG = ruvector,
+ FUNCTION = ruvector_cosine_distance,
+ COMMUTATOR = '<=>'
+);
+
+-- Inner product operator (<#>)
+CREATE OPERATOR <#> (
+ LEFTARG = ruvector,
+ RIGHTARG = ruvector,
+ FUNCTION = ruvector_inner_product,
+ COMMUTATOR = '<#>'
+);
+
+-- Addition operator (+)
+CREATE OPERATOR + (
+ LEFTARG = ruvector,
+ RIGHTARG = ruvector,
+ FUNCTION = ruvector_add,
+ COMMUTATOR = '+'
+);
+
+-- Subtraction operator (-)
+CREATE OPERATOR - (
+ LEFTARG = ruvector,
+ RIGHTARG = ruvector,
+ FUNCTION = ruvector_sub
+);
+
+-- ============================================================================
+-- Distance Functions (array-based with SIMD optimization)
+-- ============================================================================
+
+-- L2 (Euclidean) distance between two float arrays
+CREATE OR REPLACE FUNCTION l2_distance_arr(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'l2_distance_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Inner product between two float arrays
+CREATE OR REPLACE FUNCTION inner_product_arr(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'inner_product_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Negative inner product (for ORDER BY ASC nearest neighbor)
+CREATE OR REPLACE FUNCTION neg_inner_product_arr(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'neg_inner_product_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Cosine distance between two float arrays
+CREATE OR REPLACE FUNCTION cosine_distance_arr(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'cosine_distance_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Cosine similarity between two float arrays
+CREATE OR REPLACE FUNCTION cosine_similarity_arr(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'cosine_similarity_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- L1 (Manhattan) distance between two float arrays
+CREATE OR REPLACE FUNCTION l1_distance_arr(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'l1_distance_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Vector Utility Functions
+-- ============================================================================
+
+-- Normalize a vector to unit length
+CREATE OR REPLACE FUNCTION vector_normalize(v real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'vector_normalize_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Add two vectors element-wise
+CREATE OR REPLACE FUNCTION vector_add(a real[], b real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'vector_add_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Subtract two vectors element-wise
+CREATE OR REPLACE FUNCTION vector_sub(a real[], b real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'vector_sub_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Multiply vector by scalar
+CREATE OR REPLACE FUNCTION vector_mul_scalar(v real[], scalar real)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'vector_mul_scalar_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Get vector dimensions
+CREATE OR REPLACE FUNCTION vector_dims(v real[])
+RETURNS int
+AS 'MODULE_PATHNAME', 'vector_dims_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Get vector L2 norm
+CREATE OR REPLACE FUNCTION vector_norm(v real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'vector_norm_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Average two vectors
+CREATE OR REPLACE FUNCTION vector_avg2(a real[], b real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'vector_avg2_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Quantization Functions
+-- ============================================================================
+
+-- Binary quantize a vector
+CREATE OR REPLACE FUNCTION binary_quantize_arr(v real[])
+RETURNS bytea
+AS 'MODULE_PATHNAME', 'binary_quantize_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Scalar quantize a vector (SQ8)
+CREATE OR REPLACE FUNCTION scalar_quantize_arr(v real[])
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'scalar_quantize_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Aggregate Functions
+-- ============================================================================
+
+-- State transition function for vector sum
+CREATE OR REPLACE FUNCTION vector_sum_state(state real[], value real[])
+RETURNS real[]
+AS $$
+SELECT CASE
+ WHEN state IS NULL THEN value
+ WHEN value IS NULL THEN state
+ ELSE vector_add(state, value)
+END;
+$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
+
+-- Final function for vector average (note: this script defines no vector_avg aggregate that uses it)
+CREATE OR REPLACE FUNCTION vector_avg_final(state real[], count bigint)
+RETURNS real[]
+AS $$
+SELECT CASE
+ WHEN state IS NULL OR count = 0 THEN NULL
+ ELSE vector_mul_scalar(state, 1.0 / count::real)
+END;
+$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
+
+-- Vector sum aggregate
+CREATE AGGREGATE vector_sum(real[]) (
+ SFUNC = vector_sum_state,
+ STYPE = real[],
+ PARALLEL = SAFE
+);
+
+-- ============================================================================
+-- Fast Pre-Normalized Cosine Distance (3x faster)
+-- ============================================================================
+
+-- Cosine distance for pre-normalized vectors (only dot product)
+CREATE OR REPLACE FUNCTION cosine_distance_normalized_arr(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'cosine_distance_normalized_arr_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Temporal Compression Functions
+-- ============================================================================
+
+-- Compute delta between two consecutive vectors
+CREATE OR REPLACE FUNCTION temporal_delta(current real[], previous real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'temporal_delta_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Reconstruct vector from delta and previous vector
+CREATE OR REPLACE FUNCTION temporal_undelta(delta real[], previous real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'temporal_undelta_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Exponential moving average update
+CREATE OR REPLACE FUNCTION temporal_ema_update(current real[], ema_prev real[], alpha real)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'temporal_ema_update_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Compute temporal drift (rate of change)
+CREATE OR REPLACE FUNCTION temporal_drift(v1 real[], v2 real[], time_delta real)
+RETURNS real
+AS 'MODULE_PATHNAME', 'temporal_drift_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Compute velocity (first derivative)
+CREATE OR REPLACE FUNCTION temporal_velocity(v_t0 real[], v_t1 real[], dt real)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'temporal_velocity_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Attention Mechanism Functions
+-- ============================================================================
+
+-- Compute scaled attention score between query and key
+CREATE OR REPLACE FUNCTION attention_score(query real[], key real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'attention_score_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Apply softmax to scores array
+CREATE OR REPLACE FUNCTION attention_softmax(scores real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'attention_softmax_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Weighted vector addition for attention
+CREATE OR REPLACE FUNCTION attention_weighted_add(accumulator real[], value real[], weight real)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'attention_weighted_add_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Initialize attention accumulator
+CREATE OR REPLACE FUNCTION attention_init(dim int)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'attention_init_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Compute single attention (returns JSON with score and value)
+CREATE OR REPLACE FUNCTION attention_single(query real[], key real[], value real[], score_offset real)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'attention_single_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Graph Traversal Functions
+-- ============================================================================
+
+-- Compute edge similarity between two vectors
+CREATE OR REPLACE FUNCTION graph_edge_similarity(source real[], target real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'graph_edge_similarity_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- PageRank contribution calculation
+CREATE OR REPLACE FUNCTION graph_pagerank_contribution(importance real, num_neighbors int, damping real)
+RETURNS real
+AS 'MODULE_PATHNAME', 'graph_pagerank_contribution_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- PageRank base importance
+CREATE OR REPLACE FUNCTION graph_pagerank_base(num_nodes int, damping real)
+RETURNS real
+AS 'MODULE_PATHNAME', 'graph_pagerank_base_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Check semantic connection
+CREATE OR REPLACE FUNCTION graph_is_connected(v1 real[], v2 real[], threshold real)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'graph_is_connected_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Centroid update for clustering
+CREATE OR REPLACE FUNCTION graph_centroid_update(centroid real[], neighbor real[], weight real)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'graph_centroid_update_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Bipartite matching score for RAG
+CREATE OR REPLACE FUNCTION graph_bipartite_score(query real[], node real[], edge_weight real)
+RETURNS real
+AS 'MODULE_PATHNAME', 'graph_bipartite_score_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- Hyperbolic Geometry Functions
+-- ============================================================================
+
+-- Poincare distance
+CREATE OR REPLACE FUNCTION ruvector_poincare_distance(a real[], b real[], curvature real DEFAULT -1.0)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_poincare_distance_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Lorentz/hyperboloid distance
+CREATE OR REPLACE FUNCTION ruvector_lorentz_distance(a real[], b real[], curvature real DEFAULT -1.0)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_lorentz_distance_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Mobius addition in Poincare ball
+CREATE OR REPLACE FUNCTION ruvector_mobius_add(a real[], b real[], curvature real DEFAULT -1.0)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_mobius_add_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Exponential map (tangent to manifold)
+CREATE OR REPLACE FUNCTION ruvector_exp_map(base real[], tangent real[], curvature real DEFAULT -1.0)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_exp_map_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Logarithmic map (manifold to tangent)
+CREATE OR REPLACE FUNCTION ruvector_log_map(base real[], target real[], curvature real DEFAULT -1.0)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_log_map_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Convert Poincare to Lorentz coordinates
+CREATE OR REPLACE FUNCTION ruvector_poincare_to_lorentz(poincare real[], curvature real DEFAULT -1.0)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_poincare_to_lorentz_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Convert Lorentz to Poincare coordinates
+CREATE OR REPLACE FUNCTION ruvector_lorentz_to_poincare(lorentz real[], curvature real DEFAULT -1.0)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_lorentz_to_poincare_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- Minkowski inner product
+CREATE OR REPLACE FUNCTION ruvector_minkowski_dot(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_minkowski_dot_wrapper'
+LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
+
+-- ============================================================================
+-- GNN (Graph Neural Network) Functions
+-- ============================================================================
+-- Note: GCN and GraphSAGE functions are auto-generated by pgrx with JsonB signature
+-- The functions ruvector_gcn_forward and ruvector_graphsage_forward use JsonB types
+-- and are defined in src/gnn/operators.rs with #[pg_extern] macro
+
+-- ============================================================================
+-- Routing/Agent Functions (Tiny Dancer)
+-- ============================================================================
+-- NOTE: these functions read/write an extension-managed agent registry held in
+-- backend process state, so they are PARALLEL RESTRICTED (leader-only); a
+-- parallel worker would see or mutate its own copy of the registry.
+-- Register an agent
+CREATE OR REPLACE FUNCTION ruvector_register_agent(name text, agent_type text, capabilities text[], cost_per_request real, avg_latency_ms real, quality_score real)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_register_agent_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Register agent with full config
+CREATE OR REPLACE FUNCTION ruvector_register_agent_full(config jsonb)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_register_agent_full_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Update agent metrics
+CREATE OR REPLACE FUNCTION ruvector_update_agent_metrics(name text, latency_ms real, success boolean, quality real DEFAULT NULL)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_update_agent_metrics_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Remove agent
+CREATE OR REPLACE FUNCTION ruvector_remove_agent(name text)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_remove_agent_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Set agent active status
+CREATE OR REPLACE FUNCTION ruvector_set_agent_active(name text, is_active boolean)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_set_agent_active_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Route request to best agent
+CREATE OR REPLACE FUNCTION ruvector_route(embedding real[], optimize_for text DEFAULT 'balanced', constraints jsonb DEFAULT NULL)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_route_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- List all agents
+CREATE OR REPLACE FUNCTION ruvector_list_agents()
+RETURNS TABLE(name text, agent_type text, capabilities text[], cost_per_request real, avg_latency_ms real, quality_score real, success_rate real, total_requests bigint, is_active boolean)
+AS 'MODULE_PATHNAME', 'ruvector_list_agents_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Get agent details
+CREATE OR REPLACE FUNCTION ruvector_get_agent(name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_get_agent_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Find agents by capability
+CREATE OR REPLACE FUNCTION ruvector_find_agents_by_capability(capability text, max_results int DEFAULT 10)
+RETURNS TABLE(name text, quality_score real, avg_latency_ms real, cost_per_request real)
+AS 'MODULE_PATHNAME', 'ruvector_find_agents_by_capability_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Get routing statistics
+CREATE OR REPLACE FUNCTION ruvector_routing_stats()
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_routing_stats_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Clear all agents
+CREATE OR REPLACE FUNCTION ruvector_clear_agents()
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_clear_agents_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- ============================================================================
+-- Learning/ReasoningBank Functions
+-- ============================================================================
+-- NOTE: learning state is kept in extension-managed backend memory, so these
+-- VOLATILE functions are PARALLEL RESTRICTED (run only in the group leader).
+-- Enable learning for a table
+CREATE OR REPLACE FUNCTION ruvector_enable_learning(table_name text, config jsonb DEFAULT NULL)
+RETURNS text
+AS 'MODULE_PATHNAME', 'ruvector_enable_learning_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Record feedback for learning
+CREATE OR REPLACE FUNCTION ruvector_record_feedback(table_name text, query_vector real[], relevant_ids bigint[], irrelevant_ids bigint[])
+RETURNS text
+AS 'MODULE_PATHNAME', 'ruvector_record_feedback_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Get learning statistics
+CREATE OR REPLACE FUNCTION ruvector_learning_stats(table_name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_learning_stats_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Auto-tune search parameters
+CREATE OR REPLACE FUNCTION ruvector_auto_tune(table_name text, optimize_for text DEFAULT 'balanced', sample_queries real[][] DEFAULT NULL)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_auto_tune_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Extract query patterns
+CREATE OR REPLACE FUNCTION ruvector_extract_patterns(table_name text, num_clusters int DEFAULT 10)
+RETURNS text
+AS 'MODULE_PATHNAME', 'ruvector_extract_patterns_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Get optimized search parameters for query
+CREATE OR REPLACE FUNCTION ruvector_get_search_params(table_name text, query_vector real[])
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_get_search_params_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Clear learning data
+CREATE OR REPLACE FUNCTION ruvector_clear_learning(table_name text)
+RETURNS text
+AS 'MODULE_PATHNAME', 'ruvector_clear_learning_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- ============================================================================
+-- Graph/Cypher Functions
+-- ============================================================================
+-- NOTE: graphs live in extension-managed backend state; all functions are
+-- PARALLEL RESTRICTED so parallel workers never see or mutate a stale copy.
+-- Create a new graph
+CREATE OR REPLACE FUNCTION ruvector_create_graph(name text)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_create_graph_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Execute Cypher query
+CREATE OR REPLACE FUNCTION ruvector_cypher(graph_name text, query text, params jsonb DEFAULT NULL)
+RETURNS SETOF jsonb
+AS 'MODULE_PATHNAME', 'ruvector_cypher_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Add node to graph
+CREATE OR REPLACE FUNCTION ruvector_add_node(graph_name text, labels text[], properties jsonb)
+RETURNS bigint
+AS 'MODULE_PATHNAME', 'ruvector_add_node_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Add edge to graph
+CREATE OR REPLACE FUNCTION ruvector_add_edge(graph_name text, source_id bigint, target_id bigint, edge_type text, properties jsonb)
+RETURNS bigint
+AS 'MODULE_PATHNAME', 'ruvector_add_edge_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Find shortest path
+CREATE OR REPLACE FUNCTION ruvector_shortest_path(graph_name text, start_id bigint, end_id bigint, max_hops int DEFAULT 10)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_shortest_path_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Get graph statistics
+CREATE OR REPLACE FUNCTION ruvector_graph_stats(graph_name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_graph_stats_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- List all graphs
+CREATE OR REPLACE FUNCTION ruvector_list_graphs()
+RETURNS text[]
+AS 'MODULE_PATHNAME', 'ruvector_list_graphs_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Delete a graph
+CREATE OR REPLACE FUNCTION ruvector_delete_graph(graph_name text)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_delete_graph_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- ============================================================================
+-- SPARQL / RDF Triple Store Operations (W3C SPARQL 1.1)
+-- ============================================================================
+-- NOTE: triple stores are extension-managed backend state; all functions are
+-- PARALLEL RESTRICTED so they execute only in the parallel-group leader.
+-- Create a new RDF triple store
+CREATE OR REPLACE FUNCTION ruvector_create_rdf_store(name text)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_create_rdf_store_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Execute SPARQL query with format selection
+CREATE OR REPLACE FUNCTION ruvector_sparql(store_name text, query text, format text)
+RETURNS text
+AS 'MODULE_PATHNAME', 'ruvector_sparql_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Execute SPARQL query and return JSONB
+CREATE OR REPLACE FUNCTION ruvector_sparql_json(store_name text, query text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sparql_json_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Insert RDF triple
+CREATE OR REPLACE FUNCTION ruvector_insert_triple(store_name text, subject text, predicate text, object text)
+RETURNS bigint
+AS 'MODULE_PATHNAME', 'ruvector_insert_triple_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Insert RDF triple into named graph
+CREATE OR REPLACE FUNCTION ruvector_insert_triple_graph(store_name text, subject text, predicate text, object text, graph text)
+RETURNS bigint
+AS 'MODULE_PATHNAME', 'ruvector_insert_triple_graph_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Bulk load N-Triples format
+CREATE OR REPLACE FUNCTION ruvector_load_ntriples(store_name text, ntriples text)
+RETURNS bigint
+AS 'MODULE_PATHNAME', 'ruvector_load_ntriples_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Get RDF store statistics
+CREATE OR REPLACE FUNCTION ruvector_rdf_stats(store_name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_rdf_stats_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Query triples by pattern (NULL for wildcards)
+CREATE OR REPLACE FUNCTION ruvector_query_triples(store_name text, subject text DEFAULT NULL, predicate text DEFAULT NULL, object text DEFAULT NULL)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_query_triples_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Clear all triples from store
+CREATE OR REPLACE FUNCTION ruvector_clear_rdf_store(store_name text)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_clear_rdf_store_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Delete RDF triple store
+CREATE OR REPLACE FUNCTION ruvector_delete_rdf_store(store_name text)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_delete_rdf_store_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- List all RDF stores
+CREATE OR REPLACE FUNCTION ruvector_list_rdf_stores()
+RETURNS text[]
+AS 'MODULE_PATHNAME', 'ruvector_list_rdf_stores_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- Execute SPARQL UPDATE operations
+CREATE OR REPLACE FUNCTION ruvector_sparql_update(store_name text, query text)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'ruvector_sparql_update_wrapper'
+LANGUAGE C VOLATILE PARALLEL RESTRICTED;
+
+-- ============================================================================
+-- Comments
+-- ============================================================================
+
+COMMENT ON FUNCTION ruvector_version() IS 'Returns RuVector extension version';
+COMMENT ON FUNCTION ruvector_simd_info() IS 'Returns SIMD capability information';
+COMMENT ON FUNCTION ruvector_memory_stats() IS 'Returns memory statistics for the extension';
+COMMENT ON FUNCTION l2_distance_arr(real[], real[]) IS 'Compute L2 (Euclidean) distance between two vectors';
+COMMENT ON FUNCTION cosine_distance_arr(real[], real[]) IS 'Compute cosine distance between two vectors';
+COMMENT ON FUNCTION cosine_distance_normalized_arr(real[], real[]) IS 'Fast cosine distance for pre-normalized vectors (3x faster)';
+COMMENT ON FUNCTION inner_product_arr(real[], real[]) IS 'Compute inner product between two vectors';
+COMMENT ON FUNCTION l1_distance_arr(real[], real[]) IS 'Compute L1 (Manhattan) distance between two vectors';
+COMMENT ON FUNCTION vector_normalize(real[]) IS 'Normalize a vector to unit length';
+COMMENT ON FUNCTION vector_add(real[], real[]) IS 'Add two vectors element-wise';
+COMMENT ON FUNCTION vector_sub(real[], real[]) IS 'Subtract two vectors element-wise';
+COMMENT ON FUNCTION vector_mul_scalar(real[], real) IS 'Multiply vector by scalar';
+COMMENT ON FUNCTION vector_dims(real[]) IS 'Get vector dimensions';
+COMMENT ON FUNCTION vector_norm(real[]) IS 'Get vector L2 norm';
+COMMENT ON FUNCTION binary_quantize_arr(real[]) IS 'Binary quantize a vector (32x compression)';
+COMMENT ON FUNCTION scalar_quantize_arr(real[]) IS 'Scalar quantize a vector (4x compression)';
+COMMENT ON FUNCTION temporal_delta(real[], real[]) IS 'Compute delta between consecutive vectors for compression';
+COMMENT ON FUNCTION temporal_undelta(real[], real[]) IS 'Reconstruct vector from delta encoding';
+COMMENT ON FUNCTION temporal_ema_update(real[], real[], real) IS 'Exponential moving average update step';
+COMMENT ON FUNCTION temporal_drift(real[], real[], real) IS 'Compute temporal drift (rate of change) between vectors';
+COMMENT ON FUNCTION temporal_velocity(real[], real[], real) IS 'Compute velocity (first derivative) of vector';
+COMMENT ON FUNCTION attention_score(real[], real[]) IS 'Compute scaled attention score between query and key';
+COMMENT ON FUNCTION attention_softmax(real[]) IS 'Apply softmax to scores array';
+COMMENT ON FUNCTION attention_weighted_add(real[], real[], real) IS 'Weighted vector addition for attention';
+COMMENT ON FUNCTION attention_init(int) IS 'Initialize zero-vector accumulator for attention';
+COMMENT ON FUNCTION attention_single(real[], real[], real[], real) IS 'Single key-value attention with score';
+COMMENT ON FUNCTION graph_edge_similarity(real[], real[]) IS 'Compute edge similarity (cosine) between vectors';
+COMMENT ON FUNCTION graph_pagerank_contribution(real, int, real) IS 'Calculate PageRank contribution to neighbors';
+COMMENT ON FUNCTION graph_pagerank_base(int, real) IS 'Initialize PageRank base importance';
+COMMENT ON FUNCTION graph_is_connected(real[], real[], real) IS 'Check if vectors are semantically connected';
+COMMENT ON FUNCTION graph_centroid_update(real[], real[], real) IS 'Update centroid with neighbor contribution';
+
+-- SPARQL / RDF Comments
+COMMENT ON FUNCTION ruvector_create_rdf_store(text) IS 'Create a new RDF triple store for SPARQL queries';
+COMMENT ON FUNCTION ruvector_sparql(text, text, text) IS 'Execute W3C SPARQL 1.1 query (SELECT, ASK, CONSTRUCT, DESCRIBE) with format selection (json, xml, csv, tsv)';
+COMMENT ON FUNCTION ruvector_sparql_json(text, text) IS 'Execute SPARQL query and return results as JSONB';
+COMMENT ON FUNCTION ruvector_insert_triple(text, text, text, text) IS 'Insert RDF triple (subject, predicate, object) into store';
+COMMENT ON FUNCTION ruvector_insert_triple_graph(text, text, text, text, text) IS 'Insert RDF triple into named graph';
+COMMENT ON FUNCTION ruvector_load_ntriples(text, text) IS 'Bulk load RDF triples from N-Triples format';
+COMMENT ON FUNCTION ruvector_rdf_stats(text) IS 'Get statistics for RDF triple store (counts, graphs)';
+COMMENT ON FUNCTION ruvector_query_triples(text, text, text, text) IS 'Query triples by pattern (use NULL for wildcards)';
+COMMENT ON FUNCTION ruvector_clear_rdf_store(text) IS 'Clear all triples from RDF store';
+COMMENT ON FUNCTION ruvector_delete_rdf_store(text) IS 'Delete RDF triple store completely';
+COMMENT ON FUNCTION ruvector_list_rdf_stores() IS 'List all RDF triple stores';
+COMMENT ON FUNCTION ruvector_sparql_update(text, text) IS 'Execute SPARQL UPDATE operations (INSERT DATA, DELETE DATA, DELETE/INSERT WHERE)';
+COMMENT ON FUNCTION graph_bipartite_score(real[], real[], real) IS 'Compute bipartite matching score for RAG';
+
+-- ============================================================================
+-- Embedding Generation Functions
+-- ============================================================================
+-- Note: Embedding functions require the 'embeddings' feature flag to be enabled
+-- during compilation. These functions are not available in the default build.
+-- To enable, build with: cargo pgrx package --features embeddings
+
+-- ============================================================================
+-- HNSW Access Method
+-- ============================================================================
+
+-- HNSW Access Method Handler
+CREATE OR REPLACE FUNCTION hnsw_handler(internal)
+RETURNS index_am_handler
+AS 'MODULE_PATHNAME', 'hnsw_handler_wrapper'
+LANGUAGE C STRICT;
+
+-- Create HNSW Access Method
+CREATE ACCESS METHOD hnsw TYPE INDEX HANDLER hnsw_handler;
+
+-- ============================================================================
+-- Operator Classes for HNSW
+-- ============================================================================
+
+-- HNSW Operator Class for L2 (Euclidean) distance
+CREATE OPERATOR CLASS ruvector_l2_ops
+ DEFAULT FOR TYPE ruvector USING hnsw AS
+ OPERATOR 1 <-> (ruvector, ruvector) FOR ORDER BY float_ops,
+ FUNCTION 1 ruvector_l2_distance(ruvector, ruvector);
+
+COMMENT ON OPERATOR CLASS ruvector_l2_ops USING hnsw IS
+'ruvector HNSW operator class for L2/Euclidean distance';
+
+-- HNSW Operator Class for Cosine distance
+CREATE OPERATOR CLASS ruvector_cosine_ops
+ FOR TYPE ruvector USING hnsw AS
+ OPERATOR 1 <=> (ruvector, ruvector) FOR ORDER BY float_ops,
+ FUNCTION 1 ruvector_cosine_distance(ruvector, ruvector);
+
+COMMENT ON OPERATOR CLASS ruvector_cosine_ops USING hnsw IS
+'ruvector HNSW operator class for cosine distance';
+
+-- HNSW Operator Class for Inner Product
+CREATE OPERATOR CLASS ruvector_ip_ops
+ FOR TYPE ruvector USING hnsw AS
+ OPERATOR 1 <#> (ruvector, ruvector) FOR ORDER BY float_ops,
+ FUNCTION 1 ruvector_inner_product(ruvector, ruvector);
+
+COMMENT ON OPERATOR CLASS ruvector_ip_ops USING hnsw IS
+'ruvector HNSW operator class for inner product (max similarity)';
+
+-- ============================================================================
+-- IVFFlat Access Method
+-- ============================================================================
+
+-- IVFFlat Access Method Handler
+CREATE OR REPLACE FUNCTION ruivfflat_handler(internal)
+RETURNS index_am_handler
+AS 'MODULE_PATHNAME', 'ruivfflat_handler_wrapper'
+LANGUAGE C STRICT;
+
+-- Create IVFFlat Access Method (also aliased as 'ivfflat' for pgvector compatibility)
+CREATE ACCESS METHOD ruivfflat TYPE INDEX HANDLER ruivfflat_handler;
+
+-- Operator Classes for IVFFlat (L2/Euclidean distance)
+CREATE OPERATOR CLASS ruvector_l2_ops
+ DEFAULT FOR TYPE ruvector USING ruivfflat AS
+ OPERATOR 1 <-> (ruvector, ruvector) FOR ORDER BY float_ops,
+ FUNCTION 1 ruvector_l2_distance(ruvector, ruvector);
+
+-- IVFFlat Cosine Operator Class
+CREATE OPERATOR CLASS ruvector_cosine_ops
+ FOR TYPE ruvector USING ruivfflat AS
+ OPERATOR 1 <=> (ruvector, ruvector) FOR ORDER BY float_ops,
+ FUNCTION 1 ruvector_cosine_distance(ruvector, ruvector);
+
+-- IVFFlat Inner Product Operator Class
+CREATE OPERATOR CLASS ruvector_ip_ops
+ FOR TYPE ruvector USING ruivfflat AS
+ OPERATOR 1 <#> (ruvector, ruvector) FOR ORDER BY float_ops,
+ FUNCTION 1 ruvector_inner_product(ruvector, ruvector);
+-- ============================================================================
+-- Solver Functions (feature: solver)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_pagerank(edges_json jsonb, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_pagerank_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_pagerank_personalized(edges_json jsonb, source int, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_pagerank_personalized_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_pagerank_multi_seed(edges_json jsonb, seeds_json jsonb, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_pagerank_multi_seed_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_solve_sparse(matrix_json jsonb, rhs real[], method text DEFAULT 'neumann')
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_solve_sparse_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_solve_laplacian(laplacian_json jsonb, rhs real[])
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_solve_laplacian_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_effective_resistance(laplacian_json jsonb, source int, target int)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_effective_resistance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_graph_pagerank(graph_name text, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS TABLE(node_id bigint, rank double precision)
+AS 'MODULE_PATHNAME', 'ruvector_graph_pagerank_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_solver_info()
+RETURNS TABLE(algorithm text, description text, complexity text)
+AS 'MODULE_PATHNAME', 'ruvector_solver_info_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_matrix_analyze(matrix_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_matrix_analyze_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_conjugate_gradient(matrix_json jsonb, rhs real[], tol real DEFAULT 1e-6, max_iter int DEFAULT 1000)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_conjugate_gradient_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_graph_centrality(graph_name text, method text DEFAULT 'pagerank')
+RETURNS TABLE(node_id bigint, centrality double precision)
+AS 'MODULE_PATHNAME', 'ruvector_graph_centrality_wrapper'
+LANGUAGE C;
+
+-- ============================================================================
+-- Math Distance & Spectral Functions (feature: math-distances)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_wasserstein_distance(a real[], b real[], p int DEFAULT 1)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_wasserstein_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sinkhorn_distance(cost_json jsonb, w_a real[], w_b real[], reg real DEFAULT 0.1)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sinkhorn_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sliced_wasserstein(pts_a_json jsonb, pts_b_json jsonb, n_proj int DEFAULT 100)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_sliced_wasserstein_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_kl_divergence(p real[], q real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_kl_divergence_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_jensen_shannon(p real[], q real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_jensen_shannon_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_fisher_information(dist real[], tangent real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_fisher_information_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_spectral_cluster(adj_json jsonb, k int)
+RETURNS int[]
+AS 'MODULE_PATHNAME', 'ruvector_spectral_cluster_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_chebyshev_filter(adj_json jsonb, signal real[], filter_type text DEFAULT 'low_pass', degree int DEFAULT 10)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_chebyshev_filter_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_graph_diffusion(adj_json jsonb, signal real[], diffusion_time real DEFAULT 1.0, degree int DEFAULT 10)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_graph_diffusion_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_product_manifold_distance(a real[], b real[], e_dim int, h_dim int, s_dim int)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_product_manifold_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_spherical_distance(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_spherical_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_gromov_wasserstein(dist_a_json jsonb, dist_b_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_gromov_wasserstein_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+-- ============================================================================
+-- TDA Functions (feature: tda)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_persistent_homology(points_json jsonb, max_dim int DEFAULT 1, max_radius real DEFAULT 3.0)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_persistent_homology_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_betti_numbers(points_json jsonb, radius real, max_dim int DEFAULT 2)
+RETURNS int[]
+AS 'MODULE_PATHNAME', 'ruvector_betti_numbers_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_bottleneck_distance(diag_a_json jsonb, diag_b_json jsonb)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_bottleneck_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_persistence_wasserstein(diag_a_json jsonb, diag_b_json jsonb, p int DEFAULT 2)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_persistence_wasserstein_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_topological_summary(points_json jsonb, max_dim int DEFAULT 1)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_topological_summary_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_embedding_drift(old_json jsonb, new_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_embedding_drift_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_vietoris_rips(points_json jsonb, max_radius real DEFAULT 2.0, max_dim int DEFAULT 2)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_vietoris_rips_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+-- ============================================================================
+-- Extended Attention Functions (feature: attention-extended)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_linear_attention(q real[], keys_json jsonb, values_json jsonb)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_linear_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sliding_window_attention(q real[], keys_json jsonb, values_json jsonb, window_size int DEFAULT 256)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_sliding_window_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_cross_attention(q real[], ctx_keys_json jsonb, ctx_values_json jsonb)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_cross_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sparse_attention(q real[], keys_json jsonb, values_json jsonb, top_k int DEFAULT 8)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_sparse_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_moe_attention(q real[], keys_json jsonb, values_json jsonb, n_experts int DEFAULT 4, top_k int DEFAULT 2)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_moe_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_hyperbolic_attention(q real[], keys_json jsonb, values_json jsonb, curvature real DEFAULT 1.0)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_hyperbolic_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_attention_benchmark(dim int DEFAULT 64, seq_len int DEFAULT 128, attention_type text DEFAULT 'scaled_dot')
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_attention_benchmark_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+-- ============================================================================
+-- Sona Learning Functions (feature: sona-learning)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_sona_learn(table_name text, trajectory_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sona_learn_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_sona_apply(table_name text, embedding real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_sona_apply_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sona_ewc_status(table_name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sona_ewc_status_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_sona_stats(table_name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sona_stats_wrapper'
+LANGUAGE C;
+
+-- ============================================================================
+-- Domain Expansion Functions (feature: domain-expansion)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_domain_transfer(embeddings_json jsonb, target_domain text, config_json jsonb DEFAULT '{}'::jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_domain_transfer_wrapper'
+LANGUAGE C;
diff --git a/crates/ruvector-postgres/sql/ruvector--2.0.0--0.3.0.sql b/crates/ruvector-postgres/sql/ruvector--2.0.0--0.3.0.sql
new file mode 100644
index 000000000..11c58b2d4
--- /dev/null
+++ b/crates/ruvector-postgres/sql/ruvector--2.0.0--0.3.0.sql
@@ -0,0 +1,239 @@
+-- RuVector PostgreSQL Extension v0.3 Upgrade Script
+-- Upgrade path: 2.0.0 -> 0.3.0 (NOTE(review): target version is lower than source; confirm this versioning scheme is intentional)
+-- Adds: Solver, Math/Spectral, TDA, Extended Attention, Sona, Domain Expansion
+
+\echo Use "ALTER EXTENSION ruvector UPDATE TO '0.3.0'" to load this file. \quit
+
+-- ============================================================================
+-- Solver Functions (feature: solver)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_pagerank(edges_json jsonb, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_pagerank_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_pagerank_personalized(edges_json jsonb, source int, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_pagerank_personalized_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_pagerank_multi_seed(edges_json jsonb, seeds_json jsonb, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_pagerank_multi_seed_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_solve_sparse(matrix_json jsonb, rhs real[], method text DEFAULT 'neumann')
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_solve_sparse_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_solve_laplacian(laplacian_json jsonb, rhs real[])
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_solve_laplacian_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_effective_resistance(laplacian_json jsonb, source int, target int)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_effective_resistance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_graph_pagerank(graph_name text, alpha real DEFAULT 0.85, epsilon real DEFAULT 1e-6)
+RETURNS TABLE(node_id bigint, rank double precision)
+AS 'MODULE_PATHNAME', 'ruvector_graph_pagerank_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_solver_info()
+RETURNS TABLE(algorithm text, description text, complexity text)
+AS 'MODULE_PATHNAME', 'ruvector_solver_info_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_matrix_analyze(matrix_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_matrix_analyze_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_conjugate_gradient(matrix_json jsonb, rhs real[], tol real DEFAULT 1e-6, max_iter int DEFAULT 1000)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_conjugate_gradient_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_graph_centrality(graph_name text, method text DEFAULT 'pagerank')
+RETURNS TABLE(node_id bigint, centrality double precision)
+AS 'MODULE_PATHNAME', 'ruvector_graph_centrality_wrapper'
+LANGUAGE C;
+
+-- ============================================================================
+-- Math Distance & Spectral Functions (feature: math-distances)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_wasserstein_distance(a real[], b real[], p int DEFAULT 1)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_wasserstein_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sinkhorn_distance(cost_json jsonb, w_a real[], w_b real[], reg real DEFAULT 0.1)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sinkhorn_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sliced_wasserstein(pts_a_json jsonb, pts_b_json jsonb, n_proj int DEFAULT 100)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_sliced_wasserstein_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_kl_divergence(p real[], q real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_kl_divergence_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_jensen_shannon(p real[], q real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_jensen_shannon_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_fisher_information(dist real[], tangent real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_fisher_information_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_spectral_cluster(adj_json jsonb, k int)
+RETURNS int[]
+AS 'MODULE_PATHNAME', 'ruvector_spectral_cluster_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_chebyshev_filter(adj_json jsonb, signal real[], filter_type text DEFAULT 'low_pass', degree int DEFAULT 10)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_chebyshev_filter_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_graph_diffusion(adj_json jsonb, signal real[], diffusion_time real DEFAULT 1.0, degree int DEFAULT 10)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_graph_diffusion_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_product_manifold_distance(a real[], b real[], e_dim int, h_dim int, s_dim int)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_product_manifold_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_spherical_distance(a real[], b real[])
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_spherical_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_gromov_wasserstein(dist_a_json jsonb, dist_b_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_gromov_wasserstein_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+-- ============================================================================
+-- TDA Functions (feature: tda)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_persistent_homology(points_json jsonb, max_dim int DEFAULT 1, max_radius real DEFAULT 3.0)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_persistent_homology_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_betti_numbers(points_json jsonb, radius real, max_dim int DEFAULT 2)
+RETURNS int[]
+AS 'MODULE_PATHNAME', 'ruvector_betti_numbers_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_bottleneck_distance(diag_a_json jsonb, diag_b_json jsonb)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_bottleneck_distance_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_persistence_wasserstein(diag_a_json jsonb, diag_b_json jsonb, p int DEFAULT 2)
+RETURNS real
+AS 'MODULE_PATHNAME', 'ruvector_persistence_wasserstein_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_topological_summary(points_json jsonb, max_dim int DEFAULT 1)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_topological_summary_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_embedding_drift(old_json jsonb, new_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_embedding_drift_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_vietoris_rips(points_json jsonb, max_radius real DEFAULT 2.0, max_dim int DEFAULT 2)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_vietoris_rips_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+-- ============================================================================
+-- Extended Attention Functions (feature: attention-extended)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_linear_attention(q real[], keys_json jsonb, values_json jsonb)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_linear_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sliding_window_attention(q real[], keys_json jsonb, values_json jsonb, window_size int DEFAULT 256)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_sliding_window_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_cross_attention(q real[], ctx_keys_json jsonb, ctx_values_json jsonb)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_cross_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sparse_attention(q real[], keys_json jsonb, values_json jsonb, top_k int DEFAULT 8)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_sparse_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_moe_attention(q real[], keys_json jsonb, values_json jsonb, n_experts int DEFAULT 4, top_k int DEFAULT 2)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_moe_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_hyperbolic_attention(q real[], keys_json jsonb, values_json jsonb, curvature real DEFAULT 1.0)
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_hyperbolic_attention_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_attention_benchmark(dim int DEFAULT 64, seq_len int DEFAULT 128, attention_type text DEFAULT 'scaled_dot')
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_attention_benchmark_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+-- ============================================================================
+-- Sona Learning Functions (feature: sona-learning)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_sona_learn(table_name text, trajectory_json jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sona_learn_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_sona_apply(table_name text, embedding real[])
+RETURNS real[]
+AS 'MODULE_PATHNAME', 'ruvector_sona_apply_wrapper'
+LANGUAGE C IMMUTABLE PARALLEL SAFE;
+
+CREATE OR REPLACE FUNCTION ruvector_sona_ewc_status(table_name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sona_ewc_status_wrapper'
+LANGUAGE C;
+
+CREATE OR REPLACE FUNCTION ruvector_sona_stats(table_name text)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_sona_stats_wrapper'
+LANGUAGE C;
+
+-- ============================================================================
+-- Domain Expansion Functions (feature: domain-expansion)
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION ruvector_domain_transfer(embeddings_json jsonb, target_domain text, config_json jsonb DEFAULT '{}'::jsonb)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'ruvector_domain_transfer_wrapper'
+LANGUAGE C;
diff --git a/crates/ruvector-postgres/src/attention/operators.rs b/crates/ruvector-postgres/src/attention/operators.rs
index da3533b0e..d0df0845a 100644
--- a/crates/ruvector-postgres/src/attention/operators.rs
+++ b/crates/ruvector-postgres/src/attention/operators.rs
@@ -327,6 +327,577 @@ pub fn ruvector_attention_scores(
attention.attention_scores(&query, &key_refs)
}
+// ============================================================================
+// Extended Attention Functions (feature-gated: attention-extended)
+// ============================================================================
+
+/// Linear attention: O(n) complexity using kernel feature maps.
+#[cfg(feature = "attention-extended")]
+#[pg_extern(immutable, parallel_safe)]
+pub fn ruvector_linear_attention(
+ query: Vec<f32>,
+ keys_json: JsonB,
+ values_json: JsonB,
+) -> Vec<f32> {
+ let keys: Vec<Vec<f32>> = match keys_json.0.as_array() {
+ Some(arr) => arr
+ .iter()
+ .filter_map(|v| {
+ v.as_array().map(|a| {
+ a.iter()
+ .filter_map(|x| x.as_f64().map(|f| f as f32))
+ .collect()
+ })
+ })
+ .collect(),
+ None => return Vec::new(),
+ };
+
+ let values: Vec