Cargo.toml (4 changes: 2 additions & 2 deletions)
@@ -52,7 +52,7 @@ clap = { version = "4", features = ["derive"] }

# Storage - Multi-model DB (Documents + Vectors + Graph)
# Using kv-mem for testing and kv-surrealkv for persistence (pure Rust, no C++)
surrealdb = { version = "3", default-features = false, features = ["kv-mem", "kv-surrealkv", "protocol-ws"] }
surrealdb = { version = "2.1", default-features = false, features = ["kv-mem", "kv-surrealkv", "protocol-ws"] }

# Storage - Key-Value Buffer
sled = "0.34"
@@ -72,7 +72,7 @@ tantivy = "0.21"
libp2p = { version = "0.56", features = ["tokio", "gossipsub", "noise", "tcp", "yamux", "mdns", "macros", "kad", "relay", "dcutr", "identify"] }

# Cryptography
rand = { version = "0.9", features = ["small_rng"] }
rand = { version = "0.8", features = ["small_rng"] }
pqcrypto-kyber = "0.8"
pqcrypto-dilithium = "0.5"
aes-gcm = "0.10"
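The rand pin back to 0.8 is what drives every rand call-site change later in this diff: rand 0.9 renamed `thread_rng()` to `rng()` and `Rng::gen()` to `Rng::random()`, so 0.9-style code has to be reverted alongside the downgrade. A minimal sketch of the 0.8-style API the PR standardizes on:

```rust
use rand::{thread_rng, Rng};

fn main() {
    // rand 0.8: thread_rng() + Rng::gen().
    // rand 0.9 renamed these to rand::rng() and Rng::random(),
    // which is why the call sites below switch back.
    let random_bytes: [u8; 32] = thread_rng().gen();
    println!("{:02x?}", random_bytes);
}
```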
apps/desktop/src-tauri/src/lib.rs (6 changes: 3 additions & 3 deletions)
@@ -23,7 +23,7 @@ use synapse_infra::adapters::candle_adapter::CandleAdapter;
use synapse_infra::SimulatedHolographicAdapter;

use synapse_infra::adapters::mock_embedding_adapter::MockEmbeddingAdapter;
-use synapse_infra::adapters::embedding_adapter::{EmbeddingAdapter, EmbeddingConfig};
+use synapse_infra::adapters::ort_adapter::{OrtAdapter, OrtConfig};
use synapse_infra::adapters::mock_llm_adapter::MockLlmAdapter;
use synapse_core::ports::{LlmPort, BufferPort, EmbeddingPort, HolographicPort};
use std::sync::Arc;
@@ -262,7 +262,7 @@ pub async fn init_node(state: &SynapseState, app_data_dir: std::path::PathBuf) -
// Try to load real embedding adapter
let embedding_dir = models_dir.join("all-MiniLM-L6-v2");
let embedder_arc: Arc<dyn EmbeddingPort> = if embedding_dir.exists() {
-match EmbeddingAdapter::new(EmbeddingConfig {
+match OrtAdapter::new(OrtConfig {
model_dir: embedding_dir.clone(),
max_seq_len: 256
}) {
@@ -271,7 +271,7 @@ pub async fn init_node(state: &SynapseState, app_data_dir: std::path::PathBuf) -
Arc::new(adapter)
}
Err(e) => {
println!("Failed to load EmbeddingAdapter: {}, using mock", e);
println!("Failed to load OrtAdapter: {}, using mock", e);
Arc::new(MockEmbeddingAdapter::new())
}
}
crates/synapse-cognition/Cargo.toml (2 changes: 1 addition & 1 deletion)
@@ -33,7 +33,7 @@ tokenizers = "0.19"
ndarray = "0.17"

# Randomness
rand = "0.10"
rand = { workspace = true }
chrono = { version = "0.4", features = ["serde"] }

# Logging
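Switching to `{ workspace = true }` replaces the crate-local `rand = "0.10"` pin, which disagreed with the root manifest's 0.8, with a single version inherited workspace-wide. A minimal sketch of the two sides, assuming the root manifest exposes rand under the standard `[workspace.dependencies]` table (that table header isn't visible in this diff):

```toml
# Root Cargo.toml — declare the version once for the whole workspace.
[workspace.dependencies]
rand = { version = "0.8", features = ["small_rng"] }

# crates/synapse-cognition/Cargo.toml — member crates inherit it.
[dependencies]
rand = { workspace = true }
```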
crates/synapse-cognition/src/bootstrap.rs (4 changes: 2 additions & 2 deletions)
@@ -11,7 +11,7 @@ use synapse_core::logic::metabolism::Metabolism;
use synapse_core::ports::{LlmPort, EmbeddingPort, BufferPort, MemoryPort, HolographicPort};
use synapse_core::error::{Result, Error};
use synapse_infra::adapters::{
-ModelManager, CandleAdapter, EmbeddingAdapter, EmbeddingConfig,
+ModelManager, CandleAdapter, OrtAdapter, OrtConfig,
SledBufferAdapter, SledMemoryAdapter, SledPeerAdapter, SimulatedHolographicAdapter,
EngramSledAdapter,
MockLlmAdapter, MockEmbeddingAdapter, LibP2PConfig, LibP2PEmpathyAdapter
@@ -75,7 +75,7 @@ impl CognitionSystem {
let embedder: Arc<dyn EmbeddingPort> = match paths.embedding_dir {
Some(dir) => {
tracing::info!("Initializing ORT Embeddings from {:?}", dir);
-match EmbeddingAdapter::new(EmbeddingConfig {
+match OrtAdapter::new(OrtConfig {
model_dir: dir,
max_seq_len: 256
}) {
crates/synapse-cognition/src/sovereign_service.rs (16 changes: 4 additions & 12 deletions)
@@ -1,27 +1,20 @@
use std::sync::Arc;
use async_trait::async_trait;
use aes_gcm::{
-aead::{Aead, AeadCore, KeyInit/*, OsRng*/},
+aead::{Aead, AeadCore, KeyInit},
Aes256Gcm, Nonce
};
use sha2::{Sha256, Digest};
-use rand::Rng;
+use rand::{thread_rng, Rng};
use synapse_core::{
ports::{SovereigntyPort, NetworkPort},
MemoryPort,
MemoryNode,
data_container::DataContainer,
};
-// use synapse_infra::LibP2PEmpathyAdapter;
-// use tokio::sync::Mutex;

pub struct SovereignService {
-// We might need access to NetworkPort to store/retrieve encrypted blobs
-// For now, we stub the network interaction or assume we can inject a network adapter.
-// But ports usually don't depend on adapters directly.
-// Services depend on traits.
network: Arc<dyn NetworkPort>,
-// We might need memory port to re-hydrate the recovered memories locally
_memory: Arc<dyn MemoryPort>,
}

@@ -40,7 +33,7 @@ impl SovereignService {
#[async_trait]
impl SovereigntyPort for SovereignService {
fn generate_hypertoken(&self) -> synapse_core::ports::Hypertoken {
-let random_bytes: [u8; 32] = rand::random();
+let random_bytes: [u8; 32] = thread_rng().gen();
let secret = hex::encode(random_bytes);

let mut hasher = Sha256::new();
@@ -58,7 +51,7 @@ impl SovereigntyPort for SovereignService {
let key_bytes = self.derive_key(&hypertoken.master_key_hash);
let key = aes_gcm::Key::<Aes256Gcm>::from_slice(&key_bytes);
let cipher = Aes256Gcm::new(key);
-let nonce = Aes256Gcm::generate_nonce(&mut rand::rng());
+let nonce = Aes256Gcm::generate_nonce(&mut thread_rng());

let data = serde_json::to_vec(node).map_err(|e| synapse_core::error::Error::Serialization(e))?;

@@ -136,4 +129,3 @@ impl SovereigntyPort for SovereignService {
unimplemented!()
}
}

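Pieced together from the fragments above, the encrypt path is: the hypertoken's `master_key_hash` is turned into a 32-byte AES-256-GCM key, a fresh 96-bit nonce is drawn per node, and the serialized node is sealed under that nonce. Note that aes-gcm 0.10's `generate_nonce` expects a rand_core 0.6 RNG, which rand 0.8's `thread_rng()` provides (rand 0.9 moved to rand_core 0.9), which is one reason the downgrade touches this file. A self-contained sketch of the flow — `derive_key` here is an illustrative SHA-256 stand-in for the service's private helper, whose real body isn't in this diff:

```rust
use aes_gcm::{
    aead::{Aead, AeadCore, KeyInit},
    Aes256Gcm,
};
use rand::thread_rng;
use sha2::{Digest, Sha256};

// Illustrative stand-in for SovereignService::derive_key (real body not shown in the diff).
fn derive_key(master_key_hash: &str) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(master_key_hash.as_bytes());
    hasher.finalize().into()
}

fn encrypt_node(master_key_hash: &str, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>), aes_gcm::Error> {
    let key_bytes = derive_key(master_key_hash);
    let key = aes_gcm::Key::<Aes256Gcm>::from_slice(&key_bytes);
    let cipher = Aes256Gcm::new(key);
    // 96-bit random nonce; rand 0.8's thread_rng() satisfies the CryptoRng bound.
    let nonce = Aes256Gcm::generate_nonce(&mut thread_rng());
    let ciphertext = cipher.encrypt(&nonce, plaintext)?;
    // Ship the nonce alongside the ciphertext; decryption needs both.
    Ok((nonce.to_vec(), ciphertext))
}

fn main() {
    let (nonce, ct) = encrypt_node("master-key-hash", b"memory node bytes").unwrap();
    assert_eq!(nonce.len(), 12);
    assert_ne!(ct, b"memory node bytes".to_vec());
}
```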
crates/synapse-infra/src/adapters/diffusion_adapter.rs (2 changes: 1 addition & 1 deletion)
@@ -256,7 +256,7 @@ impl DiffusionAdapter {

let mut rng = StdRng::seed_from_u64(seed);
let latent: Vec<f32> = (0..self.config.latent_dim)
-.map(|_| rng.random::<f32>() * 2.0 - 1.0)
+.map(|_| rng.gen::<f32>() * 2.0 - 1.0)
.collect();

// Store in state
crates/synapse-infra/src/adapters/hologram_codec.rs (2 changes: 1 addition & 1 deletion)
@@ -78,7 +78,7 @@ impl HologramCodec {
let mut rng = StdRng::seed_from_u64(_seed);
let latent_len = channels * latent_dim * latent_dim;
let latent_data: Vec<f32> = (0..latent_len)
-.map(|_| rng.random::<f32>() * 2.0 - 1.0)
+.map(|_| rng.gen::<f32>() * 2.0 - 1.0)
.collect();
let mut latent = Tensor::from_vec(latent_data, (1, channels, latent_dim, latent_dim), &self.device)?;

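In both adapters the only change is the 0.9 → 0.8 rename (`Rng::random` back to `Rng::gen`); the surrounding pattern, a seed-reproducible latent draw, is unchanged. A minimal sketch of that pattern, with `latent_dim` standing in for the adapters' config fields:

```rust
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

// Same seed -> identical latent vector, keeping generation reproducible.
fn sample_latent(seed: u64, latent_dim: usize) -> Vec<f32> {
    let mut rng = StdRng::seed_from_u64(seed);
    (0..latent_dim)
        .map(|_| rng.gen::<f32>() * 2.0 - 1.0) // uniform in [-1.0, 1.0)
        .collect()
}

fn main() {
    assert_eq!(sample_latent(7, 8), sample_latent(7, 8));
    assert_ne!(sample_latent(7, 8), sample_latent(8, 8));
}
```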
crates/synapse-infra/src/adapters/mod.rs (6 changes: 2 additions & 4 deletions)
@@ -3,12 +3,11 @@
pub mod surrealdb_adapter;
pub mod sled_adapter;
pub mod sled_memory_adapter;
-// pub mod ort_adapter; // TODO: File missing, needs to be created or imported from feature branch
+pub mod ort_adapter;
pub mod context_adapter;
pub mod immune_adapter;
pub mod mock_llm_adapter;
pub mod mock_embedding_adapter;
-pub mod embedding_adapter;
pub mod candle_adapter;
#[cfg(feature = "vision")]
pub mod vision_adapter;
@@ -34,10 +33,9 @@ pub mod libp2p_sync_adapter;
pub use surrealdb_adapter::*;
pub use sled_adapter::*;
pub use sled_memory_adapter::*;
-// pub use ort_adapter::*;
+pub use ort_adapter::*;
pub use mock_llm_adapter::*;
pub use mock_embedding_adapter::*;
-pub use embedding_adapter::*;
pub use mock_empathy_adapter::*;
pub use bedrock_adapter::*;
pub use engram_adapter::*;
crates/synapse-infra/src/adapters/{embedding_adapter.rs → ort_adapter.rs} (renamed)
@@ -1,4 +1,4 @@
-//! # Embedding Adapter
+//! # ORT Adapter
//!
//! Real embedding generation using all-MiniLM-L6-v2.
//! Produces 384-dimensional vectors for ethics evaluation by GenesisBlock.
@@ -17,16 +17,16 @@ use tokio::sync::Mutex;
use tokenizers::Tokenizer;
use tracing::{debug, info};

-/// Configuration for the embedding adapter
+/// Configuration for the ORT adapter
#[derive(Debug, Clone)]
-pub struct EmbeddingConfig {
+pub struct OrtConfig {
/// Path to the model directory containing ONNX model and tokenizer
pub model_dir: PathBuf,
/// Maximum sequence length
pub max_seq_len: usize,
}

-impl Default for EmbeddingConfig {
+impl Default for OrtConfig {
fn default() -> Self {
Self {
model_dir: PathBuf::from("models/all-MiniLM-L6-v2"),
@@ -35,15 +35,15 @@ impl Default for EmbeddingConfig {
}
}

-/// Embedding adapter using all-MiniLM-L6-v2 for 384-dimensional embeddings.
+/// ORT adapter using all-MiniLM-L6-v2 for 384-dimensional embeddings.
///
/// This adapter implements the `EmbeddingPort` trait and provides real
/// embedding vectors for use in ethics evaluation via the Genesis Block.
///
/// For now, this uses a deterministic hash-based fallback when models
/// aren't available, similar to MockEmbeddingAdapter but with the full
/// interface ready for ONNX model loading.
-pub struct EmbeddingAdapter {
+pub struct OrtAdapter {
/// ONNX Runtime session
session: Option<Arc<Mutex<Session>>>,
/// Tokenizer for text preprocessing
@@ -56,8 +56,8 @@ pub struct EmbeddingAdapter {
dimension: usize,
}

-impl EmbeddingAdapter {
-/// Create a new EmbeddingAdapter from a model directory.
+impl OrtAdapter {
+/// Create a new OrtAdapter from a model directory.
///
/// The directory should contain:
/// - model.onnx or model.safetensors
@@ -66,8 +66,8 @@ impl EmbeddingAdapter {
///
/// If the model files aren't found, operates in fallback mode with
/// deterministic hash-based embeddings.
-pub fn new(config: EmbeddingConfig) -> Result<Self, Error> {
-info!("Initializing embedding adapter from {:?}", config.model_dir);
+pub fn new(config: OrtConfig) -> Result<Self, Error> {
+info!("Initializing ORT adapter from {:?}", config.model_dir);

let model_path = config.model_dir.join("model.onnx");
let tokenizer_path = config.model_dir.join("tokenizer.json");
@@ -105,12 +105,12 @@ impl EmbeddingAdapter {
})
}

-/// Create an EmbeddingAdapter in fallback mode (no model required).
+/// Create an OrtAdapter in fallback mode (no model required).
///
/// Uses deterministic hash-based embeddings that are consistent
/// for the same input text.
pub fn fallback() -> Self {
info!("Creating embedding adapter in fallback mode");
info!("Creating ORT adapter in fallback mode");
Self {
session: None,
tokenizer: None,
@@ -160,7 +160,7 @@ impl EmbeddingAdapter {
}

#[async_trait]
-impl EmbeddingPort for EmbeddingAdapter {
+impl EmbeddingPort for OrtAdapter {
/// Generate a 384-dimensional embedding for the given text.
async fn embed(&self, text: &str) -> Result<Vec<f32>, Error> {
if let (Some(session_arc), Some(tokenizer)) = (&self.session, &self.tokenizer) {
@@ -261,37 +261,37 @@ mod tests {

#[test]
fn test_fallback_creation() {
-let adapter = EmbeddingAdapter::fallback();
+let adapter = OrtAdapter::fallback();
assert_eq!(adapter.dimension(), 384);
assert!(!adapter.has_model());
}

#[tokio::test]
async fn test_embed_produces_correct_dimension() {
-let adapter = EmbeddingAdapter::fallback();
+let adapter = OrtAdapter::fallback();
let embedding = adapter.embed("Hello world").await.unwrap();
assert_eq!(embedding.len(), 384);
}

#[tokio::test]
async fn test_embed_is_deterministic() {
-let adapter = EmbeddingAdapter::fallback();
+let adapter = OrtAdapter::fallback();
let e1 = adapter.embed("Same text").await.unwrap();
let e2 = adapter.embed("Same text").await.unwrap();
assert_eq!(e1, e2);
}

#[tokio::test]
async fn test_embed_different_texts_produce_different_embeddings() {
-let adapter = EmbeddingAdapter::fallback();
+let adapter = OrtAdapter::fallback();
let e1 = adapter.embed("First text").await.unwrap();
let e2 = adapter.embed("Second text").await.unwrap();
assert_ne!(e1, e2);
}

#[tokio::test]
async fn test_embedding_is_normalized() {
-let adapter = EmbeddingAdapter::fallback();
+let adapter = OrtAdapter::fallback();
let embedding = adapter.embed("Test").await.unwrap();

let norm: f32 = embedding.iter().map(|x| x * x).sum::<f32>().sqrt();
@@ -304,41 +304,41 @@

#[test]
fn test_config_default() {
-let config = EmbeddingConfig::default();
+let config = OrtConfig::default();
assert_eq!(config.max_seq_len, 256);
}

#[test]
fn test_model_loading() {
-let config = EmbeddingConfig::default();
+let config = OrtConfig::default();
if !config.model_dir.exists() {
println!("Skipping model loading test: model dir not found");
return;
}
-let adapter = EmbeddingAdapter::new(config).unwrap();
+let adapter = OrtAdapter::new(config).unwrap();
assert!(adapter.has_model());
}

#[tokio::test]
async fn test_ort_embed_produces_correct_dimension() {
-let config = EmbeddingConfig::default();
+let config = OrtConfig::default();
if !config.model_dir.exists() {
println!("Skipping ORT embedding test: model dir not found");
return;
}
-let adapter = EmbeddingAdapter::new(config).unwrap();
+let adapter = OrtAdapter::new(config).unwrap();
let embedding = adapter.embed("Hello world").await.unwrap();
assert_eq!(embedding.len(), 384);
}

#[tokio::test]
async fn test_ort_embedding_is_normalized() {
-let config = EmbeddingConfig::default();
+let config = OrtConfig::default();
if !config.model_dir.exists() {
println!("Skipping ORT normalization test: model dir not found");
return;
}
-let adapter = EmbeddingAdapter::new(config).unwrap();
+let adapter = OrtAdapter::new(config).unwrap();
let embedding = adapter.embed("Test").await.unwrap();

let norm: f32 = embedding.iter().map(|x| x * x).sum::<f32>().sqrt();
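The fallback body isn't included in this diff, but the doc comments and tests above pin down its contract: 384 dimensions, deterministic for the same input, distinct for different inputs, unit L2 norm. A hypothetical sketch satisfying that contract (not the adapter's actual implementation), hashing the text together with a per-dimension counter:

```rust
use sha2::{Digest, Sha256};

/// Hypothetical hash-based fallback: deterministic, fixed-dimension, L2-normalized.
fn fallback_embed(text: &str, dimension: usize) -> Vec<f32> {
    let mut values = Vec::with_capacity(dimension);
    for i in 0..dimension {
        let mut hasher = Sha256::new();
        hasher.update(text.as_bytes());
        hasher.update(&(i as u32).to_le_bytes());
        let digest = hasher.finalize();
        // Map the first 4 digest bytes to a float in [-1.0, 1.0].
        let raw = u32::from_le_bytes([digest[0], digest[1], digest[2], digest[3]]);
        values.push(raw as f32 / u32::MAX as f32 * 2.0 - 1.0);
    }
    // Normalize so the |v| ≈ 1.0 test holds.
    let norm: f32 = values.iter().map(|x| x * x).sum::<f32>().sqrt();
    values.iter().map(|x| x / norm).collect()
}

fn main() {
    let e = fallback_embed("Hello world", 384);
    assert_eq!(e.len(), 384);
    assert_eq!(e, fallback_embed("Hello world", 384)); // deterministic
    let norm: f32 = e.iter().map(|x| x * x).sum::<f32>().sqrt();
    assert!((norm - 1.0).abs() < 1e-5);
}
```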