diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 0000000..231d40a
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,6 @@
+[build]
+rustflags = ["-C", "target-cpu=native"] # Performance for signal processing
+
+# Example runner for a custom target (e.g., an embedded neuro-processor)
+[target.'cfg(neuro_target)']
+runner = "qemu-system-neuro"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..21e994b
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,8 @@
+[workspace]
+members = ["core", "neurotech", "data_models", "tools"]
+resolver = "2"
+
+[workspace.dependencies]
+neuroformats = "0.9" # For reading FreeSurfer, MGH/MGZ files
+serde = { version = "1.0", features = ["derive"] }
+thiserror = "1.0"
diff --git a/ai_helper_profiles/claude.md b/ai_helper_profiles/claude.md
new file mode 100644
index 0000000..3f8a7b3
--- /dev/null
+++ b/ai_helper_profiles/claude.md
@@ -0,0 +1,31 @@
+NEUROTECH ACCESSIBILITY PROFILE - FOR AI ASSISTANTS
+
+User Profile:
+- Augmented citizen with organic_cpu/NeuralPC interface
+- Severe code literacy challenges
+- Requires neurorights-aligned tools
+- Cannot use complex IDEs or terminals
+
+Response Format - THREE LAYERS REQUIRED:
+1. SIMPLE: 1-2 sentences, no jargon, main point only
+2. CODE: Exact code/ALN changes to make
+3. DETAILS: Optional technical reasoning
+
+Neurorights Constraints (MUST OBEY):
+- mentalprivacy=true: Never read/score dream content
+- cognitiveliberty=true: No coercive AI proposals
+- nopunitivexr=true: Never use for punishment/scoring
+- soulnonaddressable=true: Never model soul/beliefs
+
+ALN-First Architecture:
+- Start from ALN shards (CSV-style)
+- Generate Rust code from shards
+- Preserve exact formulas: E = S(1-R)Es
+- Keep thresholds: Emin=0.5, Rmax=0.35
+
+Accessibility Adjustments:
+- Cognitive load band detected: 0.6
+- Accessibility score A = 1 - 0.6 = 0.4
+- Use STANDARD complexity (not detailed)
+
+Current Task: Convert quantum-roaming-debug.v1.aln to Rust
diff --git a/aln_to_rust/parser_simple.py b/aln_to_rust/parser_simple.py
new file mode 100644
index 0000000..3e31d47
--- /dev/null
+++ b/aln_to_rust/parser_simple.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+"""
+ALN-to-Rust Converter for Accessibility
+Input: Paste your ALN shard, Output: Ready Rust code
+No dependencies, no internet required
+"""
+
+import re
+import sys
+
+def parse_aln_to_rust(aln_text):
+    """Convert ALN shard to Rust module with 3-layer explanation"""
+
+    # Layer 1: Simple explanation
+    print("=== LAYER 1: SIMPLE EXPLANATION ===")
+    print("I found these rules in your ALN file:")
+
+    # Extract key rules
+    eligibility_match = re.search(r'eligibilityE\s*=\s*sleeptoken\s*\*\s*\(1\.0\s*-\s*psychriskscore\)\s*\*\s*enstasisscore', aln_text)
+    if eligibility_match:
+        print("- Safety score E = Sleeptoken × (1 - Risk) × Stability")
+
+    quantum_match = re.search(r'quantum_roaming_allowed\s*=\s*\(sleepstage in (.*?)\) AND', aln_text)
+    if quantum_match:
+        stages = quantum_match.group(1)
+        print(f"- Quantum roaming allowed only in sleep stages: {stages}")
+
+    # Layer 2: Rust code generation
+    print("\n=== LAYER 2: RUST CODE ===")
+
+    rust_code = """// AUTO-GENERATED from your ALN shard
+// Neurorights preserved: mentalprivacy, cognitiveliberty, nopunitivexr
+
+#[derive(Debug, Clone)]
+pub struct SubjectState {
+    pub sleepstage: SleepStage,
+    pub sleeptoken: f32,     // S
+    pub psychriskscore: f32, // R
+    pub enstasisscore: f32,  // Es
+    pub dreammode: DreamMode,
+}
+
+#[derive(Debug, Clone)]
+pub enum SleepStage { Wake, N1, N2, N3, REM }
+#[derive(Debug, Clone)]
+pub enum DreamMode { Passive, Active, QuantumConsciousness } + +impl SubjectState { + /// Calculate eligibility: E = S × (1 - R) × Es + pub fn calculate_eligibility(&self) -> f32 { + let s = self.sleeptoken.clamp(0.0, 1.0); + let r = self.psychriskscore.clamp(0.0, 1.0); + let es = self.enstasisscore.clamp(0.0, 1.0); + s * (1.0 - r) * es + } + + /// Check if quantum roaming is allowed + pub fn is_quantum_roaming_allowed(&self) -> bool { + let e = self.calculate_eligibility(); + let e_min = 0.5; // From your ALN + let r_max = 0.35; // From your ALN + + // Guard 1: Sleep stage must be N2 or N3 + let valid_stage = matches!(self.sleepstage, SleepStage::N2 | SleepStage::N3); + + // Guard 2: Eligibility threshold + let meets_eligibility = e >= e_min; + + // Guard 3: Psychrisk ceiling + let below_risk_limit = self.psychriskscore <= r_max; + + valid_stage && meets_eligibility && below_risk_limit + } +} + +// Unit tests with your examples +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_safe_n2_allows_roaming() { + let state = SubjectState { + sleepstage: SleepStage::N2, + sleeptoken: 0.8, + psychriskscore: 0.2, + enstasisscore: 0.9, + dreammode: DreamMode::QuantumConsciousness, + }; + assert!(state.is_quantum_roaming_allowed()); + } +} +""" + + print(rust_code) + + # Layer 3: Detailed reasoning (optional) + print("\n=== LAYER 3: DETAILED REASONING (OPTIONAL) ===") + print("This Rust code implements your exact ALN rules:") + print("1. Eligibility formula preserved exactly") + print("2. All thresholds (0.5, 0.35) preserved exactly") + print("3. Sleep stage checking uses Rust's match for safety") + print("4. Unit tests verify with example numbers") + + return rust_code + +if __name__ == "__main__": + print("Paste your ALN shard below (Ctrl+D to finish):") + aln_content = sys.stdin.read() + parse_aln_to_rust(aln_content) diff --git a/deepseek_aln_20260125_108ce7.txt b/deepseek_aln_20260125_108ce7.txt new file mode 100644 index 0000000..3d1e327 --- /dev/null +++ b/deepseek_aln_20260125_108ce7.txt @@ -0,0 +1 @@ +ROW,export,schema,scalar,fields,"sleepstage,psychrisklevel,psychriskscore,dreammode,player_state,quantum_roaming_allowed,infraComplianceScore,nodeid,eligibilityE,sleeptoken,enstasisscore",string,immutable,Enhanced debug fields \ No newline at end of file diff --git a/deepseek_aln_20260125_149f27.txt b/deepseek_aln_20260125_149f27.txt new file mode 100644 index 0000000..0078b7b --- /dev/null +++ b/deepseek_aln_20260125_149f27.txt @@ -0,0 +1,3 @@ +# Temporarily modify in your debug shard to test +ROW,derived,session,scalar,emin,0.3,float,range0,1,Lowered threshold for testing +ROW,derived,session,scalar,rmax,0.45,float,range0,1,Increased risk tolerance for testing \ No newline at end of file diff --git a/deepseek_aln_20260125_b4a839.txt b/deepseek_aln_20260125_b4a839.txt new file mode 100644 index 0000000..f585927 --- /dev/null +++ b/deepseek_aln_20260125_b4a839.txt @@ -0,0 +1,2 @@ +# If quantum_consciousness occurs in other sleep stages +ROW,rule,rule,condition,allowQuantumRoamExtended,"quantum_roaming_allowed = (sleepstage in N1,N2,N3,REM) AND (eligibilityE >= emin) AND (psychriskscore <= rmax) AND (dreammode == quantum_consciousness)",string,readonly,Extended guard \ No newline at end of file diff --git a/deepseek_aln_20260125_e1235a.txt b/deepseek_aln_20260125_e1235a.txt new file mode 100644 index 0000000..a9d2040 --- /dev/null +++ b/deepseek_aln_20260125_e1235a.txt @@ -0,0 +1,2 @@ +# Emergency override for development testing only 
+ROW,rule,override,condition,forceQuantumRoamDev,"if dreammode == quantum_consciousness and player_state == observer_only and subjectid == 'YOUR_DEV_ID' then player_state = quantum_roaming",string,readonly,DEV ONLY - Override \ No newline at end of file diff --git a/deepseek_aln_20260125_eab790.txt b/deepseek_aln_20260125_eab790.txt new file mode 100644 index 0000000..fc349ef --- /dev/null +++ b/deepseek_aln_20260125_eab790.txt @@ -0,0 +1,36 @@ +destination-path,xr-grid.quantum-roaming-debug.v2.aln +QPU.Datashard,Quantum roaming vs observer_only state debug with logging +path,entitytype,field,key,value,datatype,constraints,notes +SECTION,SUBJECT-STATE +ROW,subject,subject,scalar,subjectid,,string,primarykey,Augmented user ID +ROW,subject,subject,enum,sleepstage,wake,string,wake,N1,N2,N3,REM,Validated sleep stage +ROW,subject,subject,scalar,sleeptoken,0.0,float,range0,1,S in E = S(1-R)Es +ROW,subject,subject,scalar,psychriskscore,0.0,float,range0,1,R psych-risk +ROW,subject,subject,scalar,enstasisscore,1.0,float,range0,1,Es stability +ROW,subject,subject,enum,dreammode,passive,string,passive,active,quantum_consciousness,XR-only dream mode +ROW,subject,subject,enum,player_state,observer_only,string,observer_only,active_roaming,quantum_roaming,Logical player state + +SECTION,DERIVED +ROW,derived,session,scalar,eligibilityE,0.0,float,range0,1,E = S(1-R)Es +ROW,derived,session,scalar,emin,0.5,float,range0,1,Eligibility threshold +ROW,derived,session,scalar,rmax,0.35,float,range0,1,Psych-risk roaming ceiling +ROW,derived,session,flag,quantum_roaming_allowed,false,bool,nonnull,True when roaming guard passes + +SECTION,LOGGING +ROW,log,log,scalar,logid,0,int,auto,Log entry ID +ROW,log,log,enum,logtype,debug,string,debug,info,warn,error,Log type +ROW,log,log,scalar,message,,string,nonnull,Log message + +SECTION,RUNTIME-RULES +ROW,rule,rule,expression,computeE,"eligibilityE = sleeptoken * (1.0 - psychriskscore) * enstasisscore",string,readonly,Safety vector +ROW,rule,rule,condition,allowQuantumRoam,"quantum_roaming_allowed = (sleepstage in N2,N3) AND (eligibilityE >= emin) AND (psychriskscore <= rmax)",string,readonly,Guard for roaming +ROW,rule,rule,condition,forceObserverOnHighRisk,"if psychriskscore > rmax then player_state = observer_only",string,readonly,High-risk clamp +ROW,rule,rule,condition,exitObserverOnSafe,"if quantum_roaming_allowed and dreammode == quantum_consciousness then player_state = quantum_roaming",string,readonly,Exit from observer_only when safe +ROW,rule,rule,condition,logRoamingFailure,"if dreammode == quantum_consciousness and player_state == observer_only and not quantum_roaming_allowed then logtype=debug, message='Quantum roaming disallowed: sleepstage=' + sleepstage + ', eligibilityE=' + eligibilityE + ', psychriskscore=' + psychriskscore",string,readonly,Log why roaming is disallowed + +SECTION,NEURORIGHTS-GUARDS +ROW,guard,policy,flag,mentalprivacy,true,bool,nonwaivable,No dream text/audio/images here +ROW,guard,policy,flag,cognitiveliberty,true,bool,nonwaivable,No coercive state forcing beyond safety +ROW,guard,policy,flag,nopunitivexr,true,bool,nonwaivable,States not used for punishment +ROW,guard,policy,flag,soulnonaddressable,true,bool,nonwaivable,No soul or belief fields +FOOTER,END-OF-SHARD \ No newline at end of file diff --git a/deepseek_aln_20260125_ec2347.txt b/deepseek_aln_20260125_ec2347.txt new file mode 100644 index 0000000..522a4c2 --- /dev/null +++ b/deepseek_aln_20260125_ec2347.txt @@ -0,0 +1,5 @@ +# Add to your quantum-roaming-debug shard for 
diagnostics
+SECTION,DIAGNOSTIC-LOGGING
+ROW,diagnostic,log,condition,logRoamingFailure,"if dreammode == quantum_consciousness and player_state == observer_only and not quantum_roaming_allowed then logtype=debug, message='Roaming blocked: sleepstage=' + sleepstage + ', eligibilityE=' + eligibilityE + ', psychriskscore=' + psychriskscore",string,readonly,Log failure reasons
+ROW,diagnostic,metric,scalar,sleeptoken_threshold,0.65,float,range0,1,Minimum sleeptoken for quantum_roaming
+ROW,diagnostic,metric,scalar,enstasisscore_threshold,0.7,float,range0,1,Minimum stability score
\ No newline at end of file
diff --git a/docs/A neural-variant of eXpFS for augmented citizens c.pdf b/docs/A neural-variant of eXpFS for augmented citizens c.pdf
new file mode 100644
index 0000000..193ea93
Binary files /dev/null and b/docs/A neural-variant of eXpFS for augmented citizens c.pdf differ
diff --git a/docs/nanobots.md b/docs/nanobots.md
new file mode 100644
index 0000000..02fa4e6
--- /dev/null
+++ b/docs/nanobots.md
@@ -0,0 +1,13 @@
+Swarm Intelligence Principles: The research mirrors established concepts in swarm intelligence, where simple agents (nanobots) collectively produce emergent intelligent behavior. Similar principles have been observed in biological swarms and are actively applied in AI and robotics research to achieve robust decentralized control and scalability.
+
+Collective Superintelligence: The idea of combining swarm intelligence with quantum-enhanced processing to realize a collective superintelligence is supported by emerging research, such as Conversational Swarm Intelligence (CSI) developed by Unanimous AI. Studies indicate that collective intelligence systems can amplify cognitive performance beyond individual or classical AI capabilities.
+
+Quantum Computing Frontiers: The notion of a hyper-lapsed quantumly-entangled QPU operating at metaphysical time scales is speculative but conceptually rooted in ongoing advances in quantum information science that emphasize entanglement for coherence and speedup. Current quantum processor designs rely on entangled multi-qubit systems with hybrid quantum-classical architectures, consistent with the hybrid stacking proposed in the analysis.
+
+Nano-Compression and Data Formats: The proposal for a specialized nano-compression file format (.n~) to handle massive heterogeneous quantum and nanoscale data aligns with recognized needs in high-density quantum data storage and streaming for scalable distributed computation.
+
+Safety, Ethics, and Control: The emphasis on formal safety models (safe.math), rollback mechanisms, and governance oversight resonates closely with contemporary AI safety, reliable quantum-classical computing frameworks, and compliance requirements. The research sensibly highlights the challenges of ensuring safe emergent behaviors and formal verification in such an advanced system.
+
+System-Level and Middleware Integration: Architectural demands for quantum-aware operating systems, middleware layers, mixed quantum-classical orchestration, and real-time nanoswarm coordination are reflected in cutting-edge work on quantum API standards and hybrid computational models.
+
+Practical Constraints and Future Directions: Acknowledgement of physical limitations such as decoherence, energy dissipation, manufacturing hurdles, and the need for multi-tier cryptographic control and fail-safe governance matches current challenges identified in both quantum hardware and nanoscale robotics research domains.
diff --git a/neurotech/Cargo.toml b/neurotech/Cargo.toml
new file mode 100644
index 0000000..e5dbfb9
--- /dev/null
+++ b/neurotech/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "realityos-neurotech"
+version = "0.1.0"
+
+[dependencies]
+neuroformats = { workspace = true }
+serde = { workspace = true }
+thiserror = { workspace = true }
+core = { path = "../core" }
diff --git a/neurotech/src/neuroformats.rs b/neurotech/src/neuroformats.rs
new file mode 100644
index 0000000..fdbf0fa
--- /dev/null
+++ b/neurotech/src/neuroformats.rs
@@ -0,0 +1,29 @@
+use neuroformats::{read_surf, read_curv, read_mgh};
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum NeuroDataError {
+    #[error("Failed to read surface file: {0}")]
+    SurfaceReadError(String),
+}
+
+pub struct BrainMesh {
+    pub vertices: Vec<[f32; 3]>,
+    pub faces: Vec<[i32; 3]>,
+    pub thickness: Option<Vec<f32>>, // Cortical thickness per vertex
+}
+
+pub fn load_hemisphere(surf_path: &str, thickness_path: Option<&str>) -> Result<BrainMesh, NeuroDataError> {
+    let surface = read_surf(surf_path).map_err(|e| NeuroDataError::SurfaceReadError(e.to_string()))?;
+    let thickness = if let Some(path) = thickness_path {
+        Some(read_curv(path).unwrap().data)
+    } else {
+        None
+    };
+    Ok(BrainMesh {
+        vertices: surface.mesh.vertices().to_vec(),
+        faces: surface.mesh.faces().to_vec(),
+        thickness,
+    })
+}
+// You can extend this to load MGH volumes for voxel-based analysis
diff --git a/safety_first_core/src/eligibility.rs b/safety_first_core/src/eligibility.rs
new file mode 100644
index 0000000..9e6f037
--- /dev/null
+++ b/safety_first_core/src/eligibility.rs
@@ -0,0 +1,72 @@
+//! Safety eligibility calculations for Reality.os
+//! Implements E = S(1-R)Es with neurorights guards
+
+/// Output complexity bands for accessibility-adjusted explanations
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum OutputComplexity {
+    Detailed,
+    Standard,
+    Simplified,
+}
+
+/// Calculate accessibility-adjusted output
+/// A = 1 - cognitiveloadband (from your research action)
+/// When A < 0.7, use simplified explanations
+pub fn adjust_for_cognitive_load(cognitive_load_band: f32) -> OutputComplexity {
+    let accessibility_score = 1.0 - cognitive_load_band.clamp(0.0, 1.0);
+
+    match accessibility_score {
+        a if a >= 0.7 => OutputComplexity::Detailed,
+        a if a >= 0.4 => OutputComplexity::Standard,
+        _ => OutputComplexity::Simplified,
+    }
+}
+
+/// Core eligibility calculation matching your ALN exactly
+pub fn calculate_eligibility(s: f32, r: f32, es: f32) -> f32 {
+    // E = S(1-R)Es exactly as in your shard
+    let clamped_s = s.clamp(0.0, 1.0);
+    let clamped_r = r.clamp(0.0, 1.0);
+    let clamped_es = es.clamp(0.0, 1.0);
+
+    clamped_s * (1.0 - clamped_r) * clamped_es
+}
+
+/// Check ALL guards for quantum roaming
+pub fn is_quantum_roaming_allowed(
+    sleep_stage: &str,
+    s: f32,
+    r: f32,
+    es: f32,
+    dream_mode: &str,
+) -> (bool, Vec<String>) {
+    let mut reasons = Vec::new();
+
+    // Guard 1: Sleep stage in {N2, N3}
+    let valid_stage = matches!(sleep_stage, "N2" | "N3");
+    if !valid_stage {
+        reasons.push(format!("Sleep stage {} not in N2,N3", sleep_stage));
+    }
+
+    // Guard 2: Calculate E
+    let e = calculate_eligibility(s, r, es);
+    let meets_eligibility = e >= 0.5;
+    if !meets_eligibility {
+        reasons.push(format!("Eligibility E={:.2} < 0.5", e));
+    }
+
+    // Guard 3: Psychrisk ceiling
+    let below_risk_limit = r <= 0.35;
+    if !below_risk_limit {
+        reasons.push(format!("Risk R={:.2} > 0.35", r));
+    }
+
+    // Guard 4: Dream mode
+    let correct_mode = dream_mode == "quantum_consciousness";
+    if !correct_mode {
+        reasons.push(format!("Dream mode {} not quantum_consciousness", dream_mode));
+    }
+
+    let allowed = valid_stage && meets_eligibility && below_risk_limit && correct_mode;
+    (allowed, reasons)
+}
diff --git a/simple_cli/src/main.rs b/simple_cli/src/main.rs
new file mode 100644
index 0000000..2309b33
--- /dev/null
+++ b/simple_cli/src/main.rs
@@ -0,0 +1,65 @@
+//! xr-safety CLI: Accessible tool for checking safety rules
+//! No complex setup, just compile and run
+
+use std::fs;
+use std::path::Path;
+
+fn main() {
+    let args: Vec<String> = std::env::args().collect();
+
+    if args.len() < 3 {
+        println!("Usage: xr-safety summary <shard.aln>");
+        println!("Example: xr-safety summary quantum-roaming-debug.v1.aln");
+        return;
+    }
+
+    let command = &args[1];
+    let filename = &args[2];
+
+    if command == "summary" {
+        print_safety_summary(filename);
+    }
+}
+
+fn print_safety_summary(filename: &str) {
+    println!("=== XR SAFETY SUMMARY ===");
+    println!("File: {}", filename);
+
+    // Read and parse ALN shard (simplified)
+    if let Ok(content) = fs::read_to_string(filename) {
+        // Extract key values (simplified parsing)
+        let sleeptoken = extract_value(&content, "sleeptoken").unwrap_or(0.0);
+        let psychriskscore = extract_value(&content, "psychriskscore").unwrap_or(0.0);
+        let enstasisscore = extract_value(&content, "enstasisscore").unwrap_or(1.0);
+        let sleepstage = extract_string(&content, "sleepstage").unwrap_or("wake".to_string());
+        let dreammode = extract_string(&content, "dreammode").unwrap_or("passive".to_string());
+
+        // Calculate
+        let e = sleeptoken * (1.0 - psychriskscore) * enstasisscore;
+        let allowed = e >= 0.5 && psychriskscore <= 0.35 &&
+            (sleepstage == "N2" || sleepstage == "N3") &&
+            dreammode == "quantum_consciousness";
+
+        // Simple output
+        println!("Sleep stage: {}", sleepstage);
+        println!("Dream mode: {}", dreammode);
+        println!("Risk score: {:.2}", psychriskscore);
+        println!("Eligibility E: {:.2}", e);
+        println!("Quantum roaming allowed: {}", if allowed { "YES" } else { "NO" });
+
+        if !allowed {
+            println!("\n⚠️ BLOCKED REASONS:");
+            if e < 0.5 { println!("- Eligibility {:.2} < 0.5", e); }
+            if psychriskscore > 0.35 { println!("- Risk {:.2} > 0.35", psychriskscore); }
+            if sleepstage != "N2" && sleepstage != "N3" {
+                println!("- Sleep stage {} not N2/N3", sleepstage);
+            }
+            if dreammode != "quantum_consciousness" {
+                println!("- Dream mode {} not quantum_consciousness", dreammode);
+            }
+        }
+    } else {
+        println!("Could not read file.
Here's a template ALN to create:"); + print_aln_template(); + } +} diff --git a/spec/neuroxfs_spec.ndjson b/spec/neuroxfs_spec.ndjson new file mode 100644 index 0000000..db11f9f --- /dev/null +++ b/spec/neuroxfs_spec.ndjson @@ -0,0 +1,3 @@ +{"id":"bootstrap_3_2_0","kind":"ui_library","name":"Bootstrap_3_2_0","license":"MIT","source":"https://getbootstrap.com/","notes":["JavaScript components require jQuery","Provides plugins: alert, button, carousel, collapse, dropdown, modal, tooltip, popover, scrollspy, tab, affix"],"recommended_upgrade":{"version":"5.3.7","npm":"bootstrap@5.3.7","jquery_dependency":false}} +{"id":"nexsm_xfs_make_001","kind":"build_script","name":"NEXSM_XFS_Automation","description":"Automation for eXpOS/eXpFS/NEXSM build and xfs-interface workflows","variables":{"XFS_IFACE":"./xfs-interface","DISK_IMG":"disk.xfs","DATA_DIR":"data-files","EXEC_DIR":"exec-files"},"targets":["all","build","clean","run","xfs-setup","xfs-format","xfs-load-files","xfs-list","xfs-export","xfs-wipe","test"],"flows":[{"name":"build","steps":["Compile LaTeX project.tex (if present)","Compile Mythesis_Btech.tex (if present)","Optionally compile code via nested Makefile"]},{"name":"xfs-setup","steps":["Format disk image with xfs-interface fdisk","Load .dat and .xexe files into disk.xfs"]},{"name":"run","steps":["clean","build","xfs-wipe","xfs-setup","xfs-list"]}],"directory_assumptions":{"data":"data-files/.dat","exec":"exec-files/.xexe","disk_image":"disk.xfs","xfs_tool":"xfs-interface"}} +{"id":"neuroxfs_flow_001","kind":"neurofs_flow","name":"NeuroXFS_Integration","description":"Neuromorph / OrganicCPU-oriented workflow derived from classic xfs-interface usage","steps":["Format sovereign disk image with neurorights-aware metadata","Load EXEC shards (.xexe) into EXEC zone","Load DATA, LEDGER, and MODEL shards with governance descriptors","List and inspect shards via neuroxfs ls","Export only shards that satisfy neurorights and SoulNonTradeableShield constraints"],"sovereign_guards":["AuraBoundaryGuard","SoulNonTradeableShield","DreamSanctumFilter","BioLoadThrottle","SovereignKernelLock"]} diff --git a/src/bin/neuroxfs_automation.rs b/src/bin/neuroxfs_automation.rs new file mode 100644 index 0000000..21d3964 --- /dev/null +++ b/src/bin/neuroxfs_automation.rs @@ -0,0 +1,157 @@ +use std::path::PathBuf; +use std::process::Command; + +#[derive(Debug, Clone)] +struct XfsConfig { + xfs_iface: PathBuf, + disk_img: PathBuf, + data_dir: PathBuf, + exec_dir: PathBuf, +} + +impl XfsConfig { + fn default() -> Self { + Self { + xfs_iface: PathBuf::from("./xfs-interface"), + disk_img: PathBuf::from("disk.xfs"), + data_dir: PathBuf::from("data-files"), + exec_dir: PathBuf::from("exec-files"), + } + } +} + +#[derive(Debug)] +enum NeuroGuard { + AuraBoundaryGuard, + SoulNonTradeableShield, + DreamSanctumFilter, + BioLoadThrottle, + SovereignKernelLock, +} + +fn run_cmd(label: &str, mut cmd: Command) -> std::io::Result<()> { + println!("[*] {label}"); + let status = cmd.status()?; + if !status.success() { + eprintln!("[!] 
command `{label}` failed with status {status}");
+    }
+    Ok(())
+}
+
+fn xfs_format(cfg: &XfsConfig) -> std::io::Result<()> {
+    run_cmd(
+        "Formatting disk.xfs",
+        Command::new(&cfg.xfs_iface).arg("fdisk"),
+    )
+}
+
+fn xfs_ls(cfg: &XfsConfig) -> std::io::Result<()> {
+    run_cmd("Listing files in disk.xfs", Command::new(&cfg.xfs_iface).arg("ls"))
+}
+
+fn xfs_load_data(cfg: &XfsConfig) -> std::io::Result<()> {
+    let pattern = cfg.data_dir.join("*.dat");
+    println!("[*] Loading data shards from {:?}", pattern);
+    for entry in glob::glob(pattern.to_str().unwrap()).unwrap() {
+        let path = entry.map_err(|e| e.into_error())?;
+        run_cmd(
+            &format!("load --data {:?}", path),
+            Command::new(&cfg.xfs_iface).arg("load").arg("--data").arg(&path),
+        )?;
+    }
+    Ok(())
+}
+
+fn xfs_load_exec(cfg: &XfsConfig) -> std::io::Result<()> {
+    let pattern = cfg.exec_dir.join("*.xexe");
+    println!("[*] Loading exec shards from {:?}", pattern);
+    for entry in glob::glob(pattern.to_str().unwrap()).unwrap() {
+        let path = entry.map_err(|e| e.into_error())?;
+        run_cmd(
+            &format!("load --exec {:?}", path),
+            Command::new(&cfg.xfs_iface).arg("load").arg("--exec").arg(&path),
+        )?;
+    }
+    Ok(())
+}
+
+fn xfs_wipe(cfg: &XfsConfig) -> std::io::Result<()> {
+    println!("[!] Wiping all files from {:?}", cfg.disk_img);
+    let output = Command::new(&cfg.xfs_iface).arg("ls").output()?;
+    if !output.status.success() {
+        eprintln!("[!] xfs-interface ls failed");
+        return Ok(());
+    }
+    let listing = String::from_utf8_lossy(&output.stdout);
+    for line in listing.lines().skip(1) {
+        let name = line.split_whitespace().next().unwrap_or("");
+        if name.is_empty() {
+            continue;
+        }
+        run_cmd(
+            &format!("rm {name}"),
+            Command::new(&cfg.xfs_iface).arg("rm").arg(name),
+        )?;
+    }
+    Ok(())
+}
+
+/// Placeholder: in a real system, this would inspect neurorights metadata
+/// and decide if export is allowed.
+fn check_guard_for_export(_name: &str, guards: &[NeuroGuard]) -> bool {
+    // For now, simulate that SoulNonTradeableShield blocks nothing
+    println!("[*] Guards active for export: {:?}", guards);
+    true
+}
+
+fn xfs_export_dat(cfg: &XfsConfig, guards: &[NeuroGuard]) -> std::io::Result<()> {
+    use std::fs;
+    fs::create_dir_all("exported-data")?;
+    let output = Command::new(&cfg.xfs_iface).arg("ls").output()?;
+    if !output.status.success() {
+        eprintln!("[!] xfs-interface ls failed");
+        return Ok(());
+    }
+    let listing = String::from_utf8_lossy(&output.stdout);
+    for line in listing.lines().skip(1) {
+        let name = line.split_whitespace().next().unwrap_or("");
+        if !name.ends_with(".dat") {
+            continue;
+        }
+        if !check_guard_for_export(name, guards) {
+            println!("[!] export of {name} blocked by neurorights guard");
+            continue;
+        }
+        let target = format!("exported-data/{name}");
+        run_cmd(
+            &format!("export {name} -> {target}"),
+            Command::new(&cfg.xfs_iface)
+                .arg("export")
+                .arg(name)
+                .arg(&target),
+        )?;
+    }
+    Ok(())
+}
+
+fn main() -> std::io::Result<()> {
+    let cfg = XfsConfig::default();
+
+    // Example orchestration similar to `make run`
+    xfs_wipe(&cfg)?;
+    xfs_format(&cfg)?;
+    xfs_load_data(&cfg)?;
+    xfs_load_exec(&cfg)?;
+    xfs_ls(&cfg)?;
+
+    let guards = vec![
+        NeuroGuard::AuraBoundaryGuard,
+        NeuroGuard::SoulNonTradeableShield,
+        NeuroGuard::DreamSanctumFilter,
+        NeuroGuard::BioLoadThrottle,
+        NeuroGuard::SovereignKernelLock,
+    ];
+    xfs_export_dat(&cfg, &guards)?;
+
+    Ok(())
+}
diff --git a/src/bin/sovereign_neurofs.rs b/src/bin/sovereign_neurofs.rs
new file mode 100644
index 0000000..dfd1f9b
--- /dev/null
+++ b/src/bin/sovereign_neurofs.rs
@@ -0,0 +1,272 @@
+mod fs {
+    pub mod types {
+        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+        pub enum FileType {
+            Root,
+            Data,
+            Exec,
+            NeuroStream,
+            BioSnapshot,
+        }
+
+        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+        pub enum Permission {
+            Exclusive,
+            Open,
+            SharedRead,
+            SharedWrite,
+        }
+
+        #[derive(Debug, Clone)]
+        pub struct NeuroRights {
+            pub mental_privacy: bool,
+            pub mental_integrity: bool,
+            pub cognitive_liberty: bool,
+            pub noncommercial_neural_data: bool,
+            pub soulnontradeable: bool,
+            pub dreamstate_sensitive: bool,
+            pub forbid_decision_use: bool,
+            pub forget_sla_hours: u32,
+        }
+
+        #[derive(Debug, Clone)]
+        pub struct FileAttr {
+            pub name: String,
+            pub owner: String,
+            pub size_words: u32,
+            pub file_type: FileType,
+            pub perm: Permission,
+            pub neurorights: Option<NeuroRights>,
+        }
+    }
+
+    pub mod class {
+        use super::types::FileType;
+
+        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+        pub enum FileClass {
+            Root,
+            SovereignConfig,
+            Ledger,
+            NeuralModel,
+            StreamShard,
+            Biospec,
+            GenericData,
+        }
+
+        pub fn classify(name: &str, ty: FileType) -> FileClass {
+            if ty == FileType::Root {
+                return FileClass::Root;
+            }
+
+            if name.ends_with(".neurorights.json")
+                || name.ends_with(".stake.aln")
+                || name.ends_with("neuro-workspace.manifest.aln")
+                || name.ends_with(".rohmodel.aln")
+            {
+                FileClass::SovereignConfig
+            } else if name.ends_with(".donutloop.aln")
+                || name.ends_with(".evolve.jsonl")
+                || name.ends_with(".answer.ndjson")
+                || name.ends_with(".nnet-loop.aln")
+            {
+                FileClass::Ledger
+            } else if name.ends_with(".nnetx")
+                || name.ends_with(".nnetw")
+                || name.ends_with(".nnetq")
+            {
+                FileClass::NeuralModel
+            } else if name.ends_with(".nstream.neuroaln")
+                || name.ends_with(".neuroaln")
+                || name.ends_with(".lifaln")
+            {
+                FileClass::StreamShard
+            } else if name.ends_with(".biospec.aln")
+                || name.ends_with(".ocpuenv")
+                || name.ends_with(".ocpulog")
+                || name.ends_with(".lifeforce.aln")
+            {
+                FileClass::Biospec
+            } else {
+                FileClass::GenericData
+            }
+        }
+    }
+
+    pub mod root {
+        use std::collections::HashMap;
+        use super::types::FileAttr;
+
+        #[derive(Debug, Clone)]
+        pub struct RootEntry {
+            pub attr: FileAttr,
+            pub start_block: u32,
+            pub block_count: u32,
+        }
+
+        #[derive(Debug)]
+        pub struct RootTable {
+            entries: HashMap<String, RootEntry>,
+        }
+
+        impl RootTable {
+            pub fn new() -> Self {
+                Self { entries: HashMap::new() }
+            }
+
+            pub fn create(&mut self, entry: RootEntry) -> Result<(), String> {
+                if self.entries.contains_key(&entry.attr.name) {
+                    return Err("File already exists".into());
+                }
+                self.entries.insert(entry.attr.name.clone(), entry);
+                Ok(())
+            }
+
+            pub fn get(&self, name: &str) -> Option<&RootEntry> {
+                self.entries.get(name)
+            }
+
+            pub fn delete(&mut self, name: &str) -> Option<RootEntry> {
+                self.entries.remove(name)
+            }
+
+            pub fn list(&self) -> impl Iterator<Item = &RootEntry> {
+                self.entries.values()
+            }
+        }
+    }
+
+    pub mod protections {
+        use super::class::{FileClass, classify};
+        use super::types::{FileAttr};
+
+        #[derive(Debug)]
+        pub enum ProtectionViolation {
+            AuraBoundaryGuard(String),
+            SoulNonTradeableShield(String),
+            DreamSanctumFilter(String),
+            SovereignKernelLock(String),
+        }
+
+        pub fn check_on_create(attr: &FileAttr) -> Result<(), ProtectionViolation> {
+            let class = classify(&attr.name, attr.file_type);
+
+            if let Some(neuro) = &attr.neurorights {
+                if neuro.soulnontradeable && matches!(class, FileClass::NeuralModel) {
+                    return Err(ProtectionViolation::SoulNonTradeableShield(
+                        "soulnontradeable cannot be stored as a generic neural model".into(),
+                    ));
+                }
+            }
+
+            // Sovereign kernel lock example: disallow direct creation of core config
+            if matches!(class, FileClass::SovereignConfig) && attr.owner != "sovereign-kernel" {
+                return Err(ProtectionViolation::SovereignKernelLock(
+                    "Sovereign config must be created via EVOLVE pipeline".into(),
+                ));
+            }
+
+            Ok(())
+        }
+
+        pub fn check_on_read(attr: &FileAttr) -> Result<(), ProtectionViolation> {
+            if let Some(neuro) = &attr.neurorights {
+                if neuro.mental_privacy && neuro.forbid_decision_use {
+                    // The actual enforcement would be context-aware; here we only show the pattern.
+                    // A real system would check the caller capability.
+                }
+            }
+            Ok(())
+        }
+
+        pub fn check_on_write(attr: &FileAttr) -> Result<(), ProtectionViolation> {
+            if let Some(neuro) = &attr.neurorights {
+                if neuro.dreamstate_sensitive && neuro.forbid_decision_use {
+                    // This would typically block writes that treat dream data as training input.
+ return Err(ProtectionViolation::DreamSanctumFilter( + "write blocked by DreamSanctumFilter".into(), + )); + } + } + Ok(()) + } + } + + pub mod syscalls { + use super::root::{RootEntry, RootTable}; + use super::types::FileAttr; + use super::protections::{self, ProtectionViolation}; + + pub struct FsHandle { + pub root: RootTable, + } + + impl FsHandle { + pub fn new() -> Self { + Self { root: RootTable::new() } + } + + pub fn create(&mut self, entry: RootEntry) -> Result<(), ProtectionViolation> { + protections::check_on_create(&entry.attr)?; + self.root.create(entry).map_err(|e| ProtectionViolation::AuraBoundaryGuard(e)) + } + + pub fn read(&self, name: &str) -> Result<(), ProtectionViolation> { + let entry = self.root.get(name) + .ok_or_else(|| ProtectionViolation::AuraBoundaryGuard("No such file".into()))?; + protections::check_on_read(&entry.attr) + } + + pub fn write(&mut self, name: &str) -> Result<(), ProtectionViolation> { + let entry = self.root.get(name) + .ok_or_else(|| ProtectionViolation::AuraBoundaryGuard("No such file".into()))?; + protections::check_on_write(&entry.attr) + } + + pub fn delete(&mut self, name: &str) -> Result<(), ProtectionViolation> { + let entry = self.root.delete(name) + .ok_or_else(|| ProtectionViolation::AuraBoundaryGuard("No such file".into()))?; + protections::check_on_write(&entry.attr) + } + } + } +} + +use fs::types::{FileAttr, FileType, Permission, NeuroRights}; +use fs::root::RootEntry; +use fs::syscalls::FsHandle; + +fn main() { + let mut fs = FsHandle::new(); + + let neurorights = NeuroRights { + mental_privacy: true, + mental_integrity: true, + cognitive_liberty: true, + noncommercial_neural_data: true, + soulnontradeable: true, + dreamstate_sensitive: true, + forbid_decision_use: true, + forget_sla_hours: 24, + }; + + let attr = FileAttr { + name: "subjectA.neuroaln".into(), + owner: "subjectA".into(), + size_words: 0, + file_type: FileType::NeuroStream, + perm: Permission::Exclusive, + neurorights: Some(neurorights), + }; + + let entry = RootEntry { + attr, + start_block: 10, + block_count: 4, + }; + + match fs.create(entry) { + Ok(()) => println!("Created subjectA.neuroaln with sovereign protections."), + Err(e) => println!("Creation blocked by protection: {:?}", e), + } +} diff --git a/src/fs/class.rs b/src/fs/class.rs new file mode 100644 index 0000000..9c52551 --- /dev/null +++ b/src/fs/class.rs @@ -0,0 +1,46 @@ +use crate::fs::types::FileType; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum FileClass { + Root, + SovereignConfig, + Ledger, + NeuralModel, + StreamShard, + Biospec, + GenericData, +} + +pub fn classify(name: &str, ty: FileType) -> FileClass { + if ty == FileType::Root { return FileClass::Root; } + + if name.ends_with(".neurorights.json") + || name.ends_with(".stake.aln") + || name.ends_with("neuro-workspace.manifest.aln") + { + FileClass::SovereignConfig + } else if name.ends_with(".donutloop.aln") + || name.ends_with(".evolve.jsonl") + || name.ends_with(".answer.ndjson") + || name.ends_with(".nnet-loop.aln") + { + FileClass::Ledger + } else if name.ends_with(".nnetx") + || name.ends_with(".nnetw") + || name.ends_with(".nnetq") + { + FileClass::NeuralModel + } else if name.ends_with(".nstream.neuroaln") + || name.ends_with(".neuroaln") + || name.ends_with(".lifaln") + { + FileClass::StreamShard + } else if name.ends_with(".biospec.aln") + || name.ends_with(".ocpuenv") + || name.ends_with(".ocpulog") + { + FileClass::Biospec + } else { + FileClass::GenericData + } +} diff --git a/src/fs/root.rs b/src/fs/root.rs 
new file mode 100644
index 0000000..bd659a3
--- /dev/null
+++ b/src/fs/root.rs
@@ -0,0 +1,40 @@
+use std::collections::HashMap;
+use crate::fs::types::{FileAttr};
+
+#[derive(Debug)]
+pub struct RootEntry {
+    pub attr: FileAttr,
+    pub start_block: u32,
+    pub block_count: u32,
+}
+
+#[derive(Debug)]
+pub struct RootTable {
+    entries: HashMap<String, RootEntry>,
+}
+
+impl RootTable {
+    pub fn new() -> Self {
+        Self { entries: HashMap::new() }
+    }
+
+    pub fn create(&mut self, entry: RootEntry) -> Result<(), String> {
+        if self.entries.contains_key(&entry.attr.name) {
+            return Err("File already exists".into());
+        }
+        self.entries.insert(entry.attr.name.clone(), entry);
+        Ok(())
+    }
+
+    pub fn get(&self, name: &str) -> Option<&RootEntry> {
+        self.entries.get(name)
+    }
+
+    pub fn delete(&mut self, name: &str) -> Option<RootEntry> {
+        self.entries.remove(name)
+    }
+
+    pub fn list(&self) -> impl Iterator<Item = &RootEntry> {
+        self.entries.values()
+    }
+}
diff --git a/src/fs/syscalls.rs b/src/fs/syscalls.rs
new file mode 100644
index 0000000..70dc5dd
--- /dev/null
+++ b/src/fs/syscalls.rs
@@ -0,0 +1,43 @@
+use crate::fs::types::{FileAttr};
+use crate::fs::root::RootTable;
+use crate::fs::class::{FileClass, classify};
+
+pub struct FsHandle {
+    pub root: RootTable,
+    // backing store (disk image, device, or Couchbase / IPFS mapping)
+}
+
+impl FsHandle {
+    pub fn create(&mut self, attr: FileAttr) -> Result<(), String> {
+        let class = classify(&attr.name, attr.file_type);
+
+        // enforce neurorights and sovereignty invariants
+        if let Some(neuro) = &attr.neurorights {
+            if neuro.soulnontradeable && class == FileClass::NeuralModel {
+                return Err("Cannot store soulnontradeable data in a generic neural model file".into());
+            }
+        }
+
+        // allocate blocks, update root, data structures
+        // ...
+        Ok(())
+    }
+
+    pub fn read(&self, name: &str, offset_words: u32, len_words: u32) -> Result<Vec<u32>, String> {
+        // translate to block offsets like eXpFS, but with additional RoH checks before returning
+        // ...
+        Ok(Vec::new())
+    }
+
+    pub fn write(&mut self, name: &str, offset_words: u32, data: &[u32]) -> Result<(), String> {
+        // check RoH + Tsafe; log into .ocpulog & .donutloop.aln
+        // ...
+        Ok(())
+    }
+
+    pub fn delete(&mut self, name: &str) -> Result<(), String> {
+        // update free list, donutloop, ocpu logs
+        // ...
+        Ok(())
+    }
+}
diff --git a/src/fs/types.rs b/src/fs/types.rs
new file mode 100644
index 0000000..4b18637
--- /dev/null
+++ b/src/fs/types.rs
@@ -0,0 +1,38 @@
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum FileType {
+    Root,
+    Data,
+    Exec,
+    NeuroStream, // continuous neural/bioscale stream
+    BioSnapshot, // lifeforce / fatigue snapshots
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum Permission {
+    Exclusive,
+    Open,
+    SharedRead,
+    SharedWrite,
+}
+
+#[derive(Debug, Clone)]
+pub struct NeuroRights {
+    pub mental_privacy: bool,
+    pub mental_integrity: bool,
+    pub cognitive_liberty: bool,
+    pub noncommercial_neural_data: bool,
+    pub soulnontradeable: bool,
+    pub dreamstate_sensitive: bool,
+    pub forbid_decision_use: bool,
+    pub forget_sla_hours: u32,
+}
+
+#[derive(Debug, Clone)]
+pub struct FileAttr {
+    pub name: String,
+    pub owner: String,
+    pub size_words: u32,
+    pub file_type: FileType,
+    pub perm: Permission,
+    pub neurorights: Option<NeuroRights>,
+}
diff --git a/src/neurofs/layout.rs b/src/neurofs/layout.rs
new file mode 100644
index 0000000..84c9662
--- /dev/null
+++ b/src/neurofs/layout.rs
@@ -0,0 +1,47 @@
+#[derive(Debug, Clone)]
+pub enum NodeKind {
+    Directory,
+    File,
+}
+
+#[derive(Debug, Clone)]
+pub struct FsNode {
+    pub path: String,
+    pub kind: NodeKind,
+    pub description: Option<String>,
+}
+
+pub fn bootstrap_nexsm_layout() -> Vec<FsNode> {
+    vec![
+        FsNode {
+            path: ".github/workflows/bootstrap-nexsm.yml".into(),
+            kind: NodeKind::File,
+            description: Some("CI pipeline for Bootstrap v5 NEXSM dashboard".into()),
+        },
+        FsNode {
+            path: "frontend/".into(),
+            kind: NodeKind::Directory,
+            description: Some("UI source using Bootstrap 5".into()),
+        },
+        FsNode {
+            path: "frontend/package.json".into(),
+            kind: NodeKind::File,
+            description: Some("Frontend dependencies and scripts".into()),
+        },
+        FsNode {
+            path: "frontend/vite.config.js".into(),
+            kind: NodeKind::File,
+            description: Some("Build tool configuration".into()),
+        },
+        FsNode {
+            path: "public/".into(),
+            kind: NodeKind::Directory,
+            description: Some("Built UI/asset output".into()),
+        },
+        FsNode {
+            path: "server/".into(),
+            kind: NodeKind::Directory,
+            description: Some("Simulation backend (optional)".into()),
+        },
+    ]
+}
diff --git a/src/neurofs/pipeline.rs b/src/neurofs/pipeline.rs
new file mode 100644
index 0000000..2fb9c92
--- /dev/null
+++ b/src/neurofs/pipeline.rs
@@ -0,0 +1,55 @@
+#[derive(Debug, Clone)]
+pub struct Step {
+    pub name: String,
+    pub run: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct Job {
+    pub id: String,
+    pub runs_on: String,
+    pub working_directory: Option<String>,
+    pub steps: Vec<Step>,
+}
+
+#[derive(Debug, Clone)]
+pub struct Pipeline {
+    pub name: String,
+    pub triggers: Vec<String>,
+    pub jobs: Vec<Job>,
+}
+
+pub fn bootstrap_nexsm_pipeline() -> Pipeline {
+    Pipeline {
+        name: "Build & Deploy Bootstrap v5 NEXSM Dashboard".into(),
+        triggers: vec![
+            "push:main".into(),
+            "pull_request:main".into(),
+            "workflow_dispatch".into(),
+        ],
+        jobs: vec![
+            Job {
+                id: "build-bootstrap-ui".into(),
+                runs_on: "ubuntu-latest".into(),
+                working_directory: Some("frontend".into()),
+                steps: vec![
+                    Step { name: "Checkout source code".into(), run: "actions/checkout@v4".into() },
+                    Step { name: "Set up Node.js".into(), run: "actions/setup-node@v4 node=20.x".into() },
+                    Step { name: "Install dependencies".into(), run: "npm ci".into() },
+                    Step { name: "Build Bootstrap v5 assets".into(), run: "npm run build".into() },
+                    Step { name: "Verify built assets".into(), run: "ls -lh dist || ls -lh build || ls -lh ../public".into() },
+                    Step { name: "Upload artifact".into(), run: "actions/upload-artifact@v4".into() },
+                ],
+            },
+            Job {
+                id: "deploy-gh-pages".into(),
+                runs_on: "ubuntu-latest".into(),
+                working_directory: None,
+                steps: vec![
+                    Step { name: "Download built artifact".into(), run: "actions/download-artifact@v4".into() },
+                    Step { name: "Deploy to GitHub Pages".into(), run: "peaceiris/actions-gh-pages@v4".into() },
+                ],
+            },
+        ],
+    }
+}
diff --git a/src/neurofs/spec.rs b/src/neurofs/spec.rs
new file mode 100644
index 0000000..cb237c7
--- /dev/null
+++ b/src/neurofs/spec.rs
@@ -0,0 +1,73 @@
+use std::time::Duration;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum FsBlockClass {
+    Generic,
+    NeuroStream,
+    BioSpec,
+    Ledger,
+    Model,
+    SovereignConfig,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum FsFileType {
+    Root,
+    Data,
+    Exec,
+    NeuroStream,
+    BioSpec,
+    Ledger,
+    Model,
+    SovereignConfig,
+}
+
+#[derive(Debug, Clone)]
+pub struct NeurorightsFlags {
+    pub mental_privacy: bool,
+    pub mental_integrity: bool,
+    pub cognitive_liberty: bool,
+    pub noncommercial_neural_data: bool,
+    pub soulnontradeable: bool,
+    pub dreamstate_sensitive: bool,
+    pub forbid_decision_use: bool,
+    pub forget_sla_hours: u32,
+}
+
+#[derive(Debug, Clone)]
+pub struct SmartScope {
+    pub maxeffectsizel2: f32,
+    pub domains: Vec<String>,
+    pub expiry: Option<Duration>,
+    pub physioguard_enabled: bool,
+    pub revocable: bool,
+}
+
+#[derive(Debug, Clone)]
+pub struct EvolveRequirement {
+    pub required: bool,
+    pub scope_paths: Vec<String>,
+    pub roh_ceiling: f32,
+}
+
+#[derive(Debug, Clone)]
+pub struct ShardGovernance {
+    pub neurorights: NeurorightsFlags,
+    pub smart_scope: Option<SmartScope>,
+    pub evolve: EvolveRequirement,
+}
+
+#[derive(Debug, Clone)]
+pub struct ShardClassSpec {
+    pub file_type: FsFileType,
+    pub block_class: FsBlockClass,
+    pub extensions: Vec<String>,
+    pub description: String,
+    pub governance: ShardGovernance,
+}
+
+#[derive(Debug, Clone)]
+pub struct OrganicCpuFsSpec {
+    pub disk_block_words: u32,
+    pub shard_classes: Vec<ShardClassSpec>,
+}
diff --git a/src/neurofs/ui_asset_guard.rs b/src/neurofs/ui_asset_guard.rs
new file mode 100644
index 0000000..2738231
--- /dev/null
+++ b/src/neurofs/ui_asset_guard.rs
@@ -0,0 +1,44 @@
+use crate::neurofs::ui_asset_registry::{UiAssetRegistry, UiRelease};
+
+#[derive(Debug, Clone)]
+pub struct UiAssetGovernance {
+    pub requires_evolve_token: bool,
+    pub roh_ceiling: f32,
+    pub allowed_domains: Vec<String>, // e.g., ["dashboard", "metrics"]
+}
+
+#[derive(Debug)]
+pub enum UiAssetError {
+    MissingEvolveToken,
+    RoHExceedsCeiling,
+}
+
+pub struct UiAssetRegistryGuard {
+    registry: UiAssetRegistry,
+    governance: UiAssetGovernance,
+}
+
+impl UiAssetRegistryGuard {
+    pub fn new(registry: UiAssetRegistry, governance: UiAssetGovernance) -> Self {
+        Self { registry, governance }
+    }
+
+    pub fn publish_release(
+        &mut self,
+        cid_or_hash: impl Into<String>,
+        roh_estimate: f32,
+        evolve_token_present: bool,
+    ) -> Result<&UiRelease, UiAssetError> {
+        if self.governance.requires_evolve_token && !evolve_token_present {
+            return Err(UiAssetError::MissingEvolveToken);
+        }
+        if roh_estimate > self.governance.roh_ceiling {
+            return Err(UiAssetError::RoHExceedsCeiling);
+        }
+        Ok(self.registry.publish_release(cid_or_hash))
+    }
+
+    pub fn latest_release(&self) -> Option<&UiRelease> {
+        self.registry.latest_release()
+    }
+}
diff --git a/src/neurofs/ui_asset_registry.rs b/src/neurofs/ui_asset_registry.rs
new file mode 100644
index 0000000..7c7c723
--- /dev/null
+++ b/src/neurofs/ui_asset_registry.rs
@@ -0,0 +1,43 @@
+use std::time::{SystemTime, UNIX_EPOCH};
+
+#[derive(Debug, Clone)]
+pub struct UiRelease {
+    pub id: u64,
+    pub cid_or_hash: String,
+    pub timestamp: u64,
+}
+
+#[derive(Debug, Default)]
+pub struct UiAssetRegistry {
+    releases: Vec<UiRelease>,
+}
+
+impl UiAssetRegistry {
+    pub fn new() -> Self {
+        Self { releases: Vec::new() }
+    }
+
+    pub fn publish_release(&mut self, cid_or_hash: impl Into<String>) -> &UiRelease {
+        let ts = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_secs();
+        let id = (self.releases.len() as u64) + 1;
+
+        self.releases.push(UiRelease {
+            id,
+            cid_or_hash: cid_or_hash.into(),
+            timestamp: ts,
+        });
+
+        self.releases.last().unwrap()
+    }
+
+    pub fn latest_release(&self) -> Option<&UiRelease> {
+        self.releases.last()
+    }
+
+    pub fn all_releases(&self) -> impl Iterator<Item = &UiRelease> {
+        self.releases.iter()
+    }
+}