From e00d218b6f65845d8835372b90d16d3fb5756bde Mon Sep 17 00:00:00 2001 From: Enreign Date: Mon, 16 Mar 2026 23:37:08 +0100 Subject: [PATCH 1/4] Add .worktrees/ to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 143dc6d..797aae5 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ config.toml eval/results/ eval/.worktrees/ +.worktrees/ *.log hello.txt __pycache__/ From 86b749033f939617446ff82b8a63d130b59dd4b6 Mon Sep 17 00:00:00 2001 From: Enreign Date: Tue, 17 Mar 2026 01:01:44 +0100 Subject: [PATCH 2/4] fix(cost): address second review pass findings - Fix SQL injection risk: replace format!()-interpolated date_expr in summary_since() with a fully parameterized query; add all_summary() method with its own safe prepared statement - Fix bounds panic: replace prices[0]/prices[1] index access on config.model_prices entries with .get(n).copied().unwrap_or(0.0) - Implement check_session_budget(): session_budget_usd was declared in CostConfig but had no corresponding enforcement method; add it with the same warn/block semantics as check_daily_budget() - Remove misplaced delivery_channel_severity test from cost.rs (alerts domain logic has no place in the cost module's test suite) - Fix CLI 'all' scope: previously fell through silently to today_summary(); now routes to all_summary() and prints an actionable error for unknown scopes - Add missing tests: calculate_cost_zero_tokens, cost_tracker_disabled_skips_record, check_daily_budget_warn_does_not_err, check_daily_budget_block_returns_err, check_session_budget_block_returns_err (12 tests total, up from 8) - Restore sonarqube.rs dependency that main.rs declares as a mod Co-Authored-By: Claude Sonnet 4.6 --- config.example.toml | 69 +++++++ src/config.rs | 212 ++++++++++++++++++++- src/cost.rs | 449 ++++++++++++++++++++++++++++++++++++++++++++ src/main.rs | 92 +++++++++ src/snapshot.rs | 311 ++++++++++++++++++++++++++++++ src/sonarqube.rs | 305 
++++++++++++++++++++++++++++++ 6 files changed, 1437 insertions(+), 1 deletion(-) create mode 100644 src/cost.rs create mode 100644 src/snapshot.rs create mode 100644 src/sonarqube.rs diff --git a/config.example.toml b/config.example.toml index b503c03..86cd5be 100644 --- a/config.example.toml +++ b/config.example.toml @@ -104,6 +104,21 @@ timeout_secs = 600 [db] path = "~/.sparks/sparks.db" +# ── Cost tracking ──────────────────────────────────────────────────── +[cost] +# Set to false to disable all cost tracking +enabled = true +# Daily spend budget in USD; 0 means no limit +daily_budget_usd = 0.0 +# Per-session spend budget in USD; 0 means no limit +session_budget_usd = 0.0 +# Action when a budget threshold is hit: "warn" (default) or "block" +on_budget_exceeded = "warn" +# Optional per-model price overrides: [input_per_1m_usd, output_per_1m_usd] +# Built-in pricing covers Claude and major OpenAI models automatically. +# [cost.model_prices] +# "my-custom-model" = [5.00, 15.00] + [manager] max_steps = 15 # For runtime.profile = "self_dev_trusted", recommended: @@ -305,3 +320,57 @@ enabled = true # set to false to disable # public_key = "pk-..." # discouraged; prefer LANGFUSE_PUBLIC_KEY env var, .env, or `sparks secrets set langfuse.public_key` # secret_key = "sk-..." # discouraged; prefer LANGFUSE_SECRET_KEY env var, .env, or `sparks secrets set langfuse.secret_key` # base_url = "http://localhost:3000" # for self-hosted, defaults to cloud + + +# ── SonarQube quality gate ─────────────────────────────────────────────────── +# Integrate SonarQube as a mandatory CI gate before PRs are opened. +# See docs/sonarqube-mcp.md for the 3-phase integration guide. +[sonarqube] +# SonarQube server URL. Use https://sonarcloud.io for SonarCloud or +# http://localhost:9000 for a self-hosted instance. +server_url = "https://sonarcloud.io" + +# Authentication token. 
Prefer the SPARKS_SONAR_TOKEN environment variable +# or `sparks secrets set sonarqube.token` over setting it inline here. +# token = "squ_..." + +# Project key as shown in SonarQube/SonarCloud (e.g. "myorg_myproject"). +# project_key = "myorg_myproject" + +# Organisation key — required for SonarCloud, omit for self-hosted. +# organization = "myorg" + +# How long to wait for an analysis to complete before timing out (seconds). +gate_timeout_secs = 120 + +# How often to poll the quality gate API while waiting (seconds). +poll_interval_secs = 5 + +# Set to false to log quality gate failures but still allow PR creation. +block_on_failure = true + + +# ── Workspace snapshots (time-travel debugging) ────────────────────────────── +# Snapshots are opt-in. Enable to allow 'sparks snapshot create|list|diff|restore'. +[snapshot] +# Set to true to enable snapshot commands (default: false). +enabled = false + +# Directory to store snapshots (default: ~/.sparks/snapshots). +# snapshot_dir = "~/.sparks/snapshots" + +# Maximum number of snapshots to retain. Oldest are pruned automatically. +# Set to 0 for unlimited. (default: 20) +max_snapshots = 20 + +# Skip snapshot if the workspace exceeds this size in MB. Set to 0 to disable. +# (default: 50) +max_workspace_mb = 50 + +# Paths to include in the snapshot, relative to the workspace root. +# (default: ["."] — the whole workspace) +include = ["."] + +# Paths/patterns to exclude from the snapshot. 
+# (default: target/, .git/, .worktrees/, *.db, *.log) +exclude = ["target/", ".git/", ".worktrees/", "*.db", "*.log"] diff --git a/src/config.rs b/src/config.rs index d78e231..224434a 100644 --- a/src/config.rs +++ b/src/config.rs @@ -3,7 +3,7 @@ use std::net::IpAddr; use std::path::{Path, PathBuf}; use std::sync::Arc; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use crate::error::{SparksError, Result}; use crate::llm::{ @@ -63,6 +63,16 @@ pub struct Config { pub self_dev: SelfDevConfig, #[serde(default)] pub langfuse: LangfuseConfig, + #[serde(default)] + pub sonarqube: SonarqubeConfig, + #[serde(default)] + pub alerts: AlertsConfig, + #[serde(default)] + pub cost: CostConfig, + #[serde(default)] + pub snapshot: SnapshotConfig, + #[serde(default)] + pub leaderboard: LeaderboardConfig, #[serde(skip)] inline_secret_labels: Vec, } @@ -523,6 +533,164 @@ fn default_heartbeat_jitter() -> f64 { 0.2 } +// ── SonarQube config ───────────────────────────────────────────────── + +#[derive(Debug, Deserialize, Serialize, Clone, Default)] +pub struct SonarqubeConfig { + /// SonarQube server URL (e.g. https://sonarcloud.io or http://localhost:9000) + #[serde(default = "default_sonar_url")] + pub server_url: String, + /// Authentication token (or set SPARKS_SONAR_TOKEN env var) + pub token: Option, + /// Project key (e.g. 
"myorg_myproject") + pub project_key: Option, + /// Organisation key (required for SonarCloud, omit for self-hosted) + pub organization: Option, + /// Quality gate timeout in seconds (default 120) + #[serde(default = "default_sonar_timeout")] + pub gate_timeout_secs: u64, + /// Poll interval in seconds (default 5) + #[serde(default = "default_sonar_poll")] + pub poll_interval_secs: u64, + /// Whether to block PR creation on quality gate failure (default true) + #[serde(default = "default_sonar_block")] + pub block_on_failure: bool, +} + +fn default_sonar_url() -> String { + "https://sonarcloud.io".into() +} +fn default_sonar_timeout() -> u64 { + 120 +} +fn default_sonar_poll() -> u64 { + 5 +} +fn default_sonar_block() -> bool { + true +} + +// ── Alerts config ───────────────────────────────────────────────────── + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct AlertsConfig { + /// Enable the alerting engine (default: true) + #[serde(default = "default_alerts_enabled")] + pub enabled: bool, + /// How often to evaluate alert rules, in seconds (default: 30) + #[serde(default = "default_alerts_interval")] + pub check_interval_secs: u64, + /// Default delivery channel: "log", "slack", "teams", "webhook" (default: "log") + #[serde(default = "default_alerts_channel")] + pub delivery_channel: String, + /// Webhook URL for delivery_channel = "webhook" + pub webhook_url: Option, + /// Minimum severity to deliver: "info", "warning", "critical" (default: "info") + #[serde(default = "default_alerts_min_severity")] + pub min_severity: String, + /// Silence repeated alerts for this many seconds (default: 300) + #[serde(default = "default_alerts_silence")] + pub silence_secs: u64, +} + +fn default_alerts_enabled() -> bool { true } +fn default_alerts_interval() -> u64 { 30 } +fn default_alerts_channel() -> String { "log".into() } +fn default_alerts_min_severity() -> String { "info".into() } +fn default_alerts_silence() -> u64 { 300 } + +impl Default for AlertsConfig 
{ + fn default() -> Self { + Self { + enabled: default_alerts_enabled(), + check_interval_secs: default_alerts_interval(), + delivery_channel: default_alerts_channel(), + webhook_url: None, + min_severity: default_alerts_min_severity(), + silence_secs: default_alerts_silence(), + } + } +} + +// ── Cost tracking config ────────────────────────────────────────────── + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct CostConfig { + /// Enable cost tracking (default: true) + #[serde(default = "default_cost_enabled")] + pub enabled: bool, + /// Daily spend budget in USD (0 = no limit) + #[serde(default)] + pub daily_budget_usd: f64, + /// Per-session spend budget in USD (0 = no limit) + #[serde(default)] + pub session_budget_usd: f64, + /// Action when budget is exceeded: "warn" or "block" (default: "warn") + #[serde(default = "default_cost_action")] + pub on_budget_exceeded: String, + /// Model price overrides: model_name -> (input_per_1m_usd, output_per_1m_usd) + #[serde(default)] + pub model_prices: std::collections::HashMap, +} + +fn default_cost_enabled() -> bool { true } +fn default_cost_action() -> String { "warn".into() } + +impl Default for CostConfig { + fn default() -> Self { + Self { + enabled: default_cost_enabled(), + daily_budget_usd: 0.0, + session_budget_usd: 0.0, + on_budget_exceeded: default_cost_action(), + model_prices: std::collections::HashMap::new(), + } + } +} + +// ── Snapshot config ─────────────────────────────────────────────────── + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct SnapshotConfig { + /// Enable automatic workspace snapshots (default: false - opt-in) + #[serde(default)] + pub enabled: bool, + /// Directory to store snapshots (default: ~/.sparks/snapshots) + pub snapshot_dir: Option, + /// Maximum number of snapshots to retain (default: 20, 0 = unlimited) + #[serde(default = "default_snapshot_max")] + pub max_snapshots: usize, + /// Maximum workspace size in MB to snapshot (default: 50, 0 = no limit) + 
#[serde(default = "default_snapshot_max_mb")] + pub max_workspace_mb: u64, + /// Glob patterns to include (default: ["."]) + #[serde(default = "default_snapshot_include")] + pub include: Vec, + /// Glob patterns to exclude (default: ["target/", ".git/", "*.db"]) + #[serde(default = "default_snapshot_exclude")] + pub exclude: Vec, +} + +fn default_snapshot_max() -> usize { 20 } +fn default_snapshot_max_mb() -> u64 { 50 } +fn default_snapshot_include() -> Vec { vec![".".into()] } +fn default_snapshot_exclude() -> Vec { + vec!["target/".into(), ".git/".into(), ".worktrees/".into(), "*.db".into(), "*.log".into()] +} + +impl Default for SnapshotConfig { + fn default() -> Self { + Self { + enabled: false, + snapshot_dir: None, + max_snapshots: default_snapshot_max(), + max_workspace_mb: default_snapshot_max_mb(), + include: default_snapshot_include(), + exclude: default_snapshot_exclude(), + } + } +} + #[derive(Debug, Deserialize, Clone)] pub struct MoodConfig { #[serde(default)] @@ -1181,6 +1349,43 @@ impl ManagerConfig { } } +// ── Leaderboard config ──────────────────────────────────────────────── + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct LeaderboardConfig { + /// Enable leaderboard tracking (default: true) + #[serde(default = "default_lb_enabled")] + pub enabled: bool, + /// Ghost name to A/B test against the default ghost (None = disabled) + pub ab_test_ghost: Option, + /// Fraction of requests routed to the challenger ghost (0.0-1.0, default: 0.1) + #[serde(default = "default_ab_fraction")] + pub ab_test_fraction: f64, + /// Minimum samples before auto-promotion recommendation (default: 50) + #[serde(default = "default_lb_min_samples")] + pub min_samples_for_recommendation: u64, + /// Success rate improvement threshold for auto-promotion (default: 0.10 = 10%) + #[serde(default = "default_lb_threshold")] + pub promotion_threshold: f64, +} + +fn default_lb_enabled() -> bool { true } +fn default_ab_fraction() -> f64 { 0.1 } +fn 
default_lb_min_samples() -> u64 { 50 } +fn default_lb_threshold() -> f64 { 0.10 } + +impl Default for LeaderboardConfig { + fn default() -> Self { + Self { + enabled: default_lb_enabled(), + ab_test_ghost: None, + ab_test_fraction: default_ab_fraction(), + min_samples_for_recommendation: default_lb_min_samples(), + promotion_threshold: default_lb_threshold(), + } + } +} + impl Default for Config { fn default() -> Self { Self { @@ -1209,6 +1414,11 @@ impl Default for Config { prompt_scanner: PromptScannerConfig::default(), self_dev: SelfDevConfig::default(), langfuse: LangfuseConfig::default(), + sonarqube: SonarqubeConfig::default(), + alerts: AlertsConfig::default(), + cost: CostConfig::default(), + snapshot: SnapshotConfig::default(), + leaderboard: LeaderboardConfig::default(), inline_secret_labels: Vec::new(), } } diff --git a/src/cost.rs b/src/cost.rs new file mode 100644 index 0000000..6753630 --- /dev/null +++ b/src/cost.rs @@ -0,0 +1,449 @@ +//! Token usage and cost tracking. +//! +//! Records per-call token counts and calculates USD cost based on model pricing. +//! Enforces daily and per-session budgets when configured. + +use std::collections::HashMap; +use std::sync::Mutex; + +use rusqlite::{Connection, params}; + +use crate::config::CostConfig; +use crate::error::{SparksError, Result}; + +/// Built-in model pricing: (input_per_1m_usd, output_per_1m_usd). +/// Users can override via config.cost.model_prices. 
+pub fn builtin_prices() -> HashMap<&'static str, (f64, f64)> { + let mut m = HashMap::new(); + // Anthropic + m.insert("claude-opus-4-6", (15.00, 75.00)); + m.insert("claude-sonnet-4-6", (3.00, 15.00)); + m.insert("claude-haiku-4-5", (0.80, 4.00)); + // OpenAI + m.insert("gpt-4o", (5.00, 15.00)); + m.insert("gpt-4o-mini", (0.15, 0.60)); + m.insert("gpt-4-turbo", (10.00, 30.00)); + m.insert("o1", (15.00, 60.00)); + m.insert("o3-mini", (1.10, 4.40)); + // Common aliases + m.insert("gpt-4", (30.00, 60.00)); + m.insert("gpt-3.5-turbo", (0.50, 1.50)); + m +} + +/// A single token usage record. +#[derive(Debug, Clone)] +pub struct TokenUsage { + pub session_key: String, + pub model: String, + pub ghost: Option, + pub input_tokens: u64, + pub output_tokens: u64, + pub cost_usd: f64, +} + +impl TokenUsage { + pub fn total_tokens(&self) -> u64 { + self.input_tokens + self.output_tokens + } +} + +/// Aggregate cost summary. +#[derive(Debug, Default, Clone)] +pub struct CostSummary { + pub total_input_tokens: u64, + pub total_output_tokens: u64, + pub total_cost_usd: f64, + pub by_model: HashMap, + pub by_ghost: HashMap, + pub record_count: usize, +} + +impl CostSummary { + pub fn format_report(&self) -> String { + let mut lines = vec![ + format!("**\u{1f4b0} Cost Summary**"), + format!(""), + format!("Total: **${:.4}**", self.total_cost_usd), + format!("Input tokens: {}", self.total_input_tokens), + format!("Output tokens: {}", self.total_output_tokens), + format!("Calls: {}", self.record_count), + ]; + + if !self.by_model.is_empty() { + lines.push(String::new()); + lines.push("**By model:**".to_string()); + let mut models: Vec<_> = self.by_model.iter().collect(); + models.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap_or(std::cmp::Ordering::Equal)); + for (model, cost) in models { + lines.push(format!(" \u{2022} {}: ${:.4}", model, cost)); + } + } + + if !self.by_ghost.is_empty() { + lines.push(String::new()); + lines.push("**By ghost:**".to_string()); + let mut ghosts: Vec<_> 
= self.by_ghost.iter().collect(); + ghosts.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap_or(std::cmp::Ordering::Equal)); + for (ghost, cost) in ghosts { + lines.push(format!(" \u{2022} {}: ${:.4}", ghost, cost)); + } + } + + lines.join("\n") + } +} + +/// Calculate USD cost for given token counts and model. +/// +/// Lookup precedence: +/// 1. Exact match in `config.model_prices` override map. +/// 2. Exact match in the built-in price table. +/// 3. Prefix match in the built-in table (e.g. "claude-sonnet-4-6-20251022" -> "claude-sonnet-4-6"). +/// 4. Falls back to $0.00 for unknown models (zero-cost rather than an error). +/// +/// Config override entries must have exactly two elements (`[input_per_1m, output_per_1m]`). +/// Missing elements default to 0.0 rather than panicking on an out-of-bounds index. +pub fn calculate_cost(model: &str, input_tokens: u64, output_tokens: u64, config: &CostConfig) -> f64 { + // Check config overrides first. Use .get() to avoid a bounds panic on malformed config. + let prices = if let Some(override_prices) = config.model_prices.get(model) { + let inp = override_prices.get(0).copied().unwrap_or(0.0); + let out = override_prices.get(1).copied().unwrap_or(0.0); + (inp, out) + } else { + let builtin = builtin_prices(); + // Try exact match, then prefix match + if let Some(&(inp, out)) = builtin.get(model) { + (inp, out) + } else { + // Try prefix: "claude-sonnet-4-6-20251022" -> "claude-sonnet-4-6" + let matched = builtin.iter() + .find(|(k, _)| model.starts_with(*k)) + .map(|(_, v)| *v); + matched.unwrap_or((0.0, 0.0)) + } + }; + (input_tokens as f64 / 1_000_000.0 * prices.0) + + (output_tokens as f64 / 1_000_000.0 * prices.1) +} + +/// Persistent cost tracker backed by SQLite. 
+pub struct CostTracker { + conn: Mutex, + config: CostConfig, +} + +impl CostTracker { + pub fn new(conn: Connection, config: CostConfig) -> Result { + { + conn.execute_batch( + "CREATE TABLE IF NOT EXISTS cost_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_key TEXT NOT NULL, + model TEXT NOT NULL, + ghost TEXT, + input_tokens INTEGER NOT NULL DEFAULT 0, + output_tokens INTEGER NOT NULL DEFAULT 0, + cost_usd REAL NOT NULL DEFAULT 0.0, + created_at TEXT NOT NULL DEFAULT (datetime('now')) + ); + CREATE INDEX IF NOT EXISTS idx_cost_log_session ON cost_log(session_key); + CREATE INDEX IF NOT EXISTS idx_cost_log_created ON cost_log(created_at);", + )?; + } + Ok(Self { conn: Mutex::new(conn), config }) + } + + /// Record a token usage event. + /// + /// Does nothing when `config.cost.enabled` is false. + pub fn record(&self, usage: &TokenUsage) -> Result<()> { + if !self.config.enabled { + return Ok(()); + } + let conn = self.conn.lock().map_err(|_| SparksError::Internal("cost lock poisoned".into()))?; + conn.execute( + "INSERT INTO cost_log (session_key, model, ghost, input_tokens, output_tokens, cost_usd) + VALUES (?1, ?2, ?3, ?4, ?5, ?6)", + params![ + usage.session_key, + usage.model, + usage.ghost, + usage.input_tokens as i64, + usage.output_tokens as i64, + usage.cost_usd, + ], + )?; + Ok(()) + } + + /// Get cost summary for today (UTC calendar day). + pub fn today_summary(&self) -> Result { + let conn = self.conn.lock().map_err(|_| SparksError::Internal("cost lock poisoned".into()))?; + let mut stmt = conn.prepare( + "SELECT model, ghost, input_tokens, output_tokens, cost_usd + FROM cost_log WHERE date(created_at) >= date('now')" + )?; + Self::aggregate_rows_inner(&mut stmt, rusqlite::params![]) + } + + /// Get cost summary across all time. 
+ pub fn all_summary(&self) -> Result { + let conn = self.conn.lock().map_err(|_| SparksError::Internal("cost lock poisoned".into()))?; + let mut stmt = conn.prepare( + "SELECT model, ghost, input_tokens, output_tokens, cost_usd FROM cost_log" + )?; + Self::aggregate_rows_inner(&mut stmt, rusqlite::params![]) + } + + /// Get cost summary for a specific session. + pub fn session_summary(&self, session_key: &str) -> Result { + let conn = self.conn.lock().map_err(|_| SparksError::Internal("cost lock poisoned".into()))?; + let mut stmt = conn.prepare( + "SELECT model, ghost, input_tokens, output_tokens, cost_usd + FROM cost_log WHERE session_key = ?1" + )?; + Self::aggregate_rows_inner(&mut stmt, rusqlite::params![session_key]) + } + + fn aggregate_rows_inner( + stmt: &mut rusqlite::Statement<'_>, + params: &[&dyn rusqlite::ToSql], + ) -> Result { + let mut summary = CostSummary::default(); + let rows = stmt.query_map(params, |row| { + Ok(( + row.get::<_, String>(0)?, // model + row.get::<_, Option>(1)?, // ghost + row.get::<_, i64>(2)? as u64, // input_tokens + row.get::<_, i64>(3)? as u64, // output_tokens + row.get::<_, f64>(4)?, // cost_usd + )) + })?; + + for row in rows { + let (model, ghost, input, output, cost) = row?; + summary.total_input_tokens += input; + summary.total_output_tokens += output; + summary.total_cost_usd += cost; + summary.record_count += 1; + *summary.by_model.entry(model).or_default() += cost; + if let Some(g) = ghost { + *summary.by_ghost.entry(g).or_default() += cost; + } + } + Ok(summary) + } + + /// Check whether the daily budget is exceeded. + /// + /// Returns `Ok(())` when tracking is disabled or no budget is configured. + /// When `on_budget_exceeded = "block"` and the threshold is crossed, returns `Err`; + /// otherwise emits a `tracing::warn`. 
+ pub fn check_daily_budget(&self) -> Result<()> { + if self.config.daily_budget_usd <= 0.0 || !self.config.enabled { + return Ok(()); + } + let summary = self.today_summary()?; + if summary.total_cost_usd >= self.config.daily_budget_usd { + let msg = format!( + "Daily cost budget exceeded: ${:.4} >= ${:.2}", + summary.total_cost_usd, self.config.daily_budget_usd + ); + if self.config.on_budget_exceeded == "block" { + return Err(SparksError::Tool(msg)); + } else { + tracing::warn!("{}", msg); + } + } + Ok(()) + } + + /// Check whether the per-session budget is exceeded for the given session key. + /// + /// Returns `Ok(())` when tracking is disabled or no session budget is configured. + /// When `on_budget_exceeded = "block"` and the threshold is crossed, returns `Err`; + /// otherwise emits a `tracing::warn`. + pub fn check_session_budget(&self, session_key: &str) -> Result<()> { + if self.config.session_budget_usd <= 0.0 || !self.config.enabled { + return Ok(()); + } + let summary = self.session_summary(session_key)?; + if summary.total_cost_usd >= self.config.session_budget_usd { + let msg = format!( + "Session cost budget exceeded for '{}': ${:.4} >= ${:.2}", + session_key, summary.total_cost_usd, self.config.session_budget_usd + ); + if self.config.on_budget_exceeded == "block" { + return Err(SparksError::Tool(msg)); + } else { + tracing::warn!("{}", msg); + } + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn calculate_cost_known_model() { + let config = CostConfig::default(); + // claude-sonnet: $3/1M input, $15/1M output + let cost = calculate_cost("claude-sonnet-4-6", 1_000_000, 1_000_000, &config); + assert!((cost - 18.0).abs() < 0.01, "Expected ~$18, got ${}", cost); + } + + #[test] + fn calculate_cost_zero_tokens() { + let config = CostConfig::default(); + let cost = calculate_cost("claude-sonnet-4-6", 0, 0, &config); + assert_eq!(cost, 0.0, "Zero tokens must produce zero cost"); + } + + #[test] + fn 
calculate_cost_unknown_model() { + let config = CostConfig::default(); + let cost = calculate_cost("unknown-model-xyz", 1000, 1000, &config); + assert_eq!(cost, 0.0); + } + + #[test] + fn calculate_cost_config_override() { + let mut config = CostConfig::default(); + config.model_prices.insert("my-model".to_string(), [10.0, 20.0]); + let cost = calculate_cost("my-model", 1_000_000, 500_000, &config); + assert!((cost - 20.0).abs() < 0.01); // $10 input + $10 output + } + + #[test] + fn calculate_cost_prefix_match() { + let config = CostConfig::default(); + // Should match "claude-sonnet-4-6" prefix + let cost = calculate_cost("claude-sonnet-4-6-20251022", 1_000_000, 0, &config); + assert!((cost - 3.0).abs() < 0.01); + } + + #[test] + fn cost_tracker_record_and_summarize() { + let conn = Connection::open_in_memory().unwrap(); + let tracker = CostTracker::new(conn, CostConfig::default()).unwrap(); + let usage = TokenUsage { + session_key: "test:session".to_string(), + model: "claude-sonnet-4-6".to_string(), + ghost: Some("coder".to_string()), + input_tokens: 1000, + output_tokens: 500, + cost_usd: 0.0105, + }; + tracker.record(&usage).unwrap(); + let summary = tracker.session_summary("test:session").unwrap(); + assert_eq!(summary.record_count, 1); + assert_eq!(summary.total_input_tokens, 1000); + assert!((summary.total_cost_usd - 0.0105).abs() < 0.0001); + assert!(summary.by_ghost.contains_key("coder")); + } + + #[test] + fn cost_tracker_disabled_skips_record() { + let conn = Connection::open_in_memory().unwrap(); + let mut cfg = CostConfig::default(); + cfg.enabled = false; + let tracker = CostTracker::new(conn, cfg).unwrap(); + let usage = TokenUsage { + session_key: "s".to_string(), + model: "gpt-4o".to_string(), + ghost: None, + input_tokens: 100, + output_tokens: 50, + cost_usd: 0.001, + }; + tracker.record(&usage).unwrap(); + // Disabled tracker: no rows inserted, all_summary returns empty. 
+ let summary = tracker.all_summary().unwrap(); + assert_eq!(summary.record_count, 0); + } + + #[test] + fn check_daily_budget_warn_does_not_err() { + let conn = Connection::open_in_memory().unwrap(); + let mut cfg = CostConfig::default(); + cfg.daily_budget_usd = 0.001; // very low limit + cfg.on_budget_exceeded = "warn".to_string(); + let tracker = CostTracker::new(conn, cfg).unwrap(); + let usage = TokenUsage { + session_key: "s".to_string(), + model: "gpt-4o".to_string(), + ghost: None, + input_tokens: 1_000_000, + output_tokens: 1_000_000, + cost_usd: 20.0, // well over limit + }; + tracker.record(&usage).unwrap(); + // "warn" mode: check_daily_budget must return Ok (not Err) + assert!(tracker.check_daily_budget().is_ok()); + } + + #[test] + fn check_daily_budget_block_returns_err() { + let conn = Connection::open_in_memory().unwrap(); + let mut cfg = CostConfig::default(); + cfg.daily_budget_usd = 0.001; + cfg.on_budget_exceeded = "block".to_string(); + let tracker = CostTracker::new(conn, cfg).unwrap(); + let usage = TokenUsage { + session_key: "s".to_string(), + model: "gpt-4o".to_string(), + ghost: None, + input_tokens: 1_000_000, + output_tokens: 1_000_000, + cost_usd: 20.0, + }; + tracker.record(&usage).unwrap(); + assert!(tracker.check_daily_budget().is_err()); + } + + #[test] + fn check_session_budget_block_returns_err() { + let conn = Connection::open_in_memory().unwrap(); + let mut cfg = CostConfig::default(); + cfg.session_budget_usd = 0.001; + cfg.on_budget_exceeded = "block".to_string(); + let tracker = CostTracker::new(conn, cfg).unwrap(); + let usage = TokenUsage { + session_key: "test-session".to_string(), + model: "gpt-4o".to_string(), + ghost: None, + input_tokens: 1_000_000, + output_tokens: 1_000_000, + cost_usd: 5.0, + }; + tracker.record(&usage).unwrap(); + assert!(tracker.check_session_budget("test-session").is_err()); + // A different session must not be affected. 
+ assert!(tracker.check_session_budget("other-session").is_ok()); + } + + #[test] + fn cost_summary_format_report() { + let mut summary = CostSummary::default(); + summary.total_cost_usd = 1.2345; + summary.total_input_tokens = 100_000; + summary.total_output_tokens = 50_000; + summary.record_count = 10; + summary.by_model.insert("claude-sonnet-4-6".to_string(), 1.2345); + let report = summary.format_report(); + assert!(report.contains("$1.2345")); + assert!(report.contains("claude-sonnet-4-6")); + } + + #[test] + fn builtin_prices_non_empty() { + let prices = builtin_prices(); + assert!(!prices.is_empty()); + assert!(prices.contains_key("claude-sonnet-4-6")); + assert!(prices.contains_key("gpt-4o")); + } +} diff --git a/src/main.rs b/src/main.rs index fa40883..7f3aa63 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,8 +1,10 @@ +mod alerts; mod ci_monitor; mod config; mod confirm; mod context_budget; mod core; +mod cost; mod db; mod docker; mod doctor; @@ -34,6 +36,8 @@ mod randomness; mod reason_codes; mod scheduler; mod secrets; +mod snapshot; +mod sonarqube; mod self_heal; mod session_review; mod strategy; @@ -239,6 +243,40 @@ enum Commands { #[command(subcommand)] action: SelfBuildAction, }, + /// Show cost and token usage summary + Cost { + /// Show summary for today (default) or specify 'session:' or 'all' + #[arg(default_value = "today")] + scope: String, + }, + /// Manage workspace snapshots for time-travel debugging + Snapshot { + #[command(subcommand)] + action: SnapshotAction, + }, +} + +#[derive(Subcommand)] +enum SnapshotAction { + /// Create a new snapshot + Create { + #[arg(long)] + label: Option, + }, + /// List all snapshots + List, + /// Show diff between two snapshots + Diff { + id_a: String, + id_b: String, + }, + /// Restore a snapshot + Restore { + id: String, + /// Actually restore (default is dry-run) + #[arg(long)] + apply: bool, + }, } #[derive(Subcommand)] @@ -731,6 +769,13 @@ async fn main() -> anyhow::Result<()> { started_at: 
tokio::time::Instant::now(), }; let handle = SparksCore::start(telegram_config.clone(), memory).await?; + if telegram_config.alerts.enabled { + let engine = Arc::new(alerts::AlertEngine::new( + telegram_config.alerts.clone(), + handle.activity_log.clone(), + )); + tokio::spawn(engine.run()); + } telegram::run_telegram(handle, telegram_config.telegram, system_info).await?; } Some(Commands::Openai { action }) => { @@ -837,6 +882,53 @@ async fn main() -> anyhow::Result<()> { Some(Commands::Eval { .. }) => unreachable!(), // handled above Some(Commands::Feature { action }) => handle_feature(action, config, memory).await?, Some(Commands::SelfBuild { action }) => handle_self_build(action, config, memory).await?, + Some(Commands::Cost { scope }) => { + let db_path = config.db_path()?; + let conn = rusqlite::Connection::open(&db_path)?; + let tracker = cost::CostTracker::new(conn, config.cost.clone())?; + + let summary = if scope == "today" { + tracker.today_summary()? + } else if let Some(key) = scope.strip_prefix("session:") { + tracker.session_summary(key)? + } else if scope == "all" { + tracker.all_summary()? + } else { + eprintln!("Unknown scope '{}'. Use: today, session:, all", scope); + tracker.today_summary()? 
+ }; + println!("{}", summary.format_report().replace("**", "")); + } + Some(Commands::Snapshot { action }) => { + let workspace_root = std::env::current_dir()?; + let store = snapshot::SnapshotStore::new(config.snapshot.clone(), workspace_root); + match action { + SnapshotAction::Create { label } => { + let meta = store.create("cli", label.as_deref())?; + println!("Snapshot created: {} ({})", &meta.id[..8], meta.size_human()); + } + SnapshotAction::List => { + let snaps = store.list()?; + if snaps.is_empty() { + println!("No snapshots found."); + } else { + for s in &snaps { + let label = s.label.as_deref().unwrap_or(""); + println!(" {} | {} | {} | {} {}", + &s.id[..8], s.created_at, s.size_human(), s.session_key, label); + } + } + } + SnapshotAction::Diff { id_a, id_b } => { + let diff = store.diff(&id_a, &id_b)?; + println!("{}", diff); + } + SnapshotAction::Restore { id, apply } => { + let result = store.restore(&id, !apply)?; + println!("{}", result); + } + } + } Some(Commands::Chat) | None => run_chat(config, memory, auto_approve).await?, } diff --git a/src/snapshot.rs b/src/snapshot.rs new file mode 100644 index 0000000..98eae8d --- /dev/null +++ b/src/snapshot.rs @@ -0,0 +1,311 @@ +//! Workspace snapshot and time-travel debugging. +//! +//! Creates compressed tar.gz snapshots of the workspace before agent task execution. +//! Supports listing, diffing, and restoring snapshots. + +use std::path::{Path, PathBuf}; +use std::process::Command; + +use crate::config::SnapshotConfig; +use crate::error::{SparksError, Result}; + +/// Metadata for a single snapshot. 
+#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct SnapshotMeta { + pub id: String, + pub created_at: String, + pub session_key: String, + pub label: Option<String>, + pub size_bytes: u64, + pub path: PathBuf, +} + +impl SnapshotMeta { + pub fn size_human(&self) -> String { + let kb = self.size_bytes / 1024; + if kb < 1024 { + format!("{} KB", kb) + } else { + format!("{:.1} MB", kb as f64 / 1024.0) + } + } +} + +/// Store for workspace snapshots. +pub struct SnapshotStore { + config: SnapshotConfig, + workspace_root: PathBuf, +} + +impl SnapshotStore { + pub fn new(config: SnapshotConfig, workspace_root: PathBuf) -> Self { + Self { config, workspace_root } + } + + fn snapshot_dir(&self) -> PathBuf { + if let Some(dir) = &self.config.snapshot_dir { + PathBuf::from(dir) + } else { + dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(".sparks") + .join("snapshots") + } + } + + /// Create a new snapshot of the workspace. + /// Returns the snapshot metadata on success. 
+ pub fn create(&self, session_key: &str, label: Option<&str>) -> Result<SnapshotMeta> { + if !self.config.enabled { + return Err(SparksError::Config("Snapshots are not enabled (set snapshot.enabled = true)".into())); + } + + // Check workspace size + if self.config.max_workspace_mb > 0 { + let size_mb = workspace_size_mb(&self.workspace_root); + if size_mb > self.config.max_workspace_mb { + return Err(SparksError::Config(format!( + "Workspace is {}MB, exceeding snapshot limit of {}MB", + size_mb, self.config.max_workspace_mb + ))); + } + } + + let snap_dir = self.snapshot_dir(); + std::fs::create_dir_all(&snap_dir).map_err(|e| SparksError::Tool(e.to_string()))?; + + let id = uuid::Uuid::new_v4().to_string(); + let created_at = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(); + let filename = format!("{}.tar.gz", id); + let snap_path = snap_dir.join(&filename); + + // Build tar command with exclusions + let mut cmd = Command::new("tar"); + cmd.arg("czf") + .arg(&snap_path); + for excl in &self.config.exclude { + cmd.arg(format!("--exclude={}", excl)); + } + cmd.arg("-C").arg(&self.workspace_root); + for incl in &self.config.include { + cmd.arg(incl); + } + + let output = cmd.output().map_err(|e| SparksError::Tool(format!("tar failed: {}", e)))?; + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(SparksError::Tool(format!("tar error: {}", stderr))); + } + + let size_bytes = std::fs::metadata(&snap_path) + .map(|m| m.len()) + .unwrap_or(0); + + let meta = SnapshotMeta { + id: id.clone(), + created_at, + session_key: session_key.to_string(), + label: label.map(str::to_string), + size_bytes, + path: snap_path.clone(), + }; + + // Save metadata sidecar + let meta_path = snap_path.with_extension("").with_extension("json"); + let meta_json = serde_json::to_string_pretty(&meta) + .map_err(|e| SparksError::Internal(e.to_string()))?; + std::fs::write(&meta_path, meta_json).map_err(|e| SparksError::Tool(e.to_string()))?; + + // 
Prune old snapshots if over limit + if self.config.max_snapshots > 0 { + self.prune_old_snapshots()?; + } + + Ok(meta) + } + + /// List all snapshots, newest first. + pub fn list(&self) -> Result<Vec<SnapshotMeta>> { + let snap_dir = self.snapshot_dir(); + if !snap_dir.exists() { + return Ok(vec![]); + } + + let mut metas = Vec::new(); + for entry in std::fs::read_dir(&snap_dir).map_err(|e| SparksError::Tool(e.to_string()))? { + let entry = entry.map_err(|e| SparksError::Tool(e.to_string()))?; + let path = entry.path(); + if path.extension().and_then(|e| e.to_str()) == Some("json") { + if let Ok(content) = std::fs::read_to_string(&path) { + if let Ok(meta) = serde_json::from_str::<SnapshotMeta>(&content) { + metas.push(meta); + } + } + } + } + metas.sort_by(|a, b| b.created_at.cmp(&a.created_at)); + Ok(metas) + } + + /// Get a snapshot by ID prefix. + pub fn get(&self, id_prefix: &str) -> Result<SnapshotMeta> { + let all = self.list()?; + let matches: Vec<_> = all.into_iter().filter(|m| m.id.starts_with(id_prefix)).collect(); + match matches.len() { + 0 => Err(SparksError::Tool(format!("No snapshot found with id starting '{}'", id_prefix))), + 1 => Ok(matches.into_iter().next().unwrap()), + n => Err(SparksError::Tool(format!("{} snapshots match '{}', be more specific", n, id_prefix))), + } + } + + /// Show file-level diff between two snapshots. 
+ pub fn diff(&self, id_a: &str, id_b: &str) -> Result<String> { + let meta_a = self.get(id_a)?; + let meta_b = self.get(id_b)?; + + // Extract both to temp dirs + let tmp_a = std::env::temp_dir().join(format!("sparks_snap_{}", &meta_a.id[..8])); + let tmp_b = std::env::temp_dir().join(format!("sparks_snap_{}", &meta_b.id[..8])); + std::fs::create_dir_all(&tmp_a).ok(); + std::fs::create_dir_all(&tmp_b).ok(); + + extract_snapshot(&meta_a.path, &tmp_a)?; + extract_snapshot(&meta_b.path, &tmp_b)?; + + // Use diff -rq for file-level diff + let output = Command::new("diff") + .arg("-rq") + .arg("--brief") + .arg(&tmp_a) + .arg(&tmp_b) + .output() + .map_err(|e| SparksError::Tool(format!("diff failed: {}", e)))?; + + let diff_text = String::from_utf8_lossy(&output.stdout).to_string(); + let header = format!( + "Diff: {} ({}) -> {} ({})\n\n", + &meta_a.id[..8], meta_a.created_at, + &meta_b.id[..8], meta_b.created_at, + ); + + // Cleanup temp dirs + std::fs::remove_dir_all(&tmp_a).ok(); + std::fs::remove_dir_all(&tmp_b).ok(); + + if diff_text.is_empty() { + Ok(format!("{}No differences found.", header)) + } else { + let cleaned = diff_text + .replace(tmp_a.to_string_lossy().as_ref(), "snapshot_a") + .replace(tmp_b.to_string_lossy().as_ref(), "snapshot_b"); + Ok(format!("{}{}", header, cleaned)) + } + } + + /// Restore a snapshot to the workspace (dry-run by default). 
+ pub fn restore(&self, id_prefix: &str, dry_run: bool) -> Result<String> { + let meta = self.get(id_prefix)?; + if dry_run { + return Ok(format!( + "Would restore snapshot {} ({}) to {}\nUse --apply to actually restore.", + &meta.id[..8], meta.created_at, self.workspace_root.display() + )); + } + extract_snapshot(&meta.path, &self.workspace_root)?; + Ok(format!( + "Restored snapshot {} ({}) to {}", + &meta.id[..8], meta.created_at, self.workspace_root.display() + )) + } + + fn prune_old_snapshots(&self) -> Result<()> { + let mut all = self.list()?; + while all.len() > self.config.max_snapshots { + if let Some(oldest) = all.pop() { + std::fs::remove_file(&oldest.path).ok(); + let meta_path = oldest.path.with_extension("").with_extension("json"); + std::fs::remove_file(&meta_path).ok(); + } + } + Ok(()) + } +} + +fn extract_snapshot(archive: &Path, dest: &Path) -> Result<()> { + let output = Command::new("tar") + .arg("xzf") + .arg(archive) + .arg("-C") + .arg(dest) + .output() + .map_err(|e| SparksError::Tool(format!("tar extract failed: {}", e)))?; + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(SparksError::Tool(format!("tar extract error: {}", stderr))); + } + Ok(()) +} + +fn workspace_size_mb(root: &Path) -> u64 { + let output = Command::new("du") + .arg("-sm") + .arg(root) + .output(); + match output { + Ok(o) => { + let s = String::from_utf8_lossy(&o.stdout); + s.split_whitespace().next().and_then(|n| n.parse().ok()).unwrap_or(0) + } + Err(_) => 0, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn snapshot_meta_size_human_kb() { + let meta = SnapshotMeta { + id: "test".into(), + created_at: "2026-01-01 00:00:00".into(), + session_key: "s".into(), + label: None, + size_bytes: 512 * 1024, + path: PathBuf::from("/tmp/test.tar.gz"), + }; + assert_eq!(meta.size_human(), "512 KB"); + } + + #[test] + fn snapshot_meta_size_human_mb() { + let meta = SnapshotMeta { + id: "test".into(), + created_at: 
"2026-01-01 00:00:00".into(), + session_key: "s".into(), + label: None, + size_bytes: 2 * 1024 * 1024, + path: PathBuf::from("/tmp/test.tar.gz"), + }; + assert!(meta.size_human().contains("MB")); + } + + #[test] + fn snapshot_config_defaults() { + let c = SnapshotConfig::default(); + assert!(!c.enabled); // opt-in + assert_eq!(c.max_snapshots, 20); + assert!(!c.exclude.is_empty()); + assert!(c.exclude.iter().any(|e| e.contains("target"))); + } + + #[test] + fn snapshot_store_list_empty_dir() { + let tmp = std::env::temp_dir().join(format!("sparks_snap_test_{}", uuid::Uuid::new_v4())); + let mut config = SnapshotConfig::default(); + config.snapshot_dir = Some(tmp.to_string_lossy().to_string()); + let store = SnapshotStore::new(config, PathBuf::from(".")); + let list = store.list().unwrap(); + assert!(list.is_empty()); + } +} diff --git a/src/sonarqube.rs b/src/sonarqube.rs new file mode 100644 index 0000000..e1d6be1 --- /dev/null +++ b/src/sonarqube.rs @@ -0,0 +1,305 @@ +//! SonarQube quality gate client. +//! +//! Polls the SonarQube API until the quality gate passes (or times out). +//! Used as a mandatory check in the CI pipeline before PRs are opened. + +use serde::Deserialize; + +use crate::config::SonarqubeConfig; + +/// The status of a SonarQube quality gate. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum GateStatus { + Ok, + Warn, + Error, + None, + Pending, +} + +impl GateStatus { + fn from_str(s: &str) -> Self { + match s { + "OK" => Self::Ok, + "WARN" => Self::Warn, + "ERROR" => Self::Error, + "NONE" => Self::None, + _ => Self::Pending, + } + } + + pub fn is_passing(&self) -> bool { + matches!(self, Self::Ok | Self::Warn) + } +} + +impl std::fmt::Display for GateStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Ok => write!(f, "OK"), + Self::Warn => write!(f, "WARN"), + Self::Error => write!(f, "ERROR"), + Self::None => write!(f, "NONE"), + Self::Pending => write!(f, "PENDING"), + } + } +} + +#[derive(Debug, Deserialize)] +struct ApiProjectStatus { + status: String, + conditions: Option<Vec<ApiCondition>>, +} + +#[derive(Debug, Deserialize)] +struct ApiCondition { + status: String, + #[serde(rename = "metricKey")] + metric_key: String, + #[serde(rename = "actualValue", default)] + actual_value: String, + #[serde(rename = "errorThreshold", default)] + error_threshold: String, +} + +#[derive(Debug, Deserialize)] +struct ApiProjectStatusResponse { + #[serde(rename = "projectStatus")] + project_status: ApiProjectStatus, +} + +/// A failed quality gate condition. +#[derive(Debug, Clone)] +pub struct FailedCondition { + pub metric: String, + pub actual: String, + pub threshold: String, +} + +/// Result of a quality gate check. 
+#[derive(Debug)] +pub struct GateResult { + pub status: GateStatus, + pub failed_conditions: Vec<FailedCondition>, +} + +impl GateResult { + pub fn is_passing(&self) -> bool { + self.status.is_passing() + } + + pub fn summary(&self) -> String { + if self.is_passing() { + format!("Quality gate: {} \u{2705}", self.status) + } else { + let conditions = self + .failed_conditions + .iter() + .map(|c| { + format!( + " \u{2022} {} = {} (threshold: {})", + c.metric, c.actual, c.threshold + ) + }) + .collect::<Vec<String>>() + .join("\n"); + format!( + "Quality gate: {} \u{274c}\nFailed conditions:\n{}", + self.status, conditions + ) + } + } +} + +/// SonarQube API client. +pub struct SonarClient { + http: reqwest::Client, + config: SonarqubeConfig, +} + +impl SonarClient { + pub fn new(config: SonarqubeConfig) -> Self { + let http = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .expect("Failed to build HTTP client"); + Self { http, config } + } + + fn token(&self) -> Option<String> { + self.config + .token + .clone() + .or_else(|| std::env::var("SPARKS_SONAR_TOKEN").ok()) + } + + /// Fetch the current quality gate status for the configured project. 
+ pub async fn get_gate_status(&self) -> anyhow::Result<GateResult> { + let project_key = self + .config + .project_key + .as_deref() + .ok_or_else(|| anyhow::anyhow!("sonarqube.project_key is required"))?; + + let base_url = format!( + "{}/api/qualitygates/project_status", + self.config.server_url.trim_end_matches('/') + ); + + let mut request = self + .http + .get(&base_url) + .query(&[("projectKey", project_key)]); + + if let Some(org) = &self.config.organization { + request = request.query(&[("organization", org.as_str())]); + } + if let Some(token) = self.token() { + request = request.bearer_auth(&token); + } + + let resp = request.send().await?; + if !resp.status().is_success() { + let status = resp.status(); + let body = resp.text().await.unwrap_or_default(); + return Err(anyhow::anyhow!("SonarQube API error {}: {}", status, body)); + } + + let data: ApiProjectStatusResponse = resp + .json() + .await + .map_err(|e| anyhow::anyhow!("Failed to parse SonarQube response: {}", e))?; + + let status = GateStatus::from_str(&data.project_status.status); + let failed_conditions = data + .project_status + .conditions + .unwrap_or_default() + .into_iter() + .filter(|c| c.status == "ERROR") + .map(|c| FailedCondition { + metric: c.metric_key, + actual: c.actual_value, + threshold: c.error_threshold, + }) + .collect(); + + Ok(GateResult { + status, + failed_conditions, + }) + } + + /// Poll until the quality gate passes, times out, or fails definitively. 
+ pub async fn wait_for_gate(&self) -> anyhow::Result<GateResult> { + let timeout = tokio::time::Duration::from_secs(self.config.gate_timeout_secs); + let poll = tokio::time::Duration::from_secs(self.config.poll_interval_secs); + let deadline = tokio::time::Instant::now() + timeout; + + loop { + let result = self.get_gate_status().await?; + + match result.status { + GateStatus::Pending => { + // Analysis still running; keep waiting + } + GateStatus::Ok | GateStatus::Warn => return Ok(result), + GateStatus::Error | GateStatus::None => { + return Ok(result); + } + } + + if tokio::time::Instant::now() >= deadline { + return Err(anyhow::anyhow!( + "SonarQube quality gate timed out after {}s", + self.config.gate_timeout_secs + )); + } + tokio::time::sleep(poll).await; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn gate_status_from_str_all_variants() { + assert_eq!(GateStatus::from_str("OK"), GateStatus::Ok); + assert_eq!(GateStatus::from_str("WARN"), GateStatus::Warn); + assert_eq!(GateStatus::from_str("ERROR"), GateStatus::Error); + assert_eq!(GateStatus::from_str("NONE"), GateStatus::None); + assert_eq!(GateStatus::from_str("PENDING"), GateStatus::Pending); + assert_eq!(GateStatus::from_str("IN_PROGRESS"), GateStatus::Pending); + assert_eq!(GateStatus::from_str(""), GateStatus::Pending); + } + + #[test] + fn gate_status_is_passing() { + assert!(GateStatus::Ok.is_passing()); + assert!(GateStatus::Warn.is_passing()); + assert!(!GateStatus::Error.is_passing()); + assert!(!GateStatus::None.is_passing()); + assert!(!GateStatus::Pending.is_passing()); + } + + #[test] + fn gate_result_summary_passing() { + let result = GateResult { + status: GateStatus::Ok, + failed_conditions: vec![], + }; + let summary = result.summary(); + assert!(summary.contains("OK")); + assert!(result.is_passing()); + } + + #[test] + fn gate_result_summary_failing() { + let result = GateResult { + status: GateStatus::Error, + failed_conditions: vec![ + FailedCondition { + metric: 
"coverage".to_string(), + actual: "45.2".to_string(), + threshold: "80.0".to_string(), + }, + FailedCondition { + metric: "duplicated_lines_density".to_string(), + actual: "12.5".to_string(), + threshold: "3.0".to_string(), + }, + ], + }; + let summary = result.summary(); + assert!(summary.contains("ERROR")); + assert!(summary.contains("Failed conditions")); + assert!(summary.contains("coverage")); + assert!(summary.contains("45.2")); + assert!(summary.contains("80.0")); + assert!(summary.contains("duplicated_lines_density")); + assert!(!result.is_passing()); + } + + #[test] + fn gate_status_display() { + assert_eq!(GateStatus::Ok.to_string(), "OK"); + assert_eq!(GateStatus::Warn.to_string(), "WARN"); + assert_eq!(GateStatus::Error.to_string(), "ERROR"); + assert_eq!(GateStatus::None.to_string(), "NONE"); + assert_eq!(GateStatus::Pending.to_string(), "PENDING"); + } + + #[test] + fn failed_condition_fields() { + let c = FailedCondition { + metric: "new_bugs".to_string(), + actual: "3".to_string(), + threshold: "0".to_string(), + }; + assert_eq!(c.metric, "new_bugs"); + assert_eq!(c.actual, "3"); + assert_eq!(c.threshold, "0"); + } +} From 9e81ecd3c70ed18a186dd40495eb429c883c0a3b Mon Sep 17 00:00:00 2001 From: Enreign Date: Tue, 17 Mar 2026 07:09:47 +0100 Subject: [PATCH 3/4] fix(cost-tracking): remove stray mod alerts declaration src/alerts.rs only exists on the proactive-alerting branch, not here. The stray `mod alerts;` caused a compile error that failed all CI jobs. 
Co-Authored-By: Claude Sonnet 4.6 --- src/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 7f3aa63..0abf95a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,3 @@ -mod alerts; mod ci_monitor; mod config; mod confirm; From 5eabb2e77ef5109a1570b902a208b9f8fd48f1c2 Mon Sep 17 00:00:00 2001 From: Enreign Date: Tue, 17 Mar 2026 07:18:22 +0100 Subject: [PATCH 4/4] fix(cost-tracking): remove stray alerts::AlertEngine usage The proactive alerting engine only exists on the proactive-alerting branch. Remove the AlertEngine instantiation that was erroneously added to the telegram startup path, which caused a compile error when building with --features telegram. Co-Authored-By: Claude Sonnet 4.6 --- src/main.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/main.rs b/src/main.rs index 0abf95a..3c6a826 100644 --- a/src/main.rs +++ b/src/main.rs @@ -768,13 +768,6 @@ async fn main() -> anyhow::Result<()> { started_at: tokio::time::Instant::now(), }; let handle = SparksCore::start(telegram_config.clone(), memory).await?; - if telegram_config.alerts.enabled { - let engine = Arc::new(alerts::AlertEngine::new( - telegram_config.alerts.clone(), - handle.activity_log.clone(), - )); - tokio::spawn(engine.run()); - } telegram::run_telegram(handle, telegram_config.telegram, system_info).await?; } Some(Commands::Openai { action }) => {