diff --git a/interface/src/api/client.ts b/interface/src/api/client.ts
index 8a320a265..0b2c95f17 100644
--- a/interface/src/api/client.ts
+++ b/interface/src/api/client.ts
@@ -494,6 +494,7 @@ export interface CortexSection {
bulletin_interval_secs: number;
bulletin_max_words: number;
bulletin_max_turns: number;
+ auto_display_name: boolean;
}
export interface CoalesceSection {
@@ -570,6 +571,7 @@ export interface CortexUpdate {
bulletin_interval_secs?: number;
bulletin_max_words?: number;
bulletin_max_turns?: number;
+ auto_display_name?: boolean;
}
export interface CoalesceUpdate {
diff --git a/interface/src/routes/AgentConfig.tsx b/interface/src/routes/AgentConfig.tsx
index 8655eb339..8658db30f 100644
--- a/interface/src/routes/AgentConfig.tsx
+++ b/interface/src/routes/AgentConfig.tsx
@@ -640,6 +640,12 @@ function ConfigSectionEditor({ sectionId, label, description, detail, config, on
case "cortex":
return (
+ handleChange("auto_display_name", v)}
+ />
diff --git a/interface/src/ui/forms/SwitchField.tsx b/interface/src/ui/forms/SwitchField.tsx
index dacde8874..d7be7ba32 100644
--- a/interface/src/ui/forms/SwitchField.tsx
+++ b/interface/src/ui/forms/SwitchField.tsx
@@ -40,7 +40,7 @@ export const SwitchField: React.FC = ({
diff --git a/src/agent/channel.rs b/src/agent/channel.rs
index 7c3c08193..285a3dc6e 100644
--- a/src/agent/channel.rs
+++ b/src/agent/channel.rs
@@ -282,11 +282,10 @@ impl Channel {
_ = tokio::time::sleep(sleep_duration), if next_deadline.is_some() => {
let now = tokio::time::Instant::now();
// Check coalesce deadline
- if self.coalesce_deadline.is_some_and(|d| d <= now) {
- if let Err(error) = self.flush_coalesce_buffer().await {
+ if self.coalesce_deadline.is_some_and(|d| d <= now)
+ && let Err(error) = self.flush_coalesce_buffer().await {
tracing::error!(%error, channel_id = %self.id, "error flushing coalesce buffer on deadline");
}
- }
// Check retrigger deadline
if self.retrigger_deadline.is_some_and(|d| d <= now) {
self.flush_pending_retrigger().await;
@@ -391,7 +390,10 @@ impl Channel {
if messages.len() == 1 {
// Single message - process normally
- let message = messages.into_iter().next().ok_or_else(|| anyhow::anyhow!("empty iterator after length check"))?;
+ let message = messages
+ .into_iter()
+ .next()
+ .ok_or_else(|| anyhow::anyhow!("empty iterator after length check"))?;
self.handle_message(message).await
} else {
// Multiple messages - batch them
@@ -462,10 +464,11 @@ impl Channel {
.get("telegram_chat_type")
.and_then(|v| v.as_str())
});
- self.conversation_context = Some(
- prompt_engine
- .render_conversation_context(&first.source, server_name, channel_name)?,
- );
+ self.conversation_context = Some(prompt_engine.render_conversation_context(
+ &first.source,
+ server_name,
+ channel_name,
+ )?);
}
// Persist each message to conversation log (individual audit trail)
@@ -605,8 +608,11 @@ impl Channel {
let browser_enabled = rc.browser_config.load().enabled;
let web_search_enabled = rc.brave_search_key.load().is_some();
let opencode_enabled = rc.opencode.load().enabled;
- let worker_capabilities =
- prompt_engine.render_worker_capabilities(browser_enabled, web_search_enabled, opencode_enabled)?;
+ let worker_capabilities = prompt_engine.render_worker_capabilities(
+ browser_enabled,
+ web_search_enabled,
+ opencode_enabled,
+ )?;
let status_text = {
let status = self.state.status_block.read().await;
@@ -712,10 +718,11 @@ impl Channel {
.get("telegram_chat_type")
.and_then(|v| v.as_str())
});
- self.conversation_context = Some(
- prompt_engine
- .render_conversation_context(&message.source, server_name, channel_name)?,
- );
+ self.conversation_context = Some(prompt_engine.render_conversation_context(
+ &message.source,
+ server_name,
+ channel_name,
+ )?);
}
let system_prompt = self.build_system_prompt().await?;
@@ -802,8 +809,11 @@ impl Channel {
let browser_enabled = rc.browser_config.load().enabled;
let web_search_enabled = rc.brave_search_key.load().is_some();
let opencode_enabled = rc.opencode.load().enabled;
- let worker_capabilities = prompt_engine
- .render_worker_capabilities(browser_enabled, web_search_enabled, opencode_enabled)?;
+ let worker_capabilities = prompt_engine.render_worker_capabilities(
+ browser_enabled,
+ web_search_enabled,
+ opencode_enabled,
+ )?;
let status_text = {
let status = self.state.status_block.read().await;
@@ -814,17 +824,16 @@ impl Channel {
let empty_to_none = |s: String| if s.is_empty() { None } else { Some(s) };
- prompt_engine
- .render_channel_prompt(
- empty_to_none(identity_context),
- empty_to_none(memory_bulletin.to_string()),
- empty_to_none(skills_prompt),
- worker_capabilities,
- self.conversation_context.clone(),
- empty_to_none(status_text),
- None, // coalesce_hint - only set for batched messages
- available_channels,
- )
+ prompt_engine.render_channel_prompt(
+ empty_to_none(identity_context),
+ empty_to_none(memory_bulletin.to_string()),
+ empty_to_none(skills_prompt),
+ worker_capabilities,
+ self.conversation_context.clone(),
+ empty_to_none(status_text),
+ None, // coalesce_hint - only set for batched messages
+ available_channels,
+ )
}
/// Register per-turn tools, run the LLM agentic loop, and clean up.
@@ -1147,8 +1156,10 @@ impl Channel {
for (key, value) in retrigger_metadata {
self.pending_retrigger_metadata.insert(key, value);
}
- self.retrigger_deadline =
- Some(tokio::time::Instant::now() + std::time::Duration::from_millis(RETRIGGER_DEBOUNCE_MS));
+ self.retrigger_deadline = Some(
+ tokio::time::Instant::now()
+ + std::time::Duration::from_millis(RETRIGGER_DEBOUNCE_MS),
+ );
}
}
diff --git a/src/agent/cortex.rs b/src/agent/cortex.rs
index 982aa262e..40f322df2 100644
--- a/src/agent/cortex.rs
+++ b/src/agent/cortex.rs
@@ -584,8 +584,38 @@ struct ProfileLlmResponse {
///
/// Uses the current memory bulletin and identity files as context, then asks
/// an LLM to produce a display name, status line, and short bio.
+///
+/// When `auto_display_name` is disabled, the display name is set to the agent
+/// ID and cortex will not overwrite it.
#[tracing::instrument(skip(deps, logger), fields(agent_id = %deps.agent_id))]
async fn generate_profile(deps: &AgentDeps, logger: &CortexLogger) {
+ let cortex_config = **deps.runtime_config.cortex.load();
+
+ // If auto_display_name is disabled, ensure the profile exists with
+ // display_name = agent_id and only update status/bio fields.
+ if !cortex_config.auto_display_name {
+ let agent_id = &deps.agent_id;
+ let avatar_seed = agent_id.to_string();
+ if let Err(error) = sqlx::query(
+ "INSERT INTO agent_profile (agent_id, display_name, avatar_seed, generated_at, updated_at) \
+ VALUES (?, ?, ?, datetime('now'), datetime('now')) \
+ ON CONFLICT(agent_id) DO UPDATE SET \
+ display_name = COALESCE(agent_profile.display_name, excluded.display_name), \
+ avatar_seed = excluded.avatar_seed, \
+ updated_at = datetime('now')",
+ )
+ .bind(agent_id.as_ref())
+ .bind(agent_id.as_ref())
+ .bind(&avatar_seed)
+ .execute(&deps.sqlite_pool)
+ .await
+ {
+ tracing::warn!(%error, "failed to ensure agent profile with agent_id as display_name");
+ }
+ tracing::info!("auto_display_name disabled, skipping cortex profile generation");
+ return;
+ }
+
tracing::info!("cortex generating agent profile");
let started = Instant::now();
diff --git a/src/agent/cortex_chat.rs b/src/agent/cortex_chat.rs
index ebff771b3..ed5ec0033 100644
--- a/src/agent/cortex_chat.rs
+++ b/src/agent/cortex_chat.rs
@@ -75,7 +75,8 @@ impl PromptHook for CortexChatHook {
) -> ToolCallHookAction {
self.send(CortexChatEvent::ToolStarted {
tool: tool_name.to_string(),
- }).await;
+ })
+ .await;
ToolCallHookAction::Continue
}
@@ -95,7 +96,8 @@ impl PromptHook for CortexChatHook {
self.send(CortexChatEvent::ToolCompleted {
tool: tool_name.to_string(),
result_preview: preview,
- }).await;
+ })
+ .await;
HookAction::Continue
}
@@ -295,18 +297,22 @@ impl CortexChatSession {
let _ = store
.save_message(&thread_id, "assistant", &response, channel_ref)
.await;
- let _ = event_tx.send(CortexChatEvent::Done {
- full_text: response,
- }).await;
+ let _ = event_tx
+ .send(CortexChatEvent::Done {
+ full_text: response,
+ })
+ .await;
}
Err(error) => {
let error_text = format!("Cortex chat error: {error}");
let _ = store
.save_message(&thread_id, "assistant", &error_text, channel_ref)
.await;
- let _ = event_tx.send(CortexChatEvent::Error {
- message: error_text,
- }).await;
+ let _ = event_tx
+ .send(CortexChatEvent::Error {
+ message: error_text,
+ })
+ .await;
}
}
});
@@ -314,7 +320,10 @@ impl CortexChatSession {
Ok(event_rx)
}
- async fn build_system_prompt(&self, channel_context_id: Option<&str>) -> crate::error::Result<String> {
+ async fn build_system_prompt(
+ &self,
+ channel_context_id: Option<&str>,
+ ) -> crate::error::Result<String> {
let runtime_config = &self.deps.runtime_config;
let prompt_engine = runtime_config.prompts.load();
@@ -324,8 +333,11 @@ impl CortexChatSession {
let browser_enabled = runtime_config.browser_config.load().enabled;
let web_search_enabled = runtime_config.brave_search_key.load().is_some();
let opencode_enabled = runtime_config.opencode.load().enabled;
- let worker_capabilities =
- prompt_engine.render_worker_capabilities(browser_enabled, web_search_enabled, opencode_enabled)?;
+ let worker_capabilities = prompt_engine.render_worker_capabilities(
+ browser_enabled,
+ web_search_enabled,
+ opencode_enabled,
+ )?;
// Load channel transcript if a channel context is active
let channel_transcript = if let Some(channel_id) = channel_context_id {
diff --git a/src/agent/ingestion.rs b/src/agent/ingestion.rs
index 506baa45c..712f68bbf 100644
--- a/src/agent/ingestion.rs
+++ b/src/agent/ingestion.rs
@@ -298,11 +298,24 @@ async fn read_ingest_content(path: &Path) -> anyhow::Result<String> {
.await
.with_context(|| format!("failed to read pdf file: {}", path.display()))?;
- let extracted =
- tokio::task::spawn_blocking(move || pdf_extract::extract_text_from_mem(&bytes))
- .await
- .context("pdf extraction task failed")?
- .with_context(|| format!("failed to extract text from pdf: {}", path.display()))?;
+ let extracted = tokio::task::spawn_blocking(move || {
+ match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+ pdf_extract::extract_text_from_mem(&bytes)
+ })) {
+ Ok(result) => result.map_err(|e| anyhow::anyhow!(e)),
+ Err(panic) => {
+ let msg = panic
+ .downcast_ref::<&str>()
+ .copied()
+ .or_else(|| panic.downcast_ref::<String>().map(|s| s.as_str()))
+ .unwrap_or("unknown panic");
+ Err(anyhow::anyhow!("pdf extraction panicked: {msg}"))
+ }
+ }
+ })
+ .await
+ .context("pdf extraction task failed")?
+ .with_context(|| format!("failed to extract text from pdf: {}", path.display()))?;
return Ok(extracted);
}
diff --git a/src/agent/worker.rs b/src/agent/worker.rs
index 558956db7..ae369ad70 100644
--- a/src/agent/worker.rs
+++ b/src/agent/worker.rs
@@ -264,7 +264,10 @@ impl Worker {
None
}
})
- .unwrap_or_else(|| "Worker reached maximum segments without a final response.".to_string());
+ .unwrap_or_else(|| {
+ "Worker reached maximum segments without a final response."
+ .to_string()
+ });
}
self.maybe_compact_history(&mut history).await;
@@ -358,8 +361,7 @@ impl Worker {
self.hook.send_status("compacting (overflow recovery)");
self.force_compact_history(&mut history).await;
let prompt_engine = self.deps.runtime_config.prompts.load();
- let overflow_msg =
- prompt_engine.render_system_worker_overflow()?;
+ let overflow_msg = prompt_engine.render_system_worker_overflow()?;
follow_up_prompt = format!("{follow_up}\n\n{overflow_msg}");
}
Err(error) => {
diff --git a/src/api/agents.rs b/src/api/agents.rs
index f1a936555..3132248de 100644
--- a/src/api/agents.rs
+++ b/src/api/agents.rs
@@ -234,6 +234,23 @@ pub(super) async fn create_agent(
"message": format!("Agent '{agent_id}' already exists")
})));
}
+ // Also check runtime state — an agent may still be running even if
+ // it was removed from config during a hot-reload (its RuntimeConfig
+ // and redb lock survive until the process restarts).
+ let runtime_configs = state.runtime_configs.load();
+ if runtime_configs.contains_key(&agent_id) {
+ return Ok(Json(serde_json::json!({
+ "success": false,
+ "message": format!("Agent '{agent_id}' is still active in the runtime. Restart the server to fully remove it before re-creating.")
+ })));
+ }
+ let pools = state.agent_pools.load();
+ if pools.contains_key(&agent_id) {
+ return Ok(Json(serde_json::json!({
+ "success": false,
+ "message": format!("Agent '{agent_id}' still has active database connections. Restart the server to fully remove it before re-creating.")
+ })));
+ }
}
let config_path = state.config_path.read().await.clone();
@@ -254,6 +271,20 @@ pub(super) async fn create_agent(
StatusCode::INTERNAL_SERVER_ERROR
})?;
+ // Check if agent already exists in the TOML file itself (belt-and-suspenders
+ // check — prevents duplicate [[agents]] entries even if runtime state is stale).
+ if let Some(agents_array) = doc.get("agents").and_then(|a| a.as_array_of_tables()) {
+ let already_in_toml = agents_array
+ .iter()
+ .any(|t| t.get("id").and_then(|v| v.as_str()) == Some(&agent_id));
+ if already_in_toml {
+ return Ok(Json(serde_json::json!({
+ "success": false,
+ "message": format!("Agent '{agent_id}' already exists in config.toml")
+ })));
+ }
+ }
+
if doc.get("agents").is_none() {
doc["agents"] = toml_edit::Item::ArrayOfTables(toml_edit::ArrayOfTables::new());
}
@@ -325,7 +356,16 @@ pub(super) async fn create_agent(
let settings_path = agent_config.data_dir.join("settings.redb");
let settings_store = std::sync::Arc::new(
crate::settings::SettingsStore::new(&settings_path).map_err(|error| {
- tracing::error!(%error, agent_id = %agent_id, "failed to init settings store");
+ let err_str = error.to_string();
+ if err_str.contains("already open") || err_str.contains("acquire lock") {
+ tracing::error!(
+ agent_id = %agent_id,
+ path = %settings_path.display(),
+ "settings.redb is locked by another process or existing agent runtime"
+ );
+ } else {
+ tracing::error!(%error, agent_id = %agent_id, "failed to init settings store");
+ }
StatusCode::INTERNAL_SERVER_ERROR
})?,
);
diff --git a/src/api/bindings.rs b/src/api/bindings.rs
index 3ea11acb2..eeb118a24 100644
--- a/src/api/bindings.rs
+++ b/src/api/bindings.rs
@@ -377,12 +377,10 @@ pub(super) async fn create_binding(
Some(existing) => existing.clone(),
None => {
drop(perms_guard);
- let Some(discord_config) = new_config
- .messaging
- .discord
- .as_ref()
- else {
- tracing::error!("discord config missing despite token being provided");
+ let Some(discord_config) = new_config.messaging.discord.as_ref() else {
+ tracing::error!(
+ "discord config missing despite token being provided"
+ );
return Err(StatusCode::INTERNAL_SERVER_ERROR);
};
let perms = crate::config::DiscordPermissions::from_config(
@@ -409,12 +407,10 @@ pub(super) async fn create_binding(
Some(existing) => existing.clone(),
None => {
drop(perms_guard);
- let Some(slack_config) = new_config
- .messaging
- .slack
- .as_ref()
- else {
- tracing::error!("slack config missing despite tokens being provided");
+ let Some(slack_config) = new_config.messaging.slack.as_ref() else {
+ tracing::error!(
+ "slack config missing despite tokens being provided"
+ );
return Err(StatusCode::INTERNAL_SERVER_ERROR);
};
let perms = crate::config::SlackPermissions::from_config(
@@ -453,11 +449,7 @@ pub(super) async fn create_binding(
if let Some(token) = new_telegram_token {
let telegram_perms = {
- let Some(telegram_config) = new_config
- .messaging
- .telegram
- .as_ref()
- else {
+ let Some(telegram_config) = new_config.messaging.telegram.as_ref() else {
tracing::error!("telegram config missing despite token being provided");
return Err(StatusCode::INTERNAL_SERVER_ERROR);
};
@@ -475,11 +467,7 @@ pub(super) async fn create_binding(
}
if let Some((username, oauth_token)) = new_twitch_creds {
- let Some(twitch_config) = new_config
- .messaging
- .twitch
- .as_ref()
- else {
+ let Some(twitch_config) = new_config.messaging.twitch.as_ref() else {
tracing::error!("twitch config missing despite credentials being provided");
return Err(StatusCode::INTERNAL_SERVER_ERROR);
};
diff --git a/src/api/config.rs b/src/api/config.rs
index bb9209331..a36833089 100644
--- a/src/api/config.rs
+++ b/src/api/config.rs
@@ -43,6 +43,7 @@ pub(super) struct CortexSection {
bulletin_interval_secs: u64,
bulletin_max_words: usize,
bulletin_max_turns: usize,
+ auto_display_name: bool,
}
#[derive(Serialize, Debug)]
@@ -148,6 +149,7 @@ pub(super) struct CortexUpdate {
bulletin_interval_secs: Option<u64>,
bulletin_max_words: Option<usize>,
bulletin_max_turns: Option<usize>,
+ auto_display_name: Option<bool>,
}
#[derive(Deserialize, Debug)]
@@ -226,6 +228,7 @@ pub(super) async fn get_agent_config(
bulletin_interval_secs: cortex.bulletin_interval_secs,
bulletin_max_words: cortex.bulletin_max_words,
bulletin_max_turns: cortex.bulletin_max_turns,
+ auto_display_name: cortex.auto_display_name,
},
coalesce: CoalesceSection {
enabled: coalesce.enabled,
@@ -522,6 +525,9 @@ fn update_cortex_table(
if let Some(v) = cortex.bulletin_max_turns {
table["bulletin_max_turns"] = toml_edit::value(v as i64);
}
+ if let Some(v) = cortex.auto_display_name {
+ table["auto_display_name"] = toml_edit::value(v);
+ }
Ok(())
}
diff --git a/src/api/server.rs b/src/api/server.rs
index 7e4f0285a..9c95478d6 100644
--- a/src/api/server.rs
+++ b/src/api/server.rs
@@ -15,8 +15,8 @@ use axum::middleware::{self, Next};
use axum::response::{Html, IntoResponse, Response};
use axum::routing::{delete, get, post, put};
use rust_embed::Embed;
-use tower_http::cors::CorsLayer;
use serde_json::json;
+use tower_http::cors::CorsLayer;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -45,11 +45,7 @@ pub async fn start_http_server(
axum::http::Method::DELETE,
axum::http::Method::OPTIONS,
])
- .allow_headers([
- header::CONTENT_TYPE,
- header::AUTHORIZATION,
- header::ACCEPT,
- ]);
+ .allow_headers([header::CONTENT_TYPE, header::AUTHORIZATION, header::ACCEPT]);
let api_routes = Router::new()
.route("/health", get(system::health))
@@ -163,7 +159,8 @@ pub async fn start_http_server(
.layer(middleware::from_fn_with_state(
state.clone(),
api_auth_middleware,
- ));
+ ))
+ .layer(DefaultBodyLimit::max(100 * 1024 * 1024));
let app = Router::new()
.nest("/api", api_routes)
diff --git a/src/config.rs b/src/config.rs
index d67ef6a27..ac605cd97 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -177,24 +177,66 @@ pub struct LlmConfig {
impl std::fmt::Debug for LlmConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LlmConfig")
- .field("anthropic_key", &self.anthropic_key.as_ref().map(|_| "[REDACTED]"))
- .field("openai_key", &self.openai_key.as_ref().map(|_| "[REDACTED]"))
- .field("openrouter_key", &self.openrouter_key.as_ref().map(|_| "[REDACTED]"))
+ .field(
+ "anthropic_key",
+ &self.anthropic_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "openai_key",
+ &self.openai_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "openrouter_key",
+ &self.openrouter_key.as_ref().map(|_| "[REDACTED]"),
+ )
.field("zhipu_key", &self.zhipu_key.as_ref().map(|_| "[REDACTED]"))
.field("groq_key", &self.groq_key.as_ref().map(|_| "[REDACTED]"))
- .field("together_key", &self.together_key.as_ref().map(|_| "[REDACTED]"))
- .field("fireworks_key", &self.fireworks_key.as_ref().map(|_| "[REDACTED]"))
- .field("deepseek_key", &self.deepseek_key.as_ref().map(|_| "[REDACTED]"))
+ .field(
+ "together_key",
+ &self.together_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "fireworks_key",
+ &self.fireworks_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "deepseek_key",
+ &self.deepseek_key.as_ref().map(|_| "[REDACTED]"),
+ )
.field("xai_key", &self.xai_key.as_ref().map(|_| "[REDACTED]"))
- .field("mistral_key", &self.mistral_key.as_ref().map(|_| "[REDACTED]"))
- .field("gemini_key", &self.gemini_key.as_ref().map(|_| "[REDACTED]"))
- .field("ollama_key", &self.ollama_key.as_ref().map(|_| "[REDACTED]"))
+ .field(
+ "mistral_key",
+ &self.mistral_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "gemini_key",
+ &self.gemini_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "ollama_key",
+ &self.ollama_key.as_ref().map(|_| "[REDACTED]"),
+ )
.field("ollama_base_url", &self.ollama_base_url)
- .field("opencode_zen_key", &self.opencode_zen_key.as_ref().map(|_| "[REDACTED]"))
- .field("nvidia_key", &self.nvidia_key.as_ref().map(|_| "[REDACTED]"))
- .field("minimax_key", &self.minimax_key.as_ref().map(|_| "[REDACTED]"))
- .field("moonshot_key", &self.moonshot_key.as_ref().map(|_| "[REDACTED]"))
- .field("zai_coding_plan_key", &self.zai_coding_plan_key.as_ref().map(|_| "[REDACTED]"))
+ .field(
+ "opencode_zen_key",
+ &self.opencode_zen_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "nvidia_key",
+ &self.nvidia_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "minimax_key",
+ &self.minimax_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "moonshot_key",
+ &self.moonshot_key.as_ref().map(|_| "[REDACTED]"),
+ )
+ .field(
+ "zai_coding_plan_key",
+ &self.zai_coding_plan_key.as_ref().map(|_| "[REDACTED]"),
+ )
.field("providers", &self.providers)
.finish()
}
@@ -284,7 +326,10 @@ impl std::fmt::Debug for DefaultsConfig {
.field("cortex", &self.cortex)
.field("browser", &self.browser)
.field("mcp", &self.mcp)
- .field("brave_search_key", &self.brave_search_key.as_ref().map(|_| "[REDACTED]"))
+ .field(
+ "brave_search_key",
+ &self.brave_search_key.as_ref().map(|_| "[REDACTED]"),
+ )
.field("history_backfill_count", &self.history_backfill_count)
.field("cron", &self.cron)
.field("opencode", &self.opencode)
@@ -500,6 +545,9 @@ pub struct CortexConfig {
pub association_updates_threshold: f32,
/// Max associations to create per pass (rate limit).
pub association_max_per_pass: usize,
+ /// When true, cortex auto-generates the display name. When false,
+ /// the display name equals the agent ID and cortex won't overwrite it.
+ pub auto_display_name: bool,
}
impl Default for CortexConfig {
@@ -516,6 +564,7 @@ impl Default for CortexConfig {
association_similarity_threshold: 0.85,
association_updates_threshold: 0.95,
association_max_per_pass: 100,
+ auto_display_name: true,
}
}
}
@@ -1501,6 +1550,7 @@ struct TomlCortexConfig {
association_similarity_threshold: Option<f32>,
association_updates_threshold: Option<f32>,
association_max_per_pass: Option<usize>,
+ auto_display_name: Option<bool>,
}
#[derive(Deserialize)]
@@ -1719,9 +1769,7 @@ fn resolve_cron_timezone(
.and_then(|value| normalize_timezone(&value))
});
- let Some(timezone) = timezone else {
- return None;
- };
+ let timezone = timezone?;
if timezone.parse::().is_err() {
tracing::warn!(
@@ -2365,10 +2413,7 @@ impl Config {
.into_iter()
.map(|(provider_id, config)| {
let api_key = resolve_env_value(&config.api_key).ok_or_else(|| {
- anyhow::anyhow!(
- "failed to resolve API key for provider '{}'",
- provider_id
- )
+ anyhow::anyhow!("failed to resolve API key for provider '{}'", provider_id)
})?;
Ok((
provider_id.to_lowercase(),
@@ -2637,6 +2682,9 @@ impl Config {
association_max_per_pass: c
.association_max_per_pass
.unwrap_or(base_defaults.cortex.association_max_per_pass),
+ auto_display_name: c
+ .auto_display_name
+ .unwrap_or(base_defaults.cortex.auto_display_name),
})
.unwrap_or(base_defaults.cortex),
browser: toml
@@ -2813,6 +2861,9 @@ impl Config {
association_max_per_pass: c
.association_max_per_pass
.unwrap_or(defaults.cortex.association_max_per_pass),
+ auto_display_name: c
+ .auto_display_name
+ .unwrap_or(defaults.cortex.auto_display_name),
}),
browser: a.browser.map(|b| BrowserConfig {
enabled: b.enabled.unwrap_or(defaults.browser.enabled),
@@ -2844,6 +2895,25 @@ impl Config {
})
.collect::>>()?;
+ // Deduplicate agents by ID — keep the last definition (latest wins).
+ // This prevents crash loops when config.toml has duplicate [[agents]]
+ // entries (e.g. from multiple create_agent API calls).
+ {
+ let mut seen = std::collections::HashSet::new();
+ let before = agents.len();
+ // Reverse-dedup: iterate from the end so the last definition wins,
+ // then reverse back to preserve original order.
+ agents.reverse();
+ agents.retain(|a| seen.insert(a.id.clone()));
+ agents.reverse();
+ if agents.len() < before {
+ tracing::warn!(
+ duplicates_removed = before - agents.len(),
+ "removed duplicate agent entries from config"
+ );
+ }
+ }
+
if agents.is_empty() {
agents.push(AgentConfig {
id: "main".into(),
diff --git a/src/cron/scheduler.rs b/src/cron/scheduler.rs
index 50efb6766..1d3a8fb2b 100644
--- a/src/cron/scheduler.rs
+++ b/src/cron/scheduler.rs
@@ -519,26 +519,6 @@ fn hour_in_active_window(current_hour: u8, start_hour: u8, end_hour: u8) -> bool
}
}
-#[cfg(test)]
-mod tests {
- use super::hour_in_active_window;
-
- #[test]
- fn test_hour_in_active_window_non_wrapping() {
- assert!(hour_in_active_window(9, 9, 17));
- assert!(hour_in_active_window(16, 9, 17));
- assert!(!hour_in_active_window(8, 9, 17));
- assert!(!hour_in_active_window(17, 9, 17));
- }
-
- #[test]
- fn test_hour_in_active_window_midnight_wrapping() {
- assert!(hour_in_active_window(22, 22, 6));
- assert!(hour_in_active_window(3, 22, 6));
- assert!(!hour_in_active_window(12, 22, 6));
- }
-}
-
/// Execute a single cron job: create a fresh channel, run the prompt, deliver the result.
#[tracing::instrument(skip(context), fields(cron_id = %job.id, agent_id = %context.deps.agent_id))]
async fn run_cron_job(job: &CronJob, context: &CronContext) -> Result<()> {
@@ -670,3 +650,23 @@ async fn run_cron_job(job: &CronJob, context: &CronContext) -> Result<()> {
Ok(())
}
+
+#[cfg(test)]
+mod tests {
+ use super::hour_in_active_window;
+
+ #[test]
+ fn test_hour_in_active_window_non_wrapping() {
+ assert!(hour_in_active_window(9, 9, 17));
+ assert!(hour_in_active_window(16, 9, 17));
+ assert!(!hour_in_active_window(8, 9, 17));
+ assert!(!hour_in_active_window(17, 9, 17));
+ }
+
+ #[test]
+ fn test_hour_in_active_window_midnight_wrapping() {
+ assert!(hour_in_active_window(22, 22, 6));
+ assert!(hour_in_active_window(3, 22, 6));
+ assert!(!hour_in_active_window(12, 22, 6));
+ }
+}
diff --git a/src/hooks/spacebot.rs b/src/hooks/spacebot.rs
index 595780d76..6f867f9c4 100644
--- a/src/hooks/spacebot.rs
+++ b/src/hooks/spacebot.rs
@@ -90,51 +90,44 @@ impl SpacebotHook {
// Percent-encoded secrets (e.g. sk%2Dant%2D...)
let url_decoded = urlencoding::decode(content).unwrap_or(std::borrow::Cow::Borrowed(""));
- if url_decoded != content {
- if let Some(matched) = Self::match_patterns(&url_decoded) {
- return Some(format!("url-encoded: {matched}"));
- }
+ if url_decoded != content
+ && let Some(matched) = Self::match_patterns(&url_decoded)
+ {
+ return Some(format!("url-encoded: {matched}"));
}
// Base64-wrapped secrets. Minimum 24 chars avoids false positives on
// short alphanumeric strings while catching any encoded API key.
- static BASE64_SEGMENT: LazyLock<Regex> = LazyLock::new(|| {
- Regex::new(r"[A-Za-z0-9+/]{24,}={0,2}").expect("hardcoded regex")
- });
+ static BASE64_SEGMENT: LazyLock<Regex> =
+ LazyLock::new(|| Regex::new(r"[A-Za-z0-9+/]{24,}={0,2}").expect("hardcoded regex"));
for segment in BASE64_SEGMENT.find_iter(content) {
if let Ok(decoded_bytes) =
base64::engine::general_purpose::STANDARD.decode(segment.as_str())
+ && let Ok(decoded) = std::str::from_utf8(&decoded_bytes)
+ && let Some(matched) = Self::match_patterns(decoded)
{
- if let Ok(decoded) = std::str::from_utf8(&decoded_bytes) {
- if let Some(matched) = Self::match_patterns(decoded) {
- return Some(format!("base64-encoded: {matched}"));
- }
- }
+ return Some(format!("base64-encoded: {matched}"));
}
if let Ok(decoded_bytes) =
base64::engine::general_purpose::URL_SAFE.decode(segment.as_str())
+ && let Ok(decoded) = std::str::from_utf8(&decoded_bytes)
+ && let Some(matched) = Self::match_patterns(decoded)
{
- if let Ok(decoded) = std::str::from_utf8(&decoded_bytes) {
- if let Some(matched) = Self::match_patterns(decoded) {
- return Some(format!("base64-encoded: {matched}"));
- }
- }
+ return Some(format!("base64-encoded: {matched}"));
}
}
// Hex-encoded secrets. Minimum 40 hex chars (20 bytes) to reduce
// false positives while catching any hex-wrapped API key.
- static HEX_SEGMENT: LazyLock<Regex> = LazyLock::new(|| {
- Regex::new(r"(?i)(?:0x)?([0-9a-f]{40,})").expect("hardcoded regex")
- });
+ static HEX_SEGMENT: LazyLock<Regex> =
+ LazyLock::new(|| Regex::new(r"(?i)(?:0x)?([0-9a-f]{40,})").expect("hardcoded regex"));
for caps in HEX_SEGMENT.captures_iter(content) {
let hex_str = caps.get(1).map_or("", |m| m.as_str());
- if let Ok(decoded_bytes) = hex::decode(hex_str) {
- if let Ok(decoded) = std::str::from_utf8(&decoded_bytes) {
- if let Some(matched) = Self::match_patterns(decoded) {
- return Some(format!("hex-encoded: {matched}"));
- }
- }
+ if let Ok(decoded_bytes) = hex::decode(hex_str)
+ && let Ok(decoded) = std::str::from_utf8(&decoded_bytes)
+ && let Some(matched) = Self::match_patterns(decoded)
+ {
+ return Some(format!("hex-encoded: {matched}"));
}
}
diff --git a/src/llm/model.rs b/src/llm/model.rs
index 2d52e914b..8f0afdceb 100644
--- a/src/llm/model.rs
+++ b/src/llm/model.rs
@@ -341,16 +341,31 @@ impl CompletionModel for SpacebotModel {
if usage.input_tokens > 0 || usage.output_tokens > 0 {
metrics
.llm_tokens_total
- .with_label_values(&[agent_label, &self.full_model_name, tier_label, "input"])
+ .with_label_values(&[
+ agent_label,
+ &self.full_model_name,
+ tier_label,
+ "input",
+ ])
.inc_by(usage.input_tokens);
metrics
.llm_tokens_total
- .with_label_values(&[agent_label, &self.full_model_name, tier_label, "output"])
+ .with_label_values(&[
+ agent_label,
+ &self.full_model_name,
+ tier_label,
+ "output",
+ ])
.inc_by(usage.output_tokens);
if usage.cached_input_tokens > 0 {
metrics
.llm_tokens_total
- .with_label_values(&[agent_label, &self.full_model_name, tier_label, "cached_input"])
+ .with_label_values(&[
+ agent_label,
+ &self.full_model_name,
+ tier_label,
+ "cached_input",
+ ])
.inc_by(usage.cached_input_tokens);
}
@@ -735,9 +750,7 @@ impl SpacebotModel {
})?;
if !status.is_success() {
- let message = response_body["error"]["message"]
- .as_str()
- .unwrap_or("unknown error");
+ let message = extract_error_message(&response_body, &response_text);
return Err(CompletionError::ProviderError(format!(
"{provider_display_name} API error ({status}): {message}"
)));
@@ -825,9 +838,7 @@ impl SpacebotModel {
})?;
if !status.is_success() {
- let message = response_body["error"]["message"]
- .as_str()
- .unwrap_or("unknown error");
+ let message = extract_error_message(&response_body, &response_text);
return Err(CompletionError::ProviderError(format!(
"{provider_display_name} API error ({status}): {message}"
)));
@@ -838,6 +849,40 @@ impl SpacebotModel {
}
// --- Helpers ---
+/// Extract the most useful error message from an API error response.
+///
+/// Different providers return errors in different formats:
+/// - OpenAI/Gemini: `{"error": {"message": "...", "code": ...}}`
+/// - Some providers: `{"error": "string message"}`
+/// - Others: `{"detail": "..."}` or `{"message": "..."}`
+///
+/// Falls back to the raw (truncated) response body if no known field is found.
+fn extract_error_message(body: &serde_json::Value, raw_text: &str) -> String {
+ // Standard OpenAI format: {"error": {"message": "..."}}
+ if let Some(msg) = body["error"]["message"].as_str() {
+ return msg.to_string();
+ }
+ // Error as a plain string: {"error": "..."}
+ if let Some(msg) = body["error"].as_str() {
+ return msg.to_string();
+ }
+ // Gemini/Google sometimes uses: {"error": {"status": "INVALID_ARGUMENT", ...}}
+ if let Some(status) = body["error"]["status"].as_str() {
+ let code = body["error"]["code"].as_i64().unwrap_or(0);
+ return format!("{status} (code {code})");
+ }
+ // FastAPI / generic: {"detail": "..."}
+ if let Some(msg) = body["detail"].as_str() {
+ return msg.to_string();
+ }
+ // Top-level message
+ if let Some(msg) = body["message"].as_str() {
+ return msg.to_string();
+ }
+ // Last resort: truncated raw body
+ truncate_body(raw_text).to_string()
+}
+
#[allow(dead_code)]
fn normalize_ollama_base_url(configured: Option<String>) -> String {
let mut base_url = configured
@@ -991,14 +1036,20 @@ fn convert_messages_to_openai(messages: &OneOrMany) -> Vec {}
}
@@ -1187,7 +1238,12 @@ fn truncate_body(body: &str) -> &str {
// --- Response parsing ---
-fn make_tool_call(id: String, name: String, arguments: serde_json::Value) -> ToolCall {
+fn make_tool_call(
+ id: String,
+ name: String,
+ arguments: serde_json::Value,
+ signature: Option<String>,
+) -> ToolCall {
ToolCall {
id,
call_id: None,
@@ -1195,7 +1251,7 @@ fn make_tool_call(id: String, name: String, arguments: serde_json::Value) -> Too
name: name.trim().to_string(),
arguments,
},
- signature: None,
+ signature,
additional_params: None,
}
}
@@ -1220,7 +1276,7 @@ fn parse_anthropic_response(
let name = block["name"].as_str().unwrap_or("").to_string();
let arguments = block["input"].clone();
assistant_content.push(AssistantContent::ToolCall(make_tool_call(
- id, name, arguments,
+ id, name, arguments, None,
)));
}
Some("thinking") => {
@@ -1308,8 +1364,11 @@ fn parse_openai_response(
.and_then(|raw| serde_json::from_str(raw).ok())
.or_else(|| arguments_field.as_object().map(|_| arguments_field.clone()))
.unwrap_or(serde_json::json!({}));
+ // Gemini 2.5+ thinking models return a thought_signature that must
+ // be echoed back on subsequent turns, otherwise the API rejects with 400.
+ let signature = tc["thought_signature"].as_str().map(|s| s.to_string());
assistant_content.push(AssistantContent::ToolCall(make_tool_call(
- id, name, arguments,
+ id, name, arguments, signature,
)));
}
}
@@ -1379,7 +1438,7 @@ fn parse_openai_responses_response(
.unwrap_or(serde_json::json!({}));
assistant_content.push(AssistantContent::ToolCall(make_tool_call(
- call_id, name, arguments,
+ call_id, name, arguments, None,
)));
}
_ => {}
diff --git a/src/llm/pricing.rs b/src/llm/pricing.rs
index 4717ffa48..721772fd1 100644
--- a/src/llm/pricing.rs
+++ b/src/llm/pricing.rs
@@ -41,13 +41,11 @@ fn lookup_pricing(model_name: &str) -> ModelPricing {
output: per_m(15.0),
cached_input: per_m(0.30),
},
- m if m.starts_with("claude-3-5-haiku") || m.starts_with("claude-haiku-4") => {
- ModelPricing {
- input: per_m(0.80),
- output: per_m(4.0),
- cached_input: per_m(0.08),
- }
- }
+ m if m.starts_with("claude-3-5-haiku") || m.starts_with("claude-haiku-4") => ModelPricing {
+ input: per_m(0.80),
+ output: per_m(4.0),
+ cached_input: per_m(0.08),
+ },
m if m.starts_with("claude-3-opus") => ModelPricing {
input: per_m(15.0),
diff --git a/src/llm/routing.rs b/src/llm/routing.rs
index 45cdd3e10..2cdbd929a 100644
--- a/src/llm/routing.rs
+++ b/src/llm/routing.rs
@@ -316,10 +316,7 @@ pub fn defaults_for_provider(provider: &str) -> RoutingConfig {
cortex: worker.clone(),
voice: String::new(),
task_overrides: HashMap::from([("coding".into(), channel.clone())]),
- fallbacks: HashMap::from([
- (channel, vec![worker.clone()]),
- (worker, vec![lite]),
- ]),
+ fallbacks: HashMap::from([(channel, vec![worker.clone()]), (worker, vec![lite])]),
rate_limit_cooldown_secs: 60,
..RoutingConfig::default()
}
diff --git a/src/main.rs b/src/main.rs
index ca8e4aa76..bd0090bb2 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -610,11 +610,8 @@ async fn run(
let (agent_remove_tx, mut agent_remove_rx) = mpsc::channel::(8);
// Start HTTP API server if enabled
- let mut api_state = spacebot::api::ApiState::new_with_provider_sender(
- provider_tx,
- agent_tx,
- agent_remove_tx,
- );
+ let mut api_state =
+ spacebot::api::ApiState::new_with_provider_sender(provider_tx, agent_tx, agent_remove_tx);
api_state.auth_token = config.api.auth_token.clone();
let api_state = Arc::new(api_state);
@@ -1247,7 +1244,8 @@ async fn initialize_agents(
);
// Per-agent memory system
- let memory_store = spacebot::memory::MemoryStore::with_agent_id(db.sqlite.clone(), &agent_config.id);
+ let memory_store =
+ spacebot::memory::MemoryStore::with_agent_id(db.sqlite.clone(), &agent_config.id);
let embedding_table = spacebot::memory::EmbeddingTable::open_or_create(&db.lance)
.await
.with_context(|| {
diff --git a/src/memory/lance.rs b/src/memory/lance.rs
index 0942a9387..382bedefc 100644
--- a/src/memory/lance.rs
+++ b/src/memory/lance.rs
@@ -361,12 +361,10 @@ impl EmbeddingTable {
/// Validate that a memory ID is a well-formed UUID to prevent predicate injection.
fn validate_memory_id(memory_id: &str) -> Result<()> {
- if memory_id.len() != 36
- || !memory_id
- .chars()
- .all(|c| c.is_ascii_hexdigit() || c == '-')
- {
- return Err(DbError::LanceDb(format!("invalid memory ID format: {}", memory_id)).into());
+ if memory_id.len() != 36 || !memory_id.chars().all(|c| c.is_ascii_hexdigit() || c == '-') {
+ return Err(
+ DbError::LanceDb(format!("invalid memory ID format: {}", memory_id)).into(),
+ );
}
Ok(())
}
diff --git a/src/messaging/slack.rs b/src/messaging/slack.rs
index 22f6596b7..51e09375e 100644
--- a/src/messaging/slack.rs
+++ b/src/messaging/slack.rs
@@ -141,9 +141,11 @@ async fn handle_message_event(
let state_guard = states.read().await;
let adapter_state = state_guard
.get_user_state::<Arc<SlackAdapterState>>()
- .ok_or_else(|| Box::<dyn std::error::Error + Send + Sync>::from(
- "SlackAdapterState not found in user_state",
- ))?;
+ .ok_or_else(|| {
+ Box::<dyn std::error::Error + Send + Sync>::from(
+ "SlackAdapterState not found in user_state",
+ )
+ })?;
let user_id = msg_event.sender.user.as_ref().map(|u| u.0.clone());
@@ -242,9 +244,11 @@ async fn handle_app_mention_event(
let state_guard = states.read().await;
let adapter_state = state_guard
.get_user_state::<Arc<SlackAdapterState>>()
- .ok_or_else(|| Box::<dyn std::error::Error + Send + Sync>::from(
- "SlackAdapterState not found in user_state",
- ))?;
+ .ok_or_else(|| {
+ Box::<dyn std::error::Error + Send + Sync>::from(
+ "SlackAdapterState not found in user_state",
+ )
+ })?;
let user_id = mention.user.0.clone();
@@ -343,9 +347,11 @@ async fn handle_command_event(
let state_guard = states.read().await;
let adapter_state = state_guard
.get_user_state::<Arc<SlackAdapterState>>()
- .ok_or_else(|| Box::<dyn std::error::Error + Send + Sync>::from(
- "SlackAdapterState not found in user_state",
- ))?;
+ .ok_or_else(|| {
+ Box::<dyn std::error::Error + Send + Sync>::from(
+ "SlackAdapterState not found in user_state",
+ )
+ })?;
let command_str = event.command.0.clone();
let team_id = event.team_id.0.clone();
@@ -484,9 +490,11 @@ async fn handle_interaction_event(
let state_guard = states.read().await;
let adapter_state = state_guard
.get_user_state::<Arc<SlackAdapterState>>()
- .ok_or_else(|| Box::<dyn std::error::Error + Send + Sync>::from(
- "SlackAdapterState not found in user_state",
- ))?;
+ .ok_or_else(|| {
+ Box::<dyn std::error::Error + Send + Sync>::from(
+ "SlackAdapterState not found in user_state",
+ )
+ })?;
let user_id = block_actions
.user
diff --git a/src/messaging/telegram.rs b/src/messaging/telegram.rs
index 47c1bda48..d493d327d 100644
--- a/src/messaging/telegram.rs
+++ b/src/messaging/telegram.rs
@@ -910,6 +910,8 @@ fn should_retry_plain_caption(error: &RequestError) -> bool {
// -- Markdown-to-Telegram-HTML formatting --
+static BOLD_ITALIC_PATTERN: LazyLock<Regex> =
+ LazyLock::new(|| Regex::new(r"\*\*\*(.+?)\*\*\*").expect("hardcoded regex"));
static BOLD_PATTERN: LazyLock<Regex> =
LazyLock::new(|| Regex::new(r"\*\*(.+?)\*\*").expect("hardcoded regex"));
static BOLD_UNDERSCORE_PATTERN: LazyLock<Regex> =
@@ -1066,7 +1068,8 @@ fn format_inline(line: &str) -> String {
/// Bold (`**`) is processed before italic (`*`) so double-star patterns
/// are consumed first and single stars only match true italic spans.
fn format_markdown_spans(text: &str) -> String {
- let text = BOLD_PATTERN.replace_all(text, "$1");
+ let text = BOLD_ITALIC_PATTERN.replace_all(text, "$1");
+ let text = BOLD_PATTERN.replace_all(&text, "$1");
let text = BOLD_UNDERSCORE_PATTERN.replace_all(&text, "$1");
let text = ITALIC_PATTERN.replace_all(&text, "$1");
let text = ITALIC_UNDERSCORE_PATTERN.replace_all(&text, "$1");
diff --git a/src/messaging/twitch.rs b/src/messaging/twitch.rs
index 57fde883c..d716bf795 100644
--- a/src/messaging/twitch.rs
+++ b/src/messaging/twitch.rs
@@ -16,7 +16,7 @@ use twitch_irc::{ClientConfig, SecureTCPTransport, TwitchIRCClient};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
-use tokio::sync::{mpsc, RwLock};
+use tokio::sync::{RwLock, mpsc};
#[derive(Serialize, Deserialize)]
struct TwitchTokenFile {
@@ -43,15 +43,15 @@ impl TokenStorage for TwitchTokenStorage {
let mut expires_at = None;
if let Some(path) = &self.token_path {
- if let Ok(data) = std::fs::read_to_string(path) {
- if let Ok(file) = serde_json::from_str::<TwitchTokenFile>(&data) {
- self.access_token = file.access_token;
- self.refresh_token = file.refresh_token;
- if let Some(stored_created) = file.created_at {
- created_at = stored_created;
- }
- expires_at = file.expires_at;
+ if let Ok(data) = std::fs::read_to_string(path)
+ && let Ok(file) = serde_json::from_str::<TwitchTokenFile>(&data)
+ {
+ self.access_token = file.access_token;
+ self.refresh_token = file.refresh_token;
+ if let Some(stored_created) = file.created_at {
+ created_at = stored_created;
}
+ expires_at = file.expires_at;
}
if !self.refresh_token.is_empty() && expires_at.is_none() {
expires_at = Some(created_at + chrono::Duration::hours(1));
@@ -117,6 +117,7 @@ pub struct TwitchAdapter {
const MAX_MESSAGE_LENGTH: usize = 500;
impl TwitchAdapter {
+ #[allow(clippy::too_many_arguments)]
pub fn new(
username: impl Into<String>,
oauth_token: impl Into<String>,
diff --git a/src/secrets/store.rs b/src/secrets/store.rs
index 5b11681cc..d598be95d 100644
--- a/src/secrets/store.rs
+++ b/src/secrets/store.rs
@@ -1,7 +1,7 @@
//! Encrypted credentials storage (AES-256-GCM, redb).
use crate::error::SecretsError;
-use aes_gcm::{aead::Aead, Aes256Gcm, KeyInit, Nonce};
+use aes_gcm::{Aes256Gcm, KeyInit, Nonce, aead::Aead};
use rand::RngCore;
use redb::{Database, ReadableTable, TableDefinition};
use sha2::{Digest, Sha256};
diff --git a/src/skills.rs b/src/skills.rs
index d8aa20b4b..4b13759cf 100644
--- a/src/skills.rs
+++ b/src/skills.rs
@@ -115,7 +115,10 @@ impl SkillSet {
///
/// The channel sees skill names and descriptions but is instructed to
/// delegate actual skill execution to workers.
- pub fn render_channel_prompt(&self, prompt_engine: &crate::prompts::PromptEngine) -> crate::error::Result<String> {
+ pub fn render_channel_prompt(
+ &self,
+ prompt_engine: &crate::prompts::PromptEngine,
+ ) -> crate::error::Result<String> {
if self.skills.is_empty() {
return Ok(String::new());
}
diff --git a/src/telemetry/registry.rs b/src/telemetry/registry.rs
index 4e6dc4666..4d3121d35 100644
--- a/src/telemetry/registry.rs
+++ b/src/telemetry/registry.rs
@@ -112,7 +112,9 @@ impl Metrics {
"spacebot_llm_request_duration_seconds",
"LLM request duration in seconds",
)
- .buckets(vec![0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 15.0, 30.0, 60.0, 120.0]),
+ .buckets(vec![
+ 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 15.0, 30.0, 60.0, 120.0,
+ ]),
&["agent_id", "model", "tier"],
)
.expect("hardcoded metric descriptor");
@@ -167,16 +169,15 @@ impl Metrics {
"spacebot_worker_duration_seconds",
"Worker lifetime duration in seconds",
)
- .buckets(vec![1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0]),
+ .buckets(vec![
+ 1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0,
+ ]),
&["agent_id", "worker_type"],
)
.expect("hardcoded metric descriptor");
let process_errors_total = IntCounterVec::new(
- Opts::new(
- "spacebot_process_errors_total",
- "Process errors by type",
- ),
+ Opts::new("spacebot_process_errors_total", "Process errors by type"),
&["agent_id", "process_type", "error_type"],
)
.expect("hardcoded metric descriptor");
diff --git a/src/tools/browser.rs b/src/tools/browser.rs
index 34b418004..2d912b69e 100644
--- a/src/tools/browser.rs
+++ b/src/tools/browser.rs
@@ -4,7 +4,6 @@
//! via headless Chrome using chromiumoxide. Uses an accessibility-tree based
//! ref system for LLM-friendly element addressing.
-
use crate::config::BrowserConfig;
use chromiumoxide::browser::{Browser, BrowserConfig as ChromeConfig};
@@ -60,23 +59,22 @@ fn validate_url(url: &str) -> Result<(), BrowserError> {
}
// If the host parses as an IP address, check against blocked ranges
- if let Ok(ip) = host.parse::<IpAddr>() {
- if is_blocked_ip(ip) {
- return Err(BrowserError::new(format!(
- "navigation to private/loopback address {ip} is blocked"
- )));
- }
+ if let Ok(ip) = host.parse::<IpAddr>()
+ && is_blocked_ip(ip)
+ {
+ return Err(BrowserError::new(format!(
+ "navigation to private/loopback address {ip} is blocked"
+ )));
}
// IPv6 addresses in brackets (url crate strips them for host_str)
- if let Some(stripped) = host.strip_prefix('[').and_then(|h| h.strip_suffix(']')) {
- if let Ok(ip) = stripped.parse::<IpAddr>() {
- if is_blocked_ip(ip) {
- return Err(BrowserError::new(format!(
- "navigation to private/loopback address {ip} is blocked"
- )));
- }
- }
+ if let Some(stripped) = host.strip_prefix('[').and_then(|h| h.strip_suffix(']'))
+ && let Ok(ip) = stripped.parse::<IpAddr>()
+ && is_blocked_ip(ip)
+ {
+ return Err(BrowserError::new(format!(
+ "navigation to private/loopback address {ip} is blocked"
+ )));
}
Ok(())
diff --git a/src/tools/file.rs b/src/tools/file.rs
index f8e9a49df..c842b7275 100644
--- a/src/tools/file.rs
+++ b/src/tools/file.rs
@@ -58,14 +58,14 @@ impl FileTool {
if let Ok(relative) = canonical.strip_prefix(&workspace_canonical) {
for component in relative.components() {
check.push(component);
- if let Ok(metadata) = std::fs::symlink_metadata(&check) {
- if metadata.file_type().is_symlink() {
- return Err(FileError(
- "ACCESS DENIED: Symlinks are not allowed within the workspace \
+ if let Ok(metadata) = std::fs::symlink_metadata(&check)
+ && metadata.file_type().is_symlink()
+ {
+ return Err(FileError(
+ "ACCESS DENIED: Symlinks are not allowed within the workspace \
for security reasons. Use direct paths instead."
- .to_string(),
- ));
- }
+ .to_string(),
+ ));
}
}
}
@@ -204,7 +204,10 @@ impl Tool for FileTool {
// the dedicated identity API to keep update flow consistent.
let file_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
const PROTECTED_FILES: &[&str] = &["SOUL.md", "IDENTITY.md", "USER.md"];
- if PROTECTED_FILES.iter().any(|f| file_name.eq_ignore_ascii_case(f)) {
+ if PROTECTED_FILES
+ .iter()
+ .any(|f| file_name.eq_ignore_ascii_case(f))
+ {
return Err(FileError(
"ACCESS DENIED: Identity files are protected and cannot be modified \
through file operations. Use the identity management API instead."
diff --git a/src/tools/memory_save.rs b/src/tools/memory_save.rs
index 31a398fc6..07225bffe 100644
--- a/src/tools/memory_save.rs
+++ b/src/tools/memory_save.rs
@@ -175,12 +175,12 @@ impl Tool for MemorySaveTool {
)));
}
- if let Some(importance) = args.importance {
- if !(0.0..=1.0).contains(&importance) {
- return Err(MemorySaveError(format!(
- "importance must be between 0.0 and 1.0 (got {importance})"
- )));
- }
+ if let Some(importance) = args.importance
+ && !(0.0..=1.0).contains(&importance)
+ {
+ return Err(MemorySaveError(format!(
+ "importance must be between 0.0 and 1.0 (got {importance})"
+ )));
}
// Parse memory type
diff --git a/src/tools/shell.rs b/src/tools/shell.rs
index a461d3572..32e93b81f 100644
--- a/src/tools/shell.rs
+++ b/src/tools/shell.rs
@@ -106,7 +106,9 @@ impl ShellTool {
let after = pos + dollar_var.len();
let next_char = command[after..].chars().next();
// $VAR is a match if followed by non-alphanumeric/underscore or end-of-string
- if next_char.is_none() || (!next_char.unwrap().is_alphanumeric() && next_char.unwrap() != '_') {
+ if next_char.is_none()
+ || (!next_char.unwrap().is_alphanumeric() && next_char.unwrap() != '_')
+ {
return Err(ShellError {
message: "Cannot access secret environment variables.".to_string(),
exit_code: -1,
@@ -184,11 +186,19 @@ impl ShellTool {
}
// Block interpreter one-liners that can bypass shell-level restrictions
- for interpreter in ["python3 -c", "python -c", "perl -e", "ruby -e", "node -e", "node --eval"] {
+ for interpreter in [
+ "python3 -c",
+ "python -c",
+ "perl -e",
+ "ruby -e",
+ "node -e",
+ "node --eval",
+ ] {
if command.contains(interpreter) {
return Err(ShellError {
- message: "Inline interpreter execution is not permitted — use script files instead."
- .to_string(),
+ message:
+ "Inline interpreter execution is not permitted — use script files instead."
+ .to_string(),
exit_code: -1,
});
}
diff --git a/src/tools/spawn_worker.rs b/src/tools/spawn_worker.rs
index 8cd353062..e14b54681 100644
--- a/src/tools/spawn_worker.rs
+++ b/src/tools/spawn_worker.rs
@@ -112,9 +112,8 @@ impl Tool for SpawnWorkerTool {
}
});
- if opencode_enabled {
- if let Some(obj) = properties.as_object_mut() {
- obj.insert(
+ if opencode_enabled && let Some(obj) = properties.as_object_mut() {
+ obj.insert(
"worker_type".to_string(),
serde_json::json!({
"type": "string",
@@ -123,14 +122,13 @@ impl Tool for SpawnWorkerTool {
"description": "\"builtin\" (default) runs a Rig agent loop. \"opencode\" spawns a full OpenCode coding agent — use for complex multi-file coding tasks."
}),
);
- obj.insert(
+ obj.insert(
"directory".to_string(),
serde_json::json!({
"type": "string",
"description": "Working directory for the worker. Required when worker_type is \"opencode\". The OpenCode agent operates in this directory."
}),
);
- }
}
ToolDefinition {
diff --git a/tests/context_dump.rs b/tests/context_dump.rs
index 2f94d5ddf..4d2daa9fa 100644
--- a/tests/context_dump.rs
+++ b/tests/context_dump.rs
@@ -144,7 +144,7 @@ fn build_channel_system_prompt(rc: &spacebot::config::RuntimeConfig) -> String {
.render_channel_prompt(
empty_to_none(identity_context),
empty_to_none(memory_bulletin.to_string()),
- empty_to_none(skills_prompt),
+ empty_to_none(skills_prompt.unwrap_or_default()),
worker_capabilities,
conversation_context,
None,