diff --git a/dataclaw/cli.py b/dataclaw/cli.py index ece23df..e9adffc 100644 --- a/dataclaw/cli.py +++ b/dataclaw/cli.py @@ -4,13 +4,15 @@ import json import re import sys +import urllib.error import urllib.request from datetime import datetime, timezone from pathlib import Path +from typing import Any, Mapping, cast from .anonymizer import Anonymizer from .config import CONFIG_FILE, DataClawConfig, load_config, save_config -from .parser import CLAUDE_DIR, CODEX_DIR, GEMINI_DIR, discover_projects, parse_project_sessions +from .parser import CLAUDE_DIR, CODEX_DIR, GEMINI_DIR, OPENCODE_DIR, discover_projects, parse_project_sessions from .secrets import _has_mixed_char_types, _shannon_entropy, redact_session HF_TAG = "dataclaw" @@ -56,8 +58,8 @@ "Step 6/6: After explicit user approval, publish: dataclaw export --publish-attestation \"User explicitly approved publishing to Hugging Face.\"", ] -EXPLICIT_SOURCE_CHOICES = {"claude", "codex", "gemini", "all", "both"} -SOURCE_CHOICES = ["auto", "claude", "codex", "gemini", "all"] +EXPLICIT_SOURCE_CHOICES = {"claude", "codex", "gemini", "opencode", "all", "both"} +SOURCE_CHOICES = ["auto", "claude", "codex", "gemini", "opencode", "all"] def _mask_secret(s: str) -> str: @@ -67,7 +69,7 @@ def _mask_secret(s: str) -> str: return f"{s[:4]}...{s[-4:]}" -def _mask_config_for_display(config: dict) -> dict: +def _mask_config_for_display(config: Mapping[str, Any]) -> dict[str, Any]: """Return a copy of config with redact_strings values masked.""" out = dict(config) if out.get("redact_strings"): @@ -83,7 +85,9 @@ def _source_label(source_filter: str) -> str: return "Codex" if source_filter == "gemini": return "Gemini CLI" - return "Claude Code, Codex, or Gemini CLI" + if source_filter == "opencode": + return "OpenCode" + return "Claude Code, Codex, Gemini CLI, or OpenCode" def _normalize_source_filter(source_filter: str) -> str: @@ -123,7 +127,9 @@ def _has_session_sources(source_filter: str = "auto") -> bool: return 
CODEX_DIR.exists() if source_filter == "gemini": return GEMINI_DIR.exists() - return CLAUDE_DIR.exists() or CODEX_DIR.exists() or GEMINI_DIR.exists() + if source_filter == "opencode": + return OPENCODE_DIR.exists() + return CLAUDE_DIR.exists() or CODEX_DIR.exists() or GEMINI_DIR.exists() or OPENCODE_DIR.exists() def _filter_projects_by_source(projects: list[dict], source_filter: str) -> list[dict]: @@ -134,11 +140,12 @@ def _filter_projects_by_source(projects: list[dict], source_filter: str) -> list def _format_size(size_bytes: int) -> str: + size = float(size_bytes) for unit in ("B", "KB", "MB"): - if size_bytes < 1024: - return f"{size_bytes:.1f} {unit}" if unit != "B" else f"{size_bytes} B" - size_bytes /= 1024 - return f"{size_bytes:.1f} GB" + if size < 1024: + return f"{size:.1f} {unit}" if unit != "B" else f"{int(size)} B" + size /= 1024 + return f"{size:.1f} GB" def _format_token_count(count: int) -> str: @@ -1119,7 +1126,7 @@ def prep(source_filter: str = "auto") -> None: repo_id = default_repo_name(hf_user) # Build contextual next_steps - stage_config = dict(config) + stage_config = cast(DataClawConfig, dict(config)) if source_explicit: stage_config["source"] = resolved_source_choice next_steps, next_command = _build_status_next_steps(stage, stage_config, hf_user, repo_id) diff --git a/dataclaw/config.py b/dataclaw/config.py index 6d0d43c..45add4c 100644 --- a/dataclaw/config.py +++ b/dataclaw/config.py @@ -3,7 +3,7 @@ import json import sys from pathlib import Path -from typing import TypedDict +from typing import TypedDict, cast CONFIG_DIR = Path.home() / ".dataclaw" CONFIG_FILE = CONFIG_DIR / "config.json" @@ -20,6 +20,10 @@ class DataClawConfig(TypedDict, total=False): last_export: dict stage: str | None # "auth" | "configure" | "review" | "confirmed" | "done" projects_confirmed: bool # True once user has addressed folder exclusions + review_attestations: dict + review_verification: dict + last_confirm: dict + publish_attestation: str DEFAULT_CONFIG: 
DataClawConfig = { @@ -35,10 +39,10 @@ def load_config() -> DataClawConfig: try: with open(CONFIG_FILE) as f: stored = json.load(f) - return {**DEFAULT_CONFIG, **stored} + return cast(DataClawConfig, {**DEFAULT_CONFIG, **stored}) except (json.JSONDecodeError, OSError) as e: print(f"Warning: could not read {CONFIG_FILE}: {e}", file=sys.stderr) - return dict(DEFAULT_CONFIG) + return cast(DataClawConfig, dict(DEFAULT_CONFIG)) def save_config(config: DataClawConfig) -> None: diff --git a/dataclaw/parser.py b/dataclaw/parser.py index 7df8207..12f3889 100644 --- a/dataclaw/parser.py +++ b/dataclaw/parser.py @@ -1,9 +1,10 @@ -"""Parse Claude Code and Codex session JSONL files into structured conversations.""" +"""Parse Claude Code, Codex, Gemini CLI, and OpenCode session data into conversations.""" import dataclasses import hashlib import json import logging +import sqlite3 from datetime import datetime, timezone from pathlib import Path from typing import Any @@ -16,6 +17,7 @@ CLAUDE_SOURCE = "claude" CODEX_SOURCE = "codex" GEMINI_SOURCE = "gemini" +OPENCODE_SOURCE = "opencode" CLAUDE_DIR = Path.home() / ".claude" PROJECTS_DIR = CLAUDE_DIR / "projects" @@ -27,8 +29,13 @@ GEMINI_DIR = Path.home() / ".gemini" / "tmp" +OPENCODE_DIR = Path.home() / ".local" / "share" / "opencode" +OPENCODE_DB_PATH = OPENCODE_DIR / "opencode.db" +UNKNOWN_OPENCODE_CWD = "" + _CODEX_PROJECT_INDEX: dict[str, list[Path]] = {} _GEMINI_HASH_MAP: dict[str, str] = {} +_OPENCODE_PROJECT_INDEX: dict[str, list[str]] = {} def _build_gemini_hash_map() -> dict[str, str]: @@ -114,6 +121,7 @@ def discover_projects() -> list[dict]: projects = _discover_claude_projects() projects.extend(_discover_codex_projects()) projects.extend(_discover_gemini_projects()) + projects.extend(_discover_opencode_projects()) return sorted(projects, key=lambda p: (p["display_name"], p["source"])) @@ -190,6 +198,28 @@ def _discover_gemini_projects() -> list[dict]: return projects +def _discover_opencode_projects() -> list[dict]: + index = 
_get_opencode_project_index(refresh=True) + total_sessions = sum(len(session_ids) for session_ids in index.values()) + db_size = OPENCODE_DB_PATH.stat().st_size if OPENCODE_DB_PATH.exists() else 0 + + projects = [] + for cwd, session_ids in sorted(index.items()): + if not session_ids: + continue + estimated_size = int(db_size * (len(session_ids) / total_sessions)) if total_sessions else 0 + projects.append( + { + "dir_name": cwd, + "display_name": _build_opencode_project_name(cwd), + "session_count": len(session_ids), + "total_size_bytes": estimated_size, + "source": OPENCODE_SOURCE, + } + ) + return projects + + def parse_project_sessions( project_dir_name: str, anonymizer: Anonymizer, @@ -210,6 +240,23 @@ def parse_project_sessions( sessions.append(parsed) return sessions + if source == OPENCODE_SOURCE: + index = _get_opencode_project_index() + session_ids = index.get(project_dir_name, []) + sessions = [] + for session_id in session_ids: + parsed = _parse_opencode_session( + session_id, + anonymizer=anonymizer, + include_thinking=include_thinking, + target_cwd=project_dir_name, + ) + if parsed and parsed["messages"]: + parsed["project"] = _build_opencode_project_name(project_dir_name) + parsed["source"] = OPENCODE_SOURCE + sessions.append(parsed) + return sessions + if source == CODEX_SOURCE: index = _get_codex_project_index() session_files = index.get(project_dir_name, []) @@ -249,6 +296,98 @@ def parse_project_sessions( return sessions +def _parse_opencode_session( + session_id: str, + anonymizer: Anonymizer, + include_thinking: bool, + target_cwd: str, +) -> dict | None: + if not OPENCODE_DB_PATH.exists(): + return None + + messages: list[dict[str, Any]] = [] + metadata: dict[str, Any] = { + "session_id": session_id, + "cwd": None, + "git_branch": None, + "model": None, + "start_time": None, + "end_time": None, + } + stats = _make_stats() + + try: + with sqlite3.connect(OPENCODE_DB_PATH) as conn: + conn.row_factory = sqlite3.Row + session_row = conn.execute( + 
"SELECT id, directory, time_created, time_updated FROM session WHERE id = ?", + (session_id,), + ).fetchone() + if session_row is None: + return None + + raw_cwd = session_row["directory"] + if isinstance(raw_cwd, str) and raw_cwd.strip(): + if raw_cwd != target_cwd: + return None + metadata["cwd"] = anonymizer.path(raw_cwd) + elif target_cwd != UNKNOWN_OPENCODE_CWD: + return None + + metadata["start_time"] = _normalize_timestamp(session_row["time_created"]) + metadata["end_time"] = _normalize_timestamp(session_row["time_updated"]) + + message_rows = conn.execute( + "SELECT id, data, time_created FROM message WHERE session_id = ? ORDER BY time_created ASC, id ASC", + (session_id,), + ).fetchall() + + for message_row in message_rows: + message_data = _load_json_field(message_row["data"]) + role = message_data.get("role") + timestamp = _normalize_timestamp(message_row["time_created"]) + + model = _extract_opencode_model(message_data) + if metadata["model"] is None and model: + metadata["model"] = model + + part_rows = conn.execute( + "SELECT data FROM part WHERE message_id = ? 
ORDER BY time_created ASC, id ASC", + (message_row["id"],), + ).fetchall() + parts = [_load_json_field(part_row["data"]) for part_row in part_rows] + + if role == "user": + content = _extract_opencode_user_content(parts, anonymizer) + if content is not None: + messages.append({"role": "user", "content": content, "timestamp": timestamp}) + stats["user_messages"] += 1 + _update_time_bounds(metadata, timestamp) + elif role == "assistant": + msg = _extract_opencode_assistant_content(parts, anonymizer, include_thinking) + if msg: + msg["timestamp"] = timestamp + messages.append(msg) + stats["assistant_messages"] += 1 + stats["tool_uses"] += len(msg.get("tool_uses", [])) + _update_time_bounds(metadata, timestamp) + + tokens = message_data.get("tokens", {}) + if isinstance(tokens, dict): + cache = tokens.get("cache", {}) + cache_read = _safe_int(cache.get("read")) if isinstance(cache, dict) else 0 + cache_write = _safe_int(cache.get("write")) if isinstance(cache, dict) else 0 + stats["input_tokens"] += _safe_int(tokens.get("input")) + cache_read + cache_write + stats["output_tokens"] += _safe_int(tokens.get("output")) + except (sqlite3.Error, OSError): + return None + + if metadata["model"] is None: + metadata["model"] = "opencode-unknown" + + return _make_session_result(metadata, messages, stats) + + def _make_stats() -> dict[str, int]: return { "user_messages": 0, @@ -700,6 +839,92 @@ def _safe_int(value: Any) -> int: return 0 +def _load_json_field(value: Any) -> dict[str, Any]: + if isinstance(value, dict): + return value + if isinstance(value, str): + try: + parsed = json.loads(value) + except json.JSONDecodeError: + return {} + if isinstance(parsed, dict): + return parsed + return {} + + +def _extract_opencode_model(message_data: dict[str, Any]) -> str | None: + model = message_data.get("model") + if not isinstance(model, dict): + return None + provider_id = model.get("providerID") + model_id = model.get("modelID") + if isinstance(provider_id, str) and 
provider_id.strip() and isinstance(model_id, str) and model_id.strip(): + return f"{provider_id}/{model_id}" + if isinstance(model_id, str) and model_id.strip(): + return model_id + return None + + +def _extract_opencode_user_content(parts: list[dict[str, Any]], anonymizer: Anonymizer) -> str | None: + text_parts: list[str] = [] + for part in parts: + if not isinstance(part, dict): + continue + if part.get("type") != "text": + continue + text = part.get("text") + if isinstance(text, str) and text.strip(): + text_parts.append(anonymizer.text(text.strip())) + + if not text_parts: + return None + return "\n\n".join(text_parts) + + +def _extract_opencode_assistant_content( + parts: list[dict[str, Any]], anonymizer: Anonymizer, include_thinking: bool, +) -> dict[str, Any] | None: + text_parts: list[str] = [] + thinking_parts: list[str] = [] + tool_uses: list[dict[str, str | None]] = [] + + for part in parts: + if not isinstance(part, dict): + continue + part_type = part.get("type") + + if part_type == "text": + text = part.get("text") + if isinstance(text, str) and text.strip(): + text_parts.append(anonymizer.text(text.strip())) + elif part_type == "reasoning" and include_thinking: + text = part.get("text") + if isinstance(text, str) and text.strip(): + thinking_parts.append(anonymizer.text(text.strip())) + elif part_type == "tool": + tool_name = part.get("tool") + state = part.get("state", {}) + tool_input = state.get("input", {}) if isinstance(state, dict) else {} + tool_uses.append( + { + "tool": tool_name, + "input": _summarize_tool_input(tool_name, tool_input, anonymizer), + } + ) + + if not text_parts and not thinking_parts and not tool_uses: + return None + + msg: dict[str, Any] = {"role": "assistant"} + if text_parts: + msg["content"] = "\n\n".join(text_parts) + if thinking_parts: + msg["thinking"] = "\n\n".join(thinking_parts) + if tool_uses: + msg["tool_uses"] = tool_uses + return msg + + def _get_codex_project_index(refresh: bool = False) -> dict[str, 
list[Path]]: global _CODEX_PROJECT_INDEX if refresh or not _CODEX_PROJECT_INDEX: @@ -742,6 +967,40 @@ def _build_codex_project_name(cwd: str) -> str: return f"codex:{Path(cwd).name or cwd}" +def _build_opencode_project_name(cwd: str) -> str: + if cwd == UNKNOWN_OPENCODE_CWD: + return "opencode:unknown" + return f"opencode:{Path(cwd).name or cwd}" + + +def _get_opencode_project_index(refresh: bool = False) -> dict[str, list[str]]: + global _OPENCODE_PROJECT_INDEX + if refresh or not _OPENCODE_PROJECT_INDEX: + _OPENCODE_PROJECT_INDEX = _build_opencode_project_index() + return _OPENCODE_PROJECT_INDEX + + +def _build_opencode_project_index() -> dict[str, list[str]]: + if not OPENCODE_DB_PATH.exists(): + return {} + + index: dict[str, list[str]] = {} + try: + with sqlite3.connect(OPENCODE_DB_PATH) as conn: + rows = conn.execute( + "SELECT id, directory FROM session ORDER BY time_updated DESC, id DESC" + ).fetchall() + except sqlite3.Error: + return {} + + for session_id, cwd in rows: + normalized_cwd = cwd if isinstance(cwd, str) and cwd.strip() else UNKNOWN_OPENCODE_CWD + if not isinstance(session_id, str) or not session_id: + continue + index.setdefault(normalized_cwd, []).append(session_id) + return index + + def _process_entry( entry: dict[str, Any], messages: list[dict[str, Any]], @@ -826,7 +1085,7 @@ def _extract_assistant_content( if not text_parts and not tool_uses and not thinking_parts: return None - msg = {"role": "assistant"} + msg: dict[str, Any] = {"role": "assistant"} if text_parts: msg["content"] = "\n\n".join(text_parts) if thinking_parts: diff --git a/tests/test_cli.py b/tests/test_cli.py index 97afca4..adfd118 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -400,7 +400,7 @@ def test_no_projects(self, monkeypatch, capsys): monkeypatch.setattr("dataclaw.cli.discover_projects", lambda: []) list_projects() captured = capsys.readouterr() - assert "No Claude Code, Codex, or Gemini CLI sessions" in captured.out + assert "No Claude Code, Codex, 
Gemini CLI, or OpenCode sessions" in captured.out def test_source_filter_codex(self, monkeypatch, capsys): monkeypatch.setattr( @@ -626,7 +626,7 @@ def test_export_requires_explicit_source_selection(self, monkeypatch, capsys): assert payload["error"] == "Source scope is not confirmed yet." assert payload["blocked_on_step"] == "Step 2/6" assert len(payload["process_steps"]) == 6 - assert payload["allowed_sources"] == ["all", "both", "claude", "codex", "gemini"] + assert payload["allowed_sources"] == ["all", "both", "claude", "codex", "gemini", "opencode"] assert payload["next_command"] == "dataclaw config --source all" def test_configure_next_steps_require_full_folder_presentation(self): diff --git a/tests/test_parser.py b/tests/test_parser.py index 6892435..0f0e506 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -1,6 +1,7 @@ """Tests for dataclaw.parser — JSONL parsing and project discovery.""" import json +import sqlite3 import pytest @@ -392,9 +393,42 @@ def test_blank_lines_skipped(self, tmp_path, mock_anonymizer): class TestDiscoverProjects: def _disable_codex(self, tmp_path, monkeypatch): + monkeypatch.setattr("dataclaw.parser.PROJECTS_DIR", tmp_path / "no-claude-projects") monkeypatch.setattr("dataclaw.parser.CODEX_SESSIONS_DIR", tmp_path / "no-codex-sessions") monkeypatch.setattr("dataclaw.parser.CODEX_ARCHIVED_DIR", tmp_path / "no-codex-archived") monkeypatch.setattr("dataclaw.parser._CODEX_PROJECT_INDEX", {}) + monkeypatch.setattr("dataclaw.parser.GEMINI_DIR", tmp_path / "no-gemini") + monkeypatch.setattr("dataclaw.parser.OPENCODE_DB_PATH", tmp_path / "no-opencode.db") + monkeypatch.setattr("dataclaw.parser._OPENCODE_PROJECT_INDEX", {}) + + def _write_opencode_db(self, db_path): + conn = sqlite3.connect(db_path) + conn.execute( + "CREATE TABLE session (" + "id TEXT PRIMARY KEY, " + "directory TEXT, " + "time_created INTEGER, " + "time_updated INTEGER" + ")" + ) + conn.execute( + "CREATE TABLE message (" + "id TEXT PRIMARY KEY, " + 
"session_id TEXT, " + "time_created INTEGER, " + "data TEXT" + ")" + ) + conn.execute( + "CREATE TABLE part (" + "id TEXT PRIMARY KEY, " + "message_id TEXT, " + "time_created INTEGER, " + "data TEXT" + ")" + ) + conn.commit() + return conn def test_with_projects(self, tmp_path, monkeypatch, mock_anonymizer): self._disable_codex(tmp_path, monkeypatch) @@ -451,6 +485,7 @@ def test_parse_nonexistent_project(self, tmp_path, monkeypatch, mock_anonymizer) assert parse_project_sessions("nope", mock_anonymizer) == [] def test_discover_codex_projects(self, tmp_path, monkeypatch): + self._disable_codex(tmp_path, monkeypatch) projects_dir = tmp_path / "projects" monkeypatch.setattr("dataclaw.parser.PROJECTS_DIR", projects_dir / "nonexistent") @@ -645,6 +680,108 @@ def test_codex_thinking_not_duplicated(self, tmp_path, monkeypatch, mock_anonymi paragraphs = [p.strip() for p in thinking.split("\n\n") if p.strip()] assert paragraphs == ["Planning fix", "Reading code"] + def test_discover_opencode_projects(self, tmp_path, monkeypatch): + self._disable_codex(tmp_path, monkeypatch) + db_path = tmp_path / "opencode.db" + conn = self._write_opencode_db(db_path) + conn.execute( + "INSERT INTO session (id, directory, time_created, time_updated) VALUES (?, ?, ?, ?)", + ("ses_1", "/Users/testuser/work/repo", 1706000000000, 1706000002000), + ) + conn.commit() + conn.close() + + monkeypatch.setattr("dataclaw.parser.OPENCODE_DB_PATH", db_path) + monkeypatch.setattr("dataclaw.parser._OPENCODE_PROJECT_INDEX", {}) + + projects = discover_projects() + assert len(projects) == 1 + assert projects[0]["source"] == "opencode" + assert projects[0]["display_name"] == "opencode:repo" + + def test_parse_opencode_project_sessions(self, tmp_path, monkeypatch, mock_anonymizer): + self._disable_codex(tmp_path, monkeypatch) + db_path = tmp_path / "opencode.db" + conn = self._write_opencode_db(db_path) + + session_id = "ses_1" + cwd = "/Users/testuser/work/repo" + conn.execute( + "INSERT INTO session (id, 
directory, time_created, time_updated) VALUES (?, ?, ?, ?)", + (session_id, cwd, 1706000000000, 1706000005000), + ) + + user_msg_data = { + "role": "user", + "model": {"providerID": "openai", "modelID": "gpt-5.3-codex"}, + } + assistant_msg_data = { + "role": "assistant", + "model": {"providerID": "openai", "modelID": "gpt-5.3-codex"}, + "tokens": { + "input": 120, + "output": 40, + "reasoning": 10, + "cache": {"read": 30, "write": 0}, + }, + } + conn.execute( + "INSERT INTO message (id, session_id, time_created, data) VALUES (?, ?, ?, ?)", + ("msg_1", session_id, 1706000001000, json.dumps(user_msg_data)), + ) + conn.execute( + "INSERT INTO message (id, session_id, time_created, data) VALUES (?, ?, ?, ?)", + ("msg_2", session_id, 1706000002000, json.dumps(assistant_msg_data)), + ) + + conn.execute( + "INSERT INTO part (id, message_id, time_created, data) VALUES (?, ?, ?, ?)", + ("prt_1", "msg_1", 1706000001001, json.dumps({"type": "text", "text": "please list files"})), + ) + conn.execute( + "INSERT INTO part (id, message_id, time_created, data) VALUES (?, ?, ?, ?)", + ("prt_2", "msg_2", 1706000002001, json.dumps({"type": "reasoning", "text": "Thinking..."})), + ) + conn.execute( + "INSERT INTO part (id, message_id, time_created, data) VALUES (?, ?, ?, ?)", + ( + "prt_3", + "msg_2", + 1706000002002, + json.dumps( + { + "type": "tool", + "tool": "bash", + "state": {"status": "completed", "input": {"command": "ls -la"}}, + } + ), + ), + ) + conn.execute( + "INSERT INTO part (id, message_id, time_created, data) VALUES (?, ?, ?, ?)", + ( + "prt_4", + "msg_2", + 1706000002003, + json.dumps({"type": "text", "text": "I checked the directory."}), + ), + ) + conn.commit() + conn.close() + + monkeypatch.setattr("dataclaw.parser.OPENCODE_DB_PATH", db_path) + monkeypatch.setattr("dataclaw.parser._OPENCODE_PROJECT_INDEX", {}) + + sessions = parse_project_sessions(cwd, mock_anonymizer, source="opencode") + assert len(sessions) == 1 + assert sessions[0]["project"] == 
"opencode:repo" + assert sessions[0]["model"] == "openai/gpt-5.3-codex" + assert sessions[0]["stats"]["input_tokens"] == 150 + assert sessions[0]["stats"]["output_tokens"] == 40 + assert sessions[0]["messages"][0]["role"] == "user" + assert sessions[0]["messages"][1]["role"] == "assistant" + assert sessions[0]["messages"][1]["tool_uses"][0]["tool"] == "bash" + # --- Subagent-only session discovery and parsing --- @@ -808,6 +945,9 @@ def _disable_codex(self, tmp_path, monkeypatch): monkeypatch.setattr("dataclaw.parser.CODEX_SESSIONS_DIR", tmp_path / "no-codex-sessions") monkeypatch.setattr("dataclaw.parser.CODEX_ARCHIVED_DIR", tmp_path / "no-codex-archived") monkeypatch.setattr("dataclaw.parser._CODEX_PROJECT_INDEX", {}) + monkeypatch.setattr("dataclaw.parser.GEMINI_DIR", tmp_path / "no-gemini") + monkeypatch.setattr("dataclaw.parser.OPENCODE_DB_PATH", tmp_path / "no-opencode.db") + monkeypatch.setattr("dataclaw.parser._OPENCODE_PROJECT_INDEX", {}) def test_discover_includes_subagent_sessions(self, tmp_path, monkeypatch, mock_anonymizer): self._disable_codex(tmp_path, monkeypatch)