diff --git a/.gitignore b/.gitignore index 1f232b0..f80b86e 100644 --- a/.gitignore +++ b/.gitignore @@ -59,3 +59,6 @@ specs/ # Local development directories plugindocs/ safetypluginclone/.claude/.state/ + +# Local refactor checklist +refactor-plan.md diff --git a/src/scc_cli/adapters/__init__.py b/src/scc_cli/adapters/__init__.py new file mode 100644 index 0000000..a17353b --- /dev/null +++ b/src/scc_cli/adapters/__init__.py @@ -0,0 +1 @@ +"""Concrete adapters for SCC ports.""" diff --git a/src/scc_cli/adapters/claude_agent_runner.py b/src/scc_cli/adapters/claude_agent_runner.py new file mode 100644 index 0000000..7ae6a77 --- /dev/null +++ b/src/scc_cli/adapters/claude_agent_runner.py @@ -0,0 +1,26 @@ +"""Claude Code adapter for AgentRunner port.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from scc_cli.ports.agent_runner import AgentRunner +from scc_cli.ports.models import AgentCommand, AgentSettings + +DEFAULT_SETTINGS_PATH = Path("/home/agent/.claude/settings.json") + + +class ClaudeAgentRunner(AgentRunner): + """AgentRunner implementation for Claude Code.""" + + def build_settings( + self, config: dict[str, Any], *, path: Path = DEFAULT_SETTINGS_PATH + ) -> AgentSettings: + return AgentSettings(content=config, path=path) + + def build_command(self, settings: AgentSettings) -> AgentCommand: + return AgentCommand(argv=["claude"], env={}, workdir=settings.path.parent) + + def describe(self) -> str: + return "Claude Code" diff --git a/src/scc_cli/adapters/docker_sandbox_runtime.py b/src/scc_cli/adapters/docker_sandbox_runtime.py new file mode 100644 index 0000000..fc2f85c --- /dev/null +++ b/src/scc_cli/adapters/docker_sandbox_runtime.py @@ -0,0 +1,76 @@ +"""Docker sandbox runtime adapter for SandboxRuntime port.""" + +from __future__ import annotations + +from scc_cli import docker +from scc_cli.ports.models import SandboxHandle, SandboxSpec, SandboxState, SandboxStatus +from scc_cli.ports.sandbox_runtime import SandboxRuntime + + +def _extract_container_name(cmd: list[str]) -> str | None: + for idx, arg in enumerate(cmd): + if arg == "--name" and idx + 1 < len(cmd): + return cmd[idx + 1] + if arg.startswith("--name="): + return arg.split("=", 1)[1] + if cmd and cmd[-1].startswith("scc-"): + return cmd[-1] + return None + + +class DockerSandboxRuntime(SandboxRuntime): + """SandboxRuntime backed by Docker sandbox CLI.""" + + def ensure_available(self) -> None: + docker.check_docker_available() + + def run(self, spec: SandboxSpec) -> SandboxHandle: + docker.prepare_sandbox_volume_for_credentials() + docker_cmd, _is_resume = docker.get_or_create_container( + workspace=spec.workspace_mount.source, + branch=None, + profile=None, + force_new=spec.force_new, + continue_session=spec.continue_session, + env_vars=spec.env or None, + ) + container_name = _extract_container_name(docker_cmd) + plugin_settings = spec.agent_settings.content if spec.agent_settings else None + docker.run( + docker_cmd, + org_config=spec.org_config, + container_workdir=spec.workdir, + plugin_settings=plugin_settings, + ) + return SandboxHandle( + sandbox_id=container_name or "sandbox", + name=container_name, + ) + + def resume(self, handle: SandboxHandle) -> None: + docker.resume_container(handle.sandbox_id) + + def stop(self, handle: SandboxHandle) -> None: + docker.stop_container(handle.sandbox_id) + + def remove(self, handle: SandboxHandle) -> None: + docker.remove_container(handle.sandbox_id, force=True) + + def list_running(self) -> list[SandboxHandle]: + return [ + SandboxHandle(sandbox_id=container.id, name=container.name) + for container in docker.list_running_sandboxes() + ] + + def status(self, handle: SandboxHandle) -> SandboxStatus: + status = docker.get_container_status(handle.sandbox_id) + if not status: + return SandboxStatus(state=SandboxState.UNKNOWN) + normalized = status.lower() + if "up" in normalized or "running" in normalized: + state = SandboxState.RUNNING + elif "exited" in normalized or "stopped" in normalized: + state = SandboxState.STOPPED + else: + state = SandboxState.UNKNOWN + return SandboxStatus(state=state) diff --git a/src/scc_cli/adapters/local_filesystem.py b/src/scc_cli/adapters/local_filesystem.py new file mode 100644 index 0000000..cf91e41 --- /dev/null +++ b/src/scc_cli/adapters/local_filesystem.py @@ -0,0 +1,55 @@ +"""Local filesystem adapter for Filesystem port.""" + +from __future__ import annotations + +import os +import tempfile +from pathlib import Path + +from scc_cli.ports.filesystem import Filesystem + + +class LocalFilesystem(Filesystem): + """Filesystem adapter using the host OS.""" + + def read_text(self, path: Path, *, encoding: str = "utf-8") -> str: + return path.read_text(encoding=encoding) + + def write_text(self, path: Path, content: str, *, encoding: str = "utf-8") -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(content, encoding=encoding) + + def write_text_atomic(self, path: Path, content: str, *, encoding: str = "utf-8") -> None: + path.parent.mkdir(parents=True, exist_ok=True) + temp_path = self._write_temp_file(path, content, encoding=encoding) + try: + temp_path.replace(path) + except Exception: + temp_path.unlink(missing_ok=True) + raise + + def exists(self, path: Path) -> bool: + return path.exists() + + def mkdir(self, path: Path, *, parents: bool = False, exist_ok: bool = False) -> None: + path.mkdir(parents=parents, exist_ok=exist_ok) + + def unlink(self, path: Path, *, missing_ok: bool = False) -> None: + path.unlink(missing_ok=missing_ok) + + def iterdir(self, path: Path) -> list[Path]: + return list(path.iterdir()) + + def _write_temp_file(self, path: Path, content: str, *, encoding: str) -> Path: + with tempfile.NamedTemporaryFile( + mode="w", + encoding=encoding, + delete=False, + dir=path.parent, + prefix=f".{path.name}.", + suffix=".tmp", + ) as temp_file: + temp_file.write(content) + temp_file.flush() + os.fsync(temp_file.fileno()) + return Path(temp_file.name) diff --git a/src/scc_cli/adapters/local_git_client.py b/src/scc_cli/adapters/local_git_client.py new file mode 100644 index 0000000..cb2db0e --- /dev/null +++ b/src/scc_cli/adapters/local_git_client.py @@ -0,0 +1,37 @@ +"""Local git adapter for GitClient port.""" + +from __future__ import annotations + +from pathlib import Path + +from scc_cli.ports.git_client import GitClient +from scc_cli.services.git import branch as git_branch +from scc_cli.services.git import core as git_core + + +class LocalGitClient(GitClient): + """Git client adapter backed by local git CLI.""" + + def check_available(self) -> None: + git_core.check_git_available() + + def check_installed(self) -> bool: + return git_core.check_git_installed() + + def get_version(self) -> str | None: + return git_core.get_git_version() + + def is_git_repo(self, path: Path) -> bool: + return git_core.is_git_repo(path) + + def init_repo(self, path: Path) -> bool: + return git_core.init_repo(path) + + def create_empty_initial_commit(self, path: Path) -> tuple[bool, str | None]: + return git_core.create_empty_initial_commit(path) + + def detect_workspace_root(self, start_dir: Path) -> tuple[Path | None, Path]: + return git_core.detect_workspace_root(start_dir) + + def get_current_branch(self, path: Path) -> str | None: + return git_branch.get_current_branch(path) diff --git a/src/scc_cli/adapters/requests_fetcher.py b/src/scc_cli/adapters/requests_fetcher.py new file mode 100644 index 0000000..0e6b94e --- /dev/null +++ b/src/scc_cli/adapters/requests_fetcher.py @@ -0,0 +1,27 @@ +"""Requests adapter for RemoteFetcher port.""" + +from __future__ import annotations + +import requests + +from scc_cli.ports.remote_fetcher import RemoteFetcher, RemoteResponse + + +class RequestsFetcher(RemoteFetcher): + """RemoteFetcher implementation using requests.""" + + def get( + self, + url: str, + *, + headers: dict[str, str] | None = None, + timeout: float | None = None, + ) -> RemoteResponse: + response = requests.get(url, headers=headers, timeout=timeout) + normalized_headers = {key: str(value) for key, value in response.headers.items()} + return RemoteResponse( + status_code=response.status_code, + text=response.text, + content=response.content, + headers=normalized_headers, + ) diff --git a/src/scc_cli/adapters/system_clock.py b/src/scc_cli/adapters/system_clock.py new file mode 100644 index 0000000..fc5df2a --- /dev/null +++ b/src/scc_cli/adapters/system_clock.py @@ -0,0 +1,14 @@ +"""System clock adapter for Clock port.""" + +from __future__ import annotations + +from datetime import datetime, timezone + +from scc_cli.ports.clock import Clock + + +class SystemClock(Clock): + """Clock implementation using system time.""" + + def now(self) -> datetime: + return datetime.now(timezone.utc) diff --git a/src/scc_cli/application/__init__.py b/src/scc_cli/application/__init__.py new file mode 100644 index 0000000..86943df --- /dev/null +++ b/src/scc_cli/application/__init__.py @@ -0,0 +1 @@ +"""Application use cases and orchestration.""" diff --git a/src/scc_cli/application/compute_effective_config.py b/src/scc_cli/application/compute_effective_config.py new file mode 100644 index 0000000..8d35ded --- /dev/null +++ b/src/scc_cli/application/compute_effective_config.py @@ -0,0 +1,645 @@ +"""Compute effective configuration for profiles and projects.""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from fnmatch import fnmatch +from pathlib import Path +from typing import TYPE_CHECKING, Any +from urllib.parse import urlparse + +from scc_cli import config as config_module + +if TYPE_CHECKING: + pass + + +@dataclass +class ConfigDecision: + """Tracks where a config value came from (for scc config explain).""" + + field: str + value: Any + reason: str + source: str # "org.security" | "org.defaults" | "team.X" | "project" + + +@dataclass +class BlockedItem: + """Tracks an item blocked by security pattern.""" + + item: str + blocked_by: str # The pattern that matched + source: str # Always "org.security" + target_type: str = "plugin" # "plugin" | "mcp_server" + + +@dataclass +class DelegationDenied: + """Tracks an addition denied due to delegation rules.""" + + item: str + requested_by: str # "team" | "project" + reason: str + target_type: str = "plugin" # "plugin" | "mcp_server" + + +@dataclass +class MCPServer: + """Represents an MCP server configuration. + + Supports three transport types: + - sse: Server-Sent Events (requires url) + - stdio: Standard I/O (requires command, optional args and env) + - http: HTTP transport (requires url, optional headers) + """ + + name: str + type: str # "sse" | "stdio" | "http" + url: str | None = None + command: str | None = None + args: list[str] | None = None + env: dict[str, str] | None = None + headers: dict[str, str] | None = None + + +@dataclass +class SessionConfig: + """Session configuration.""" + + timeout_hours: int | None = None + auto_resume: bool | None = None + + +@dataclass +class EffectiveConfig: + """The computed effective configuration after 3-layer merge. + + Contains: + - Final resolved values (plugins, mcp_servers, etc.) + - Tracking information for debugging (decisions, blocked_items, denied_additions) + """ + + plugins: set[str] = field(default_factory=set) + mcp_servers: list[MCPServer] = field(default_factory=list) + network_policy: str | None = None + session_config: SessionConfig = field(default_factory=SessionConfig) + + decisions: list[ConfigDecision] = field(default_factory=list) + blocked_items: list[BlockedItem] = field(default_factory=list) + denied_additions: list[DelegationDenied] = field(default_factory=list) + + +@dataclass +class StdioValidationResult: + """Result of validating a stdio MCP server configuration. + + stdio servers are the "sharpest knife" - they have elevated privileges: + - Mounted workspace (write access) + - Network access (required for some tools) + - Tokens in environment variables + + This validation implements layered defense: + - Gate 1: Feature gate (org must explicitly enable) + - Gate 2: Absolute path required (prevents ./evil injection) + - Gate 3: Prefix allowlist + commonpath (prevents path traversal) + - Warnings for host-side checks (command runs in container, not host) + """ + + blocked: bool + reason: str = "" + warnings: list[str] = field(default_factory=list) + + +def matches_blocked(item: str, blocked_patterns: list[str]) -> str | None: + """Check whether item matches any blocked pattern using fnmatch. + + Use casefold() for case-insensitive matching. This is important because: + - casefold() handles Unicode edge cases (e.g., German ss -> ss) + - Pattern "Malicious-*" should block "malicious-tool" + + Args: + item: The item to check (plugin name, MCP server name/URL, etc.) + blocked_patterns: List of fnmatch patterns + + Returns: + The pattern that matched, or None if no match + """ + normalized_item = item.strip().casefold() + + for pattern in blocked_patterns: + normalized_pattern = pattern.strip().casefold() + if fnmatch(normalized_item, normalized_pattern): + return pattern + return None + + +def is_allowed(item: str, allowed_patterns: list[str] | None) -> bool: + """Check whether item is allowed by an optional allowlist.""" + if allowed_patterns is None: + return True + if not allowed_patterns: + return False + return matches_blocked(item, allowed_patterns) is not None + + +def mcp_candidates(server: dict[str, Any]) -> list[str]: + """Collect candidate strings for MCP allow/block matching.""" + candidates: list[str] = [] + name = server.get("name", "") + if name: + candidates.append(name) + url = server.get("url", "") + if url: + candidates.append(url) + domain = _extract_domain(url) + if domain: + candidates.append(domain) + command = server.get("command", "") + if command: + candidates.append(command) + return candidates + + +def is_mcp_allowed(server: dict[str, Any], allowed_patterns: list[str] | None) -> bool: + """Check whether MCP server is allowed by patterns.""" + if allowed_patterns is None: + return True + if not allowed_patterns: + return False + for candidate in mcp_candidates(server): + if matches_blocked(candidate, allowed_patterns): + return True + return False + + +def validate_stdio_server( + server: dict[str, Any], + org_config: dict[str, Any], +) -> StdioValidationResult: + """Validate a stdio MCP server configuration against org security policy. + + stdio servers are the "sharpest knife" - they have elevated privileges: + - Mounted workspace (write access) + - Network access (required for some tools) + - Tokens in environment variables + + Validation gates (in order): + 1. Feature gate: security.allow_stdio_mcp must be true (default: false) + 2. Absolute path: command must be an absolute path (not relative) + 3. Prefix allowlist: if allowed_stdio_prefixes is set, command must be under one + + Host-side checks (existence, executable) generate warnings only because + the command runs inside the container, not on the host. + + Args: + server: MCP server dict with 'name', 'type', 'command' fields + org_config: Organization config dict + + Returns: + StdioValidationResult with blocked=True/False, reason, and warnings + """ + import os + + command = server.get("command", "") + warnings: list[str] = [] + security = org_config.get("security", {}) + + if not security.get("allow_stdio_mcp", False): + return StdioValidationResult( + blocked=True, + reason="stdio MCP disabled by org policy", + ) + + if not os.path.isabs(command): + return StdioValidationResult( + blocked=True, + reason="stdio command must be absolute path", + ) + + prefixes = security.get("allowed_stdio_prefixes", []) + if prefixes: + try: + resolved = os.path.realpath(command) + except OSError: + resolved = command + + normalized_prefixes = [] + for prefix in prefixes: + try: + normalized_prefixes.append(os.path.realpath(prefix.rstrip("/"))) + except OSError: + normalized_prefixes.append(prefix.rstrip("/")) + + allowed = False + for prefix in normalized_prefixes: + try: + common = os.path.commonpath([resolved, prefix]) + if common == prefix: + allowed = True + break + except ValueError: + continue + + if not allowed: + return StdioValidationResult( + blocked=True, + reason=f"Resolved path {resolved} not in allowed prefixes", + ) + + if not os.path.exists(command): + warnings.append(f"Command not found on host: {command}") + elif not os.access(command, os.X_OK): + warnings.append(f"Command not executable on host: {command}") + + return StdioValidationResult( + blocked=False, + warnings=warnings, + ) + + +def _extract_domain(url: str) -> str: + """Extract domain from URL for pattern matching.""" + parsed = urlparse(url) + return parsed.netloc or url + + +def is_team_delegated_for_plugins(org_config: dict[str, Any], team_name: str | None) -> bool: + """Check whether team is allowed to add additional plugins.""" + if not team_name: + return False + + delegation = org_config.get("delegation", {}) + teams_delegation = delegation.get("teams", {}) + allowed_patterns = teams_delegation.get("allow_additional_plugins", []) + + return matches_blocked(team_name, allowed_patterns) is not None + + +def is_team_delegated_for_mcp(org_config: dict[str, Any], team_name: str | None) -> bool: + """Check whether team is allowed to add MCP servers.""" + if not team_name: + return False + + delegation = org_config.get("delegation", {}) + teams_delegation = delegation.get("teams", {}) + allowed_patterns = teams_delegation.get("allow_additional_mcp_servers", []) + + return matches_blocked(team_name, allowed_patterns) is not None + + +def is_project_delegated(org_config: dict[str, Any], team_name: str | None) -> tuple[bool, str]: + """Check whether project-level additions are allowed.""" + if not team_name: + return (False, "No team specified") + + delegation = org_config.get("delegation", {}) + projects_delegation = delegation.get("projects", {}) + org_allows = projects_delegation.get("inherit_team_delegation", False) + + if not org_allows: + return (False, "Org disabled project delegation (inherit_team_delegation: false)") + + profiles = org_config.get("profiles", {}) + team_config = profiles.get(team_name, {}) + team_delegation = team_config.get("delegation", {}) + team_allows = team_delegation.get("allow_project_overrides", False) + + if not team_allows: + return ( + False, + f"Team '{team_name}' disabled project overrides (allow_project_overrides: false)", + ) + + return (True, "") + + +def compute_effective_config( + org_config: dict[str, Any], + team_name: str | None, + project_config: dict[str, Any] | None = None, + workspace_path: str | Path | None = None, +) -> EffectiveConfig: + """Compute effective configuration by merging org defaults → team → project.""" + if workspace_path is not None: + project_config = config_module.read_project_config(workspace_path) + + result = EffectiveConfig() + + security = org_config.get("security", {}) + blocked_plugins = security.get("blocked_plugins", []) + blocked_mcp_servers = security.get("blocked_mcp_servers", []) + + defaults = org_config.get("defaults", {}) + default_plugins = defaults.get("enabled_plugins", []) + disabled_plugins = defaults.get("disabled_plugins", []) + allowed_plugins = defaults.get("allowed_plugins") + allowed_mcp_servers = defaults.get("allowed_mcp_servers") + default_network_policy = defaults.get("network_policy") + default_session = defaults.get("session", {}) + + for plugin in default_plugins: + blocked_by = matches_blocked(plugin, blocked_plugins) + if blocked_by: + result.blocked_items.append( + BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") + ) + continue + + if matches_blocked(plugin, disabled_plugins): + continue + + result.plugins.add(plugin) + result.decisions.append( + ConfigDecision( + field="plugins", + value=plugin, + reason="Included in organization defaults", + source="org.defaults", + ) + ) + + if default_network_policy: + result.network_policy = default_network_policy + result.decisions.append( + ConfigDecision( + field="network_policy", + value=default_network_policy, + reason="Organization default network policy", + source="org.defaults", + ) + ) + + if default_session.get("timeout_hours") is not None: + result.session_config.timeout_hours = default_session["timeout_hours"] + result.decisions.append( + ConfigDecision( + field="session.timeout_hours", + value=default_session["timeout_hours"], + reason="Organization default session timeout", + source="org.defaults", + ) + ) + if default_session.get("auto_resume") is not None: + result.session_config.auto_resume = default_session["auto_resume"] + + profiles = org_config.get("profiles", {}) + team_config = profiles.get(team_name, {}) + + team_plugins = team_config.get("additional_plugins", []) + team_delegated_plugins = is_team_delegated_for_plugins(org_config, team_name) + + for plugin in team_plugins: + blocked_by = matches_blocked(plugin, blocked_plugins) + if blocked_by: + result.blocked_items.append( + BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") + ) + continue + + if not team_delegated_plugins: + result.denied_additions.append( + DelegationDenied( + item=plugin, + requested_by="team", + reason=f"Team '{team_name}' not allowed to add plugins", + ) + ) + continue + + if not is_allowed(plugin, allowed_plugins): + result.denied_additions.append( + DelegationDenied( + item=plugin, + requested_by="team", + reason="Plugin not allowed by defaults.allowed_plugins", + ) + ) + continue + + result.plugins.add(plugin) + result.decisions.append( + ConfigDecision( + field="plugins", + value=plugin, + reason=f"Added by team profile '{team_name}'", + source=f"team.{team_name}", + ) + ) + + team_mcp_servers = team_config.get("additional_mcp_servers", []) + team_delegated_mcp = is_team_delegated_for_mcp(org_config, team_name) + + for server_dict in team_mcp_servers: + server_name = server_dict.get("name", "") + server_url = server_dict.get("url", "") + + blocked_by = matches_blocked(server_name, blocked_mcp_servers) + if not blocked_by and server_url: + domain = _extract_domain(server_url) + blocked_by = matches_blocked(domain, blocked_mcp_servers) + + if blocked_by: + result.blocked_items.append( + BlockedItem( + item=server_name or server_url, + blocked_by=blocked_by, + source="org.security", + target_type="mcp_server", + ) + ) + continue + + if not team_delegated_mcp: + result.denied_additions.append( + DelegationDenied( + item=server_name, + requested_by="team", + reason=f"Team '{team_name}' not allowed to add MCP servers", + target_type="mcp_server", + ) + ) + continue + + if not is_mcp_allowed(server_dict, allowed_mcp_servers): + result.denied_additions.append( + DelegationDenied( + item=server_name or server_url, + requested_by="team", + reason="MCP server not allowed by defaults.allowed_mcp_servers", + target_type="mcp_server", + ) + ) + continue + + if server_dict.get("type") == "stdio": + stdio_result = validate_stdio_server(server_dict, org_config) + if stdio_result.blocked: + result.blocked_items.append( + BlockedItem( + item=server_name, + blocked_by=stdio_result.reason, + source="org.security", + target_type="mcp_server", + ) + ) + continue + + mcp_server = MCPServer( + name=server_name, + type=server_dict.get("type", "sse"), + url=server_url or None, + command=server_dict.get("command"), + args=server_dict.get("args"), + ) + result.mcp_servers.append(mcp_server) + result.decisions.append( + ConfigDecision( + field="mcp_servers", + value=server_name, + reason=f"Added by team profile '{team_name}'", + source=f"team.{team_name}", + ) + ) + + team_session = team_config.get("session", {}) + if team_session.get("timeout_hours") is not None: + result.session_config.timeout_hours = team_session["timeout_hours"] + result.decisions.append( + ConfigDecision( + field="session.timeout_hours", + value=team_session["timeout_hours"], + reason=f"Overridden by team profile '{team_name}'", + source=f"team.{team_name}", + ) + ) + + if project_config: + project_delegated, delegation_reason = is_project_delegated(org_config, team_name) + + project_plugins = project_config.get("additional_plugins", []) + for plugin in project_plugins: + blocked_by = matches_blocked(plugin, blocked_plugins) + if blocked_by: + result.blocked_items.append( + BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") + ) + continue + + if not project_delegated: + result.denied_additions.append( + DelegationDenied( + item=plugin, + requested_by="project", + reason=delegation_reason, + ) + ) + continue + + if not is_allowed(plugin, allowed_plugins): + result.denied_additions.append( + DelegationDenied( + item=plugin, + requested_by="project", + reason="Plugin not allowed by defaults.allowed_plugins", + ) + ) + continue + + result.plugins.add(plugin) + result.decisions.append( + ConfigDecision( + field="plugins", + value=plugin, + reason="Added by project config", + source="project", + ) + ) + + project_mcp_servers = project_config.get("additional_mcp_servers", []) + for server_dict in project_mcp_servers: + server_name = server_dict.get("name", "") + server_url = server_dict.get("url", "") + + blocked_by = matches_blocked(server_name, blocked_mcp_servers) + if not blocked_by and server_url: + domain = _extract_domain(server_url) + blocked_by = matches_blocked(domain, blocked_mcp_servers) + + if blocked_by: + result.blocked_items.append( + BlockedItem( + item=server_name or server_url, + blocked_by=blocked_by, + source="org.security", + target_type="mcp_server", + ) + ) + continue + + if not project_delegated: + result.denied_additions.append( + DelegationDenied( + item=server_name, + requested_by="project", + reason=delegation_reason, + target_type="mcp_server", + ) + ) + continue + + if not is_mcp_allowed(server_dict, allowed_mcp_servers): + result.denied_additions.append( + DelegationDenied( + item=server_name or server_url, + requested_by="project", + reason="MCP server not allowed by defaults.allowed_mcp_servers", + target_type="mcp_server", + ) + ) + continue + + if server_dict.get("type") == "stdio": + stdio_result = validate_stdio_server(server_dict, org_config) + if stdio_result.blocked: + result.blocked_items.append( + BlockedItem( + item=server_name, + blocked_by=stdio_result.reason, + source="org.security", + target_type="mcp_server", + ) + ) + continue + + mcp_server = MCPServer( + name=server_name, + type=server_dict.get("type", "sse"), + url=server_url or None, + command=server_dict.get("command"), + args=server_dict.get("args"), + ) + result.mcp_servers.append(mcp_server) + result.decisions.append( + ConfigDecision( + field="mcp_servers", + value=server_name, + reason="Added by project config", + source="project", + ) + ) + + project_session = project_config.get("session", {}) + if project_session.get("timeout_hours") is not None: + if project_delegated: + result.session_config.timeout_hours = project_session["timeout_hours"] + result.decisions.append( + ConfigDecision( + field="session.timeout_hours", + value=project_session["timeout_hours"], + reason="Overridden by project config", + source="project", + ) + ) + + return result diff --git a/src/scc_cli/application/dashboard.py b/src/scc_cli/application/dashboard.py new file mode 100644 index 0000000..f3ba55e --- /dev/null +++ b/src/scc_cli/application/dashboard.py @@ -0,0 +1,1065 @@ +"""Dashboard view models and flow orchestration.""" + +from __future__ import annotations + +from collections.abc import Callable, Mapping, Sequence +from dataclasses import dataclass, replace +from datetime import datetime +from enum import Enum, auto +from typing import Any, TypeAlias + +from scc_cli.docker.core import ContainerInfo +from scc_cli.services.git.worktree import WorktreeInfo + + +class DashboardTab(Enum): + """Available dashboard tabs.""" + + STATUS = auto() + CONTAINERS = auto() + SESSIONS = auto() + WORKTREES = auto() + + @property + def display_name(self) -> str: + """Human-readable name for display in chrome.""" + names = { + DashboardTab.STATUS: "Status", + DashboardTab.CONTAINERS: "Containers", + DashboardTab.SESSIONS: "Sessions", + DashboardTab.WORKTREES: "Worktrees", + } + return names[self] + + +TAB_ORDER: tuple[DashboardTab, ...] = ( + DashboardTab.STATUS, + DashboardTab.CONTAINERS, + DashboardTab.SESSIONS, + DashboardTab.WORKTREES, +) + + +class StatusAction(Enum): + """Supported actions for status tab items.""" + + START_SESSION = auto() + RESUME_SESSION = auto() + SWITCH_TEAM = auto() + OPEN_TAB = auto() + INSTALL_STATUSLINE = auto() + OPEN_PROFILE = auto() + OPEN_SETTINGS = auto() + + +class PlaceholderKind(Enum): + """Placeholder rows for empty or error states.""" + + NO_CONTAINERS = auto() + NO_SESSIONS = auto() + NO_WORKTREES = auto() + NO_GIT = auto() + ERROR = auto() + CONFIG_ERROR = auto() + + +@dataclass(frozen=True) +class StatusItem: + """Status tab row with optional action metadata.""" + + label: str + description: str + action: StatusAction | None = None + action_tab: DashboardTab | None = None + session: dict[str, Any] | None = None + + +@dataclass(frozen=True) +class PlaceholderItem: + """Placeholder row for empty/error states.""" + + label: str + description: str + kind: PlaceholderKind + startable: bool = False + + +@dataclass(frozen=True) +class ContainerItem: + """Container row backed by Docker metadata.""" + + label: str + description: str + container: ContainerInfo + + +@dataclass(frozen=True) +class SessionItem: + """Session row backed by session metadata.""" + + label: str + description: str + session: dict[str, Any] + + +@dataclass(frozen=True) +class WorktreeItem: + """Worktree row backed by git worktree data.""" + + label: str + description: str + path: str + + +DashboardItem: TypeAlias = StatusItem | PlaceholderItem | ContainerItem | SessionItem | WorktreeItem + + +@dataclass(frozen=True) +class DashboardTabData: + """View model for a single dashboard tab.""" + + tab: DashboardTab + title: str + items: Sequence[DashboardItem] + count_active: int + count_total: int + + @property + def subtitle(self) -> str: + """Generate subtitle from counts.""" + if self.count_active == self.count_total: + return f"{self.count_total} total" + return f"{self.count_active} active, {self.count_total} total" + + +@dataclass(frozen=True) +class DashboardViewModel: + """View model for a full dashboard render.""" + + active_tab: DashboardTab + tabs: Mapping[DashboardTab, DashboardTabData] + status_message: str | None + verbose_worktrees: bool + + +@dataclass(frozen=True) +class DashboardFlowState: + """Flow state preserved between dashboard runs.""" + + restore_tab: DashboardTab | None = None + toast_message: str | None = None + verbose_worktrees: bool = False + + +class StartFlowDecision(Enum): + """Decision outcomes from the start flow.""" + + LAUNCHED = auto() + CANCELLED = auto() + QUIT = auto() + + +@dataclass(frozen=True) +class StartFlowResult: + """Result from executing the start flow.""" + + decision: StartFlowDecision + + @classmethod + def from_legacy(cls, result: bool | None) -> StartFlowResult: + """Convert legacy bool/None start result into a structured outcome.""" + if result is None: + return cls(decision=StartFlowDecision.QUIT) + if result is True: + return cls(decision=StartFlowDecision.LAUNCHED) + return cls(decision=StartFlowDecision.CANCELLED) + + +@dataclass(frozen=True) +class TeamSwitchEvent: + """Event for switching teams.""" + + +@dataclass(frozen=True) +class StartFlowEvent: + """Event for starting a new session flow.""" + + return_to: DashboardTab + reason: str + + +@dataclass(frozen=True) +class RefreshEvent: + """Event for refreshing dashboard data.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class SessionResumeEvent: + """Event for resuming a session.""" + + return_to: DashboardTab + session: dict[str, Any] + + +@dataclass(frozen=True) +class StatuslineInstallEvent: + """Event for installing statusline.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class RecentWorkspacesEvent: + """Event for picking a recent workspace.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class GitInitEvent: + """Event for initializing git.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class CreateWorktreeEvent: + """Event for creating a worktree or cloning.""" + + return_to: DashboardTab + is_git_repo: bool + + +@dataclass(frozen=True) +class VerboseToggleEvent: + """Event for toggling verbose worktree status.""" + + return_to: DashboardTab + verbose: bool + + +@dataclass(frozen=True) +class SettingsEvent: + """Event for opening settings.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class ContainerStopEvent: + """Event for stopping a container.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class ContainerResumeEvent: + """Event for resuming a container.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class ContainerRemoveEvent: + """Event for removing a container.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class ProfileMenuEvent: + """Event for opening the profile menu.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class SandboxImportEvent: + """Event for importing sandbox plugins.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class ContainerActionMenuEvent: + """Event for the container action menu.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class SessionActionMenuEvent: + """Event for the session action menu.""" + + return_to: DashboardTab + session: dict[str, Any] + + +@dataclass(frozen=True) +class WorktreeActionMenuEvent: + """Event for the worktree action menu.""" + + return_to: DashboardTab + worktree_path: str + + +DashboardEvent: TypeAlias = ( + TeamSwitchEvent + | StartFlowEvent + | RefreshEvent + | SessionResumeEvent + | StatuslineInstallEvent + | RecentWorkspacesEvent + | GitInitEvent + | CreateWorktreeEvent + | VerboseToggleEvent + | SettingsEvent + | ContainerStopEvent + | ContainerResumeEvent + | ContainerRemoveEvent + | ProfileMenuEvent + | SandboxImportEvent + | ContainerActionMenuEvent + | SessionActionMenuEvent + | WorktreeActionMenuEvent +) + +DashboardEffect: TypeAlias = ( + TeamSwitchEvent + | StartFlowEvent + | SessionResumeEvent + | StatuslineInstallEvent + | RecentWorkspacesEvent + | GitInitEvent + | CreateWorktreeEvent + | SettingsEvent + | ContainerStopEvent + | ContainerResumeEvent + | ContainerRemoveEvent + | ProfileMenuEvent + | SandboxImportEvent + | ContainerActionMenuEvent + | SessionActionMenuEvent + | WorktreeActionMenuEvent +) + + +@dataclass(frozen=True) +class DashboardEffectRequest: + """Effect request emitted from a dashboard event.""" + + state: DashboardFlowState + effect: DashboardEffect + + +@dataclass(frozen=True) +class DashboardFlowOutcome: + """Outcome after handling an event or effect.""" + + state: DashboardFlowState + exit_dashboard: bool = False + + +DashboardNextStep: TypeAlias = DashboardEffectRequest | DashboardFlowOutcome + +DashboardDataLoader: TypeAlias = Callable[[bool], Mapping[DashboardTab, DashboardTabData]] + + +def placeholder_tip(kind: PlaceholderKind) -> str: + """Return contextual help for placeholder rows.""" + tips = { + PlaceholderKind.NO_CONTAINERS: "No containers running. Press n to start or run `scc start `.", + PlaceholderKind.NO_SESSIONS: "No sessions yet. Press n to create your first session.", + PlaceholderKind.NO_WORKTREES: "No worktrees yet. Press c to create, w for recent, v for status.", + PlaceholderKind.NO_GIT: "Not a git repository. Press i to init or c to clone.", + PlaceholderKind.ERROR: "Unable to load data. Run `scc doctor` to diagnose.", + PlaceholderKind.CONFIG_ERROR: "Configuration issue detected. Run `scc doctor` to fix it.", + } + return tips.get(kind, "No details available for this item.") + + +def placeholder_start_reason(item: PlaceholderItem) -> str: + """Return start flow reason for a startable placeholder.""" + mapping = { + PlaceholderKind.NO_CONTAINERS: "no_containers", + PlaceholderKind.NO_SESSIONS: "no_sessions", + } + return mapping.get(item.kind, "unknown") + + +def build_dashboard_view( + state: DashboardFlowState, + loader: DashboardDataLoader, +) -> tuple[DashboardViewModel, DashboardFlowState]: + """Build the dashboard view and clear one-time state.""" + tabs = loader(state.verbose_worktrees) + active_tab = state.restore_tab or DashboardTab.STATUS + if active_tab not in tabs: + active_tab = DashboardTab.STATUS + view = DashboardViewModel( + active_tab=active_tab, + tabs=tabs, + status_message=state.toast_message, + verbose_worktrees=state.verbose_worktrees, + ) + next_state = replace(state, restore_tab=None, toast_message=None) + return view, next_state + + +def handle_dashboard_event(state: DashboardFlowState, event: DashboardEvent) -> DashboardNextStep: + """Translate a dashboard event into an effect or state update.""" + if isinstance(event, TeamSwitchEvent): + return DashboardEffectRequest(state=state, effect=event) + + if isinstance(event, StartFlowEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, RefreshEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardFlowOutcome(state=next_state) + + if isinstance(event, SessionResumeEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, StatuslineInstallEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, RecentWorkspacesEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, GitInitEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, CreateWorktreeEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, VerboseToggleEvent): + message = "Status on" if event.verbose else "Status off" + next_state = replace( + state, + restore_tab=event.return_to, + verbose_worktrees=event.verbose, + toast_message=message, + ) + return DashboardFlowOutcome(state=next_state) + + if isinstance(event, SettingsEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, ContainerStopEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, ContainerResumeEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, ContainerRemoveEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, ProfileMenuEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, SandboxImportEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, ContainerActionMenuEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, SessionActionMenuEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + if isinstance(event, WorktreeActionMenuEvent): + next_state = replace(state, restore_tab=event.return_to) + return DashboardEffectRequest(state=next_state, effect=event) + + msg = f"Unsupported event: {event}" + raise ValueError(msg) + + +def apply_dashboard_effect_result( + state: DashboardFlowState, + effect: DashboardEffect, + result: object, +) -> DashboardFlowOutcome: + """Apply effect results to dashboard state.""" + if isinstance(effect, TeamSwitchEvent): + return DashboardFlowOutcome(state=state) + + if isinstance(effect, StartFlowEvent): + if not isinstance(result, StartFlowResult): + msg = "Start flow effect requires StartFlowResult" + raise TypeError(msg) + if result.decision is StartFlowDecision.QUIT: + return DashboardFlowOutcome(state=state, exit_dashboard=True) + if result.decision is StartFlowDecision.LAUNCHED: + return DashboardFlowOutcome(state=state, exit_dashboard=True) + next_state = replace(state, toast_message="Start cancelled") + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, SessionResumeEvent): + if not isinstance(result, bool): + msg = "Session resume effect requires bool result" + raise TypeError(msg) + if result: + return DashboardFlowOutcome(state=state, exit_dashboard=True) + next_state = replace(state, toast_message="Session resume failed") + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, StatuslineInstallEvent): + if not isinstance(result, bool): + msg = "Statusline install effect requires bool result" + raise TypeError(msg) + message = ( + "Statusline installed successfully" if result else "Statusline installation failed" + ) + next_state = replace(state, toast_message=message) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, RecentWorkspacesEvent): + if not isinstance(result, (str, type(None))): + msg = "Recent workspaces effect requires str or None" + raise TypeError(msg) + if result is None: + message = "Cancelled" + else: + message = f"Selected: {result}" + next_state = replace(state, toast_message=message) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, GitInitEvent): + if not isinstance(result, bool): + msg = "Git init effect requires bool result" + raise TypeError(msg) + message = "Git repository initialized" if result else "Git init cancelled or failed" + next_state = replace(state, toast_message=message) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, CreateWorktreeEvent): + if not isinstance(result, bool): + msg = "Create worktree effect requires bool result" + raise TypeError(msg) + if effect.is_git_repo: + message = "Worktree created" if result else "Worktree creation cancelled" + else: + message = "Repository cloned" if result else "Clone cancelled" + next_state = replace(state, toast_message=message) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, SettingsEvent): + if not isinstance(result, (str, type(None))): + msg = "Settings effect requires str or None" + raise TypeError(msg) + next_state = replace(state, toast_message=result) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, ContainerStopEvent): + return _apply_container_message(state, result, "Container stopped", "Stop failed") + + if isinstance(effect, ContainerResumeEvent): + return _apply_container_message(state, result, "Container resumed", "Resume failed") + + if isinstance(effect, ContainerRemoveEvent): + return _apply_container_message(state, result, "Container removed", "Remove failed") + + if isinstance(effect, ProfileMenuEvent): + if not isinstance(result, (str, type(None))): + msg = "Profile menu effect requires str or None" + raise TypeError(msg) + next_state = replace(state, toast_message=result) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, SandboxImportEvent): + if not isinstance(result, (str, type(None))): + msg = "Sandbox import effect requires str or None" + raise TypeError(msg) + next_state = replace(state, toast_message=result) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, ContainerActionMenuEvent): + if not isinstance(result, (str, type(None))): + msg = "Container action menu effect requires str or None" + raise TypeError(msg) + next_state = replace(state, toast_message=result) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, SessionActionMenuEvent): + if not isinstance(result, (str, type(None))): + msg = "Session action menu effect requires str or None" + raise TypeError(msg) + next_state = replace(state, toast_message=result) + return DashboardFlowOutcome(state=next_state) + + if isinstance(effect, WorktreeActionMenuEvent): + if not isinstance(result, (str, type(None))): + msg = "Worktree action menu effect requires str or None" + raise TypeError(msg) + next_state = replace(state, toast_message=result) + return DashboardFlowOutcome(state=next_state) + + msg = f"Unsupported effect: {effect}" + raise ValueError(msg) + + +def _apply_container_message( + state: DashboardFlowState, + result: object, + success_message: str, + failure_message: str, +) -> DashboardFlowOutcome: + if not isinstance(result, tuple) or len(result) != 2: + msg = "Container effect requires tuple[bool, str | None]" + raise TypeError(msg) + success, message = result + if not isinstance(success, bool): + msg = "Container effect success flag must be bool" + raise TypeError(msg) + if message is not None and not isinstance(message, str): + msg = "Container effect message must be str or None" + raise TypeError(msg) + fallback = success_message if success else failure_message + next_state = replace(state, toast_message=message or fallback) + return DashboardFlowOutcome(state=next_state) + + +def load_status_tab_data(refresh_at: datetime | None = None) -> DashboardTabData: + """Load Status tab data showing quick actions and context.""" + import os + from pathlib import Path + + from scc_cli import config, sessions + from scc_cli.core.personal_profiles import get_profile_status + from scc_cli.docker import core as docker_core + + _ = refresh_at + + items: list[DashboardItem] = [] + + items.append( + StatusItem( + label="New session", + description="", + action=StatusAction.START_SESSION, + ) + ) + + try: + recent_session = sessions.get_most_recent() + if recent_session: + workspace = recent_session.get("workspace", "") + workspace_name = workspace.split("/")[-1] if workspace else "unknown" + last_used = recent_session.get("last_used") + last_used_display = "" + if last_used: + try: + dt = datetime.fromisoformat(last_used) + last_used_display = sessions.format_relative_time(dt) + except ValueError: + last_used_display = last_used + desc_parts = [workspace_name] + if recent_session.get("branch"): + desc_parts.append(str(recent_session.get("branch"))) + if last_used_display: + desc_parts.append(last_used_display) + items.append( + StatusItem( + label="Resume last", + description=" · ".join(desc_parts), + action=StatusAction.RESUME_SESSION, + session=recent_session, + ) + ) + except Exception: + pass + + try: + user_config = config.load_user_config() + team = user_config.get("selected_profile") + org_source = user_config.get("organization_source") + + if team: + items.append( + StatusItem( + label=f"Team: {team}", + description="", + action=StatusAction.SWITCH_TEAM, + ) + ) + else: + items.append( + StatusItem( + label="Team: none", + description="", + action=StatusAction.SWITCH_TEAM, + ) + ) + + try: + workspace = Path(os.getcwd()) + profile_status = get_profile_status(workspace) + + if profile_status.exists: + if profile_status.import_count > 0: + profile_label = f"Profile: saved · ↓ {profile_status.import_count} importable" + elif profile_status.has_drift: + profile_label = "Profile: saved · ◇ drifted" + else: + profile_label = "Profile: saved · ✓ synced" + items.append( + StatusItem( + label=profile_label, + description="", + action=StatusAction.OPEN_PROFILE, + ) + ) + else: + items.append( + StatusItem( + label="Profile: none", + description="", + action=StatusAction.OPEN_PROFILE, + ) + ) + except Exception: + pass + + if org_source and isinstance(org_source, dict): + org_url = org_source.get("url", "") + if org_url: + org_name = None + try: + org_config = config.load_cached_org_config() + if org_config: + org_name = org_config.get("organization", {}).get("name") + except Exception: + org_name = None + + if not org_name: + org_name = org_url.replace("https://", "").replace("http://", "").split("/")[0] + + items.append( + StatusItem( + label=f"Organization: {org_name}", + description="", + ) + ) + elif user_config.get("standalone"): + items.append( + StatusItem( + label="Mode: standalone", + description="", + ) + ) + + except Exception: + items.append( + StatusItem( + label="Config: error", + description="", + ) + ) + + try: + containers = docker_core.list_scc_containers() + running = sum(1 for container in containers if "Up" in container.status) + total = len(containers) + items.append( + StatusItem( + label=f"Containers: {running}/{total} running", + description="", + action=StatusAction.OPEN_TAB, + action_tab=DashboardTab.CONTAINERS, + ) + ) + except Exception: + pass + + items.append( + StatusItem( + label="Settings", + description="", + action=StatusAction.OPEN_SETTINGS, + ) + ) + + return DashboardTabData( + tab=DashboardTab.STATUS, + title="Status", + items=items, + count_active=len(items), + count_total=len(items), + ) + + +def load_containers_tab_data() -> DashboardTabData: + """Load Containers tab data showing SCC-managed containers.""" + from scc_cli.docker import core as docker_core + + items: list[DashboardItem] = [] + + try: + containers = docker_core.list_scc_containers() + running_count = 0 + + for container in containers: + is_running = "Up" in container.status if container.status else False + if is_running: + running_count += 1 + label = container.name + description = _format_container_description(container) + items.append(ContainerItem(label=label, description=description, container=container)) + + if not items: + items.append( + PlaceholderItem( + label="No containers", + description="Press 'n' to start or run `scc start `", + kind=PlaceholderKind.NO_CONTAINERS, + startable=True, + ) + ) + + return DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=items, + count_active=running_count, + count_total=len(containers), + ) + + except Exception: + return DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=[ + PlaceholderItem( + label="Error", + description="Unable to query Docker", + kind=PlaceholderKind.ERROR, + ) + ], + count_active=0, + count_total=0, + ) + + +def load_sessions_tab_data() -> DashboardTabData: + """Load Sessions tab data showing recent Claude sessions.""" + from scc_cli import sessions + + items: list[DashboardItem] = [] + + try: + recent = sessions.list_recent(limit=20) + + for session in recent: + name = session.get("name", "Unnamed") + desc_parts = [] + + if session.get("team"): + desc_parts.append(str(session["team"])) + if session.get("branch"): + desc_parts.append(str(session["branch"])) + if session.get("last_used"): + desc_parts.append(str(session["last_used"])) + + items.append( + SessionItem( + label=name, + description=" · ".join(desc_parts), + session=session, + ) + ) + + if not items: + items.append( + PlaceholderItem( + label="No sessions", + description="Press Enter to start", + kind=PlaceholderKind.NO_SESSIONS, + startable=True, + ) + ) + + return DashboardTabData( + tab=DashboardTab.SESSIONS, + title="Sessions", + items=items, + count_active=len(recent), + count_total=len(recent), + ) + + except Exception: + return DashboardTabData( + tab=DashboardTab.SESSIONS, + title="Sessions", + items=[ + PlaceholderItem( + label="Error", + description="Unable to load sessions", + kind=PlaceholderKind.ERROR, + ) + ], + count_active=0, + count_total=0, + ) + + +def load_worktrees_tab_data(verbose: bool = False) -> DashboardTabData: + """Load Worktrees tab data showing git worktrees.""" + import os + from pathlib import Path + + from scc_cli.services.git.worktree import get_worktree_status, get_worktrees_data + + items: list[DashboardItem] = [] + + try: + cwd = Path(os.getcwd()) + worktrees = get_worktrees_data(cwd) + current_path = os.path.realpath(cwd) + + for worktree in worktrees: + if os.path.realpath(worktree.path) == current_path: + worktree.is_current = True + + if verbose: + staged, modified, untracked, timed_out = get_worktree_status(worktree.path) + worktree.staged_count = staged + worktree.modified_count = modified + worktree.untracked_count = untracked + worktree.status_timed_out = timed_out + worktree.has_changes = (staged + modified + untracked) > 0 + + current_count = sum(1 for worktree in worktrees if worktree.is_current) + + for worktree in worktrees: + description = _format_worktree_description(worktree, verbose=verbose) + items.append( + WorktreeItem( + label=Path(worktree.path).name, + description=description, + path=worktree.path, + ) + ) + + if not items: + items.append( + PlaceholderItem( + label="No worktrees", + description="Press w for recent · i to init · c to clone", + kind=PlaceholderKind.NO_WORKTREES, + ) + ) + + return DashboardTabData( + tab=DashboardTab.WORKTREES, + title="Worktrees", + items=items, + count_active=current_count, + count_total=len(worktrees), + ) + + except Exception: + return DashboardTabData( + tab=DashboardTab.WORKTREES, + title="Worktrees", + items=[ + PlaceholderItem( + label="Not available", + description="Press w for recent · i to init · c to clone", + kind=PlaceholderKind.NO_GIT, + ) + ], + count_active=0, + count_total=0, + ) + + +def load_all_tab_data(verbose_worktrees: bool = False) -> Mapping[DashboardTab, DashboardTabData]: + """Load data for all dashboard tabs.""" + return { + DashboardTab.STATUS: load_status_tab_data(), + DashboardTab.CONTAINERS: load_containers_tab_data(), + DashboardTab.SESSIONS: load_sessions_tab_data(), + DashboardTab.WORKTREES: load_worktrees_tab_data(verbose=verbose_worktrees), + } + + +def _format_container_description(container: ContainerInfo) -> str: + desc_parts: list[str] = [] + + if container.workspace: + workspace_name = container.workspace.split("/")[-1] + desc_parts.append(workspace_name) + + if container.status: + time_str = _extract_container_time(container.status) + if container.status.startswith("Up"): + desc_parts.append(f"● {time_str}") + else: + desc_parts.append("○ stopped") + + return " · ".join(desc_parts) + + +def _extract_container_time(status: str) -> str: + import re + + match = re.search(r"Up\s+(.+)", status) + if match: + return match.group(1) + return status + + +def _format_worktree_description(worktree: WorktreeInfo, *, verbose: bool) -> str: + from scc_cli import git + + desc_parts: list[str] = [] + if worktree.branch: + desc_parts.append(git.get_display_branch(worktree.branch)) + + if verbose: + if worktree.status_timed_out: + desc_parts.append("status timeout") + else: + status_parts = [] + if worktree.staged_count > 0: + status_parts.append(f"+{worktree.staged_count}") + if worktree.modified_count > 0: + status_parts.append(f"!{worktree.modified_count}") + if worktree.untracked_count > 0: + status_parts.append(f"?{worktree.untracked_count}") + if status_parts: + desc_parts.append(" ".join(status_parts)) + elif not worktree.has_changes: + desc_parts.append("clean") + elif worktree.has_changes: + desc_parts.append("modified") + + if worktree.is_current: + desc_parts.append("(current)") + + return " ".join(desc_parts) diff --git a/src/scc_cli/application/profiles.py b/src/scc_cli/application/profiles.py new file mode 100644 index 0000000..4d213c1 --- /dev/null +++ b/src/scc_cli/application/profiles.py @@ -0,0 +1,250 @@ +""" +Profile resolution and marketplace URL logic. + +Renamed from teams.py to better reflect profile resolution responsibilities. +Supports multi-marketplace architecture with org/team/project inheritance. + +Key features: +- HTTPS-only enforcement: All marketplace URLs must use HTTPS protocol. +- Config inheritance: 3-layer merge (org defaults -> team -> project) +- Security boundaries: Blocked items (fnmatch patterns) never allowed +- Delegation control: Org controls whether teams can delegate to projects +""" + +from __future__ import annotations + +from typing import Any, cast +from urllib.parse import urlparse, urlunparse + +# ═══════════════════════════════════════════════════════════════════════════════ +# Core Profile Resolution Functions (New Architecture) +# ═══════════════════════════════════════════════════════════════════════════════ + + +def list_profiles(org_config: dict[str, Any]) -> list[dict[str, Any]]: + """ + List all available profiles from org config. + + Return list of profile dicts with name, description, plugin, and marketplace. + """ + profiles = org_config.get("profiles", {}) + result = [] + + for name, info in profiles.items(): + result.append( + { + "name": name, + "description": info.get("description", ""), + "plugin": info.get("plugin"), + "marketplace": info.get("marketplace"), + } + ) + + return result + + +def resolve_profile(org_config: dict[str, Any], profile_name: str) -> dict[str, Any]: + """ + Resolve profile by name, raise ValueError if not found. + + Return profile dict with name and all profile fields. + """ + profiles = org_config.get("profiles", {}) + + if profile_name not in profiles: + available = ", ".join(sorted(profiles.keys())) or "(none)" + raise ValueError(f"Profile '{profile_name}' not found. Available: {available}") + + profile_info = profiles[profile_name] + return {"name": profile_name, **profile_info} + + +def resolve_marketplace(org_config: dict[Any, Any], profile: dict[Any, Any]) -> dict[Any, Any]: + """ + Resolve marketplace for a profile and translate to claude_adapter format. + + This is the SINGLE translation layer between org-config schema and + claude_adapter expected format. All schema changes should be handled here. + + Schema Translation: + org-config (source/owner/repo) → claude_adapter (type/repo combined) + + Args: + org_config: Organization config with marketplaces dict + profile: Profile dict with a "marketplace" field + + Returns: + Marketplace dict normalized for claude_adapter: + - name: marketplace name (from dict key) + - type: "github" | "gitlab" | "https" + - repo: combined "owner/repo" for github + - url: for git/url sources + - ref: translated from "branch" + + Raises: + ValueError: If marketplace not found, invalid source, or missing fields + """ + marketplace_name = profile.get("marketplace") + if not marketplace_name: + raise ValueError(f"Profile '{profile.get('name')}' has no marketplace field") + + # Dict-based lookup + marketplaces: dict[str, dict[Any, Any]] = org_config.get("marketplaces", {}) + marketplace_config = marketplaces.get(marketplace_name) + + if not marketplace_config: + raise ValueError( + f"Marketplace '{marketplace_name}' not found for profile '{profile.get('name')}'" + ) + + # Validate and translate source type + source = marketplace_config.get("source", "") + valid_sources = {"github", "git", "url"} + if source not in valid_sources: + raise ValueError( + f"Marketplace '{marketplace_name}' has invalid source '{source}'. " + f"Valid sources: {', '.join(sorted(valid_sources))}" + ) + + result: dict[str, Any] = {"name": marketplace_name} + + if source == "github": + # GitHub: requires owner + repo, combine into single repo field + owner = marketplace_config.get("owner", "") + repo = marketplace_config.get("repo", "") + if not owner or not repo: + raise ValueError( + f"GitHub marketplace '{marketplace_name}' requires 'owner' and 'repo' fields" + ) + result["type"] = "github" + result["repo"] = f"{owner}/{repo}" + + elif source == "git": + # Generic git: maps to gitlab type + # Supports two patterns: + # 1. Direct URL: {"source": "git", "url": "https://..."} + # 2. Host + owner + repo: {"source": "git", "host": "gitlab.example.org", "owner": "group", "repo": "name"} + url = marketplace_config.get("url", "") + host = marketplace_config.get("host", "") + owner = marketplace_config.get("owner", "") + repo = marketplace_config.get("repo", "") + + result["type"] = "gitlab" + + if url: + # Pattern 1: Direct URL provided + result["url"] = url + elif host and owner and repo: + # Pattern 2: Construct from host/owner/repo + result["host"] = host + result["repo"] = f"{owner}/{repo}" + else: + raise ValueError( + f"Git marketplace '{marketplace_name}' requires either 'url' field " + f"or 'host', 'owner', 'repo' fields" + ) + + elif source == "url": + # HTTPS URL: requires url + url = marketplace_config.get("url", "") + if not url: + raise ValueError(f"URL marketplace '{marketplace_name}' requires 'url' field") + result["type"] = "https" + result["url"] = url + + # Translate branch -> ref (optional) + if marketplace_config.get("branch"): + result["ref"] = marketplace_config["branch"] + + # Preserve optional fields + for field_name in ("host", "auth", "headers", "path"): + if marketplace_config.get(field_name): + result[field_name] = marketplace_config[field_name] + + return result + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Marketplace URL Resolution (HTTPS-only enforcement) +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _normalize_repo_path(repo: str) -> str: + """ + Normalize repo path: strip whitespace, leading slashes, .git suffix. + """ + repo = repo.strip().lstrip("/") + if repo.endswith(".git"): + repo = repo[:-4] + return repo + + +def get_marketplace_url(marketplace: dict[str, Any]) -> str: + """ + Resolve marketplace to HTTPS URL. + + SECURITY: Rejects SSH URLs (git@, ssh://) and HTTP URLs. + Only HTTPS is allowed for marketplace access. + + URL Resolution Logic: + 1. If 'url' is provided, validate and normalize it + 2. Otherwise, construct from 'host' + 'repo' + 3. For github/gitlab types, use default hosts if not specified + + Args: + marketplace: Marketplace config dict with type, url/host, repo + + Returns: + Normalized HTTPS URL string + + Raises: + ValueError: For SSH URLs, HTTP URLs, unsupported schemes, or missing config + """ + # Check for direct URL first + if raw := marketplace.get("url"): + raw = raw.strip() + + # Reject SSH URLs early (git@ format) + if raw.startswith("git@"): + raise ValueError(f"SSH URL not supported: {raw}") + + # Reject ssh:// protocol + if raw.startswith("ssh://"): + raise ValueError(f"SSH URL not supported: {raw}") + + parsed = urlparse(raw) + + # HTTPS only - reject http:// for security + if parsed.scheme == "http": + raise ValueError(f"HTTP not allowed (use HTTPS): {raw}") + + if parsed.scheme != "https": + raise ValueError(f"Unsupported URL scheme: {parsed.scheme!r}") + + # Normalize: remove trailing slash, drop fragments + normalized_path = parsed.path.rstrip("/") + normalized = parsed._replace(path=normalized_path, fragment="") + return cast(str, urlunparse(normalized)) + + # No URL provided - construct from host + repo + host = (marketplace.get("host") or "").strip() + + if not host: + # Use default hosts for known types + defaults = {"github": "github.com", "gitlab": "gitlab.com"} + host = defaults.get(marketplace.get("type") or "") + + if not host: + raise ValueError( + f"Marketplace type '{marketplace.get('type')}' requires 'url' or 'host'" + ) + + # Reject host with path components (ambiguous config) + if "/" in host: + raise ValueError(f"'host' must not include path: {host!r}") + + # Get and normalize repo path + repo = marketplace.get("repo", "") + repo = _normalize_repo_path(repo) + + return f"https://{host}/{repo}" diff --git a/src/scc_cli/application/settings/__init__.py b/src/scc_cli/application/settings/__init__.py new file mode 100644 index 0000000..8aaca56 --- /dev/null +++ b/src/scc_cli/application/settings/__init__.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from .models import ( + ConfirmationKind, + DoctorInfo, + PathsInfo, + ProfileDiffInfo, + ProfileSyncMode, + ProfileSyncPathPayload, + ProfileSyncPayload, + ProfileSyncPreview, + ProfileSyncResult, + SettingsAction, + SettingsActionResult, + SettingsActionStatus, + SettingsCategory, + SettingsChangeRequest, + SettingsHeader, + SettingsValidationRequest, + SettingsValidationResult, + SettingsViewModel, + SupportBundleInfo, + SupportBundlePayload, + VersionInfo, +) +from .use_cases import ( + SettingsContext, + apply_settings_change, + load_settings_state, + validate_settings, +) + +__all__ = [ + "ConfirmationKind", + "DoctorInfo", + "PathsInfo", + "ProfileDiffInfo", + "ProfileSyncMode", + "ProfileSyncPayload", + "ProfileSyncPathPayload", + "ProfileSyncPreview", + "ProfileSyncResult", + "SettingsAction", + "SettingsActionResult", + "SettingsActionStatus", + "SettingsCategory", + "SettingsChangeRequest", + "SettingsContext", + "SettingsHeader", + "SettingsValidationRequest", + "SettingsValidationResult", + "SettingsViewModel", + "SupportBundleInfo", + "SupportBundlePayload", + "VersionInfo", + "apply_settings_change", + "load_settings_state", + "validate_settings", +] diff --git a/src/scc_cli/application/settings/models.py b/src/scc_cli/application/settings/models.py new file mode 100644 index 0000000..30e9b84 --- /dev/null +++ b/src/scc_cli/application/settings/models.py @@ -0,0 +1,212 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from enum import Enum, auto +from pathlib import Path +from typing import TypeAlias + +from scc_cli.core.personal_profiles import StructuredDiff +from scc_cli.doctor.types import DoctorResult +from scc_cli.maintenance import MaintenancePreview, PathInfo, RiskTier + + +class SettingsCategory(Enum): + """Categories for the settings screen.""" + + MAINTENANCE = auto() + PROFILES = auto() + DIAGNOSTICS = auto() + ABOUT = auto() + + +@dataclass(frozen=True) +class SettingsAction: + """Represents a settings action with its metadata.""" + + id: str + label: str + description: str + risk_tier: RiskTier + category: SettingsCategory + + +@dataclass(frozen=True) +class SettingsHeader: + """Header metadata for the settings screen.""" + + profile_name: str + org_name: str | None + + +@dataclass(frozen=True) +class SettingsViewModel: + """View model for settings screen rendering.""" + + header: SettingsHeader + categories: list[SettingsCategory] + actions_by_category: dict[SettingsCategory, list[SettingsAction]] + sync_repo_path: str + + +class ProfileSyncMode(Enum): + """Profile sync operation modes.""" + + CHANGE_PATH = "change_path" + EXPORT = "export" + IMPORT = "import" + FULL_SYNC = "full_sync" + + +@dataclass(frozen=True) +class ProfileSyncPayload: + """Input for profile sync operations.""" + + mode: ProfileSyncMode + repo_path: Path + create_dir: bool = False + + +@dataclass(frozen=True) +class ProfileSyncPathPayload: + """Input for updating profile sync paths.""" + + new_path: str + + +@dataclass(frozen=True) +class SupportBundlePayload: + """Input for generating support bundles.""" + + output_path: Path + redact_paths: bool = True + + +SettingsActionPayload: TypeAlias = ( + ProfileSyncPayload | ProfileSyncPathPayload | SupportBundlePayload +) + + +@dataclass(frozen=True) +class SettingsValidationRequest: + """Input for settings validation.""" + + action_id: str + workspace: Path + payload: SettingsActionPayload | None = None + + +class ConfirmationKind(Enum): + """Confirmation modes for settings actions.""" + + CONFIRM = auto() + TYPE_TO_CONFIRM = auto() + + +@dataclass(frozen=True) +class ProfileSyncPreview: + """Preview of a profile sync import.""" + + repo_path: Path + imported: int + skipped: int + warnings: list[str] = field(default_factory=list) + + +SettingsValidationDetail: TypeAlias = MaintenancePreview | ProfileSyncPreview + + +@dataclass(frozen=True) +class SettingsValidationResult: + """Validation data for settings actions.""" + + action: SettingsAction + confirmation: ConfirmationKind | None + detail: SettingsValidationDetail | None = None + message: str | None = None + required_phrase: str | None = None + error: str | None = None + + +@dataclass(frozen=True) +class SettingsChangeRequest: + """Input for applying settings actions.""" + + action_id: str + workspace: Path + payload: SettingsActionPayload | None = None + confirmed: bool = False + + +class SettingsActionStatus(Enum): + """Result status for settings actions.""" + + SUCCESS = auto() + NOOP = auto() + ERROR = auto() + + +@dataclass(frozen=True) +class PathsInfo: + """Rendered path information for settings.""" + + paths: list[PathInfo] + total_size: int + + +@dataclass(frozen=True) +class VersionInfo: + """Version information for the CLI.""" + + version: str + + +@dataclass(frozen=True) +class ProfileDiffInfo: + """Structured diff details for profile comparisons.""" + + diff: StructuredDiff + + +@dataclass(frozen=True) +class SupportBundleInfo: + """Details about a generated support bundle.""" + + output_path: Path + + +@dataclass(frozen=True) +class DoctorInfo: + """Doctor result payload.""" + + result: DoctorResult + + +@dataclass(frozen=True) +class ProfileSyncResult: + """Outcome details for profile sync operations.""" + + mode: ProfileSyncMode + repo_path: Path + imported: int = 0 + exported: int = 0 + skipped: int = 0 + profile_ids: list[str] = field(default_factory=list) + warnings: list[str] = field(default_factory=list) + + +SettingsActionDetail: TypeAlias = ( + PathsInfo | VersionInfo | ProfileDiffInfo | SupportBundleInfo | DoctorInfo | ProfileSyncResult +) + + +@dataclass(frozen=True) +class SettingsActionResult: + """Outcome of running a settings action.""" + + status: SettingsActionStatus + message: str | None = None + detail: SettingsActionDetail | None = None + details: list[str] = field(default_factory=list) + warnings: list[str] = field(default_factory=list) + needs_ack: bool = False + error: str | None = None diff --git a/src/scc_cli/application/settings/use_cases.py b/src/scc_cli/application/settings/use_cases.py new file mode 100644 index 0000000..880352e --- /dev/null +++ b/src/scc_cli/application/settings/use_cases.py @@ -0,0 +1,703 @@ +from __future__ import annotations + +from dataclasses import dataclass +from importlib.metadata import PackageNotFoundError +from importlib.metadata import version as get_version +from pathlib import Path + +from scc_cli import config +from scc_cli.core.personal_profiles import ( + compute_fingerprints, + compute_structured_diff, + export_profiles_to_repo, + import_profiles_from_repo, + list_personal_profiles, + load_personal_profile, + load_workspace_mcp, + load_workspace_settings, + merge_personal_mcp, + merge_personal_settings, + save_applied_state, + save_personal_profile, + write_workspace_mcp, + write_workspace_settings, +) +from scc_cli.doctor.core import run_doctor +from scc_cli.maintenance import ( + MaintenancePreview, + ResetResult, + RiskTier, + clear_cache, + clear_contexts, + delete_all_sessions, + factory_reset, + get_paths, + get_total_size, + preview_operation, + prune_containers, + prune_sessions, + reset_config, + reset_exceptions, +) +from scc_cli.support_bundle import create_bundle + +from .models import ( + ConfirmationKind, + DoctorInfo, + PathsInfo, + ProfileDiffInfo, + ProfileSyncMode, + ProfileSyncPathPayload, + ProfileSyncPayload, + ProfileSyncPreview, + ProfileSyncResult, + SettingsAction, + SettingsActionResult, + SettingsActionStatus, + SettingsCategory, + SettingsChangeRequest, + SettingsHeader, + SettingsValidationRequest, + SettingsValidationResult, + SettingsViewModel, + SupportBundleInfo, + SupportBundlePayload, + VersionInfo, +) + + +@dataclass(frozen=True) +class SettingsContext: + """Context for settings use cases.""" + + workspace: Path + + +SETTINGS_ACTIONS: list[SettingsAction] = [ + SettingsAction( + id="clear_cache", + label="Clear cache", + description="Remove regenerable cache files", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="clear_contexts", + label="Clear contexts", + description="Clear recent work contexts", + risk_tier=RiskTier.CHANGES_STATE, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="prune_containers", + label="Prune containers", + description="Remove stopped Docker containers", + risk_tier=RiskTier.CHANGES_STATE, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="prune_sessions", + label="Prune sessions", + description="Remove old sessions (keeps recent)", + risk_tier=RiskTier.CHANGES_STATE, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="reset_exceptions", + label="Reset exceptions", + description="Clear all policy exceptions", + risk_tier=RiskTier.DESTRUCTIVE, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="delete_sessions", + label="Delete all sessions", + description="Remove entire session history", + risk_tier=RiskTier.DESTRUCTIVE, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="reset_config", + label="Reset configuration", + description="Reset to defaults (requires setup)", + risk_tier=RiskTier.DESTRUCTIVE, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="factory_reset", + label="Factory reset", + description="Remove all SCC data", + risk_tier=RiskTier.FACTORY_RESET, + category=SettingsCategory.MAINTENANCE, + ), + SettingsAction( + id="profile_save", + label="Save profile", + description="Capture current workspace settings", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.PROFILES, + ), + SettingsAction( + id="profile_apply", + label="Apply profile", + description="Restore saved settings to workspace", + risk_tier=RiskTier.CHANGES_STATE, + category=SettingsCategory.PROFILES, + ), + SettingsAction( + id="profile_diff", + label="Show diff", + description="Compare profile vs workspace", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.PROFILES, + ), + SettingsAction( + id="profile_sync", + label="Sync profiles", + description="Export/import via repo", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.PROFILES, + ), + SettingsAction( + id="run_doctor", + label="Run doctor", + description="Check prerequisites and system health", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.DIAGNOSTICS, + ), + SettingsAction( + id="generate_support_bundle", + label="Generate support bundle", + description="Create diagnostic bundle for troubleshooting", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.DIAGNOSTICS, + ), + SettingsAction( + id="show_paths", + label="Show paths", + description="Show SCC file locations", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.ABOUT, + ), + SettingsAction( + id="show_version", + label="Show version", + description="Show build info and CLI version", + risk_tier=RiskTier.SAFE, + category=SettingsCategory.ABOUT, + ), +] + + +def load_settings_state(context: SettingsContext) -> SettingsViewModel: + """Load settings state for UI rendering.""" + + header = SettingsHeader( + profile_name=config.get_selected_profile() or "standalone", + org_name=_load_org_name(), + ) + return SettingsViewModel( + header=header, + categories=list(SettingsCategory), + actions_by_category=_group_actions_by_category(SETTINGS_ACTIONS), + sync_repo_path=_load_sync_repo_path(), + ) + + +def validate_settings(request: SettingsValidationRequest) -> SettingsValidationResult | None: + """Validate a settings action and return confirmation details.""" + + action = _get_action(request.action_id) + if action is None: + raise ValueError(f"Unknown settings action: {request.action_id}") + + if action.id == "profile_sync" and isinstance(request.payload, ProfileSyncPayload): + return _validate_profile_sync(action, request.payload) + + if action.risk_tier in (RiskTier.CHANGES_STATE, RiskTier.DESTRUCTIVE): + preview = _safe_preview(action.id) + return SettingsValidationResult( + action=action, + confirmation=ConfirmationKind.CONFIRM, + detail=preview, + message=f"{action.label}: {action.description}", + ) + + if action.risk_tier == RiskTier.FACTORY_RESET: + preview = _safe_preview(action.id) + return SettingsValidationResult( + action=action, + confirmation=ConfirmationKind.TYPE_TO_CONFIRM, + detail=preview, + message="Type RESET to confirm", + required_phrase="RESET", + ) + + return None + + +def apply_settings_change(request: SettingsChangeRequest) -> SettingsActionResult: + """Apply a settings action and return the result.""" + + action = _get_action(request.action_id) + if action is None: + raise ValueError(f"Unknown settings action: {request.action_id}") + + try: + if action.id == "clear_cache": + result = clear_cache() + message = f"Cache cleared: {result.bytes_freed_human}" + return _result_from_reset(result, message) + + if action.id == "clear_contexts": + result = clear_contexts() + message = f"Cleared {result.removed_count} contexts" + return _result_from_reset(result, message) + + if action.id == "prune_containers": + result = prune_containers(dry_run=False) + message = f"Pruned {result.removed_count} containers" + return _result_from_reset(result, message) + + if action.id == "prune_sessions": + result = prune_sessions(older_than_days=30, keep_n=20, dry_run=False) + message = f"Pruned {result.removed_count} sessions" + return _result_from_reset(result, message) + + if action.id == "reset_exceptions": + result = reset_exceptions(scope="all") + message = f"Reset {result.removed_count} exceptions" + return _result_from_reset(result, message) + + if action.id == "delete_sessions": + result = delete_all_sessions() + message = f"Deleted {result.removed_count} sessions" + return _result_from_reset(result, message) + + if action.id == "reset_config": + result = reset_config() + message = "Configuration reset. Run 'scc setup' to reconfigure." + return _result_from_reset(result, message) + + if action.id == "factory_reset": + results = factory_reset() + failed = [r for r in results if not r.success] + if failed: + error = failed[0].message + return SettingsActionResult( + status=SettingsActionStatus.ERROR, + message=error, + error=error, + ) + message = "Factory reset complete. Run 'scc setup' to reconfigure." + return SettingsActionResult(status=SettingsActionStatus.SUCCESS, message=message) + + if action.id == "profile_save": + return _apply_profile_save(request.workspace) + + if action.id == "profile_apply": + return _apply_profile_apply(request.workspace) + + if action.id == "profile_diff": + return _apply_profile_diff(request.workspace) + + if action.id == "profile_sync": + return _apply_profile_sync(request) + + if action.id == "run_doctor": + doctor_result = run_doctor(request.workspace) + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + detail=DoctorInfo(result=doctor_result), + needs_ack=True, + ) + + if action.id == "generate_support_bundle": + return _apply_support_bundle(request) + + if action.id == "show_paths": + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + detail=PathsInfo(paths=get_paths(), total_size=get_total_size()), + needs_ack=True, + ) + + if action.id == "show_version": + version = _load_version() + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + detail=VersionInfo(version=version), + needs_ack=True, + ) + + return SettingsActionResult( + status=SettingsActionStatus.NOOP, + message=None, + ) + except Exception as exc: # pragma: no cover - defensive for unexpected failures + return SettingsActionResult( + status=SettingsActionStatus.ERROR, + message=str(exc), + error=str(exc), + ) + + +def _group_actions_by_category( + actions: list[SettingsAction], +) -> dict[SettingsCategory, list[SettingsAction]]: + grouped: dict[SettingsCategory, list[SettingsAction]] = { + category: [] for category in SettingsCategory + } + for action in actions: + grouped[action.category].append(action) + return grouped + + +def _get_action(action_id: str) -> SettingsAction | None: + for action in SETTINGS_ACTIONS: + if action.id == action_id: + return action + return None + + +def _load_org_name() -> str | None: + org_config = config.load_cached_org_config() + if not org_config: + return None + org_data = org_config.get("organization", {}) if isinstance(org_config, dict) else {} + name = org_data.get("name") or org_data.get("id") + return str(name) if name is not None else None + + +def _load_sync_repo_path() -> str: + try: + cfg = config.load_user_config() + except Exception: + cfg = {} + last_repo = cfg.get("sync", {}).get("last_repo") if isinstance(cfg, dict) else None + if last_repo: + return str(last_repo) + return "~/dotfiles/scc-profiles" + + +def _save_sync_repo_path(path: str) -> None: + try: + cfg = config.load_user_config() + except Exception: + cfg = {} + if not isinstance(cfg, dict): + cfg = {} + cfg.setdefault("sync", {}) + if isinstance(cfg["sync"], dict): + cfg["sync"]["last_repo"] = path + config.save_user_config(cfg) + + +def _safe_preview(action_id: str) -> MaintenancePreview | None: + try: + return preview_operation(action_id) + except Exception: + return None + + +def _validate_profile_sync( + action: SettingsAction, + payload: ProfileSyncPayload, +) -> SettingsValidationResult | None: + if payload.mode == ProfileSyncMode.EXPORT: + if not payload.repo_path.exists(): + return SettingsValidationResult( + action=action, + confirmation=ConfirmationKind.CONFIRM, + message=f"Create directory? {payload.repo_path}", + ) + return None + + if payload.mode == ProfileSyncMode.IMPORT: + if not payload.repo_path.exists(): + return SettingsValidationResult( + action=action, + confirmation=None, + error=f"Path not found: {payload.repo_path}", + ) + preview = import_profiles_from_repo(payload.repo_path, dry_run=True) + if preview.imported == 0 and preview.skipped == 0: + return SettingsValidationResult( + action=action, + confirmation=None, + error="No profiles found in repository.", + ) + return SettingsValidationResult( + action=action, + confirmation=ConfirmationKind.CONFIRM, + detail=ProfileSyncPreview( + repo_path=payload.repo_path, + imported=preview.imported, + skipped=preview.skipped, + warnings=preview.warnings, + ), + message=f"Import preview from {payload.repo_path}", + ) + + return None + + +def _result_from_reset(result: ResetResult, message: str) -> SettingsActionResult: + if not result.success: + error = result.message + return SettingsActionResult( + status=SettingsActionStatus.ERROR, + message=error, + error=error, + ) + return SettingsActionResult(status=SettingsActionStatus.SUCCESS, message=message) + + +def _apply_profile_save(workspace: Path) -> SettingsActionResult: + settings = load_workspace_settings(workspace) + mcp = load_workspace_mcp(workspace) + + if not settings and not mcp: + return SettingsActionResult( + status=SettingsActionStatus.NOOP, + details=[ + "No workspace settings found to save.", + "Create .claude/settings.local.json or .mcp.json first.", + ], + needs_ack=True, + ) + + profile = save_personal_profile(workspace, settings, mcp) + fingerprints = compute_fingerprints(workspace) + save_applied_state(workspace, profile.profile_id, fingerprints) + + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + message="Profile saved", + details=[f"Profile saved: {profile.path.name}"], + needs_ack=True, + ) + + +def _apply_profile_apply(workspace: Path) -> SettingsActionResult: + profile = load_personal_profile(workspace) + if not profile: + return SettingsActionResult( + status=SettingsActionStatus.NOOP, + details=[ + "No profile saved for this workspace.", + "Use 'Save profile' first.", + ], + needs_ack=True, + ) + + current_settings = load_workspace_settings(workspace) or {} + current_mcp = load_workspace_mcp(workspace) or {} + + if profile.settings: + merged_settings = merge_personal_settings(workspace, current_settings, profile.settings) + write_workspace_settings(workspace, merged_settings) + + if profile.mcp: + merged_mcp = merge_personal_mcp(current_mcp, profile.mcp) + write_workspace_mcp(workspace, merged_mcp) + + fingerprints = compute_fingerprints(workspace) + save_applied_state(workspace, profile.profile_id, fingerprints) + + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + message="Profile applied", + details=["Profile applied to workspace"], + needs_ack=True, + ) + + +def _apply_profile_diff(workspace: Path) -> SettingsActionResult: + profile = load_personal_profile(workspace) + if not profile: + return SettingsActionResult( + status=SettingsActionStatus.NOOP, + details=["No profile saved for this workspace."], + needs_ack=True, + ) + + current_settings = load_workspace_settings(workspace) or {} + current_mcp = load_workspace_mcp(workspace) or {} + + diff = compute_structured_diff( + workspace_settings=current_settings, + profile_settings=profile.settings, + workspace_mcp=current_mcp, + profile_mcp=profile.mcp, + ) + + if diff.is_empty: + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + details=["Profile is in sync with workspace"], + needs_ack=True, + ) + + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + detail=ProfileDiffInfo(diff=diff), + needs_ack=True, + ) + + +def _apply_profile_sync(request: SettingsChangeRequest) -> SettingsActionResult: + payload = request.payload + if isinstance(payload, ProfileSyncPathPayload): + _save_sync_repo_path(payload.new_path) + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + details=[f"Path updated to: {payload.new_path}"], + needs_ack=True, + ) + + if not isinstance(payload, ProfileSyncPayload): + return SettingsActionResult( + status=SettingsActionStatus.ERROR, + message="Profile sync requires a payload", + error="missing payload", + ) + + if payload.mode == ProfileSyncMode.EXPORT: + return _apply_profile_sync_export(payload) + if payload.mode == ProfileSyncMode.IMPORT: + return _apply_profile_sync_import(payload, request.confirmed) + if payload.mode == ProfileSyncMode.FULL_SYNC: + return _apply_profile_sync_full(payload) + + return SettingsActionResult(status=SettingsActionStatus.NOOP) + + +def _apply_profile_sync_export(payload: ProfileSyncPayload) -> SettingsActionResult: + profiles = list_personal_profiles() + if not profiles: + return SettingsActionResult( + status=SettingsActionStatus.NOOP, + details=[ + "No profiles to export.", + "Save a profile first with 'Save profile'.", + ], + needs_ack=True, + ) + + if not payload.repo_path.exists() and not payload.create_dir: + return SettingsActionResult( + status=SettingsActionStatus.NOOP, + details=[f"Path does not exist: {payload.repo_path}"], + needs_ack=True, + ) + + payload.repo_path.mkdir(parents=True, exist_ok=True) + result = export_profiles_to_repo(payload.repo_path, profiles) + _save_sync_repo_path(str(payload.repo_path)) + + sync_result = ProfileSyncResult( + mode=ProfileSyncMode.EXPORT, + repo_path=payload.repo_path, + exported=result.exported, + profile_ids=[profile.repo_id for profile in profiles], + warnings=result.warnings, + ) + + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + message=f"Exported {result.exported} profile(s)", + detail=sync_result, + needs_ack=True, + ) + + +def _apply_profile_sync_import( + payload: ProfileSyncPayload, + confirmed: bool, +) -> SettingsActionResult: + if not payload.repo_path.exists(): + return SettingsActionResult( + status=SettingsActionStatus.NOOP, + details=[f"Path not found: {payload.repo_path}"], + needs_ack=True, + ) + + if not confirmed: + return SettingsActionResult(status=SettingsActionStatus.NOOP) + + result = import_profiles_from_repo(payload.repo_path, dry_run=False) + _save_sync_repo_path(str(payload.repo_path)) + + sync_result = ProfileSyncResult( + mode=ProfileSyncMode.IMPORT, + repo_path=payload.repo_path, + imported=result.imported, + skipped=result.skipped, + warnings=result.warnings, + ) + + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + message=f"Imported {result.imported} profile(s)", + detail=sync_result, + needs_ack=True, + ) + + +def _apply_profile_sync_full(payload: ProfileSyncPayload) -> SettingsActionResult: + imported = 0 + exported = 0 + if payload.repo_path.exists(): + import_result = import_profiles_from_repo(payload.repo_path, dry_run=False) + imported = import_result.imported + else: + payload.repo_path.mkdir(parents=True, exist_ok=True) + + profiles = list_personal_profiles() + if profiles: + export_result = export_profiles_to_repo(payload.repo_path, profiles) + exported = export_result.exported + + _save_sync_repo_path(str(payload.repo_path)) + + sync_result = ProfileSyncResult( + mode=ProfileSyncMode.FULL_SYNC, + repo_path=payload.repo_path, + imported=imported, + exported=exported, + ) + + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + message=f"Synced: {imported} imported, {exported} exported", + detail=sync_result, + needs_ack=True, + ) + + +def _apply_support_bundle(request: SettingsChangeRequest) -> SettingsActionResult: + payload = request.payload + if not isinstance(payload, SupportBundlePayload): + return SettingsActionResult( + status=SettingsActionStatus.ERROR, + message="Support bundle requires a payload", + error="missing payload", + ) + + create_bundle(output_path=payload.output_path, redact_paths_flag=payload.redact_paths) + info = SupportBundleInfo(output_path=payload.output_path) + + return SettingsActionResult( + status=SettingsActionStatus.SUCCESS, + message=f"Support bundle saved to {payload.output_path.name}", + detail=info, + needs_ack=True, + ) + + +def _load_version() -> str: + try: + return get_version("scc-cli") + except PackageNotFoundError: + return "unknown" diff --git a/src/scc_cli/application/start_session.py b/src/scc_cli/application/start_session.py new file mode 100644 index 0000000..aa15da8 --- /dev/null +++ b/src/scc_cli/application/start_session.py @@ -0,0 +1,221 @@ +"""Start session use case for launch workflow.""" + +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +from scc_cli.application.compute_effective_config import EffectiveConfig, compute_effective_config +from scc_cli.application.sync_marketplace import ( + EffectiveConfigResolver, + MarketplaceMaterializer, + SyncError, + SyncMarketplaceDependencies, + SyncResult, + sync_marketplace_settings, +) +from scc_cli.core.constants import AGENT_CONFIG_DIR, SANDBOX_IMAGE +from scc_cli.core.errors import WorkspaceNotFoundError +from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.agent_runner import AgentRunner +from scc_cli.ports.clock import Clock +from scc_cli.ports.filesystem import Filesystem +from scc_cli.ports.git_client import GitClient +from scc_cli.ports.models import AgentSettings, MountSpec, SandboxHandle, SandboxSpec +from scc_cli.ports.remote_fetcher import RemoteFetcher +from scc_cli.ports.sandbox_runtime import SandboxRuntime +from scc_cli.services.workspace import resolve_launch_context + + +@dataclass(frozen=True) +class StartSessionDependencies: + """Dependencies for the start session use case.""" + + filesystem: Filesystem + remote_fetcher: RemoteFetcher + clock: Clock + git_client: GitClient + agent_runner: AgentRunner + sandbox_runtime: SandboxRuntime + resolve_effective_config: EffectiveConfigResolver + materialize_marketplace: MarketplaceMaterializer + + +@dataclass(frozen=True) +class StartSessionRequest: + """Input data for preparing a start session.""" + + workspace_path: Path + workspace_arg: str | None + entry_dir: Path + team: str | None + session_name: str | None + resume: bool + fresh: bool + offline: bool + standalone: bool + dry_run: bool + allow_suspicious: bool + org_config: dict[str, Any] | None + org_config_url: str | None = None + + +@dataclass(frozen=True) +class StartSessionPlan: + """Prepared data needed to launch a session.""" + + resolver_result: ResolverResult + workspace_path: Path + team: str | None + session_name: str | None + resume: bool + fresh: bool + current_branch: str | None + effective_config: EffectiveConfig | None + sync_result: SyncResult | None + sync_error_message: str | None + agent_settings: AgentSettings | None + sandbox_spec: SandboxSpec | None + + +def prepare_start_session( + request: StartSessionRequest, + *, + dependencies: StartSessionDependencies, +) -> StartSessionPlan: + """Prepare launch data and settings for a session. + + This resolves workspace context, computes config, syncs marketplace settings, + and builds the sandbox specification. + """ + resolver_result = _resolve_workspace_context(request) + effective_config = _compute_effective_config(request) + sync_result, sync_error_message = _sync_marketplace_settings(request, dependencies) + agent_settings = _build_agent_settings(sync_result, dependencies.agent_runner) + current_branch = _resolve_current_branch(request.workspace_path, dependencies.git_client) + sandbox_spec = _build_sandbox_spec( + request=request, + resolver_result=resolver_result, + effective_config=effective_config, + agent_settings=agent_settings, + ) + return StartSessionPlan( + resolver_result=resolver_result, + workspace_path=request.workspace_path, + team=request.team, + session_name=request.session_name, + resume=request.resume, + fresh=request.fresh, + current_branch=current_branch, + effective_config=effective_config, + sync_result=sync_result, + sync_error_message=sync_error_message, + agent_settings=agent_settings, + sandbox_spec=sandbox_spec, + ) + + +def start_session( + plan: StartSessionPlan, + *, + dependencies: StartSessionDependencies, +) -> SandboxHandle: + """Launch the sandbox runtime for a prepared session.""" + if plan.sandbox_spec is None: + raise ValueError("Sandbox spec is required to start a session") + return dependencies.sandbox_runtime.run(plan.sandbox_spec) + + +def _resolve_workspace_context(request: StartSessionRequest) -> ResolverResult: + result = resolve_launch_context( + request.entry_dir, + request.workspace_arg, + allow_suspicious=request.allow_suspicious, + ) + if result is None: + raise WorkspaceNotFoundError(path=str(request.workspace_path)) + return result + + +def _compute_effective_config(request: StartSessionRequest) -> EffectiveConfig | None: + if request.org_config is None or request.team is None: + return None + return compute_effective_config( + request.org_config, + request.team, + workspace_path=request.workspace_path, + ) + + +def _sync_marketplace_settings( + request: StartSessionRequest, + dependencies: StartSessionDependencies, +) -> tuple[SyncResult | None, str | None]: + if request.dry_run or request.offline or request.standalone: + return None, None + if request.org_config is None or request.team is None: + return None, None + sync_dependencies = SyncMarketplaceDependencies( + filesystem=dependencies.filesystem, + remote_fetcher=dependencies.remote_fetcher, + clock=dependencies.clock, + resolve_effective_config=dependencies.resolve_effective_config, + materialize_marketplace=dependencies.materialize_marketplace, + ) + try: + result = sync_marketplace_settings( + project_dir=request.workspace_path, + org_config_data=request.org_config, + team_id=request.team, + org_config_url=request.org_config_url, + write_to_workspace=False, + container_path_prefix=str(request.workspace_path), + dependencies=sync_dependencies, + ) + except SyncError as exc: + return None, str(exc) + return result, None + + +def _build_agent_settings( + sync_result: SyncResult | None, + agent_runner: AgentRunner, +) -> AgentSettings | None: + if not sync_result or not sync_result.rendered_settings: + return None + settings_path = Path("/home/agent") / AGENT_CONFIG_DIR / "settings.json" + return agent_runner.build_settings(sync_result.rendered_settings, path=settings_path) + + +def _resolve_current_branch(workspace_path: Path, git_client: GitClient) -> str | None: + try: + if not git_client.is_git_repo(workspace_path): + return None + return git_client.get_current_branch(workspace_path) + except (OSError, ValueError): + return None + + +def _build_sandbox_spec( + *, + request: StartSessionRequest, + resolver_result: ResolverResult, + effective_config: EffectiveConfig | None, + agent_settings: AgentSettings | None, +) -> SandboxSpec | None: + if request.dry_run: + return None + return SandboxSpec( + image=SANDBOX_IMAGE, + workspace_mount=MountSpec( + source=resolver_result.mount_root, + target=resolver_result.mount_root, + ), + workdir=Path(resolver_result.container_workdir), + network_policy=effective_config.network_policy if effective_config else None, + continue_session=request.resume, + force_new=request.fresh, + agent_settings=agent_settings, + org_config=request.org_config, + ) diff --git a/src/scc_cli/application/sync_marketplace.py b/src/scc_cli/application/sync_marketplace.py new file mode 100644 index 0000000..87d5fa0 --- /dev/null +++ b/src/scc_cli/application/sync_marketplace.py @@ -0,0 +1,307 @@ +"""Marketplace sync use case for Claude Code integration.""" + +from __future__ import annotations + +import json +from dataclasses import dataclass +from pathlib import Path +from typing import TYPE_CHECKING, Any, Protocol + +from scc_cli.marketplace.managed import ManagedState, save_managed_state +from scc_cli.marketplace.materialize import MaterializationError +from scc_cli.marketplace.normalize import matches_pattern +from scc_cli.marketplace.render import check_conflicts, merge_settings, render_settings +from scc_cli.marketplace.schema import OrganizationConfig, normalize_org_config_data +from scc_cli.ports.clock import Clock +from scc_cli.ports.filesystem import Filesystem +from scc_cli.ports.remote_fetcher import RemoteFetcher + +if TYPE_CHECKING: + from scc_cli.marketplace.materialize import MaterializedMarketplace + from scc_cli.marketplace.resolve import EffectiveConfig + from scc_cli.marketplace.schema import MarketplaceSource + + +class EffectiveConfigResolver(Protocol): + """Protocol for resolving effective marketplace config.""" + + def __call__(self, config: OrganizationConfig, team_id: str) -> EffectiveConfig: + """Return effective config for the specified team.""" + + +class MarketplaceMaterializer(Protocol): + """Protocol for materializing marketplace sources.""" + + def __call__( + self, + name: str, + source: MarketplaceSource, + project_dir: Path, + force_refresh: bool = False, + fetcher: RemoteFetcher | None = None, + ) -> MaterializedMarketplace: + """Materialize a marketplace source.""" + + +@dataclass(frozen=True) +class SyncMarketplaceDependencies: + """Dependencies for marketplace sync use case.""" + + filesystem: Filesystem + remote_fetcher: RemoteFetcher + clock: Clock + resolve_effective_config: EffectiveConfigResolver + materialize_marketplace: MarketplaceMaterializer + + +class SyncError(Exception): + """Error during marketplace sync operation.""" + + def __init__(self, message: str, details: dict[str, Any] | None = None) -> None: + self.details = details or {} + super().__init__(message) + + +class SyncResult: + """Result of a marketplace sync operation.""" + + def __init__( + self, + success: bool, + plugins_enabled: list[str] | None = None, + marketplaces_materialized: list[str] | None = None, + warnings: list[str] | None = None, + settings_path: Path | None = None, + rendered_settings: dict[str, Any] | None = None, + ) -> None: + self.success = success + self.plugins_enabled = plugins_enabled or [] + self.marketplaces_materialized = marketplaces_materialized or [] + self.warnings = warnings or [] + self.settings_path = settings_path + # Computed settings dict for container injection (when write_to_workspace=False) + self.rendered_settings = rendered_settings + + +def sync_marketplace_settings( + project_dir: Path, + org_config_data: dict[str, Any], + team_id: str | None = None, + org_config_url: str | None = None, + force_refresh: bool = False, + dry_run: bool = False, + write_to_workspace: bool = True, + container_path_prefix: str = "", + *, + dependencies: SyncMarketplaceDependencies, +) -> SyncResult: + """Sync marketplace settings for a project. + + Orchestrates the full pipeline: + 1. Parse and validate org config + 2. Compute effective plugins for team + 3. Materialize required marketplaces + 4. Render settings to Claude format + 5. Merge with existing user settings (non-destructive) + 6. Save managed state tracking + 7. Write settings.local.json (unless dry_run or write_to_workspace=False) + + Args: + project_dir: Project root directory + org_config_data: Parsed org config dictionary + team_id: Team profile ID (uses defaults if None) + org_config_url: URL where org config was fetched (for tracking) + force_refresh: Force re-materialization of marketplaces + dry_run: If True, compute but don't write files + write_to_workspace: If False, skip writing settings.local.json + and instead return rendered_settings for container injection. + This prevents host Claude from seeing container-only plugins. + container_path_prefix: Path prefix for marketplace paths in container. + When set (e.g., "/workspace"), paths become absolute container paths + like "/workspace/.claude/.scc-marketplaces/...". Required when + write_to_workspace=False since settings will be in container HOME. + dependencies: Injected dependencies for filesystem, time, and IO ports + + Returns: + SyncResult with success status and details. When write_to_workspace=False, + rendered_settings contains the computed settings for container injection. + + Raises: + SyncError: On validation or processing errors + TeamNotFoundError: If team_id not found in config + """ + warnings: list[str] = [] + + # ── Step 1: Parse org config ───────────────────────────────────────────── + # Org config is already validated by JSON Schema before caching. + try: + org_config = OrganizationConfig.model_validate(normalize_org_config_data(org_config_data)) + except Exception as exc: + raise SyncError(f"Invalid org config: {exc}") from exc + + # ── Step 2: Resolve effective config (federation-aware) ─────────────────── + if team_id is None: + raise SyncError("team_id is required for marketplace sync") + + # Use resolve_effective_config for federation support + # This handles both inline and federated teams uniformly + effective_config = dependencies.resolve_effective_config(org_config, team_id=team_id) + + # Check for blocked plugins that user has installed + # First, check if org-enabled plugins were blocked + if effective_config.blocked_plugins: + existing = _load_existing_plugins(project_dir, dependencies.filesystem) + conflict_warnings = check_conflicts( + existing_plugins=existing, + blocked_plugins=[ + { + "plugin_id": blocked.plugin_id, + "reason": blocked.reason, + "pattern": blocked.pattern, + } + for blocked in effective_config.blocked_plugins + ], + ) + warnings.extend(conflict_warnings) + + # Also check user's existing plugins against security.blocked_plugins patterns + security = org_config.security + if security.blocked_plugins: + existing = _load_existing_plugins(project_dir, dependencies.filesystem) + for plugin in existing: + for pattern in security.blocked_plugins: + if matches_pattern(plugin, pattern): + warnings.append( + f"⚠️ Plugin '{plugin}' is blocked by organization policy " + f"(matched pattern: {pattern})" + ) + break # Only one warning per plugin + + # ── Step 3: Materialize required marketplaces ─────────────────────────── + materialized: dict[str, Any] = {} + marketplaces_used = set() + + # Determine which marketplaces are needed + for plugin_ref in effective_config.enabled_plugins: + if "@" in plugin_ref: + marketplace_name = plugin_ref.split("@")[1] + marketplaces_used.add(marketplace_name) + + # Also include any extra marketplaces from the effective result + for marketplace_name in effective_config.extra_marketplaces: + marketplaces_used.add(marketplace_name) + + # Materialize each marketplace + for marketplace_name in marketplaces_used: + # Skip implicit marketplaces (claude-plugins-official) + from scc_cli.marketplace.constants import IMPLICIT_MARKETPLACES + + if marketplace_name in IMPLICIT_MARKETPLACES: + continue + + # Find source configuration from effective marketplaces (includes team sources for federated) + source = effective_config.marketplaces.get(marketplace_name) + if source is None: + warnings.append(f"Marketplace '{marketplace_name}' not defined in effective config") + continue + + try: + result = dependencies.materialize_marketplace( + name=marketplace_name, + source=source, + project_dir=project_dir, + force_refresh=force_refresh, + fetcher=dependencies.remote_fetcher, + ) + materialized[marketplace_name] = { + "relative_path": result.relative_path, + "source_type": result.source_type, + "canonical_name": result.canonical_name, # Critical for alias → canonical translation + } + except MaterializationError as exc: + warnings.append(f"Failed to materialize '{marketplace_name}': {exc}") + + # ── Step 3b: Check for canonical name collisions ──────────────────────── + # Multiple aliases resolving to the same canonical name is a configuration error + canonical_to_aliases: dict[str, list[str]] = {} + for alias_name, data in materialized.items(): + canonical = data.get("canonical_name", alias_name) + if canonical not in canonical_to_aliases: + canonical_to_aliases[canonical] = [] + canonical_to_aliases[canonical].append(alias_name) + + for canonical, aliases in canonical_to_aliases.items(): + if len(aliases) > 1: + raise SyncError( + f"Canonical name collision: marketplace.json name '{canonical}' " + f"is used by multiple org config entries: {', '.join(aliases)}. " + "Each marketplace must have a unique canonical name.", + details={"canonical_name": canonical, "conflicting_aliases": aliases}, + ) + + # ── Step 4: Render settings ───────────────────────────────────────────── + effective_dict = { + "enabled": effective_config.enabled_plugins, + "extra_marketplaces": effective_config.extra_marketplaces, + } + # Pass path_prefix for container-only mode (absolute paths in container HOME) + rendered = render_settings(effective_dict, materialized, path_prefix=container_path_prefix) + + # ── Step 5: Merge with existing settings (only if writing to workspace) ── + # When write_to_workspace=False, we skip merging because settings go to + # container HOME, not the workspace settings.local.json + if write_to_workspace: + merged = merge_settings(project_dir, rendered, filesystem=dependencies.filesystem) + else: + # For container-only mode, use rendered settings directly + # (no merging with workspace settings since we're not writing there) + merged = rendered + + # ── Step 6: Prepare managed state ─────────────────────────────────────── + managed_state = ManagedState( + managed_plugins=list(effective_config.enabled_plugins), + managed_marketplaces=[item.get("relative_path", "") for item in materialized.values()], + last_sync=dependencies.clock.now(), + org_config_url=org_config_url, + team_id=team_id, + ) + + # ── Step 7: Write files (unless dry_run or write_to_workspace=False) ───── + settings_path = project_dir / ".claude" / "settings.local.json" + claude_dir = project_dir / ".claude" + + if not dry_run and write_to_workspace: + dependencies.filesystem.mkdir(claude_dir, parents=True, exist_ok=True) + dependencies.filesystem.write_text(settings_path, json.dumps(merged, indent=2)) + save_managed_state(project_dir, managed_state, filesystem=dependencies.filesystem) + elif not dry_run and not write_to_workspace: + # Container-only mode: ensure .claude dir exists for marketplaces + # (marketplaces are still materialized to workspace for bind-mount access) + dependencies.filesystem.mkdir(claude_dir, parents=True, exist_ok=True) + save_managed_state(project_dir, managed_state, filesystem=dependencies.filesystem) + + return SyncResult( + success=True, + plugins_enabled=list(effective_config.enabled_plugins), + marketplaces_materialized=list(materialized.keys()), + warnings=warnings, + settings_path=settings_path if (not dry_run and write_to_workspace) else None, + # Return rendered settings for container injection when not writing to workspace + rendered_settings=merged if not write_to_workspace else None, + ) + + +def _load_existing_plugins(project_dir: Path, filesystem: Filesystem) -> list[str]: + """Load existing plugins from settings.local.json.""" + settings_path = project_dir / ".claude" / "settings.local.json" + if not filesystem.exists(settings_path): + return [] + + try: + data: dict[str, Any] = json.loads(filesystem.read_text(settings_path)) + plugins = data.get("enabledPlugins", []) + if isinstance(plugins, list): + return [str(plugin) for plugin in plugins] + return [] + except (json.JSONDecodeError, OSError): + return [] diff --git a/src/scc_cli/bootstrap.py b/src/scc_cli/bootstrap.py new file mode 100644 index 0000000..2aace5a --- /dev/null +++ b/src/scc_cli/bootstrap.py @@ -0,0 +1,45 @@ +"""Composition root wiring SCC adapters.""" + +from __future__ import annotations + +from dataclasses import dataclass +from functools import lru_cache + +from scc_cli.adapters.claude_agent_runner import ClaudeAgentRunner +from scc_cli.adapters.docker_sandbox_runtime import DockerSandboxRuntime +from scc_cli.adapters.local_filesystem import LocalFilesystem +from scc_cli.adapters.local_git_client import LocalGitClient +from scc_cli.adapters.requests_fetcher import RequestsFetcher +from scc_cli.adapters.system_clock import SystemClock +from scc_cli.ports.agent_runner import AgentRunner +from scc_cli.ports.clock import Clock +from scc_cli.ports.filesystem import Filesystem +from scc_cli.ports.git_client import GitClient +from scc_cli.ports.remote_fetcher import RemoteFetcher +from scc_cli.ports.sandbox_runtime import SandboxRuntime + + +@dataclass(frozen=True) +class DefaultAdapters: + """Container for default adapter instances.""" + + filesystem: Filesystem + git_client: GitClient + remote_fetcher: RemoteFetcher + clock: Clock + agent_runner: AgentRunner + sandbox_runtime: SandboxRuntime + + +@lru_cache(maxsize=1) +def get_default_adapters() -> DefaultAdapters: + """Return the default adapter wiring for SCC.""" + + return DefaultAdapters( + filesystem=LocalFilesystem(), + git_client=LocalGitClient(), + remote_fetcher=RequestsFetcher(), + clock=SystemClock(), + agent_runner=ClaudeAgentRunner(), + sandbox_runtime=DockerSandboxRuntime(), + ) diff --git a/src/scc_cli/claude_adapter.py b/src/scc_cli/claude_adapter.py index dbfd93f..6bff40f 100644 --- a/src/scc_cli/claude_adapter.py +++ b/src/scc_cli/claude_adapter.py @@ -27,7 +27,7 @@ from scc_cli.profiles import get_marketplace_url if TYPE_CHECKING: - from scc_cli.profiles import EffectiveConfig, MCPServer + from scc_cli.application.compute_effective_config import EffectiveConfig, MCPServer # ═══════════════════════════════════════════════════════════════════════════════ diff --git a/src/scc_cli/cli_common.py b/src/scc_cli/cli_common.py index 0543104..7081fda 100644 --- a/src/scc_cli/cli_common.py +++ b/src/scc_cli/cli_common.py @@ -14,6 +14,7 @@ from rich.console import Console from rich.table import Table +from .core.error_mapping import to_exit_code, to_human_message from .core.errors import SCCError from .core.exit_codes import EXIT_CANCELLED, EXIT_PREREQ, get_error_footer from .output_mode import is_json_command_mode @@ -81,13 +82,12 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: except SCCError as e: if is_json_command_mode(): # JSON mode: emit structured error envelope to stdout - from .core.exit_codes import get_exit_code_for_exception from .json_output import build_error_envelope from .output_mode import print_json envelope = build_error_envelope(e) print_json(envelope) - raise typer.Exit(get_exit_code_for_exception(e)) + raise typer.Exit(to_exit_code(e)) # Human mode: use stderr for errors (stdout purity for shell wrappers) render_error(err_console, e, debug=state.debug) # Show actionable hint if available (footer has its own styling) @@ -130,7 +130,7 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: err_console.print( create_warning_panel( "Unexpected Error", - str(e), + to_human_message(e), "Run with 'scc --debug ' for full traceback", ) ) diff --git a/src/scc_cli/commands/config.py b/src/scc_cli/commands/config.py index 9952386..2ef568b 100644 --- a/src/scc_cli/commands/config.py +++ b/src/scc_cli/commands/config.py @@ -8,11 +8,18 @@ from rich import box from rich.table import Table -from .. import config, profiles, setup +from .. import config, setup +from ..application.compute_effective_config import ( + BlockedItem, + ConfigDecision, + DelegationDenied, + EffectiveConfig, + compute_effective_config, +) from ..cli_common import console, handle_errors from ..core import personal_profiles from ..core.exit_codes import EXIT_USAGE -from ..core.maintenance import get_paths, get_total_size +from ..maintenance import get_paths, get_total_size from ..panels import create_error_panel, create_info_panel from ..source_resolver import ResolveError, resolve_source from ..stores.exception_store import RepoStore, UserStore @@ -280,7 +287,7 @@ def _config_explain(field_filter: str | None = None, workspace_path: str | None ws_path = Path(workspace_path) if workspace_path else Path.cwd() # Compute effective config - effective = profiles.compute_effective_config( + effective = compute_effective_config( org_config=org_config, team_name=team, workspace_path=ws_path, @@ -321,10 +328,10 @@ def _config_explain(field_filter: str | None = None, workspace_path: str | None console.print() -def _render_config_decisions(effective: profiles.EffectiveConfig, field_filter: str | None) -> None: +def _render_config_decisions(effective: EffectiveConfig, field_filter: str | None) -> None: """Render config decisions grouped by field.""" # Group decisions by field - by_field: dict[str, list[profiles.ConfigDecision]] = {} + by_field: dict[str, list[ConfigDecision]] = {} for decision in effective.decisions: field = decision.field.split(".")[0] # Get top-level field if field_filter and field != field_filter: @@ -441,7 +448,7 @@ def _render_personal_profile(ws_path: Path, field_filter: str | None) -> None: console.print() -def _render_blocked_items(blocked_items: list[profiles.BlockedItem]) -> None: +def _render_blocked_items(blocked_items: list[BlockedItem]) -> None: """Render blocked items with patterns and fix-it commands.""" from scc_cli.utils.fixit import generate_policy_exception_command @@ -456,7 +463,7 @@ def _render_blocked_items(blocked_items: list[profiles.BlockedItem]) -> None: console.print() -def _render_denied_additions(denied_additions: list[profiles.DelegationDenied]) -> None: +def _render_denied_additions(denied_additions: list[DelegationDenied]) -> None: """Render denied additions with reasons and fix-it commands.""" from scc_cli.utils.fixit import generate_unblock_command diff --git a/src/scc_cli/commands/exceptions.py b/src/scc_cli/commands/exceptions.py index a6fe73a..8bb4db8 100644 --- a/src/scc_cli/commands/exceptions.py +++ b/src/scc_cli/commands/exceptions.py @@ -26,7 +26,8 @@ from rich.console import Console from rich.table import Table -from .. import config, profiles +from .. import config +from ..application.compute_effective_config import compute_effective_config from ..cli_common import handle_errors from ..cli_helpers import create_audit_record, require_reason_for_governance from ..evaluation import EvaluationResult, evaluate @@ -507,7 +508,7 @@ def get_current_denials() -> EvaluationResult: return EvaluationResult() # Compute effective config for current workspace - effective = profiles.compute_effective_config( + effective = compute_effective_config( org_config=org_config, team_name=team, workspace_path=Path.cwd(), diff --git a/src/scc_cli/commands/launch/__init__.py b/src/scc_cli/commands/launch/__init__.py index ab063b6..34dfbac 100644 --- a/src/scc_cli/commands/launch/__init__.py +++ b/src/scc_cli/commands/launch/__init__.py @@ -3,19 +3,18 @@ This package contains the decomposed launch functionality: - render.py: Pure output/display functions (no business logic) -- app.py: Main command logic and orchestration -- (more modules to be added as extraction continues) +- flow.py: Start command logic and interactive flows +- app.py: Thin CLI wrapper for Typer registration Public API re-exports for backward compatibility. """ -from .app import ( +from .app import launch_app, start +from .flow import ( _configure_team_settings, _sync_marketplace_settings, interactive_start, - launch_app, run_start_wizard_flow, - start, ) from .render import ( build_dry_run_data, diff --git a/src/scc_cli/commands/launch/app.py b/src/scc_cli/commands/launch/app.py index 6b035c1..e742225 100644 --- a/src/scc_cli/commands/launch/app.py +++ b/src/scc_cli/commands/launch/app.py @@ -1,472 +1,15 @@ """ CLI Launch Commands. -Commands for starting Claude Code in Docker sandboxes. - -This module handles the `scc start` command, orchestrating: -- Session selection (--resume, --select, interactive) -- Workspace validation and preparation -- Team profile configuration -- Docker sandbox launch - -The main `start()` function delegates to focused helper functions -for maintainability and testability. +Thin wrapper around the start flow implementation. """ -from pathlib import Path -from typing import Any, cast +from __future__ import annotations import typer -from rich.status import Status - -from ... import config, docker, git, sessions, setup, teams -from ...cli_common import ( - console, - err_console, - handle_errors, -) -from ...contexts import load_recent_contexts, normalize_path -from ...core import personal_profiles -from ...core.errors import WorkspaceNotFoundError -from ...core.exit_codes import EXIT_CANCELLED, EXIT_CONFIG, EXIT_ERROR, EXIT_USAGE -from ...json_output import build_envelope -from ...kinds import Kind -from ...marketplace.sync import SyncError, SyncResult, sync_marketplace_settings -from ...output_mode import json_output_mode, print_json, set_pretty_mode -from ...panels import create_warning_panel -from ...theme import Colors, Indicators, Spinners, get_brand_header -from ...ui.chrome import print_with_layout, render_with_layout -from ...ui.gate import is_interactive_allowed -from ...ui.keys import _BackSentinel -from ...ui.picker import ( - QuickResumeResult, - TeamSwitchRequested, - pick_context_quick_resume, - pick_team, -) -from ...ui.prompts import ( - confirm_with_layout, - prompt_custom_workspace, - prompt_repo_url, - prompt_with_layout, - select_session, -) -from ...ui.wizard import ( - BACK, - WorkspaceSource, - pick_recent_workspace, - pick_team_repo, - pick_workspace_source, -) -from .render import ( - build_dry_run_data, - show_dry_run_panel, - warn_if_non_worktree, -) -from .sandbox import launch_sandbox -from .workspace import ( - prepare_workspace, - resolve_mount_and_branch, - resolve_workspace_team, - validate_and_resolve_workspace, -) - -# ───────────────────────────────────────────────────────────────────────────── -# Helper Functions (extracted for maintainability) -# ───────────────────────────────────────────────────────────────────────────── - - -def _resolve_session_selection( - workspace: str | None, - team: str | None, - resume: bool, - select: bool, - cfg: dict[str, Any], - *, - json_mode: bool = False, - standalone_override: bool = False, - no_interactive: bool = False, - dry_run: bool = False, -) -> tuple[str | None, str | None, str | None, str | None, bool, bool]: - """ - Handle session selection logic for --select, --resume, and interactive modes. - - Args: - workspace: Workspace path from command line. - team: Team name from command line. - resume: Whether --resume flag is set. - select: Whether --select flag is set. - cfg: Loaded configuration. - json_mode: Whether --json output is requested (blocks interactive). - standalone_override: Whether --standalone flag is set (overrides config). - - Returns: - Tuple of (workspace, team, session_name, worktree_name, cancelled, was_auto_detected) - If user cancels or no session found, workspace will be None. - cancelled is True only for explicit user cancellation. - was_auto_detected is True if workspace was found via resolver (git/.scc.yaml). - - Raises: - typer.Exit: If interactive mode required but not allowed (non-TTY, CI, --json). - """ - session_name = None - worktree_name = None - cancelled = False - - # Interactive mode if no workspace provided and no session flags - if workspace is None and not resume and not select: - # For --dry-run without workspace, use resolver to auto-detect (skip interactive) - if dry_run: - from pathlib import Path - - from ...services.workspace import resolve_launch_context - - result = resolve_launch_context(Path.cwd(), workspace_arg=None) - if result is not None: - return str(result.workspace_root), team, None, None, False, True # auto-detected - # No auto-detect possible, fall through to error - err_console.print( - "[red]Error:[/red] No workspace could be auto-detected.\n" - "[dim]Provide a workspace path: scc start --dry-run /path/to/project[/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_USAGE) - - # Check TTY gating before entering interactive mode - if not is_interactive_allowed( - json_mode=json_mode, - no_interactive_flag=no_interactive, - ): - # Try auto-detect before failing - from pathlib import Path - - from ...services.workspace import resolve_launch_context - - result = resolve_launch_context(Path.cwd(), workspace_arg=None) - if result is not None: - return str(result.workspace_root), team, None, None, False, True # auto-detected - - err_console.print( - "[red]Error:[/red] Interactive mode requires a terminal (TTY).\n" - "[dim]Provide a workspace path: scc start /path/to/project[/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_USAGE) - workspace_result, team, session_name, worktree_name = cast( - tuple[str | None, str | None, str | None, str | None], - interactive_start(cfg, standalone_override=standalone_override, team_override=team), - ) - if workspace_result is None: - return None, team, None, None, True, False - return ( - workspace_result, - team, - session_name, - worktree_name, - False, - False, - ) - - # Handle --select: interactive session picker - if select and workspace is None: - # Check TTY gating before showing session picker - if not is_interactive_allowed( - json_mode=json_mode, - no_interactive_flag=no_interactive, - ): - console.print( - "[red]Error:[/red] --select requires a terminal (TTY).\n" - "[dim]Use --resume to auto-select most recent session.[/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_USAGE) - - # Prefer explicit --team, then selected_profile for filtering - effective_team = team or cfg.get("selected_profile") - if standalone_override: - effective_team = None - - # If org mode and no active team, require explicit selection - if effective_team is None and not standalone_override: - if not json_mode: - console.print( - "[yellow]No active team selected.[/yellow] " - "Run 'scc team switch' or pass --team to select." - ) - return None, team, None, None, False, False - - recent_sessions = sessions.list_recent(limit=10) - if effective_team is None: - filtered_sessions = [s for s in recent_sessions if s.get("team") is None] - else: - filtered_sessions = [s for s in recent_sessions if s.get("team") == effective_team] - - if not filtered_sessions: - if not json_mode: - console.print("[yellow]No recent sessions found.[/yellow]") - return None, team, None, None, False, False - - selected = select_session(console, filtered_sessions) - if selected is None: - return None, team, None, None, True, False - workspace = selected.get("workspace") - if not team: - team = selected.get("team") - # --standalone overrides any team from session (standalone means no team) - if standalone_override: - team = None - if not json_mode: - console.print(f"[dim]Selected: {workspace}[/dim]") - - # Handle --resume: auto-select most recent session - elif resume and workspace is None: - # Prefer explicit --team, then selected_profile for resume filtering - effective_team = team or cfg.get("selected_profile") - if standalone_override: - effective_team = None - - # If org mode and no active team, require explicit selection - if effective_team is None and not standalone_override: - if not json_mode: - console.print( - "[yellow]No active team selected.[/yellow] " - "Run 'scc team switch' or pass --team to resume." - ) - return None, team, None, None, False, False - - recent_sessions = sessions.list_recent(limit=50) - if effective_team is None: - filtered_sessions = [s for s in recent_sessions if s.get("team") is None] - else: - filtered_sessions = [s for s in recent_sessions if s.get("team") == effective_team] - - if filtered_sessions: - recent_session = filtered_sessions[0] - workspace = recent_session.get("workspace") - if not team: - team = recent_session.get("team") - # --standalone overrides any team from session (standalone means no team) - if standalone_override: - team = None - if not json_mode: - console.print(f"[dim]Resuming: {workspace}[/dim]") - else: - if not json_mode: - console.print("[yellow]No recent sessions found.[/yellow]") - return None, team, None, None, False, False - - return workspace, team, session_name, worktree_name, cancelled, False # explicit workspace - - -def _configure_team_settings(team: str | None, cfg: dict[str, Any]) -> None: - """ - Validate team profile exists. - - NOTE: Plugin settings are now sourced ONLY from workspace settings.local.json - (via _sync_marketplace_settings). Docker volume injection has been removed - to prevent plugin mixing across teams. - - IMPORTANT: This function must remain cache-only (no network calls). - It's called in offline mode where only cached org config is available. - If you need to add network operations, gate them with an offline check - or move them to _sync_marketplace_settings() which is already offline-aware. - - Raises: - typer.Exit: If team profile is not found. - """ - if not team: - return - - with Status( - f"[cyan]Validating {team} profile...[/cyan]", console=console, spinner=Spinners.SETUP - ): - # load_cached_org_config() reads from local cache only - safe for offline mode - org_config = config.load_cached_org_config() - - validation = teams.validate_team_profile(team, org_config) - if not validation["valid"]: - print_with_layout( - console, - create_warning_panel( - "Team Not Found", - f"No team profile named '{team}'.", - "Run 'scc team list' to see available profiles", - ), - constrain=True, - ) - raise typer.Exit(1) - - # NOTE: docker.inject_team_settings() removed - workspace settings.local.json - # is now the single source of truth for plugins (prevents cross-team mixing) - -def _sync_marketplace_settings( - workspace_path: Path | None, - team: str | None, - org_config_url: str | None = None, -) -> SyncResult | None: - """ - Sync marketplace settings for the workspace. - - Orchestrates the full marketplace pipeline: - 1. Compute effective plugins for team - 2. Materialize required marketplaces - 3. Render settings (NOT written to workspace to prevent host leakage) - 4. Return rendered_settings for container injection - - IMPORTANT: This uses container-only mode to prevent host Claude from seeing - SCC-managed plugins. Marketplaces are still materialized to workspace (for - container access via bind-mount), but settings.local.json is NOT written. - Instead, rendered_settings is returned for injection into container HOME. - - Args: - workspace_path: Path to the workspace directory. - team: Selected team profile name. - org_config_url: URL of the org config (for tracking). - - Returns: - SyncResult with details (including rendered_settings for container injection), - or None if no sync needed. - - Raises: - typer.Exit: If marketplace sync fails critically. - """ - if workspace_path is None or team is None: - return None - - org_config = config.load_cached_org_config() - if org_config is None: - return None - - with Status( - "[cyan]Syncing marketplace settings...[/cyan]", console=console, spinner=Spinners.NETWORK - ): - try: - # Use container-only mode: - # - write_to_workspace=False: Don't write settings.local.json (prevents host leakage) - # - container_path_prefix: Workspace path for absolute paths in container - # - # Docker sandbox mounts workspace at the same absolute path, so paths like - # "/Users/foo/project/.claude/.scc-marketplaces/..." will resolve correctly - # when settings are in container HOME (/home/agent/.claude/settings.json) - result = sync_marketplace_settings( - project_dir=workspace_path, - org_config_data=org_config, - team_id=team, - org_config_url=org_config_url, - write_to_workspace=False, # Container-only mode - container_path_prefix=str(workspace_path), # Absolute paths for container - ) - - # Display any warnings - if result.warnings: - console.print() - for warning in result.warnings: - print_with_layout(console, f"[yellow]{warning}[/yellow]") - console.print() - - # Log success - if result.plugins_enabled: - print_with_layout( - console, - f"[green]{Indicators.get('PASS')} Enabled {len(result.plugins_enabled)} team plugin(s)[/green]", - ) - if result.marketplaces_materialized: - print_with_layout( - console, - f"[green]{Indicators.get('PASS')} Materialized {len(result.marketplaces_materialized)} marketplace(s)[/green]", - ) - - # rendered_settings will be passed to launch_sandbox for container injection - return result - - except SyncError as e: - panel = create_warning_panel( - "Marketplace Sync Failed", - str(e), - "Team plugins may not be available. Use --dry-run to diagnose.", - ) - print_with_layout(console, panel, constrain=True) - # Non-fatal: continue without marketplace sync - return None - - -def _apply_personal_profile( - workspace_path: Path, - *, - json_mode: bool, - non_interactive: bool, -) -> tuple[str | None, bool]: - """Apply personal profile if available. - - Returns (profile_id, applied). - """ - profile, corrupt = personal_profiles.load_personal_profile_with_status(workspace_path) - if corrupt: - if not json_mode: - console.print("[yellow]Personal profile is invalid JSON. Skipping.[/yellow]") - return None, False - if profile is None: - return None, False - - drift = personal_profiles.detect_drift(workspace_path) - if drift and not personal_profiles.workspace_has_overrides(workspace_path): - drift = False - - if drift and not is_interactive_allowed( - json_mode=json_mode, no_interactive_flag=non_interactive - ): - if not json_mode: - console.print( - "[yellow]Workspace overrides detected; personal profile not applied.[/yellow]" - ) - return profile.profile_id, False - - if drift and not json_mode: - console.print("[yellow]Workspace overrides detected.[/yellow]") - if not confirm_with_layout(console, "Apply personal profile anyway?", default=False): - return profile.profile_id, False - - existing_settings, settings_invalid = personal_profiles.load_workspace_settings_with_status( - workspace_path - ) - existing_mcp, mcp_invalid = personal_profiles.load_workspace_mcp_with_status(workspace_path) - if settings_invalid: - if not json_mode: - console.print("[yellow]Invalid JSON in .claude/settings.local.json[/yellow]") - return profile.profile_id, False - if mcp_invalid: - if not json_mode: - console.print("[yellow]Invalid JSON in .mcp.json[/yellow]") - return profile.profile_id, False - - existing_settings = existing_settings or {} - existing_mcp = existing_mcp or {} - - merged_settings = personal_profiles.merge_personal_settings( - workspace_path, existing_settings, profile.settings or {} - ) - merged_mcp = personal_profiles.merge_personal_mcp(existing_mcp, profile.mcp or {}) - - personal_profiles.write_workspace_settings(workspace_path, merged_settings) - if profile.mcp: - personal_profiles.write_workspace_mcp(workspace_path, merged_mcp) - - personal_profiles.save_applied_state( - workspace_path, - profile.profile_id, - personal_profiles.compute_fingerprints(workspace_path), - ) - - if not json_mode: - console.print("[green]Applied personal profile.[/green]") - - return profile.profile_id, True - - -# ───────────────────────────────────────────────────────────────────────────── -# Launch App -# ───────────────────────────────────────────────────────────────────────────── +from ...cli_common import handle_errors +from .flow import start as _start launch_app = typer.Typer( name="launch", @@ -475,904 +18,4 @@ def _apply_personal_profile( context_settings={"help_option_names": ["-h", "--help"]}, ) - -# ───────────────────────────────────────────────────────────────────────────── -# Start Command -# ───────────────────────────────────────────────────────────────────────────── - - -@handle_errors -def start( - workspace: str | None = typer.Argument(None, help="Path to workspace (optional)"), - team: str | None = typer.Option(None, "-t", "--team", help="Team profile to use"), - session_name: str | None = typer.Option(None, "--session", help="Session name"), - resume: bool = typer.Option(False, "-r", "--resume", help="Resume most recent session"), - select: bool = typer.Option(False, "-s", "--select", help="Select from recent sessions"), - worktree_name: str | None = typer.Option(None, "-w", "--worktree", help="Worktree name"), - fresh: bool = typer.Option(False, "--fresh", help="Force new container"), - install_deps: bool = typer.Option(False, "--install-deps", help="Install dependencies"), - offline: bool = typer.Option(False, "--offline", help="Use cached config only (error if none)"), - standalone: bool = typer.Option(False, "--standalone", help="Run without organization config"), - dry_run: bool = typer.Option(False, "--dry-run", help="Preview config without launching"), - json_output: bool = typer.Option(False, "--json", help="Output as JSON"), - pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), - non_interactive: bool = typer.Option( - False, - "--non-interactive", - "--no-interactive", - help="Fail fast if interactive input would be required", - ), - debug: bool = typer.Option( - False, - "--debug", - hidden=True, - ), - allow_suspicious_workspace: bool = typer.Option( - False, - "--allow-suspicious-workspace", - help="Allow starting in suspicious directories (e.g., home, /tmp) in non-interactive mode", - ), -) -> None: - """ - Start Claude Code in a Docker sandbox. - - If no arguments provided, launches interactive mode. - """ - from pathlib import Path - - # Capture original CWD for entry_dir tracking (before any directory changes) - original_cwd = Path.cwd() - - if isinstance(debug, bool) and debug: - err_console.print( - "[red]Error:[/red] --debug is a global flag and must be placed before the command.", - highlight=False, - ) - err_console.print( - "[dim]Use: scc --debug start [/dim]", - highlight=False, - ) - err_console.print( - "[dim]With uv: uv run scc --debug start [/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_USAGE) - - # ── Fast Fail: Validate mode flags before any processing ────────────────── - from scc_cli.ui.gate import validate_mode_flags - - validate_mode_flags( - json_mode=(json_output or pretty), - select=select, - ) - - # ── Step 0: Handle --standalone mode (skip org config entirely) ─────────── - if standalone: - # In standalone mode, never ask for team and never load org config - team = None - if not json_output and not pretty: - console.print("[dim]Running in standalone mode (no organization config)[/dim]") - - # ── Step 0.5: Handle --offline mode (cache-only, fail fast) ─────────────── - if offline and not standalone: - # Check if cached org config exists - cached = config.load_cached_org_config() - if cached is None: - err_console.print( - "[red]Error:[/red] --offline requires cached organization config.\n" - "[dim]Run 'scc setup' first to cache your org config.[/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_CONFIG) - if not json_output and not pretty: - console.print("[dim]Using cached organization config (offline mode)[/dim]") - - # ── Step 1: First-run detection ────────────────────────────────────────── - # Skip setup wizard in standalone mode (no org config needed) - # Skip in offline mode (can't fetch remote - already validated cache exists) - if not standalone and not offline and setup.is_setup_needed(): - if not setup.maybe_run_setup(console): - raise typer.Exit(1) - - cfg = config.load_user_config() - - # ── Step 2: Session selection (interactive, --select, --resume) ────────── - workspace, team, session_name, worktree_name, cancelled, was_auto_detected = ( - _resolve_session_selection( - workspace=workspace, - team=team, - resume=resume, - select=select, - cfg=cfg, - json_mode=(json_output or pretty), - standalone_override=standalone, - no_interactive=non_interactive, - dry_run=dry_run, - ) - ) - if workspace is None: - if cancelled: - if not json_output and not pretty: - console.print("[dim]Cancelled.[/dim]") - raise typer.Exit(EXIT_CANCELLED) - if select or resume: - raise typer.Exit(EXIT_ERROR) - raise typer.Exit(EXIT_CANCELLED) - - # ── Step 3: Docker availability check ──────────────────────────────────── - # Skip Docker check for dry-run (just previewing config) - if not dry_run: - with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): - docker.check_docker_available() - - # ── Step 4: Workspace validation and platform checks ───────────────────── - workspace_path = validate_and_resolve_workspace( - workspace, - no_interactive=non_interactive, - allow_suspicious=allow_suspicious_workspace, - json_mode=(json_output or pretty), - ) - if workspace_path is None: - if not json_output and not pretty: - console.print("[dim]Cancelled.[/dim]") - raise typer.Exit(EXIT_CANCELLED) - if not workspace_path.exists(): - raise WorkspaceNotFoundError(path=str(workspace_path)) - - # ── Step 5: Workspace preparation (worktree, deps, git safety) ─────────── - # Skip for dry-run (no worktree creation, no deps, no branch safety prompts) - if not dry_run: - workspace_path = prepare_workspace(workspace_path, worktree_name, install_deps) - - # ── Step 5.5: Resolve team from workspace pinning ──────────────────────── - team = resolve_workspace_team( - workspace_path, - team, - cfg, - json_mode=(json_output or pretty), - standalone=standalone, - no_interactive=non_interactive, - ) - - # ── Step 6: Team configuration ─────────────────────────────────────────── - # Skip team config in standalone mode (no org config to apply) - # In offline mode, team config still applies from cached org config - sync_result: SyncResult | None = None - if not dry_run and not standalone: - _configure_team_settings(team, cfg) - - # ── Step 6.5: Sync marketplace settings ──────────────────────────────── - # Skip sync in offline mode (can't fetch remote data) - if not offline: - sync_result = _sync_marketplace_settings(workspace_path, team) - - # ── Step 6.55: Apply personal profile (local overlay) ───────────────────── - personal_profile_id = None - personal_applied = False - if not dry_run and workspace_path is not None: - personal_profile_id, personal_applied = _apply_personal_profile( - workspace_path, - json_mode=(json_output or pretty), - non_interactive=non_interactive, - ) - - # ── Step 6.6: Active stack summary ─────────────────────────────────────── - if not (json_output or pretty) and workspace_path is not None: - personal_label = "project" if personal_profile_id else "none" - if personal_profile_id and not personal_applied: - personal_label = "skipped" - workspace_label = ( - "overrides" if personal_profiles.workspace_has_overrides(workspace_path) else "none" - ) - console.print( - "[dim]Active stack:[/dim] " - f"Team: {team or 'standalone'} | " - f"Personal: {personal_label} | " - f"Workspace: {workspace_label}" - ) - - # ── Step 6.7: Resolve mount path for worktrees (needed for dry-run too) ──── - # At this point workspace_path is guaranteed to exist (validated above) - assert workspace_path is not None - mount_path, current_branch = resolve_mount_and_branch( - workspace_path, json_mode=(json_output or pretty) - ) - - # ── Step 6.8: Handle --dry-run (preview without launching) ──────────────── - if dry_run: - # Use resolver for consistent ED/MR/CW (single source of truth) - from ...services.workspace import resolve_launch_context - - # Pass None for workspace_arg if auto-detected (resolver finds it again) - # Pass explicit path if user provided one (preserves their intent) - workspace_arg = None if was_auto_detected else str(workspace_path) - result = resolve_launch_context( - original_cwd, workspace_arg, allow_suspicious=allow_suspicious_workspace - ) - # Workspace already validated, resolver must succeed - assert result is not None, f"Resolver failed for validated workspace: {workspace_path}" - - org_config = config.load_cached_org_config() - dry_run_data = build_dry_run_data( - workspace_path=workspace_path, - team=team, - org_config=org_config, - project_config=None, - entry_dir=result.entry_dir, - mount_root=result.mount_root, - container_workdir=result.container_workdir, - resolution_reason=result.reason, - ) - - # Handle --pretty implies --json - if pretty: - json_output = True - - if json_output: - with json_output_mode(): - if pretty: - set_pretty_mode(True) - try: - envelope = build_envelope(Kind.START_DRY_RUN, data=dry_run_data) - print_json(envelope) - finally: - if pretty: - set_pretty_mode(False) - else: - show_dry_run_panel(dry_run_data) - - raise typer.Exit(0) - - warn_if_non_worktree(workspace_path, json_mode=(json_output or pretty)) - - # ── Step 8: Launch sandbox ─────────────────────────────────────────────── - should_continue_session = resume - # Extract plugin settings from sync result for container injection - plugin_settings = sync_result.rendered_settings if sync_result else None - launch_sandbox( - workspace_path=workspace_path, - mount_path=mount_path, - team=team, - session_name=session_name, - current_branch=current_branch, - should_continue_session=should_continue_session, - fresh=fresh, - plugin_settings=plugin_settings, - ) - - -def interactive_start( - cfg: dict[str, Any], - *, - skip_quick_resume: bool = False, - allow_back: bool = False, - standalone_override: bool = False, - team_override: str | None = None, -) -> tuple[str | _BackSentinel | None, str | None, str | None, str | None]: - """Guide user through interactive session setup. - - Prompt for team selection, workspace source, optional worktree creation, - and session naming. - - The flow prioritizes quick resume by showing recent contexts first: - 0. Global Quick Resume - if contexts exist and skip_quick_resume=False - (filtered by effective_team: --team > selected_profile) - 1. Team selection - if no context selected (skipped in standalone mode) - 2. Workspace source selection - 2.5. Workspace-scoped Quick Resume - if contexts exist for selected workspace - 3. Worktree creation (optional) - 4. Session naming (optional) - - Navigation Semantics: - - 'q' anywhere: Quit wizard entirely (returns None) - - Esc at Step 0: BACK to dashboard (if allow_back) or skip to Step 1 - - Esc at Step 2: Go back to Step 1 (if team exists) or BACK to dashboard - - Esc at Step 2.5: Go back to Step 2 workspace picker - - 't' anywhere: Restart at Step 1 (team selection) - - 'a' at Quick Resume: Toggle between filtered and all-teams view - - Args: - cfg: Application configuration dictionary containing workspace_base - and other settings. - skip_quick_resume: If True, bypass the Quick Resume picker and go - directly to project source selection. Used when starting from - dashboard empty states (no_containers, no_sessions) where resume - doesn't make sense. - allow_back: If True, Esc at top level returns BACK sentinel instead - of None. Used when called from Dashboard to enable return to - dashboard on Esc. - standalone_override: If True, force standalone mode regardless of - config. Used when --standalone CLI flag is passed. - team_override: If provided, use this team for filtering instead of - selected_profile. Set by --team CLI flag. - - Returns: - Tuple of (workspace, team, session_name, worktree_name). - - Success: (path, team, session, worktree) with path always set - - Cancel: (None, None, None, None) if user pressed q - - Back: (BACK, None, None, None) if allow_back and user pressed Esc - """ - header = get_brand_header() - header_renderable = render_with_layout(console, header) - console.print(header_renderable, style=Colors.BRAND) - - # Determine mode: standalone vs organization - # CLI --standalone flag overrides config setting - standalone_mode = standalone_override or config.is_standalone_mode() - - # Calculate effective_team: --team flag takes precedence over selected_profile - # This is the team used for filtering Quick Resume contexts - selected_profile = cfg.get("selected_profile") - effective_team: str | None = team_override or selected_profile - - # Build display label for UI - if standalone_mode: - active_team_label = "standalone" - elif team_override: - # Show that --team flag is active with "(filtered)" indicator - active_team_label = f"{team_override} (filtered)" - elif selected_profile: - active_team_label = selected_profile - else: - active_team_label = "none (press 't' to choose)" - active_team_context = f"Team: {active_team_label}" - - # Get available teams (from org config if available) - org_config = config.load_cached_org_config() - available_teams = teams.list_teams(org_config) - - # Track if user dismissed global Quick Resume (to skip workspace-scoped QR) - user_dismissed_quick_resume = False - - # Step 0: Global Quick Resume - # Skip when: - # - entering from dashboard empty state (skip_quick_resume=True) - # - org mode with no active team (force team selection first) - # User can press 't' to switch teams (raises TeamSwitchRequested → skip to Step 1) - # - # In org mode without an effective team, skip Quick Resume entirely. - # This prevents showing cross-team sessions and forces user to pick a team first. - should_skip_quick_resume = skip_quick_resume - if not standalone_mode and not effective_team and available_teams: - # Org mode with no active team - skip to team picker - should_skip_quick_resume = True - console.print("[dim]Tip: Select a team first to see team-specific sessions[/dim]") - console.print() - - if not should_skip_quick_resume: - # Track whether showing all teams (toggled by 'a' key) - show_all_teams = False - - # Quick Resume loop: allows toggling between filtered and all-teams view - while True: - # Filter by effective_team unless user toggled to show all - team_filter = "all" if show_all_teams else effective_team - recent_contexts = load_recent_contexts(limit=10, team_filter=team_filter) - - # Update header based on view mode and build helpful subtitle - qr_subtitle: str | None = None - if show_all_teams: - qr_context_label = "All teams" - qr_title = "Quick Resume — All Teams" - if recent_contexts: - qr_subtitle = ( - "Showing all teams — resuming uses that team's plugins. " - "Press 'a' to filter." - ) - else: - qr_subtitle = "No sessions yet — start fresh" - else: - qr_context_label = active_team_context - qr_title = "Quick Resume" - if not recent_contexts: - all_contexts = load_recent_contexts(limit=10, team_filter="all") - team_label = effective_team or "standalone" - if all_contexts: - qr_subtitle = ( - f"No sessions yet for {team_label}. Press 'a' to show all teams." - ) - else: - qr_subtitle = "No sessions yet — start fresh" - - try: - result, selected_context = pick_context_quick_resume( - recent_contexts, - title=qr_title, - subtitle=qr_subtitle, - standalone=standalone_mode, - context_label=qr_context_label, - effective_team=effective_team, - ) - - match result: - case QuickResumeResult.SELECTED: - # User pressed Enter on a context - resume it - if selected_context is not None: - # Cross-team resume requires confirmation - if ( - effective_team - and selected_context.team - and selected_context.team != effective_team - ): - console.print() - if not confirm_with_layout( - console, - f"[yellow]Resume session from team '{selected_context.team}'?[/yellow]\n" - f"[dim]This will use {selected_context.team} plugins for this session.[/dim]", - default=False, - ): - continue # Back to QR picker loop - return ( - str(selected_context.worktree_path), - selected_context.team, - selected_context.last_session_id, - None, # worktree_name - not creating new worktree - ) - - case QuickResumeResult.BACK: - # User pressed Esc - go back if we can (Dashboard context) - if allow_back: - return (BACK, None, None, None) - # CLI context: no previous screen, treat as cancel - return (None, None, None, None) - - case QuickResumeResult.NEW_SESSION: - # User pressed 'n' or selected "New Session" entry - user_dismissed_quick_resume = True - console.print() - break # Exit QR loop, continue to wizard - - case QuickResumeResult.TOGGLE_ALL_TEAMS: - # User pressed 'a' - toggle all-teams view - if standalone_mode: - console.print( - "[dim]All teams view is unavailable in standalone mode[/dim]" - ) - console.print() - continue - show_all_teams = not show_all_teams - continue # Re-render with new filter - - case QuickResumeResult.CANCELLED: - # User pressed q - cancel entire wizard - return (None, None, None, None) - - except TeamSwitchRequested: - # User pressed 't' - skip to team selection (Step 1) - # Reset Quick Resume dismissal so new team's contexts are shown - user_dismissed_quick_resume = False - show_all_teams = False - console.print() - break # Exit QR loop, continue to team selection - - # ───────────────────────────────────────────────────────────────────────── - # MEGA-LOOP: Wraps Steps 1-2.5 to handle 't' key (TeamSwitchRequested) - # When user presses 't' anywhere, we restart from Step 1 (team selection) - # ───────────────────────────────────────────────────────────────────────── - while True: - # Step 1: Select team (mode-aware handling) - team: str | None = None - - if standalone_mode: - # P0.1: Standalone mode - skip team picker entirely - # Solo devs don't need team selection friction - # Only print banner if detected from config (CLI --standalone already printed in start()) - if not standalone_override: - console.print("[dim]Running in standalone mode (no organization config)[/dim]") - console.print() - elif not available_teams: - # P0.2: Org mode with no teams configured - exit with clear error - # Get org URL for context in error message - user_cfg = config.load_user_config() - org_source = user_cfg.get("organization_source", {}) - org_url = org_source.get("url", "unknown") - - console.print() - console.print( - create_warning_panel( - "No Teams Configured", - f"Organization config from: {org_url}\n" - "No team profiles are defined in this organization.", - "Contact your admin to add profiles, or use: scc start --standalone", - ) - ) - console.print() - raise typer.Exit(EXIT_CONFIG) - elif team_override: - # --team flag provided - use it directly, skip team picker - team = team_override - console.print(f"[dim]Using team from --team flag: {team}[/dim]") - console.print() - else: - # Normal flow: org mode with teams available - selected = pick_team( - available_teams, - current_team=str(selected_profile) if selected_profile else None, - title="Select Team", - ) - if selected is None: - return (None, None, None, None) - team = selected.get("name") - if team and team != selected_profile: - config.set_selected_profile(team) - selected_profile = team - effective_team = team - - # Step 2: Select workspace source (with back navigation support) - workspace: str | None = None - team_context_label = active_team_context - if team: - team_context_label = f"Team: {team}" - - # Check if team has repositories configured (must be inside mega-loop since team can change) - team_config = cfg.get("profiles", {}).get(team, {}) if team else {} - team_repos: list[dict[str, Any]] = team_config.get("repositories", []) - has_team_repos = bool(team_repos) - - try: - # Outer loop: allows Step 2.5 to go BACK to Step 2 (workspace picker) - while True: - # Step 2: Workspace selection loop - while workspace is None: - # Top-level picker: supports three-state contract - source = pick_workspace_source( - has_team_repos=has_team_repos, - team=team, - standalone=standalone_mode, - allow_back=allow_back or (team is not None), - context_label=team_context_label, - ) - - # Handle three-state return contract - if source is BACK: - if team is not None: - # Esc in org mode: go back to Step 1 (team selection) - raise TeamSwitchRequested() # Will be caught by mega-loop - elif allow_back: - # Esc in standalone mode with allow_back: return to dashboard - return (BACK, None, None, None) - else: - # Esc in standalone CLI mode: cancel wizard - return (None, None, None, None) - - if source is None: - # q pressed: quit entirely - return (None, None, None, None) - - if source == WorkspaceSource.CURRENT_DIR: - # Detect workspace root from CWD (handles subdirs + worktrees) - detected_root, _start_cwd = git.detect_workspace_root(Path.cwd()) - if detected_root: - workspace = str(detected_root) - else: - # Fall back to CWD if no workspace root detected - workspace = str(Path.cwd()) - - elif source == WorkspaceSource.RECENT: - recent = sessions.list_recent(10) - picker_result = pick_recent_workspace( - recent, - standalone=standalone_mode, - context_label=team_context_label, - ) - if picker_result is None: - return (None, None, None, None) # User pressed q - quit wizard - if picker_result is BACK: - continue # User pressed Esc - go back to source picker - workspace = cast(str, picker_result) - - elif source == WorkspaceSource.TEAM_REPOS: - workspace_base = cfg.get("workspace_base", "~/projects") - picker_result = pick_team_repo( - team_repos, - workspace_base, - standalone=standalone_mode, - context_label=team_context_label, - ) - if picker_result is None: - return (None, None, None, None) # User pressed q - quit wizard - if picker_result is BACK: - continue # User pressed Esc - go back to source picker - workspace = cast(str, picker_result) - - elif source == WorkspaceSource.CUSTOM: - workspace = prompt_custom_workspace(console) - # Empty input means go back - if workspace is None: - continue - - elif source == WorkspaceSource.CLONE: - repo_url = prompt_repo_url(console) - if repo_url: - workspace = git.clone_repo( - repo_url, cfg.get("workspace_base", "~/projects") - ) - # Empty URL means go back - if workspace is None: - continue - - # ───────────────────────────────────────────────────────────────── - # Step 2.5: Workspace-scoped Quick Resume - # After selecting a workspace, check if existing contexts exist - # and offer to resume one instead of starting fresh - # ───────────────────────────────────────────────────────────────── - normalized_workspace = normalize_path(workspace) - - # Smart filter: Match contexts related to this workspace AND team - workspace_contexts = [] - for ctx in load_recent_contexts(limit=30): - # Standalone: only show standalone contexts - if standalone_mode and ctx.team is not None: - continue - # Org mode: filter by team (prevents cross-team resume confusion) - if team is not None and ctx.team != team: - continue - - # Case 1: Exact worktree match (fastest check) - if ctx.worktree_path == normalized_workspace: - workspace_contexts.append(ctx) - continue - - # Case 2: User picked repo root - show all worktree contexts for this repo - if ctx.repo_root == normalized_workspace: - workspace_contexts.append(ctx) - continue - - # Case 3: User picked a subdir - match if inside a known worktree/repo - try: - if normalized_workspace.is_relative_to(ctx.worktree_path): - workspace_contexts.append(ctx) - continue - if normalized_workspace.is_relative_to(ctx.repo_root): - workspace_contexts.append(ctx) - except ValueError: - # is_relative_to raises ValueError if paths are on different drives - pass - - # Skip workspace-scoped Quick Resume if user already dismissed global Quick Resume - if workspace_contexts and not user_dismissed_quick_resume: - console.print() - - # Workspace QR loop for handling toggle (press 'a') - workspace_qr_show_all = False - while True: - # Filter contexts based on toggle state - displayed_contexts = workspace_contexts - if workspace_qr_show_all: - # Show all contexts for this workspace (ignore team filter) - # Use same 3-case matching logic as above - displayed_contexts = [] - for ctx in load_recent_contexts(limit=30): - # Case 1: Exact worktree match - if ctx.worktree_path == normalized_workspace: - displayed_contexts.append(ctx) - continue - # Case 2: User picked repo root - if ctx.repo_root == normalized_workspace: - displayed_contexts.append(ctx) - continue - # Case 3: User picked a subdir - try: - if normalized_workspace.is_relative_to(ctx.worktree_path): - displayed_contexts.append(ctx) - continue - if normalized_workspace.is_relative_to(ctx.repo_root): - displayed_contexts.append(ctx) - except ValueError: - pass - - qr_subtitle = "Existing sessions found for this workspace" - if workspace_qr_show_all: - qr_subtitle = ( - "All teams for this workspace — resuming uses that team's plugins" - ) - - result, selected_context = pick_context_quick_resume( - displayed_contexts, - title=f"Resume session in {Path(workspace).name}?", - subtitle=qr_subtitle, - standalone=standalone_mode, - context_label="All teams" - if workspace_qr_show_all - else f"Team: {team or active_team_label}", - effective_team=team or effective_team, - ) - # Note: TeamSwitchRequested bubbles up to mega-loop handler - - match result: - case QuickResumeResult.SELECTED: - # User wants to resume - return context info immediately - if selected_context is not None: - # Cross-team resume requires confirmation - current_team = team or effective_team - if ( - current_team - and selected_context.team - and selected_context.team != current_team - ): - console.print() - if not confirm_with_layout( - console, - "[yellow]Workspace overrides detected. Apply anyway?[/yellow]", - default=False, - ): - continue - - workspace_qr_show_all = not workspace_qr_show_all - continue # Re-render workspace QR - - case QuickResumeResult.CANCELLED: - # User pressed q - cancel entire wizard - return (None, None, None, None) - - # Check if we need to go back to workspace picker - if workspace is None: - continue # Continue outer loop to re-enter Step 2 - - # No contexts or user dismissed global Quick Resume - proceed to Step 3 - break # Exit outer loop (Step 2 + 2.5) - - except TeamSwitchRequested: - # User pressed 't' somewhere - restart at Step 1 (team selection) - # Reset Quick Resume dismissal so new team's contexts are shown - user_dismissed_quick_resume = False - console.print() - continue # Continue mega-loop - - # Successfully got a workspace - exit mega-loop - break - - # Step 3: Worktree option - worktree_name = None - console.print() - if confirm_with_layout( - console, - "[cyan]Create a worktree for isolated feature development?[/cyan]", - default=False, - ): - workspace_path = Path(workspace) - can_create_worktree = True - - # Check if directory is a git repository - if not git.is_git_repo(workspace_path): - console.print() - if confirm_with_layout( - console, - "[yellow]⚠️ Not a git repository. Initialize git?[/yellow]", - default=False, - ): - if git.init_repo(workspace_path): - console.print( - f" [green]{Indicators.get('PASS')}[/green] Initialized git repository" - ) - else: - err_console.print( - f" [red]{Indicators.get('FAIL')}[/red] Failed to initialize git" - ) - can_create_worktree = False - else: - # User declined git init - can't create worktree - console.print( - f" [dim]{Indicators.get('INFO')}[/dim] " - "Skipping worktree (requires git repository)" - ) - can_create_worktree = False - - # Check if repository has commits (worktree requires at least one) - if can_create_worktree and git.is_git_repo(workspace_path): - if not git.has_commits(workspace_path): - console.print() - if confirm_with_layout( - console, - "[yellow]⚠️ Worktree requires initial commit. " - "Create empty initial commit?[/yellow]", - default=True, - ): - success, error_msg = git.create_empty_initial_commit(workspace_path) - if success: - console.print( - f" [green]{Indicators.get('PASS')}[/green] Created initial commit" - ) - else: - err_console.print(f" [red]{Indicators.get('FAIL')}[/red] {error_msg}") - can_create_worktree = False - else: - # User declined empty commit - can't create worktree - console.print( - f" [dim]{Indicators.get('INFO')}[/dim] " - "Skipping worktree (requires initial commit)" - ) - can_create_worktree = False - - # Only ask for worktree name if we have a valid git repo with commits - if can_create_worktree: - worktree_name = prompt_with_layout(console, "[cyan]Feature/worktree name[/cyan]") - - # Step 4: Session name - console.print() - session_name = ( - prompt_with_layout( - console, - "[cyan]Session name[/cyan] [dim](optional, for easy resume)[/dim]", - default="", - ) - or None - ) - - return workspace, team, session_name, worktree_name - - -def run_start_wizard_flow( - *, skip_quick_resume: bool = False, allow_back: bool = False -) -> bool | None: - """Run the interactive start wizard and launch sandbox. - - This is the shared entrypoint for starting sessions from both the CLI - (scc start with no args) and the dashboard (Enter on empty containers). - - The function runs outside any Rich Live context to avoid nested Live - conflicts. It handles the complete flow: - 1. Run interactive wizard to get user selections - 2. If user cancels, return False/None - 3. Otherwise, validate and launch the sandbox - - Args: - skip_quick_resume: If True, bypass the Quick Resume picker and go - directly to project source selection. Used when starting from - dashboard empty states where "resume" doesn't make sense. - allow_back: If True, Esc returns BACK sentinel (for dashboard context). - If False, Esc returns None (for CLI context). - - Returns: - True if sandbox was launched successfully. - False if user pressed Esc to go back (only when allow_back=True). - None if user pressed q to quit or an error occurred. - """ - # Step 1: First-run detection - if setup.is_setup_needed(): - if not setup.maybe_run_setup(console): - return None # Error during setup - - cfg = config.load_user_config() - - # Step 2: Run interactive wizard - # Note: standalone_override=False (default) is correct here - dashboard path - # doesn't have CLI flags, so we rely on config.is_standalone_mode() inside - # interactive_start() to detect standalone mode from user's config file. - workspace, team, session_name, worktree_name = interactive_start( - cfg, skip_quick_resume=skip_quick_resume, allow_back=allow_back - ) - - # Three-state return handling: - # - workspace is BACK → user pressed Esc (go back to dashboard) - # - workspace is None → user pressed q (quit app) - if workspace is BACK: - return False # Go back to dashboard - if workspace is None: - return None # Quit app - - workspace_value = cast(str, workspace) - - try: - with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): - docker.check_docker_available() - workspace_path = validate_and_resolve_workspace(workspace_value) - workspace_path = prepare_workspace(workspace_path, worktree_name, install_deps=False) - _configure_team_settings(team, cfg) - sync_result = _sync_marketplace_settings(workspace_path, team) - plugin_settings = sync_result.rendered_settings if sync_result else None - mount_path, current_branch = resolve_mount_and_branch(workspace_path) - launch_sandbox( - workspace_path=workspace_path, - mount_path=mount_path, - team=team, - session_name=session_name, - current_branch=current_branch, - should_continue_session=False, - fresh=False, - plugin_settings=plugin_settings, - ) - return True - except Exception as e: - err_console.print(f"[red]Error launching sandbox: {e}[/red]") - return False +start = handle_errors(_start) diff --git a/src/scc_cli/commands/launch/flow.py b/src/scc_cli/commands/launch/flow.py new file mode 100644 index 0000000..a67efa2 --- /dev/null +++ b/src/scc_cli/commands/launch/flow.py @@ -0,0 +1,1605 @@ +""" +Launch flow helpers for the start command. + +This module contains the core logic for starting sessions, interactive +launch flows, and dashboard entrypoints. The CLI wrapper in app.py should +stay thin and delegate to these functions. +""" + +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Any, cast + +import typer +from rich.status import Status + +from ... import config, git, sessions, setup, teams +from ...application.start_session import ( + StartSessionDependencies, + StartSessionRequest, + prepare_start_session, + start_session, +) +from ...bootstrap import get_default_adapters +from ...cli_common import console, err_console +from ...contexts import WorkContext, load_recent_contexts, normalize_path, record_context +from ...core import personal_profiles +from ...core.errors import WorkspaceNotFoundError +from ...core.exit_codes import EXIT_CANCELLED, EXIT_CONFIG, EXIT_ERROR, EXIT_USAGE +from ...json_output import build_envelope +from ...kinds import Kind +from ...marketplace.materialize import materialize_marketplace +from ...marketplace.resolve import resolve_effective_config +from ...marketplace.sync import SyncError, SyncResult, sync_marketplace_settings +from ...output_mode import json_output_mode, print_human, print_json, set_pretty_mode +from ...panels import create_info_panel, create_warning_panel +from ...theme import Colors, Indicators, Spinners, get_brand_header +from ...ui.chrome import print_with_layout, render_with_layout +from ...ui.gate import is_interactive_allowed +from ...ui.git_interactive import clone_repo +from ...ui.keys import _BackSentinel +from ...ui.picker import ( + QuickResumeResult, + TeamSwitchRequested, + pick_context_quick_resume, + pick_team, +) +from ...ui.prompts import ( + confirm_with_layout, + prompt_custom_workspace, + prompt_repo_url, + prompt_with_layout, + select_session, +) +from ...ui.wizard import ( + BACK, + WorkspaceSource, + pick_recent_workspace, + pick_team_repo, + pick_workspace_source, +) +from .render import ( + build_dry_run_data, + show_dry_run_panel, + show_launch_panel, + warn_if_non_worktree, +) +from .workspace import ( + prepare_workspace, + resolve_workspace_team, + validate_and_resolve_workspace, +) + +# ───────────────────────────────────────────────────────────────────────────── +# Helper Functions (extracted for maintainability) +# ───────────────────────────────────────────────────────────────────────────── + + +def _resolve_session_selection( + workspace: str | None, + team: str | None, + resume: bool, + select: bool, + cfg: dict[str, Any], + *, + json_mode: bool = False, + standalone_override: bool = False, + no_interactive: bool = False, + dry_run: bool = False, +) -> tuple[str | None, str | None, str | None, str | None, bool, bool]: + """ + Handle session selection logic for --select, --resume, and interactive modes. + + Args: + workspace: Workspace path from command line. + team: Team name from command line. + resume: Whether --resume flag is set. + select: Whether --select flag is set. + cfg: Loaded configuration. + json_mode: Whether --json output is requested (blocks interactive). + standalone_override: Whether --standalone flag is set (overrides config). + + Returns: + Tuple of (workspace, team, session_name, worktree_name, cancelled, was_auto_detected) + If user cancels or no session found, workspace will be None. + cancelled is True only for explicit user cancellation. + was_auto_detected is True if workspace was found via resolver (git/.scc.yaml). + + Raises: + typer.Exit: If interactive mode required but not allowed (non-TTY, CI, --json). + """ + session_name = None + worktree_name = None + cancelled = False + + # Interactive mode if no workspace provided and no session flags + if workspace is None and not resume and not select: + # For --dry-run without workspace, use resolver to auto-detect (skip interactive) + if dry_run: + from pathlib import Path + + from ...services.workspace import resolve_launch_context + + result = resolve_launch_context(Path.cwd(), workspace_arg=None) + if result is not None: + return str(result.workspace_root), team, None, None, False, True # auto-detected + # No auto-detect possible, fall through to error + err_console.print( + "[red]Error:[/red] No workspace could be auto-detected.\n" + "[dim]Provide a workspace path: scc start --dry-run /path/to/project[/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_USAGE) + + # Check TTY gating before entering interactive mode + if not is_interactive_allowed( + json_mode=json_mode, + no_interactive_flag=no_interactive, + ): + # Try auto-detect before failing + from pathlib import Path + + from ...services.workspace import resolve_launch_context + + result = resolve_launch_context(Path.cwd(), workspace_arg=None) + if result is not None: + return str(result.workspace_root), team, None, None, False, True # auto-detected + + err_console.print( + "[red]Error:[/red] Interactive mode requires a terminal (TTY).\n" + "[dim]Provide a workspace path: scc start /path/to/project[/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_USAGE) + workspace_result, team, session_name, worktree_name = cast( + tuple[str | None, str | None, str | None, str | None], + interactive_start(cfg, standalone_override=standalone_override, team_override=team), + ) + if workspace_result is None: + return None, team, None, None, True, False + return ( + workspace_result, + team, + session_name, + worktree_name, + False, + False, + ) + + # Handle --select: interactive session picker + if select and workspace is None: + # Check TTY gating before showing session picker + if not is_interactive_allowed( + json_mode=json_mode, + no_interactive_flag=no_interactive, + ): + console.print( + "[red]Error:[/red] --select requires a terminal (TTY).\n" + "[dim]Use --resume to auto-select most recent session.[/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_USAGE) + + # Prefer explicit --team, then selected_profile for filtering + effective_team = team or cfg.get("selected_profile") + if standalone_override: + effective_team = None + + # If org mode and no active team, require explicit selection + if effective_team is None and not standalone_override: + if not json_mode: + console.print( + "[yellow]No active team selected.[/yellow] " + "Run 'scc team switch' or pass --team to select." + ) + return None, team, None, None, False, False + + recent_sessions = sessions.list_recent(limit=10) + if effective_team is None: + filtered_sessions = [s for s in recent_sessions if s.get("team") is None] + else: + filtered_sessions = [s for s in recent_sessions if s.get("team") == effective_team] + + if not filtered_sessions: + if not json_mode: + console.print("[yellow]No recent sessions found.[/yellow]") + return None, team, None, None, False, False + + selected = select_session(console, filtered_sessions) + if selected is None: + return None, team, None, None, True, False + workspace = selected.get("workspace") + if not team: + team = selected.get("team") + # --standalone overrides any team from session (standalone means no team) + if standalone_override: + team = None + if not json_mode: + print_with_layout(console, f"[dim]Selected: {workspace}[/dim]") + + # Handle --resume: auto-select most recent session + elif resume and workspace is None: + # Prefer explicit --team, then selected_profile for resume filtering + effective_team = team or cfg.get("selected_profile") + if standalone_override: + effective_team = None + + # If org mode and no active team, require explicit selection + if effective_team is None and not standalone_override: + if not json_mode: + console.print( + "[yellow]No active team selected.[/yellow] " + "Run 'scc team switch' or pass --team to resume." + ) + return None, team, None, None, False, False + + recent_sessions = sessions.list_recent(limit=50) + if effective_team is None: + filtered_sessions = [s for s in recent_sessions if s.get("team") is None] + else: + filtered_sessions = [s for s in recent_sessions if s.get("team") == effective_team] + + if filtered_sessions: + recent_session = filtered_sessions[0] + workspace = recent_session.get("workspace") + if not team: + team = recent_session.get("team") + # --standalone overrides any team from session (standalone means no team) + if standalone_override: + team = None + if not json_mode: + print_with_layout(console, f"[dim]Resuming: {workspace}[/dim]") + else: + if not json_mode: + console.print("[yellow]No recent sessions found.[/yellow]") + return None, team, None, None, False, False + + return workspace, team, session_name, worktree_name, cancelled, False # explicit workspace + + +def _configure_team_settings(team: str | None, cfg: dict[str, Any]) -> None: + """ + Validate team profile exists. + + NOTE: Plugin settings are now sourced ONLY from workspace settings.local.json + (via _sync_marketplace_settings). Docker volume injection has been removed + to prevent plugin mixing across teams. + + IMPORTANT: This function must remain cache-only (no network calls). + It's called in offline mode where only cached org config is available. + If you need to add network operations, gate them with an offline check + or move them to _sync_marketplace_settings() which is already offline-aware. + + Raises: + typer.Exit: If team profile is not found. + """ + if not team: + return + + with Status( + f"[cyan]Validating {team} profile...[/cyan]", console=console, spinner=Spinners.SETUP + ): + # load_cached_org_config() reads from local cache only - safe for offline mode + org_config = config.load_cached_org_config() + + validation = teams.validate_team_profile(team, org_config) + if not validation["valid"]: + print_with_layout( + console, + create_warning_panel( + "Team Not Found", + f"No team profile named '{team}'.", + "Run 'scc team list' to see available profiles", + ), + constrain=True, + ) + raise typer.Exit(1) + + # NOTE: docker.inject_team_settings() removed - workspace settings.local.json + # is now the single source of truth for plugins (prevents cross-team mixing) + + +def _sync_marketplace_settings( + workspace_path: Path | None, + team: str | None, + org_config_url: str | None = None, +) -> SyncResult | None: + """ + Sync marketplace settings for the workspace. + + Orchestrates the full marketplace pipeline: + 1. Compute effective plugins for team + 2. Materialize required marketplaces + 3. Render settings (NOT written to workspace to prevent host leakage) + 4. Return rendered_settings for container injection + + IMPORTANT: This uses container-only mode to prevent host Claude from seeing + SCC-managed plugins. Marketplaces are still materialized to workspace (for + container access via bind-mount), but settings.local.json is NOT written. + Instead, rendered_settings is returned for injection into container HOME. + + Args: + workspace_path: Path to the workspace directory. + team: Selected team profile name. + org_config_url: URL of the org config (for tracking). + + Returns: + SyncResult with details (including rendered_settings for container injection), + or None if no sync needed. + + Raises: + typer.Exit: If marketplace sync fails critically. + """ + if workspace_path is None or team is None: + return None + + org_config = config.load_cached_org_config() + if org_config is None: + return None + + with Status( + "[cyan]Syncing marketplace settings...[/cyan]", console=console, spinner=Spinners.NETWORK + ): + try: + # Use container-only mode: + # - write_to_workspace=False: Don't write settings.local.json (prevents host leakage) + # - container_path_prefix: Workspace path for absolute paths in container + # + # Docker sandbox mounts workspace at the same absolute path, so paths like + # "/Users/foo/project/.claude/.scc-marketplaces/..." will resolve correctly + # when settings are in container HOME (/home/agent/.claude/settings.json) + result = sync_marketplace_settings( + project_dir=workspace_path, + org_config_data=org_config, + team_id=team, + org_config_url=org_config_url, + write_to_workspace=False, # Container-only mode + container_path_prefix=str(workspace_path), # Absolute paths for container + ) + + # Display any warnings + if result.warnings: + console.print() + for warning in result.warnings: + print_with_layout(console, f"[yellow]{warning}[/yellow]") + console.print() + + # Log success + if result.plugins_enabled: + print_with_layout( + console, + f"[green]{Indicators.get('PASS')} Enabled {len(result.plugins_enabled)} team plugin(s)[/green]", + ) + if result.marketplaces_materialized: + print_with_layout( + console, + f"[green]{Indicators.get('PASS')} Materialized {len(result.marketplaces_materialized)} marketplace(s)[/green]", + ) + + # rendered_settings will be passed to launch_sandbox for container injection + return result + + except SyncError as e: + panel = create_warning_panel( + "Marketplace Sync Failed", + str(e), + "Team plugins may not be available. Use --dry-run to diagnose.", + ) + print_with_layout(console, panel, constrain=True) + # Non-fatal: continue without marketplace sync + return None + + +def _apply_personal_profile( + workspace_path: Path, + *, + json_mode: bool, + non_interactive: bool, +) -> tuple[str | None, bool]: + """Apply personal profile if available. + + Returns (profile_id, applied). + """ + profile, corrupt = personal_profiles.load_personal_profile_with_status(workspace_path) + if corrupt: + if not json_mode: + console.print("[yellow]Personal profile is invalid JSON. Skipping.[/yellow]") + return None, False + if profile is None: + return None, False + + drift = personal_profiles.detect_drift(workspace_path) + if drift and not personal_profiles.workspace_has_overrides(workspace_path): + drift = False + + if drift and not is_interactive_allowed( + json_mode=json_mode, no_interactive_flag=non_interactive + ): + if not json_mode: + console.print( + "[yellow]Workspace overrides detected; personal profile not applied.[/yellow]" + ) + return profile.profile_id, False + + if drift and not json_mode: + console.print("[yellow]Workspace overrides detected.[/yellow]") + if not confirm_with_layout(console, "Apply personal profile anyway?", default=False): + return profile.profile_id, False + + existing_settings, settings_invalid = personal_profiles.load_workspace_settings_with_status( + workspace_path + ) + existing_mcp, mcp_invalid = personal_profiles.load_workspace_mcp_with_status(workspace_path) + if settings_invalid: + if not json_mode: + console.print("[yellow]Invalid JSON in .claude/settings.local.json[/yellow]") + return profile.profile_id, False + if mcp_invalid: + if not json_mode: + console.print("[yellow]Invalid JSON in .mcp.json[/yellow]") + return profile.profile_id, False + + existing_settings = existing_settings or {} + existing_mcp = existing_mcp or {} + + merged_settings = personal_profiles.merge_personal_settings( + workspace_path, existing_settings, profile.settings or {} + ) + merged_mcp = personal_profiles.merge_personal_mcp(existing_mcp, profile.mcp or {}) + + personal_profiles.write_workspace_settings(workspace_path, merged_settings) + if profile.mcp: + personal_profiles.write_workspace_mcp(workspace_path, merged_mcp) + + personal_profiles.save_applied_state( + workspace_path, + profile.profile_id, + personal_profiles.compute_fingerprints(workspace_path), + ) + + if not json_mode: + console.print("[green]Applied personal profile.[/green]") + + return profile.profile_id, True + + +def _record_session_and_context( + workspace_path: Path, + team: str | None, + session_name: str | None, + current_branch: str | None, +) -> None: + """Record session metadata and quick-resume context.""" + sessions.record_session( + workspace=str(workspace_path), + team=team, + session_name=session_name, + container_name=None, + branch=current_branch, + ) + repo_root = git.get_worktree_main_repo(workspace_path) or workspace_path + worktree_name = workspace_path.name + context = WorkContext( + team=team, + repo_root=repo_root, + worktree_path=workspace_path, + worktree_name=worktree_name, + branch=current_branch, + last_session_id=session_name, + ) + try: + record_context(context) + except (OSError, ValueError) as exc: + print_human( + "[yellow]Warning:[/yellow] Could not save Quick Resume context.", + highlight=False, + ) + print_human(f"[dim]{exc}[/dim]", highlight=False) + logging.debug(f"Failed to record context for Quick Resume: {exc}") + if team: + try: + config.set_workspace_team(str(workspace_path), team) + except (OSError, ValueError) as exc: + print_human( + "[yellow]Warning:[/yellow] Could not save workspace team preference.", + highlight=False, + ) + print_human(f"[dim]{exc}[/dim]", highlight=False) + logging.debug(f"Failed to store workspace team mapping: {exc}") + + +# ───────────────────────────────────────────────────────────────────────────── +# Start Command Flow +# ───────────────────────────────────────────────────────────────────────────── + + +def start( + workspace: str | None = typer.Argument(None, help="Path to workspace (optional)"), + team: str | None = typer.Option(None, "-t", "--team", help="Team profile to use"), + session_name: str | None = typer.Option(None, "--session", help="Session name"), + resume: bool = typer.Option(False, "-r", "--resume", help="Resume most recent session"), + select: bool = typer.Option(False, "-s", "--select", help="Select from recent sessions"), + worktree_name: str | None = typer.Option(None, "-w", "--worktree", help="Worktree name"), + fresh: bool = typer.Option(False, "--fresh", help="Force new container"), + install_deps: bool = typer.Option(False, "--install-deps", help="Install dependencies"), + offline: bool = typer.Option(False, "--offline", help="Use cached config only (error if none)"), + standalone: bool = typer.Option(False, "--standalone", help="Run without organization config"), + dry_run: bool = typer.Option(False, "--dry-run", help="Preview config without launching"), + json_output: bool = typer.Option(False, "--json", help="Output as JSON"), + pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), + non_interactive: bool = typer.Option( + False, + "--non-interactive", + "--no-interactive", + help="Fail fast if interactive input would be required", + ), + debug: bool = typer.Option( + False, + "--debug", + hidden=True, + ), + allow_suspicious_workspace: bool = typer.Option( + False, + "--allow-suspicious-workspace", + help="Allow starting in suspicious directories (e.g., home, /tmp) in non-interactive mode", + ), +) -> None: + """ + Start Claude Code in a Docker sandbox. + + If no arguments provided, launches interactive mode. + """ + from pathlib import Path + + # Capture original CWD for entry_dir tracking (before any directory changes) + original_cwd = Path.cwd() + + if isinstance(debug, bool) and debug: + err_console.print( + "[red]Error:[/red] --debug is a global flag and must be placed before the command.", + highlight=False, + ) + err_console.print( + "[dim]Use: scc --debug start [/dim]", + highlight=False, + ) + err_console.print( + "[dim]With uv: uv run scc --debug start [/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_USAGE) + + # ── Fast Fail: Validate mode flags before any processing ────────────────── + from scc_cli.ui.gate import validate_mode_flags + + validate_mode_flags( + json_mode=(json_output or pretty), + select=select, + ) + + # ── Step 0: Handle --standalone mode (skip org config entirely) ─────────── + if standalone: + # In standalone mode, never ask for team and never load org config + team = None + if not json_output and not pretty: + console.print("[dim]Running in standalone mode (no organization config)[/dim]") + + org_config: dict[str, Any] | None = None + + # ── Step 0.5: Handle --offline mode (cache-only, fail fast) ─────────────── + if offline and not standalone: + # Check if cached org config exists + org_config = config.load_cached_org_config() + if org_config is None: + err_console.print( + "[red]Error:[/red] --offline requires cached organization config.\n" + "[dim]Run 'scc setup' first to cache your org config.[/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_CONFIG) + if not json_output and not pretty: + console.print("[dim]Using cached organization config (offline mode)[/dim]") + + # ── Step 1: First-run detection ────────────────────────────────────────── + # Skip setup wizard in standalone mode (no org config needed) + # Skip in offline mode (can't fetch remote - already validated cache exists) + if not standalone and not offline and setup.is_setup_needed(): + if not setup.maybe_run_setup(console): + raise typer.Exit(1) + + cfg = config.load_user_config() + adapters = get_default_adapters() + + # ── Step 2: Session selection (interactive, --select, --resume) ────────── + workspace, team, session_name, worktree_name, cancelled, was_auto_detected = ( + _resolve_session_selection( + workspace=workspace, + team=team, + resume=resume, + select=select, + cfg=cfg, + json_mode=(json_output or pretty), + standalone_override=standalone, + no_interactive=non_interactive, + dry_run=dry_run, + ) + ) + if workspace is None: + if cancelled: + if not json_output and not pretty: + console.print("[dim]Cancelled.[/dim]") + raise typer.Exit(EXIT_CANCELLED) + if select or resume: + raise typer.Exit(EXIT_ERROR) + raise typer.Exit(EXIT_CANCELLED) + + # ── Step 3: Docker availability check ──────────────────────────────────── + # Skip Docker check for dry-run (just previewing config) + if not dry_run: + with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): + adapters.sandbox_runtime.ensure_available() + + # ── Step 4: Workspace validation and platform checks ───────────────────── + workspace_path = validate_and_resolve_workspace( + workspace, + no_interactive=non_interactive, + allow_suspicious=allow_suspicious_workspace, + json_mode=(json_output or pretty), + ) + if workspace_path is None: + if not json_output and not pretty: + console.print("[dim]Cancelled.[/dim]") + raise typer.Exit(EXIT_CANCELLED) + if not workspace_path.exists(): + raise WorkspaceNotFoundError(path=str(workspace_path)) + + # ── Step 5: Workspace preparation (worktree, deps, git safety) ─────────── + # Skip for dry-run (no worktree creation, no deps, no branch safety prompts) + if not dry_run: + workspace_path = prepare_workspace(workspace_path, worktree_name, install_deps) + assert workspace_path is not None + + # ── Step 5.5: Resolve team from workspace pinning ──────────────────────── + team = resolve_workspace_team( + workspace_path, + team, + cfg, + json_mode=(json_output or pretty), + standalone=standalone, + no_interactive=non_interactive, + ) + + # ── Step 6: Team configuration ─────────────────────────────────────────── + # Skip team config in standalone mode (no org config to apply) + # In offline mode, team config still applies from cached org config + if not dry_run and not standalone: + _configure_team_settings(team, cfg) + + if org_config is None and team and not standalone: + org_config = config.load_cached_org_config() + + if worktree_name: + was_auto_detected = False + + start_dependencies = StartSessionDependencies( + filesystem=adapters.filesystem, + remote_fetcher=adapters.remote_fetcher, + clock=adapters.clock, + git_client=adapters.git_client, + agent_runner=adapters.agent_runner, + sandbox_runtime=adapters.sandbox_runtime, + resolve_effective_config=resolve_effective_config, + materialize_marketplace=materialize_marketplace, + ) + workspace_arg = None if was_auto_detected else str(workspace_path) + start_request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=workspace_arg, + entry_dir=original_cwd, + team=team, + session_name=session_name, + resume=resume, + fresh=fresh, + offline=offline, + standalone=standalone, + dry_run=dry_run, + allow_suspicious=allow_suspicious_workspace, + org_config=org_config, + ) + should_sync = ( + not dry_run + and not offline + and not standalone + and team is not None + and org_config is not None + ) + if should_sync: + with Status( + "[cyan]Syncing marketplace settings...[/cyan]", + console=console, + spinner=Spinners.NETWORK, + ): + start_plan = prepare_start_session(start_request, dependencies=start_dependencies) + else: + start_plan = prepare_start_session(start_request, dependencies=start_dependencies) + + if start_plan.sync_error_message: + panel = create_warning_panel( + "Marketplace Sync Failed", + start_plan.sync_error_message, + "Team plugins may not be available. Use --dry-run to diagnose.", + ) + print_with_layout(console, panel, constrain=True) + elif start_plan.sync_result: + if start_plan.sync_result.warnings: + console.print() + for warning in start_plan.sync_result.warnings: + print_with_layout(console, f"[yellow]{warning}[/yellow]") + console.print() + if start_plan.sync_result.plugins_enabled: + print_with_layout( + console, + f"[green]{Indicators.get('PASS')} Enabled " + f"{len(start_plan.sync_result.plugins_enabled)} team plugin(s)[/green]", + ) + if start_plan.sync_result.marketplaces_materialized: + print_with_layout( + console, + f"[green]{Indicators.get('PASS')} Materialized " + f"{len(start_plan.sync_result.marketplaces_materialized)} marketplace(s)[/green]", + ) + + # ── Step 6.55: Apply personal profile (local overlay) ───────────────────── + personal_profile_id = None + personal_applied = False + if not dry_run and workspace_path is not None: + personal_profile_id, personal_applied = _apply_personal_profile( + workspace_path, + json_mode=(json_output or pretty), + non_interactive=non_interactive, + ) + + # ── Step 6.6: Active stack summary ─────────────────────────────────────── + if not (json_output or pretty) and workspace_path is not None: + personal_label = "project" if personal_profile_id else "none" + if personal_profile_id and not personal_applied: + personal_label = "skipped" + workspace_label = ( + "overrides" if personal_profiles.workspace_has_overrides(workspace_path) else "none" + ) + print_with_layout( + console, + "[dim]Active stack:[/dim] " + f"Team: {team or 'standalone'} | " + f"Personal: {personal_label} | " + f"Workspace: {workspace_label}", + ) + + # ── Step 6.7: Resolve mount path for worktrees (needed for dry-run too) ──── + # At this point workspace_path is guaranteed to exist (validated above) + assert workspace_path is not None + resolver_result = start_plan.resolver_result + if resolver_result.is_mount_expanded and not (json_output or pretty): + console.print() + print_with_layout( + console, + create_info_panel( + "Worktree Detected", + f"Mounting parent directory for worktree support:\n{resolver_result.mount_root}", + "Both worktree and main repo will be accessible", + ), + constrain=True, + ) + console.print() + current_branch = start_plan.current_branch + + # ── Step 6.8: Handle --dry-run (preview without launching) ──────────────── + if dry_run: + result = start_plan.resolver_result + org_config_for_dry_run = config.load_cached_org_config() + dry_run_data = build_dry_run_data( + workspace_path=workspace_path, + team=team, + org_config=org_config_for_dry_run, + project_config=None, + entry_dir=result.entry_dir, + mount_root=result.mount_root, + container_workdir=result.container_workdir, + resolution_reason=result.reason, + ) + + # Handle --pretty implies --json + if pretty: + json_output = True + + if json_output: + with json_output_mode(): + if pretty: + set_pretty_mode(True) + try: + envelope = build_envelope(Kind.START_DRY_RUN, data=dry_run_data) + print_json(envelope) + finally: + if pretty: + set_pretty_mode(False) + else: + show_dry_run_panel(dry_run_data) + + raise typer.Exit(0) + + warn_if_non_worktree(workspace_path, json_mode=(json_output or pretty)) + + # ── Step 8: Launch sandbox ─────────────────────────────────────────────── + _record_session_and_context( + workspace_path, + team, + session_name, + current_branch, + ) + show_launch_panel( + workspace=workspace_path, + team=team, + session_name=session_name, + branch=current_branch, + is_resume=False, + ) + start_session(start_plan, dependencies=start_dependencies) + + +# ───────────────────────────────────────────────────────────────────────────── +# Interactive Flow +# ───────────────────────────────────────────────────────────────────────────── + + +def interactive_start( + cfg: dict[str, Any], + *, + skip_quick_resume: bool = False, + allow_back: bool = False, + standalone_override: bool = False, + team_override: str | None = None, +) -> tuple[str | _BackSentinel | None, str | None, str | None, str | None]: + """Guide user through interactive session setup. + + Prompt for team selection, workspace source, optional worktree creation, + and session naming. + + The flow prioritizes quick resume by showing recent contexts first: + 0. Global Quick Resume - if contexts exist and skip_quick_resume=False + (filtered by effective_team: --team > selected_profile) + 1. Team selection - if no context selected (skipped in standalone mode) + 2. Workspace source selection + 2.5. Workspace-scoped Quick Resume - if contexts exist for selected workspace + 3. Worktree creation (optional) + 4. Session naming (optional) + + Navigation Semantics: + - 'q' anywhere: Quit wizard entirely (returns None) + - Esc at Step 0: BACK to dashboard (if allow_back) or skip to Step 1 + - Esc at Step 2: Go back to Step 1 (if team exists) or BACK to dashboard + - Esc at Step 2.5: Go back to Step 2 workspace picker + - 't' anywhere: Restart at Step 1 (team selection) + - 'a' at Quick Resume: Toggle between filtered and all-teams view + + Args: + cfg: Application configuration dictionary containing workspace_base + and other settings. + skip_quick_resume: If True, bypass the Quick Resume picker and go + directly to project source selection. Used when starting from + dashboard empty states (no_containers, no_sessions) where resume + doesn't make sense. + allow_back: If True, Esc at top level returns BACK sentinel instead + of None. Used when called from Dashboard to enable return to + dashboard on Esc. + standalone_override: If True, force standalone mode regardless of + config. Used when --standalone CLI flag is passed. + team_override: If provided, use this team for filtering instead of + selected_profile. Set by --team CLI flag. + + Returns: + Tuple of (workspace, team, session_name, worktree_name). + - Success: (path, team, session, worktree) with path always set + - Cancel: (None, None, None, None) if user pressed q + - Back: (BACK, None, None, None) if allow_back and user pressed Esc + """ + header = get_brand_header() + header_renderable = render_with_layout(console, header) + console.print(header_renderable, style=Colors.BRAND) + + # Determine mode: standalone vs organization + # CLI --standalone flag overrides config setting + standalone_mode = standalone_override or config.is_standalone_mode() + + # Calculate effective_team: --team flag takes precedence over selected_profile + # This is the team used for filtering Quick Resume contexts + selected_profile = cfg.get("selected_profile") + effective_team: str | None = team_override or selected_profile + + # Build display label for UI + if standalone_mode: + active_team_label = "standalone" + elif team_override: + # Show that --team flag is active with "(filtered)" indicator + active_team_label = f"{team_override} (filtered)" + elif selected_profile: + active_team_label = selected_profile + else: + active_team_label = "none (press 't' to choose)" + active_team_context = f"Team: {active_team_label}" + + # Get available teams (from org config if available) + org_config = config.load_cached_org_config() + available_teams = teams.list_teams(org_config) + + # Track if user dismissed global Quick Resume (to skip workspace-scoped QR) + user_dismissed_quick_resume = False + + # Step 0: Global Quick Resume + # Skip when: + # - entering from dashboard empty state (skip_quick_resume=True) + # - org mode with no active team (force team selection first) + # User can press 't' to switch teams (raises TeamSwitchRequested → skip to Step 1) + # + # In org mode without an effective team, skip Quick Resume entirely. + # This prevents showing cross-team sessions and forces user to pick a team first. + should_skip_quick_resume = skip_quick_resume + if not standalone_mode and not effective_team and available_teams: + # Org mode with no active team - skip to team picker + should_skip_quick_resume = True + console.print("[dim]Tip: Select a team first to see team-specific sessions[/dim]") + console.print() + + if not should_skip_quick_resume: + # Track whether showing all teams (toggled by 'a' key) + show_all_teams = False + + # Quick Resume loop: allows toggling between filtered and all-teams view + while True: + # Filter by effective_team unless user toggled to show all + team_filter = "all" if show_all_teams else effective_team + recent_contexts = load_recent_contexts(limit=10, team_filter=team_filter) + + # Update header based on view mode and build helpful subtitle + qr_subtitle: str | None = None + if show_all_teams: + qr_context_label = "All teams" + qr_title = "Quick Resume — All Teams" + if recent_contexts: + qr_subtitle = ( + "Showing all teams — resuming uses that team's plugins. " + "Press 'a' to filter." + ) + else: + qr_subtitle = "No sessions yet — start fresh" + else: + qr_context_label = active_team_context + qr_title = "Quick Resume" + if not recent_contexts: + all_contexts = load_recent_contexts(limit=10, team_filter="all") + team_label = effective_team or "standalone" + if all_contexts: + qr_subtitle = ( + f"No sessions yet for {team_label}. Press 'a' to show all teams." + ) + else: + qr_subtitle = "No sessions yet — start fresh" + + try: + result, selected_context = pick_context_quick_resume( + recent_contexts, + title=qr_title, + subtitle=qr_subtitle, + standalone=standalone_mode, + context_label=qr_context_label, + effective_team=effective_team, + ) + + match result: + case QuickResumeResult.SELECTED: + # User pressed Enter on a context - resume it + if selected_context is not None: + # Cross-team resume requires confirmation + if ( + effective_team + and selected_context.team + and selected_context.team != effective_team + ): + console.print() + if not confirm_with_layout( + console, + f"[yellow]Resume session from team '{selected_context.team}'?[/yellow]\n" + f"[dim]This will use {selected_context.team} plugins for this session.[/dim]", + default=False, + ): + continue # Back to QR picker loop + return ( + str(selected_context.worktree_path), + selected_context.team, + selected_context.last_session_id, + None, # worktree_name - not creating new worktree + ) + + case QuickResumeResult.BACK: + # User pressed Esc - go back if we can (Dashboard context) + if allow_back: + return (BACK, None, None, None) + # CLI context: no previous screen, treat as cancel + return (None, None, None, None) + + case QuickResumeResult.NEW_SESSION: + # User pressed 'n' or selected "New Session" entry + user_dismissed_quick_resume = True + console.print() + break # Exit QR loop, continue to wizard + + case QuickResumeResult.TOGGLE_ALL_TEAMS: + # User pressed 'a' - toggle all-teams view + if standalone_mode: + console.print( + "[dim]All teams view is unavailable in standalone mode[/dim]" + ) + console.print() + continue + show_all_teams = not show_all_teams + continue # Re-render with new filter + + case QuickResumeResult.CANCELLED: + # User pressed q - cancel entire wizard + return (None, None, None, None) + + except TeamSwitchRequested: + # User pressed 't' - skip to team selection (Step 1) + # Reset Quick Resume dismissal so new team's contexts are shown + user_dismissed_quick_resume = False + show_all_teams = False + console.print() + break # Exit QR loop, continue to team selection + + # ───────────────────────────────────────────────────────────────────────── + # MEGA-LOOP: Wraps Steps 1-2.5 to handle 't' key (TeamSwitchRequested) + # When user presses 't' anywhere, we restart from Step 1 (team selection) + # ───────────────────────────────────────────────────────────────────────── + while True: + # Step 1: Select team (mode-aware handling) + team: str | None = None + + if standalone_mode: + # P0.1: Standalone mode - skip team picker entirely + # Solo devs don't need team selection friction + # Only print banner if detected from config (CLI --standalone already printed in start()) + if not standalone_override: + console.print("[dim]Running in standalone mode (no organization config)[/dim]") + console.print() + elif not available_teams: + # P0.2: Org mode with no teams configured - exit with clear error + # Get org URL for context in error message + user_cfg = config.load_user_config() + org_source = user_cfg.get("organization_source", {}) + org_url = org_source.get("url", "unknown") + + console.print() + console.print( + create_warning_panel( + "No Teams Configured", + f"Organization config from: {org_url}\n" + "No team profiles are defined in this organization.", + "Contact your admin to add profiles, or use: scc start --standalone", + ) + ) + console.print() + raise typer.Exit(EXIT_CONFIG) + elif team_override: + # --team flag provided - use it directly, skip team picker + team = team_override + console.print(f"[dim]Using team from --team flag: {team}[/dim]") + console.print() + else: + # Normal flow: org mode with teams available + selected = pick_team( + available_teams, + current_team=str(selected_profile) if selected_profile else None, + title="Select Team", + ) + if selected is None: + return (None, None, None, None) + team = selected.get("name") + if team and team != selected_profile: + config.set_selected_profile(team) + selected_profile = team + effective_team = team + + # Step 2: Select workspace source (with back navigation support) + workspace: str | None = None + team_context_label = active_team_context + if team: + team_context_label = f"Team: {team}" + + # Check if team has repositories configured (must be inside mega-loop since team can change) + team_config = cfg.get("profiles", {}).get(team, {}) if team else {} + team_repos: list[dict[str, Any]] = team_config.get("repositories", []) + has_team_repos = bool(team_repos) + + try: + # Outer loop: allows Step 2.5 to go BACK to Step 2 (workspace picker) + while True: + # Step 2: Workspace selection loop + while workspace is None: + # Top-level picker: supports three-state contract + source = pick_workspace_source( + has_team_repos=has_team_repos, + team=team, + standalone=standalone_mode, + allow_back=allow_back or (team is not None), + context_label=team_context_label, + ) + + # Handle three-state return contract + if source is BACK: + if team is not None: + # Esc in org mode: go back to Step 1 (team selection) + raise TeamSwitchRequested() # Will be caught by mega-loop + elif allow_back: + # Esc in standalone mode with allow_back: return to dashboard + return (BACK, None, None, None) + else: + # Esc in standalone CLI mode: cancel wizard + return (None, None, None, None) + + if source is None: + # q pressed: quit entirely + return (None, None, None, None) + + if source == WorkspaceSource.CURRENT_DIR: + from ...services.workspace import resolve_launch_context + + # Detect workspace root from CWD (handles subdirs + worktrees) + resolver_result = resolve_launch_context(Path.cwd(), workspace_arg=None) + if resolver_result is not None: + workspace = str(resolver_result.workspace_root) + else: + # Fall back to CWD if no workspace root detected + workspace = str(Path.cwd()) + + elif source == WorkspaceSource.RECENT: + recent = sessions.list_recent(10) + picker_result = pick_recent_workspace( + recent, + standalone=standalone_mode, + context_label=team_context_label, + ) + if picker_result is None: + return (None, None, None, None) # User pressed q - quit wizard + if picker_result is BACK: + continue # User pressed Esc - go back to source picker + workspace = cast(str, picker_result) + + elif source == WorkspaceSource.TEAM_REPOS: + workspace_base = cfg.get("workspace_base", "~/projects") + picker_result = pick_team_repo( + team_repos, + workspace_base, + standalone=standalone_mode, + context_label=team_context_label, + ) + if picker_result is None: + return (None, None, None, None) # User pressed q - quit wizard + if picker_result is BACK: + continue # User pressed Esc - go back to source picker + workspace = cast(str, picker_result) + + elif source == WorkspaceSource.CUSTOM: + workspace = prompt_custom_workspace(console) + # Empty input means go back + if workspace is None: + continue + + elif source == WorkspaceSource.CLONE: + repo_url = prompt_repo_url(console) + if repo_url: + workspace = clone_repo( + repo_url, + workspace_base, + ) + + # Empty URL means go back + if workspace is None: + continue + + # ───────────────────────────────────────────────────────────────── + # Step 2.5: Workspace-scoped Quick Resume + # After selecting a workspace, check if existing contexts exist + # and offer to resume one instead of starting fresh + # ───────────────────────────────────────────────────────────────── + normalized_workspace = normalize_path(workspace) + + # Smart filter: Match contexts related to this workspace AND team + workspace_contexts = [] + for ctx in load_recent_contexts(limit=30): + # Standalone: only show standalone contexts + if standalone_mode and ctx.team is not None: + continue + # Org mode: filter by team (prevents cross-team resume confusion) + if team is not None and ctx.team != team: + continue + + # Case 1: Exact worktree match (fastest check) + if ctx.worktree_path == normalized_workspace: + workspace_contexts.append(ctx) + continue + + # Case 2: User picked repo root - show all worktree contexts for this repo + if ctx.repo_root == normalized_workspace: + workspace_contexts.append(ctx) + continue + + # Case 3: User picked a subdir - match if inside a known worktree/repo + try: + if normalized_workspace.is_relative_to(ctx.worktree_path): + workspace_contexts.append(ctx) + continue + if normalized_workspace.is_relative_to(ctx.repo_root): + workspace_contexts.append(ctx) + except ValueError: + # is_relative_to raises ValueError if paths are on different drives + pass + + # Skip workspace-scoped Quick Resume if user already dismissed global Quick Resume + if workspace_contexts and not user_dismissed_quick_resume: + console.print() + + show_all_teams = False + while True: + # Filter contexts based on toggle state + displayed_contexts = workspace_contexts + if show_all_teams: + # Show all contexts for this workspace (ignore team filter) + # Use same 3-case matching logic as above + displayed_contexts = [] + for ctx in load_recent_contexts(limit=30): + # Case 1: Exact worktree match + if ctx.worktree_path == normalized_workspace: + displayed_contexts.append(ctx) + continue + # Case 2: User picked repo root + if ctx.repo_root == normalized_workspace: + displayed_contexts.append(ctx) + continue + # Case 3: User picked a subdir + try: + if normalized_workspace.is_relative_to(ctx.worktree_path): + displayed_contexts.append(ctx) + continue + if normalized_workspace.is_relative_to(ctx.repo_root): + displayed_contexts.append(ctx) + except ValueError: + pass + + qr_subtitle = "Existing sessions found for this workspace" + if show_all_teams: + qr_subtitle = ( + "All teams for this workspace — resuming uses that team's plugins" + ) + + result, selected_context = pick_context_quick_resume( + displayed_contexts, + title=f"Resume session in {Path(workspace).name}?", + subtitle=qr_subtitle, + standalone=standalone_mode, + context_label="All teams" + if show_all_teams + else f"Team: {team or active_team_label}", + effective_team=team or effective_team, + ) + # Note: TeamSwitchRequested bubbles up to mega-loop handler + + match result: + case QuickResumeResult.SELECTED: + # User pressed Enter on a context - resume it + if selected_context is not None: + # Cross-team resume requires confirmation + current_team = team or effective_team + if ( + current_team + and selected_context.team + and selected_context.team != current_team + ): + console.print() + if not confirm_with_layout( + console, + f"[yellow]Resume session from team '{selected_context.team}'?[/yellow]\n" + f"[dim]This will use {selected_context.team} plugins for this session.[/dim]", + default=False, + ): + continue # Back to QR picker loop + return ( + str(selected_context.worktree_path), + selected_context.team, + selected_context.last_session_id, + None, + ) + + case QuickResumeResult.BACK: + # User pressed Esc - go back to workspace picker + workspace = None + break + + case QuickResumeResult.NEW_SESSION: + # User pressed 'n' or selected "New Session" entry + console.print() + break # Exit workspace QR, continue to wizard + + case QuickResumeResult.TOGGLE_ALL_TEAMS: + # User pressed 'a' - toggle all-teams view + if standalone_mode: + console.print( + "[dim]All teams view is unavailable in standalone mode[/dim]" + ) + console.print() + continue + show_all_teams = not show_all_teams + continue # Re-render with new filter + + case QuickResumeResult.CANCELLED: + # User pressed q - cancel entire wizard + return (None, None, None, None) + + # Check if we need to go back to workspace picker + if workspace is None: + continue # Continue outer loop to re-enter Step 2 + + # No contexts or user dismissed global Quick Resume - proceed to Step 3 + break # Exit outer loop (Step 2 + 2.5) + + except TeamSwitchRequested: + # User pressed 't' somewhere - restart at Step 1 (team selection) + # Reset Quick Resume dismissal so new team's contexts are shown + user_dismissed_quick_resume = False + console.print() + continue # Continue mega-loop + + # Successfully got a workspace - exit mega-loop + break + + # Step 3: Worktree option + worktree_name = None + console.print() + if confirm_with_layout( + console, + "[cyan]Create a worktree for isolated feature development?[/cyan]", + default=False, + ): + workspace_path = Path(workspace) + can_create_worktree = True + + # Check if directory is a git repository + if not git.is_git_repo(workspace_path): + console.print() + if confirm_with_layout( + console, + "[yellow]⚠️ Not a git repository. Initialize git?[/yellow]", + default=False, + ): + if git.init_repo(workspace_path): + console.print( + f" [green]{Indicators.get('PASS')}[/green] Initialized git repository" + ) + else: + err_console.print( + f" [red]{Indicators.get('FAIL')}[/red] Failed to initialize git" + ) + can_create_worktree = False + else: + # User declined git init - can't create worktree + console.print( + f" [dim]{Indicators.get('INFO')}[/dim] " + "Skipping worktree (requires git repository)" + ) + can_create_worktree = False + + # Check if repository has commits (worktree requires at least one) + if can_create_worktree and git.is_git_repo(workspace_path): + if not git.has_commits(workspace_path): + console.print() + if confirm_with_layout( + console, + "[yellow]⚠️ Worktree requires initial commit. " + "Create empty initial commit?[/yellow]", + default=True, + ): + success, error_msg = git.create_empty_initial_commit(workspace_path) + if success: + console.print( + f" [green]{Indicators.get('PASS')}[/green] Created initial commit" + ) + else: + err_console.print(f" [red]{Indicators.get('FAIL')}[/red] {error_msg}") + can_create_worktree = False + else: + # User declined empty commit - can't create worktree + console.print( + f" [dim]{Indicators.get('INFO')}[/dim] " + "Skipping worktree (requires initial commit)" + ) + can_create_worktree = False + + # Only ask for worktree name if we have a valid git repo with commits + if can_create_worktree: + worktree_name = prompt_with_layout(console, "[cyan]Feature/worktree name[/cyan]") + + # Step 4: Session name + console.print() + session_name = ( + prompt_with_layout( + console, + "[cyan]Session name[/cyan] [dim](optional, for easy resume)[/dim]", + default="", + ) + or None + ) + + return workspace, team, session_name, worktree_name + + +# ───────────────────────────────────────────────────────────────────────────── +# Wizard entrypoint (dashboard + CLI) +# ───────────────────────────────────────────────────────────────────────────── + + +def run_start_wizard_flow( + *, skip_quick_resume: bool = False, allow_back: bool = False +) -> bool | None: + """Run the interactive start wizard and launch sandbox. + + This is the shared entrypoint for starting sessions from both the CLI + (scc start with no args) and the dashboard (Enter on empty containers). + + The function runs outside any Rich Live context to avoid nested Live + conflicts. It handles the complete flow: + 1. Run interactive wizard to get user selections + 2. If user cancels, return False/None + 3. Otherwise, validate and launch the sandbox + + Args: + skip_quick_resume: If True, bypass the Quick Resume picker and go + directly to project source selection. Used when starting from + dashboard empty states where "resume" doesn't make sense. + allow_back: If True, Esc returns BACK sentinel (for dashboard context). + If False, Esc returns None (for CLI context). + + Returns: + True if sandbox was launched successfully. + False if user pressed Esc to go back (only when allow_back=True). + None if user pressed q to quit or an error occurred. + """ + # Step 1: First-run detection + if setup.is_setup_needed(): + if not setup.maybe_run_setup(console): + return None # Error during setup + + cfg = config.load_user_config() + + # Step 2: Run interactive wizard + # Note: standalone_override=False (default) is correct here - dashboard path + # doesn't have CLI flags, so we rely on config.is_standalone_mode() inside + # interactive_start() to detect standalone mode from user's config file. + workspace, team, session_name, worktree_name = interactive_start( + cfg, skip_quick_resume=skip_quick_resume, allow_back=allow_back + ) + + # Three-state return handling: + # - workspace is BACK → user pressed Esc (go back to dashboard) + # - workspace is None → user pressed q (quit app) + if workspace is BACK: + return False # Go back to dashboard + if workspace is None: + return None # Quit app + + workspace_value = cast(str, workspace) + + try: + adapters = get_default_adapters() + with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): + adapters.sandbox_runtime.ensure_available() + workspace_path = validate_and_resolve_workspace(workspace_value) + workspace_path = prepare_workspace(workspace_path, worktree_name, install_deps=False) + assert workspace_path is not None + _configure_team_settings(team, cfg) + + standalone_mode = config.is_standalone_mode() or team is None + org_config = None + if team and not standalone_mode: + org_config = config.load_cached_org_config() + + start_dependencies = StartSessionDependencies( + filesystem=adapters.filesystem, + remote_fetcher=adapters.remote_fetcher, + clock=adapters.clock, + git_client=adapters.git_client, + agent_runner=adapters.agent_runner, + sandbox_runtime=adapters.sandbox_runtime, + resolve_effective_config=resolve_effective_config, + materialize_marketplace=materialize_marketplace, + ) + start_request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=team, + session_name=session_name, + resume=False, + fresh=False, + offline=False, + standalone=standalone_mode, + dry_run=False, + allow_suspicious=False, + org_config=org_config, + ) + should_sync = team is not None and org_config is not None and not standalone_mode + if should_sync: + with Status( + "[cyan]Syncing marketplace settings...[/cyan]", + console=console, + spinner=Spinners.NETWORK, + ): + start_plan = prepare_start_session(start_request, dependencies=start_dependencies) + else: + start_plan = prepare_start_session(start_request, dependencies=start_dependencies) + + if start_plan.sync_error_message: + panel = create_warning_panel( + "Marketplace Sync Failed", + start_plan.sync_error_message, + "Team plugins may not be available. Use --dry-run to diagnose.", + ) + print_with_layout(console, panel, constrain=True) + elif start_plan.sync_result: + if start_plan.sync_result.warnings: + console.print() + for warning in start_plan.sync_result.warnings: + print_with_layout(console, f"[yellow]{warning}[/yellow]") + console.print() + if start_plan.sync_result.plugins_enabled: + print_with_layout( + console, + f"[green]{Indicators.get('PASS')} Enabled " + f"{len(start_plan.sync_result.plugins_enabled)} team plugin(s)[/green]", + ) + if start_plan.sync_result.marketplaces_materialized: + print_with_layout( + console, + f"[green]{Indicators.get('PASS')} Materialized " + f"{len(start_plan.sync_result.marketplaces_materialized)} marketplace(s)[/green]", + ) + + resolver_result = start_plan.resolver_result + if resolver_result.is_mount_expanded: + console.print() + console.print( + create_info_panel( + "Worktree Detected", + f"Mounting parent directory for worktree support:\n{resolver_result.mount_root}", + "Both worktree and main repo will be accessible", + ) + ) + console.print() + current_branch = start_plan.current_branch + _record_session_and_context( + workspace_path, + team, + session_name, + current_branch, + ) + show_launch_panel( + workspace=workspace_path, + team=team, + session_name=session_name, + branch=current_branch, + is_resume=False, + ) + start_session(start_plan, dependencies=start_dependencies) + return True + except Exception as e: + err_console.print(f"[red]Error launching sandbox: {e}[/red]") + return False diff --git a/src/scc_cli/commands/launch/render.py b/src/scc_cli/commands/launch/render.py index 532f36b..5900995 100644 --- a/src/scc_cli/commands/launch/render.py +++ b/src/scc_cli/commands/launch/render.py @@ -14,8 +14,7 @@ from rich.table import Table from ... import git -from ...cli_common import MAX_DISPLAY_PATH_LENGTH, PATH_TRUNCATE_LENGTH, console -from ...output_mode import print_human +from ...cli_common import MAX_DISPLAY_PATH_LENGTH, PATH_TRUNCATE_LENGTH, console, err_console from ...theme import Indicators from ...ui.chrome import print_with_layout @@ -30,8 +29,6 @@ def warn_if_non_worktree(workspace_path: Path | None, *, json_mode: bool = False workspace_path: Path to the workspace directory, or None. json_mode: If True, suppress the warning. """ - import sys - if json_mode or workspace_path is None: return @@ -41,12 +38,11 @@ def warn_if_non_worktree(workspace_path: Path | None, *, json_mode: bool = False if git.is_worktree(workspace_path): return - print_human( + print_with_layout( + err_console, "[yellow]Tip:[/yellow] You're working in the main repo. " "For isolation, try: scc worktree create . or " "scc start --worktree ", - file=sys.stderr, - highlight=False, ) @@ -84,10 +80,10 @@ def build_dry_run_data( blocked_items: list[str] = [] if org_config and team: - from ... import profiles + from ...application.compute_effective_config import compute_effective_config workspace_for_project = None if project_config is not None else workspace_path - effective = profiles.compute_effective_config( + effective = compute_effective_config( org_config, team, project_config=project_config, diff --git a/src/scc_cli/commands/launch/workspace.py b/src/scc_cli/commands/launch/workspace.py index 98c134e..8d53e04 100644 --- a/src/scc_cli/commands/launch/workspace.py +++ b/src/scc_cli/commands/launch/workspace.py @@ -29,6 +29,7 @@ from ...output_mode import print_human from ...panels import create_info_panel, create_success_panel, create_warning_panel from ...theme import Indicators, Spinners +from ...ui import check_branch_safety, create_worktree from ...ui.gate import is_interactive_allowed if TYPE_CHECKING: @@ -211,7 +212,7 @@ def prepare_workspace( # Handle worktree creation if worktree_name: - workspace_path = git.create_worktree(workspace_path, worktree_name) + workspace_path = create_worktree(workspace_path, worktree_name) console.print( create_success_panel( "Worktree Created", @@ -235,7 +236,7 @@ def prepare_workspace( # Check git safety (handles protected branch warnings) if workspace_path.exists(): - if not git.check_branch_safety(workspace_path, console): + if not check_branch_safety(workspace_path, console): console.print("[dim]Cancelled.[/dim]") raise typer.Exit(EXIT_CANCELLED) diff --git a/src/scc_cli/commands/reset.py b/src/scc_cli/commands/reset.py index 21b3554..373bf88 100644 --- a/src/scc_cli/commands/reset.py +++ b/src/scc_cli/commands/reset.py @@ -15,8 +15,8 @@ import json import sys -from collections.abc import Callable -from typing import Annotated +from pathlib import Path +from typing import Annotated, Literal, cast import typer from rich.console import Console @@ -28,27 +28,26 @@ EXIT_SUCCESS, EXIT_USAGE, ) -from ..core.maintenance import ( +from ..maintenance import ( MaintenanceLock, MaintenanceLockError, MaintenancePreview, + MaintenanceTask, + MaintenanceTaskContext, ResetResult, RiskTier, - cleanup_expired_exceptions, - clear_cache, - clear_contexts, - delete_all_sessions, - factory_reset, get_paths, + get_task, + list_tasks, preview_operation, - prune_containers, - prune_sessions, - reset_config, - reset_exceptions, + run_task, ) +from ..services.git import detect_workspace_root console = Console() +ExceptionScope = Literal["all", "user", "repo"] + # ═══════════════════════════════════════════════════════════════════════════════ # Risk Tier Helpers @@ -73,6 +72,51 @@ def _max_risk(tiers: list[RiskTier]) -> RiskTier: return max(tiers, key=lambda t: t.value) +# ═══════════════════════════════════════════════════════════════════════════════ +# Context Helpers +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _normalize_exception_scope(scope: str | None) -> ExceptionScope: + """Normalize exception scope input.""" + if scope in ("all", "user", "repo"): + return cast(ExceptionScope, scope) + return "all" + + +def _resolve_repo_root() -> Path: + """Resolve repo root for repo-scoped exception tasks.""" + root, _ = detect_workspace_root(Path.cwd()) + return root or Path.cwd() + + +def _build_context( + *, + dry_run: bool = False, + create_backup: bool = True, + continue_on_error: bool = False, + exception_scope: ExceptionScope = "all", + repo_root: Path | None = None, +) -> MaintenanceTaskContext: + """Build a task context for maintenance operations.""" + return MaintenanceTaskContext( + dry_run=dry_run, + create_backup=create_backup, + continue_on_error=continue_on_error, + exception_scope=exception_scope, + repo_root=repo_root, + ) + + +def _preview_task(task: MaintenanceTask, context: MaintenanceTaskContext) -> MaintenancePreview: + """Generate a preview for a maintenance task.""" + return preview_operation( + task.id, + scope=context.exception_scope, + repo_root=context.repo_root, + ) + + # ═══════════════════════════════════════════════════════════════════════════════ # Output Helpers # ═══════════════════════════════════════════════════════════════════════════════ @@ -210,27 +254,14 @@ def _run_interactive_mode() -> None: """Run interactive maintenance picker.""" console.print("\n[bold cyan]SCC Maintenance[/bold cyan]\n") - # Define available actions - actions = [ - ("clear_cache", "Clear cache", RiskTier.SAFE), - ("cleanup_expired_exceptions", "Cleanup expired exceptions", RiskTier.SAFE), - ("clear_contexts", "Clear contexts", RiskTier.CHANGES_STATE), - ("prune_containers", "Prune containers", RiskTier.CHANGES_STATE), - ("prune_sessions", "Prune sessions (30d, keep 20)", RiskTier.CHANGES_STATE), - ("reset_exceptions", "Reset all exceptions", RiskTier.DESTRUCTIVE), - ("delete_all_sessions", "Delete all sessions", RiskTier.DESTRUCTIVE), - ("reset_config", "Reset configuration", RiskTier.DESTRUCTIVE), - ("factory_reset", "Factory reset (everything)", RiskTier.FACTORY_RESET), - ] - - # Display numbered list - for i, (action_id, name, tier) in enumerate(actions, 1): - console.print(f" {i}. {name} {_risk_badge(tier)}") + tasks = list_tasks() + + for i, task in enumerate(tasks, 1): + console.print(f" {i}. {task.label} {_risk_badge(task.risk_tier)}") console.print("\n 0. Cancel") console.print() - # Get selection choice = Prompt.ask("Select action", default="0") try: idx = int(choice) @@ -242,54 +273,36 @@ def _run_interactive_mode() -> None: console.print("[dim]Cancelled.[/dim]") raise typer.Exit(EXIT_CANCELLED) - if idx < 1 or idx > len(actions): + if idx < 1 or idx > len(tasks): console.print("[red]Invalid selection.[/red]") raise typer.Exit(EXIT_CANCELLED) - action_id, action_name, tier = actions[idx - 1] + task = tasks[idx - 1] + action_id = task.id + action_name = task.label + tier = task.risk_tier + context = _build_context(repo_root=_resolve_repo_root()) - # Get confirmation based on risk tier if tier == RiskTier.SAFE: - pass # No confirmation needed + pass elif tier == RiskTier.CHANGES_STATE: if not _confirm_tier_1(action_name, yes=False, non_interactive=False): raise typer.Exit(EXIT_CANCELLED) elif tier == RiskTier.DESTRUCTIVE: - preview = preview_operation(action_id) + preview = _preview_task(task, context) if not _confirm_tier_2(action_name, preview, yes=False, non_interactive=False): raise typer.Exit(EXIT_CANCELLED) elif tier == RiskTier.FACTORY_RESET: if not _confirm_factory_reset(yes=False, force=False, non_interactive=False): raise typer.Exit(EXIT_CANCELLED) - # Execute the action try: with MaintenanceLock(): - if action_id == "clear_cache": - result = clear_cache() - elif action_id == "cleanup_expired_exceptions": - result = cleanup_expired_exceptions() - elif action_id == "clear_contexts": - result = clear_contexts() - elif action_id == "prune_containers": - result = prune_containers() - elif action_id == "prune_sessions": - result = prune_sessions() - elif action_id == "reset_exceptions": - result = reset_exceptions() - elif action_id == "delete_all_sessions": - result = delete_all_sessions() - elif action_id == "reset_config": - result = reset_config() - elif action_id == "factory_reset": - results = factory_reset() - for r in results: - _print_result(r) + result = run_task(action_id, context) + if isinstance(result, list): + for item in result: + _print_result(item) return - else: - console.print(f"[red]Unknown action: {action_id}[/red]") - raise typer.Exit(EXIT_USAGE) - _print_result(result) except MaintenanceLockError as e: @@ -464,11 +477,14 @@ def reset_cmd( try: with MaintenanceLock(): - factory_results = factory_reset( + context = _build_context( dry_run=dry_run, create_backup=not no_backup, continue_on_error=continue_on_error, ) + factory_results = run_task("factory_reset", context) + if not isinstance(factory_results, list): + factory_results = [factory_results] if json_output: _print_json_results(factory_results) @@ -490,57 +506,68 @@ def reset_cmd( return # Build list of operations to perform - operations: list[tuple[str, Callable[..., ResetResult], RiskTier, dict]] = [] + operations: list[MaintenanceTask] = [] + + def _require_task(action_id: str) -> MaintenanceTask: + task = get_task(action_id) + if task is None: + console.print(f"[red]Unknown action: {action_id}[/red]") + raise typer.Exit(EXIT_USAGE) + return task if cache: - operations.append(("clear_cache", clear_cache, RiskTier.SAFE, {})) + operations.append(_require_task("clear_cache")) if exceptions_expired: - operations.append( - ("cleanup_expired_exceptions", cleanup_expired_exceptions, RiskTier.SAFE, {}) - ) + operations.append(_require_task("cleanup_expired_exceptions")) if contexts: - operations.append(("clear_contexts", clear_contexts, RiskTier.CHANGES_STATE, {})) + operations.append(_require_task("clear_contexts")) if containers: - operations.append(("prune_containers", prune_containers, RiskTier.CHANGES_STATE, {})) + operations.append(_require_task("prune_containers")) if sessions: - operations.append(("prune_sessions", prune_sessions, RiskTier.CHANGES_STATE, {})) + operations.append(_require_task("prune_sessions")) if exceptions: - scope = exceptions_scope or "all" - operations.append( - ("reset_exceptions", reset_exceptions, RiskTier.DESTRUCTIVE, {"scope": scope}) - ) + operations.append(_require_task("reset_exceptions")) if sessions_all: - operations.append(("delete_all_sessions", delete_all_sessions, RiskTier.DESTRUCTIVE, {})) + operations.append(_require_task("delete_all_sessions")) if config_flag: - operations.append(("reset_config", reset_config, RiskTier.DESTRUCTIVE, {})) + operations.append(_require_task("reset_config")) + + exception_scope = _normalize_exception_scope(exceptions_scope) + repo_root = _resolve_repo_root() if exceptions and exception_scope in ("all", "repo") else None + context = _build_context( + dry_run=dry_run, + create_backup=not no_backup, + continue_on_error=continue_on_error, + exception_scope=exception_scope, + repo_root=repo_root, + ) # Handle --plan mode if plan: console.print("\n[bold cyan]Reset Preview[/bold cyan]\n") - for action_id, _, tier, kwargs in operations: - preview = preview_operation(action_id, **kwargs) + for task in operations: + preview = _preview_task(task, context) _print_preview(preview) return # Get confirmation based on max risk tier - max_tier = _max_risk([tier for _, _, tier, _ in operations]) + max_tier = _max_risk([task.risk_tier for task in operations]) if max_tier == RiskTier.CHANGES_STATE and not yes: - action_names = ", ".join(aid for aid, _, _, _ in operations) + action_names = ", ".join(task.id for task in operations) if not _confirm_tier_1(action_names, yes, non_interactive): raise typer.Exit(EXIT_CANCELLED) elif max_tier == RiskTier.DESTRUCTIVE and not yes: - # Show previews for destructive operations - for action_id, _, tier, kwargs in operations: - if tier == RiskTier.DESTRUCTIVE: - preview = preview_operation(action_id, **kwargs) + for task in operations: + if task.risk_tier == RiskTier.DESTRUCTIVE: + preview = _preview_task(task, context) _print_preview(preview) if not Confirm.ask("\n[bold]Proceed with destructive operations?[/bold]"): @@ -551,29 +578,31 @@ def reset_cmd( try: with MaintenanceLock(): - for action_id, func, tier, kwargs in operations: - # Add common kwargs - kwargs["dry_run"] = dry_run - if tier == RiskTier.DESTRUCTIVE: - kwargs["create_backup"] = not no_backup - + for task in operations: try: - result = func(**kwargs) - results.append(result) - - if not json_output: - _print_result(result) - - if not result.success and not continue_on_error: + task_result = run_task(task.id, context) + if isinstance(task_result, list): + results.extend(task_result) + if not json_output: + for item in task_result: + _print_result(item) + success = all(item.success for item in task_result) + else: + results.append(task_result) + if not json_output: + _print_result(task_result) + success = task_result.success + + if not success and not continue_on_error: break - except Exception as e: + except Exception as exc: result = ResetResult( success=False, - action_id=action_id, - risk_tier=tier, - error=str(e), - message=f"Failed: {e}", + action_id=task.id, + risk_tier=task.risk_tier, + error=str(exc), + message=f"Failed: {exc}", ) results.append(result) diff --git a/src/scc_cli/commands/support.py b/src/scc_cli/commands/support.py index 1493bf2..d18759e 100644 --- a/src/scc_cli/commands/support.py +++ b/src/scc_cli/commands/support.py @@ -5,22 +5,19 @@ and path redaction for safe sharing. """ -import json -import platform -import re -import sys -import zipfile -from datetime import datetime, timezone from pathlib import Path -from typing import Any import typer -from .. import __version__, config, doctor from ..cli_common import console, handle_errors from ..json_output import build_envelope from ..kinds import Kind from ..output_mode import json_output_mode, print_json, set_pretty_mode +from ..support_bundle import ( + build_bundle_data, + create_bundle, + get_default_bundle_path, +) # noqa: F401 # ───────────────────────────────────────────────────────────────────────────── # Support App @@ -34,223 +31,6 @@ ) -# ───────────────────────────────────────────────────────────────────────────── -# Secret Redaction (Pure Function) -# ───────────────────────────────────────────────────────────────────────────── - -# Keys that should have their values redacted -SECRET_KEY_PATTERNS = [ - r"^auth$", - r".*token.*", - r".*api[_-]?key.*", - r".*apikey.*", - r".*password.*", - r".*secret.*", - r"^authorization$", - r".*credential.*", -] - -# Compiled regex patterns (case-insensitive) -_SECRET_PATTERNS = [re.compile(p, re.IGNORECASE) for p in SECRET_KEY_PATTERNS] - - -def _is_secret_key(key: str) -> bool: - """Check if a key name indicates a secret value.""" - return any(pattern.match(key) for pattern in _SECRET_PATTERNS) - - -def redact_secrets(data: dict[str, Any]) -> dict[str, Any]: - """Redact secret values from a dictionary. - - Recursively processes nested dicts and lists. - - Args: - data: Dictionary potentially containing secrets - - Returns: - Copy of dict with secret values replaced by [REDACTED] - """ - result: dict[str, Any] = {} - - for key, value in data.items(): - if _is_secret_key(key) and isinstance(value, str): - result[key] = "[REDACTED]" - elif isinstance(value, dict): - result[key] = redact_secrets(value) - elif isinstance(value, list): - result[key] = [ - redact_secrets(item) if isinstance(item, dict) else item for item in value - ] - else: - result[key] = value - - return result - - -# ───────────────────────────────────────────────────────────────────────────── -# Path Redaction (Pure Function) -# ───────────────────────────────────────────────────────────────────────────── - - -def redact_paths(data: dict[str, Any], redact: bool = True) -> dict[str, Any]: - """Redact home directory paths from a dictionary. - - Replaces absolute paths containing the home directory with ~ prefix. - - Args: - data: Dictionary potentially containing paths - redact: If False, return data unchanged - - Returns: - Copy of dict with home paths redacted - """ - if not redact: - return data - - home = str(Path.home()) - result: dict[str, Any] = {} - - for key, value in data.items(): - if isinstance(value, str) and home in value: - result[key] = value.replace(home, "~") - elif isinstance(value, dict): - result[key] = redact_paths(value, redact=redact) - elif isinstance(value, list): - result[key] = [ - redact_paths(item, redact=redact) - if isinstance(item, dict) - else (item.replace(home, "~") if isinstance(item, str) and home in item else item) - for item in value - ] - else: - result[key] = value - - return result - - -# ───────────────────────────────────────────────────────────────────────────── -# Bundle Data Collection (Pure Function) -# ───────────────────────────────────────────────────────────────────────────── - - -def build_bundle_data( - redact_paths_flag: bool = True, - workspace_path: Path | None = None, -) -> dict[str, Any]: - """Build support bundle data. - - Collects system info, config, doctor output, and other diagnostics. - All secrets are automatically redacted. - - Args: - redact_paths_flag: Whether to redact home directory paths - workspace_path: Optional workspace to include in diagnostics - - Returns: - Dictionary with all bundle data - """ - # System information - system_info = { - "platform": platform.system(), - "platform_version": platform.version(), - "platform_release": platform.release(), - "machine": platform.machine(), - "python_version": sys.version, - "python_implementation": platform.python_implementation(), - } - - # CLI version - cli_version = __version__ - - # Timestamp - generated_at = datetime.now(timezone.utc).isoformat() - - # Load and redact config - try: - user_config = config.load_user_config() - user_config = redact_secrets(user_config) - except Exception: - user_config = {"error": "Failed to load config"} - - # Load and redact org config - try: - org_config = config.load_cached_org_config() - if org_config: - org_config = redact_secrets(org_config) - except Exception: - org_config = {"error": "Failed to load org config"} - - # Run doctor checks - try: - doctor_result = doctor.run_doctor(workspace_path) - doctor_data = doctor.build_doctor_json_data(doctor_result) - except Exception as e: - doctor_data = {"error": f"Failed to run doctor: {e}"} - - # Build bundle data - bundle_data: dict[str, Any] = { - "generated_at": generated_at, - "cli_version": cli_version, - "system": system_info, - "config": user_config, - "org_config": org_config, - "doctor": doctor_data, - } - - # Include workspace info if provided - if workspace_path: - bundle_data["workspace"] = str(workspace_path) - - # Apply path redaction if enabled - if redact_paths_flag: - bundle_data = redact_paths(bundle_data) - - return bundle_data - - -# ───────────────────────────────────────────────────────────────────────────── -# Bundle File Creation -# ───────────────────────────────────────────────────────────────────────────── - - -def get_default_bundle_path() -> Path: - """Get default path for support bundle. - - Returns: - Path with timestamp-based filename - """ - timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") - return Path.cwd() / f"scc-support-bundle-{timestamp}.zip" - - -def create_bundle( - output_path: Path, - redact_paths_flag: bool = True, - workspace_path: Path | None = None, -) -> dict[str, Any]: - """Create a support bundle zip file. - - Args: - output_path: Path for the output zip file - redact_paths_flag: Whether to redact home directory paths - workspace_path: Optional workspace to include in diagnostics - - Returns: - The bundle data that was written to the manifest - """ - bundle_data = build_bundle_data( - redact_paths_flag=redact_paths_flag, - workspace_path=workspace_path, - ) - - # Create zip file with manifest - with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as zf: - manifest_json = json.dumps(bundle_data, indent=2) - zf.writestr("manifest.json", manifest_json) - - return bundle_data - - # ───────────────────────────────────────────────────────────────────────────── # Support Bundle Command # ───────────────────────────────────────────────────────────────────────────── diff --git a/src/scc_cli/commands/team.py b/src/scc_cli/commands/team.py index abc6b46..0f3c8ac 100644 --- a/src/scc_cli/commands/team.py +++ b/src/scc_cli/commands/team.py @@ -18,6 +18,7 @@ from rich.table import Table from .. import config, teams +from ..bootstrap import get_default_adapters from ..cli_common import console, handle_errors, render_responsive_table from ..json_command import json_command from ..kinds import Kind @@ -209,7 +210,12 @@ def team_list( org_url = org_source.get("url") org_auth = org_source.get("auth") if org_url: - fetched_config, _etag, status_code = fetch_org_config(org_url, org_auth) + adapters = get_default_adapters() + fetched_config, _etag, status_code = fetch_org_config( + org_url, + org_auth, + fetcher=adapters.remote_fetcher, + ) if fetched_config and status_code == 200: org_config = fetched_config # Save to cache diff --git a/src/scc_cli/commands/worktree/_helpers.py b/src/scc_cli/commands/worktree/_helpers.py index ba2fe3e..ff91f16 100644 --- a/src/scc_cli/commands/worktree/_helpers.py +++ b/src/scc_cli/commands/worktree/_helpers.py @@ -21,7 +21,7 @@ def build_worktree_list_data( """Build worktree list data for JSON output. Args: - worktrees: List of worktree dictionaries from git.list_worktrees() + worktrees: List of worktree dictionaries from ui.list_worktrees() workspace: Path to the workspace Returns: diff --git a/src/scc_cli/commands/worktree/session_commands.py b/src/scc_cli/commands/worktree/session_commands.py index 652c408..0c5abe0 100644 --- a/src/scc_cli/commands/worktree/session_commands.py +++ b/src/scc_cli/commands/worktree/session_commands.py @@ -11,7 +11,7 @@ from ... import config, sessions from ...cli_common import console, handle_errors, render_responsive_table from ...core.exit_codes import EXIT_CANCELLED -from ...core.maintenance import prune_sessions as maintenance_prune_sessions +from ...maintenance import prune_sessions as maintenance_prune_sessions from ...panels import create_warning_panel from ...ui.picker import TeamSwitchRequested, pick_session diff --git a/src/scc_cli/commands/worktree/worktree_commands.py b/src/scc_cli/commands/worktree/worktree_commands.py index 40e3d8d..038283d 100644 --- a/src/scc_cli/commands/worktree/worktree_commands.py +++ b/src/scc_cli/commands/worktree/worktree_commands.py @@ -20,6 +20,7 @@ from ...output_mode import is_json_mode from ...panels import create_success_panel, create_warning_panel from ...theme import Indicators, Spinners +from ...ui import cleanup_worktree, create_worktree, list_worktrees, render_worktrees from ...ui.gate import InteractivityContext from ...ui.picker import TeamSwitchRequested, pick_worktree from ._helpers import build_worktree_list_data @@ -98,7 +99,7 @@ def worktree_create_cmd( ) raise typer.Exit(1) - worktree_path = git.create_worktree(workspace_path, name, base_branch) + worktree_path = create_worktree(workspace_path, name, base_branch) console.print( create_success_panel( @@ -165,7 +166,7 @@ def worktree_list_cmd( if not workspace_path.exists(): raise WorkspaceNotFoundError(path=str(workspace_path)) - worktree_list = git.list_worktrees(workspace_path, verbose=verbose) + worktree_list = list_worktrees(workspace_path, verbose=verbose) # Convert WorktreeInfo dataclasses to dicts for JSON serialization worktree_dicts = [asdict(wt) for wt in worktree_list] @@ -199,8 +200,8 @@ def worktree_list_cmd( console.print("[dim]Use 'scc team switch' to change teams[/dim]") return None - # Use the beautiful worktree rendering from git.py - git.render_worktrees(worktree_list, console) + # Use the worktree rendering from the UI layer + render_worktrees(worktree_list, console) return data @@ -241,7 +242,7 @@ def worktree_switch_cmd( # No target: interactive picker if target is None: - worktree_list = git.list_worktrees(workspace_path) + worktree_list = list_worktrees(workspace_path) if not worktree_list: err_console.print( create_warning_panel( @@ -318,7 +319,7 @@ def worktree_switch_cmd( f"[cyan]No worktree for '{target}'. Create one?[/cyan]", default=False, # Explicit > implicit ): - worktree_path = git.create_worktree( + worktree_path = create_worktree( workspace_path, name=target, base_branch=target, @@ -424,7 +425,7 @@ def worktree_select_cmd( if not git.is_git_repo(workspace_path): raise NotAGitRepoError(path=str(workspace_path)) - worktree_list = git.list_worktrees(workspace_path) + worktree_list = list_worktrees(workspace_path) # Build combined list if including branches from ...git import WorktreeInfo @@ -481,7 +482,7 @@ def worktree_select_cmd( console=console, spinner=Spinners.SETUP, ): - worktree_path = git.create_worktree( + worktree_path = create_worktree( workspace_path, selected.branch, base_branch=selected.branch, @@ -543,7 +544,7 @@ def worktree_enter_cmd( if target is None: # No target: interactive picker - worktree_list = git.list_worktrees(workspace_path) + worktree_list = list_worktrees(workspace_path) if not worktree_list: err_console.print( create_warning_panel( @@ -585,7 +586,7 @@ def worktree_enter_cmd( elif target == "^": # Main branch worktree main_branch = git.get_default_branch(workspace_path) - worktree_list = git.list_worktrees(workspace_path) + worktree_list = list_worktrees(workspace_path) for wt in worktree_list: if wt.branch == main_branch or wt.branch in {"main", "master"}: worktree_path = Path(wt.path) @@ -683,7 +684,7 @@ def worktree_remove_cmd( raise WorkspaceNotFoundError(path=str(workspace_path)) # cleanup_worktree handles all output including success panels - git.cleanup_worktree(workspace_path, name, force, console, skip_confirm=yes, dry_run=dry_run) + cleanup_worktree(workspace_path, name, force, console, skip_confirm=yes, dry_run=dry_run) @handle_errors diff --git a/src/scc_cli/core/error_mapping.py b/src/scc_cli/core/error_mapping.py new file mode 100644 index 0000000..62c3f0b --- /dev/null +++ b/src/scc_cli/core/error_mapping.py @@ -0,0 +1,61 @@ +"""Shared error mapping helpers for CLI output.""" + +from __future__ import annotations + +from typing import Any + +from scc_cli.core.errors import ConfigError, PolicyViolationError, PrerequisiteError, SCCError +from scc_cli.core.exit_codes import ( + EXIT_CONFIG, + EXIT_ERROR, + EXIT_GOVERNANCE, + EXIT_PREREQ, + EXIT_VALIDATION, +) + + +def to_exit_code(exc: Exception) -> int: + """Map exceptions to standardized exit codes. + + This mirrors legacy json_command handling to preserve behavior. + """ + if isinstance(exc, PolicyViolationError): + return EXIT_GOVERNANCE + if isinstance(exc, PrerequisiteError): + return EXIT_PREREQ + if isinstance(exc, ConfigError): + return EXIT_CONFIG + if isinstance(exc, SCCError): + return getattr(exc, "exit_code", EXIT_ERROR) + if "Validation" in type(exc).__name__: + return EXIT_VALIDATION + return EXIT_ERROR + + +def to_json_payload(exc: Exception) -> dict[str, Any]: + """Return JSON-ready error data and messages.""" + error_data: dict[str, Any] = { + "error_type": type(exc).__name__, + } + + if isinstance(exc, SCCError): + error_data["user_message"] = exc.user_message + if exc.suggested_action: + error_data["suggested_action"] = exc.suggested_action + if exc.debug_context: + error_data["debug_context"] = exc.debug_context + error_message = exc.user_message + else: + error_message = str(exc) + + return { + "errors": [error_message], + "data": error_data, + } + + +def to_human_message(exc: Exception) -> str: + """Return a human-readable error message.""" + if isinstance(exc, SCCError): + return exc.user_message + return str(exc) diff --git a/src/scc_cli/core/maintenance.py b/src/scc_cli/core/maintenance.py index 386b348..7115b23 100644 --- a/src/scc_cli/core/maintenance.py +++ b/src/scc_cli/core/maintenance.py @@ -1,1003 +1,45 @@ -""" -Maintenance operations for SCC CLI. - -Pure functions for reset and cleanup operations. -Both CLI (scc reset) and TUI (Settings screen) delegate to this module. - -Key principles: -- Pure operations: no UI, no prompts, no console output -- Delegate to existing primitives where possible -- Return ResetResult with counts/bytes/paths for UI to display -- Atomic backups before destructive operations -""" +"""Backward-compatible facade for maintenance operations.""" from __future__ import annotations -import os -import shutil -import stat -import tempfile -from dataclasses import dataclass, field -from datetime import datetime, timedelta, timezone -from enum import Enum -from pathlib import Path -from typing import Any, Literal - -from .. import config, contexts, sessions -from ..stores.exception_store import RepoStore, UserStore -from ..utils.locks import file_lock, lock_path - -# ═══════════════════════════════════════════════════════════════════════════════ -# Risk Tiers -# ═══════════════════════════════════════════════════════════════════════════════ - - -class RiskTier(Enum): - """Risk level for maintenance operations. - - Tier 0: Safe - no confirmation needed - Tier 1: Changes State - Y/N confirmation - Tier 2: Destructive - Y/N + impact list - Tier 3: Factory Reset - type-to-confirm - """ - - SAFE = 0 - CHANGES_STATE = 1 - DESTRUCTIVE = 2 - FACTORY_RESET = 3 - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Result Types -# ═══════════════════════════════════════════════════════════════════════════════ - - -@dataclass -class PathInfo: - """Information about a configuration path. - - Attributes: - name: Human-readable name (e.g., "Config", "Sessions") - path: Absolute path to file or directory - exists: Whether the path exists - size_bytes: Size in bytes (0 if doesn't exist) - permissions: Permission string ("rw", "r-", "--") - """ - - name: str - path: Path - exists: bool - size_bytes: int - permissions: str - - @property - def size_human(self) -> str: - """Human-readable size (e.g., '2.1 KB').""" - if self.size_bytes == 0: - return "0 B" - for unit in ["B", "KB", "MB", "GB"]: - if self.size_bytes < 1024: - return ( - f"{self.size_bytes:.1f} {unit}" - if self.size_bytes >= 10 - else f"{self.size_bytes} {unit}" - ) - self.size_bytes = int(self.size_bytes / 1024) - return f"{self.size_bytes:.1f} TB" - - -@dataclass -class ResetResult: - """Result of a reset operation. - - All UI should render from these values, never hardcode paths. - """ - - success: bool - action_id: str - risk_tier: RiskTier - paths: list[Path] = field(default_factory=list) - removed_count: int = 0 - bytes_freed: int = 0 - backup_path: Path | None = None - message: str = "" - next_steps: list[str] = field(default_factory=list) - error: str | None = None - - @property - def bytes_freed_human(self) -> str: - """Human-readable bytes freed.""" - if self.bytes_freed == 0: - return "0 B" - size: float = self.bytes_freed - for unit in ["B", "KB", "MB", "GB"]: - if size < 1024: - return f"{size:.1f} {unit}" if size >= 10 else f"{int(size)} {unit}" - size = size / 1024 - return f"{size:.1f} TB" - - -@dataclass -class MaintenancePreview: - """Preview of what a maintenance operation would do. - - Used for --plan flag and [P]review button. - """ - - action_id: str - risk_tier: RiskTier - paths: list[Path] - description: str - item_count: int = 0 - bytes_estimate: int = 0 - backup_will_be_created: bool = False - parameters: dict[str, Any] = field(default_factory=dict) - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Maintenance Lock -# ═══════════════════════════════════════════════════════════════════════════════ - - -LOCK_FILE_NAME = "maintenance.lock" - - -def _get_lock_path() -> Path: - """Get path to maintenance lock file.""" - return config.CONFIG_DIR / LOCK_FILE_NAME - - -def _is_process_running(pid: int) -> bool: - """Check if a process with the given PID is still running.""" - try: - os.kill(pid, 0) # Signal 0 doesn't kill, just checks existence - return True - except OSError: - return False - - -def _get_lock_info(lock_file: Path) -> tuple[int | None, bool]: - """Get lock file info: (PID, is_stale). - - Returns: - Tuple of (PID from lock file, whether the lock appears stale) - """ - try: - if not lock_file.exists(): - return None, False - content = lock_file.read_text().strip() - if not content: - return None, False - pid = int(content) - is_stale = not _is_process_running(pid) - return pid, is_stale - except (ValueError, OSError): - return None, False - - -class MaintenanceLockError(Exception): - """Raised when maintenance is already running in another process.""" - - def __init__(self, message: str, is_stale: bool = False, pid: int | None = None): - super().__init__(message) - self.is_stale = is_stale - self.pid = pid - - -class MaintenanceLock: - """Context manager for maintenance lock. - - Prevents concurrent maintenance operations from CLI and TUI. - Detects stale locks from crashed processes. - - Usage: - with MaintenanceLock(): - # perform maintenance - """ - - def __init__(self, force: bool = False) -> None: - self._lock_path = _get_lock_path() - self._lock_file: Any = None - self._force = force - - def __enter__(self) -> MaintenanceLock: - self._lock_path.parent.mkdir(parents=True, exist_ok=True) - - # Use the existing file_lock utility - lf = lock_path("maintenance") - - # Check for stale lock before attempting to acquire - pid, is_stale = _get_lock_info(lf) - - # If force is set and lock is stale, remove the lock file - if self._force and is_stale and lf.exists(): - try: - lf.unlink() - except OSError: - pass - - try: - self._lock_file = file_lock(lf) - self._lock_file.__enter__() - except Exception: - # Re-check stale status for error message - pid, is_stale = _get_lock_info(lf) - - if is_stale: - raise MaintenanceLockError( - f"Lock file exists from PID {pid} which is no longer running.\n" - "The lock appears stale. Use 'scc reset --force-unlock' to recover.", - is_stale=True, - pid=pid, - ) - else: - raise MaintenanceLockError( - "Maintenance already running in another process. " - "Close other SCC sessions first.", - is_stale=False, - pid=pid, - ) - return self - - def __exit__(self, *args: Any) -> None: - if self._lock_file: - self._lock_file.__exit__(*args) - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Path Discovery -# ═══════════════════════════════════════════════════════════════════════════════ - - -def _get_size(path: Path) -> int: - """Get size of file or directory in bytes.""" - if not path.exists(): - return 0 - if path.is_file(): - return path.stat().st_size - # Directory: sum all files recursively - total = 0 - try: - for item in path.rglob("*"): - if item.is_file(): - try: - total += item.stat().st_size - except OSError: - pass - except OSError: - pass - return total - - -def _get_permissions(path: Path) -> str: - """Get permission string for path (rw, r-, --).""" - if not path.exists(): - return "--" - try: - mode = path.stat().st_mode - readable = bool(mode & stat.S_IRUSR) - writable = bool(mode & stat.S_IWUSR) - if readable and writable: - return "rw" - elif readable: - return "r-" - else: - return "--" - except OSError: - return "--" - - -def get_paths() -> list[PathInfo]: - """Get all SCC-related paths with their status. - - Returns XDG-aware paths with exists/size/permissions info. - """ - paths = [] - - # Config file - paths.append( - PathInfo( - name="Config", - path=config.CONFIG_FILE, - exists=config.CONFIG_FILE.exists(), - size_bytes=_get_size(config.CONFIG_FILE), - permissions=_get_permissions(config.CONFIG_FILE), - ) - ) - - # Sessions file - paths.append( - PathInfo( - name="Sessions", - path=config.SESSIONS_FILE, - exists=config.SESSIONS_FILE.exists(), - size_bytes=_get_size(config.SESSIONS_FILE), - permissions=_get_permissions(config.SESSIONS_FILE), - ) - ) - - # Exceptions file (user store) - exceptions_path = config.CONFIG_DIR / "exceptions.json" - paths.append( - PathInfo( - name="Exceptions", - path=exceptions_path, - exists=exceptions_path.exists(), - size_bytes=_get_size(exceptions_path), - permissions=_get_permissions(exceptions_path), - ) - ) - - # Cache directory - paths.append( - PathInfo( - name="Cache", - path=config.CACHE_DIR, - exists=config.CACHE_DIR.exists(), - size_bytes=_get_size(config.CACHE_DIR), - permissions=_get_permissions(config.CACHE_DIR), - ) - ) - - # Contexts file (in cache) - contexts_path = contexts._get_contexts_path() - paths.append( - PathInfo( - name="Contexts", - path=contexts_path, - exists=contexts_path.exists(), - size_bytes=_get_size(contexts_path), - permissions=_get_permissions(contexts_path), - ) - ) - - return paths - - -def get_total_size() -> int: - """Get total size of all SCC paths in bytes.""" - return sum(p.size_bytes for p in get_paths()) - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Backup Operations -# ═══════════════════════════════════════════════════════════════════════════════ - - -def _create_backup(path: Path) -> Path | None: - """Create a timestamped backup of a file. - - Backups are created atomically with 0600 permissions. - - Args: - path: File to backup - - Returns: - Path to backup file, or None if file doesn't exist - """ - if not path.exists(): - return None - - timestamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S") - backup_path = path.with_suffix(f".bak-{timestamp}{path.suffix}") - - # Atomic copy with temp file - backup_dir = path.parent - with tempfile.NamedTemporaryFile(mode="wb", dir=backup_dir, delete=False) as tmp: - tmp_path = Path(tmp.name) - try: - # Copy content - shutil.copy2(path, tmp_path) - # Set restrictive permissions (0600) - os.chmod(tmp_path, stat.S_IRUSR | stat.S_IWUSR) - # Atomic rename - tmp_path.rename(backup_path) - return backup_path - except Exception: - # Cleanup on failure - tmp_path.unlink(missing_ok=True) - raise - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Clear Operations (Tier 0 - Safe) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def clear_cache(dry_run: bool = False) -> ResetResult: - """Clear regenerable cache files. - - Risk: Tier 0 (Safe) - Files regenerate automatically on next use. - """ - cache_dir = config.CACHE_DIR - result = ResetResult( - success=True, - action_id="clear_cache", - risk_tier=RiskTier.SAFE, - paths=[cache_dir], - message="Cache cleared", - ) - - if not cache_dir.exists(): - result.message = "No cache to clear" - return result - - # Calculate size before clearing - result.bytes_freed = _get_size(cache_dir) - - # Count files - file_count = 0 - try: - for item in cache_dir.rglob("*"): - if item.is_file(): - file_count += 1 - except OSError: - pass - result.removed_count = file_count - - if dry_run: - result.message = f"Would clear {file_count} cache files" - return result - - # Actually clear - try: - shutil.rmtree(cache_dir) - cache_dir.mkdir(parents=True, exist_ok=True) - result.message = f"Cleared {file_count} cache files" - except OSError as e: - result.success = False - result.error = str(e) - result.message = f"Failed to clear cache: {e}" - - return result - - -def cleanup_expired_exceptions(dry_run: bool = False) -> ResetResult: - """Remove only expired exceptions. - - Risk: Tier 0 (Safe) - Only removes already-expired items. - """ - result = ResetResult( - success=True, - action_id="cleanup_expired_exceptions", - risk_tier=RiskTier.SAFE, - message="Expired exceptions cleaned up", - ) - - user_store = UserStore() - result.paths = [user_store.path] - - # Count expired before cleanup - try: - exception_file = user_store.read() - expired_count = sum(1 for e in exception_file.exceptions if e.is_expired()) - result.removed_count = expired_count - except Exception: - result.removed_count = 0 - - if dry_run: - result.message = f"Would remove {result.removed_count} expired exceptions" - return result - - if result.removed_count == 0: - result.message = "No expired exceptions to clean up" - return result - - try: - # prune_expired removes expired exceptions - user_store.prune_expired() - result.message = f"Removed {result.removed_count} expired exceptions" - except Exception as e: - result.success = False - result.error = str(e) - result.message = f"Failed to cleanup: {e}" - - return result - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Clear Operations (Tier 1 - Changes State) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def clear_contexts(dry_run: bool = False) -> ResetResult: - """Clear recent work contexts. - - Risk: Tier 1 (Changes State) - Clears Quick Resume list. - """ - result = ResetResult( - success=True, - action_id="clear_contexts", - risk_tier=RiskTier.CHANGES_STATE, - message="Contexts cleared", - next_steps=["Your Quick Resume list is now empty. New contexts will appear as you work."], - ) - - contexts_path = contexts._get_contexts_path() - result.paths = [contexts_path] - - # Get current count - current_contexts = contexts.load_recent_contexts() - result.removed_count = len(current_contexts) - - if result.removed_count == 0: - result.message = "No contexts to clear" - return result - - if dry_run: - result.message = f"Would clear {result.removed_count} contexts" - return result - - try: - result.bytes_freed = _get_size(contexts_path) - cleared = contexts.clear_contexts() - result.removed_count = cleared - result.message = f"Cleared {cleared} contexts" - except Exception as e: - result.success = False - result.error = str(e) - result.message = f"Failed to clear contexts: {e}" - - return result - - -def prune_containers(dry_run: bool = False) -> ResetResult: - """Remove stopped Docker containers. - - Risk: Tier 1 (Changes State) - Only removes stopped containers. - - This delegates to the existing container pruning logic. - """ - result = ResetResult( - success=True, - action_id="prune_containers", - risk_tier=RiskTier.CHANGES_STATE, - message="Containers pruned", - ) - - try: - from .sandbox import docker # type: ignore[import-untyped] - - # Get stopped containers - all_containers = docker._list_all_sandbox_containers() - stopped = [c for c in all_containers if c.get("status", "").lower() != "running"] - result.removed_count = len(stopped) - - if result.removed_count == 0: - result.message = "No stopped containers to prune" - return result - - if dry_run: - result.message = f"Would remove {result.removed_count} stopped containers" - return result - - # Actually prune - for container in stopped: - container_id = container.get("id") or container.get("name") - if container_id: - try: - docker._remove_container(container_id) - except Exception: - pass - - result.message = f"Removed {result.removed_count} stopped containers" - - except ImportError: - result.message = "Docker not available" - except Exception as e: - result.success = False - result.error = str(e) - result.message = f"Failed to prune containers: {e}" - - return result - - -def prune_sessions( - older_than_days: int = 30, - keep_n: int = 20, - team: str | None = None, - dry_run: bool = False, -) -> ResetResult: - """Prune old sessions while keeping recent ones. - - Risk: Tier 1 (Changes State) - Safe prune with defaults. - - Args: - older_than_days: Remove sessions older than this (default: 30) - keep_n: Keep at least this many recent sessions per team (default: 20) - team: Only prune sessions for this team (None = all) - dry_run: Preview only, don't actually delete - """ - result = ResetResult( - success=True, - action_id="prune_sessions", - risk_tier=RiskTier.CHANGES_STATE, - paths=[config.SESSIONS_FILE], - message=f"Pruned sessions older than {older_than_days}d (kept newest {keep_n} per team)", - ) - - try: - from ..utils.locks import file_lock, lock_path - - lock_file = lock_path("sessions") - with file_lock(lock_file): - all_sessions = sessions._load_sessions() - original_count = len(all_sessions) - - if original_count == 0: - result.message = "No sessions to prune" - return result - - # Calculate cutoff date - cutoff = datetime.now(timezone.utc) - timedelta(days=older_than_days) - - # Group sessions by team - by_team: dict[str | None, list[dict[str, Any]]] = {} - for s in all_sessions: - t = s.get("team") - if team is not None and t != team: - # Keep sessions from other teams - by_team.setdefault(t, []).append(s) - else: - by_team.setdefault(t, []).append(s) - - # For each team, keep newest keep_n, prune rest if older than cutoff - kept_sessions = [] - for t, team_sessions in by_team.items(): - # Sort by last_used descending - team_sessions.sort(key=lambda s: s.get("last_used", ""), reverse=True) - - # Always keep the newest keep_n - kept = team_sessions[:keep_n] - remaining = team_sessions[keep_n:] - - # From remaining, keep only if newer than cutoff - for s in remaining: - last_used = s.get("last_used", "") - if last_used: - try: - dt = datetime.fromisoformat(last_used.replace("Z", "+00:00")) - if dt > cutoff: - kept.append(s) - except (ValueError, TypeError): - pass - - kept_sessions.extend(kept) - - result.removed_count = original_count - len(kept_sessions) - - if result.removed_count == 0: - result.message = "No sessions to prune" - return result - - if dry_run: - result.message = f"Would prune {result.removed_count} sessions older than {older_than_days}d (kept newest {keep_n} per team)" - return result - - # Calculate bytes freed - result.bytes_freed = _get_size(config.SESSIONS_FILE) - - # Save filtered sessions - sessions._save_sessions(kept_sessions) - - # Recalculate bytes freed - new_size = _get_size(config.SESSIONS_FILE) - result.bytes_freed = result.bytes_freed - new_size - - result.message = f"Pruned {result.removed_count} sessions older than {older_than_days}d (kept newest {keep_n} per team)" - - except Exception as e: - result.success = False - result.error = str(e) - result.message = f"Failed to prune sessions: {e}" - - return result - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Reset Operations (Tier 2 - Destructive) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def reset_exceptions( - scope: Literal["all", "user", "repo"] = "all", - repo_root: Path | None = None, - dry_run: bool = False, - create_backup: bool = True, -) -> ResetResult: - """Reset exception stores. - - Risk: Tier 2 (Destructive) - Removes policy exceptions. - - Args: - scope: Which stores to reset ("all", "user", "repo") - repo_root: Repo root for repo-scoped exceptions - dry_run: Preview only - create_backup: Create backup before deletion - """ - result = ResetResult( - success=True, - action_id="reset_exceptions", - risk_tier=RiskTier.DESTRUCTIVE, - message="Exceptions reset", - ) - - user_store = UserStore() - repo_store = RepoStore(repo_root) if repo_root else None - - # Determine which stores to reset - stores_to_reset: list[tuple[str, Any]] = [] - if scope in ("all", "user"): - stores_to_reset.append(("user", user_store)) - if scope in ("all", "repo") and repo_store: - stores_to_reset.append(("repo", repo_store)) - - for store_name, store in stores_to_reset: - result.paths.append(store.path) - if store.path.exists(): - result.removed_count += len(store.load()) - result.bytes_freed += _get_size(store.path) - - if result.removed_count == 0: - result.message = "No exceptions to reset" - return result - - if dry_run: - result.message = f"Would reset {result.removed_count} exceptions" - return result - - # Create backup if requested - if create_backup: - for store_name, store in stores_to_reset: - if store.path.exists(): - backup = _create_backup(store.path) - if backup and result.backup_path is None: - result.backup_path = backup - - # Reset stores - try: - for store_name, store in stores_to_reset: - store.reset() - result.message = f"Reset {result.removed_count} exceptions" - except Exception as e: - result.success = False - result.error = str(e) - result.message = f"Failed to reset exceptions: {e}" - - return result - - -def delete_all_sessions( - dry_run: bool = False, - create_backup: bool = True, -) -> ResetResult: - """Delete entire sessions store. - - Risk: Tier 2 (Destructive) - Removes all session history. - """ - result = ResetResult( - success=True, - action_id="delete_all_sessions", - risk_tier=RiskTier.DESTRUCTIVE, - paths=[config.SESSIONS_FILE], - message="All sessions deleted", - next_steps=["Your session history is now empty. New sessions will appear as you work."], - ) - - if not config.SESSIONS_FILE.exists(): - result.message = "No sessions to delete" - return result - - # Count sessions - try: - all_sessions = sessions._load_sessions() - result.removed_count = len(all_sessions) - except Exception: - result.removed_count = 0 - - result.bytes_freed = _get_size(config.SESSIONS_FILE) - - if dry_run: - result.message = f"Would delete {result.removed_count} sessions" - return result - - # Create backup if requested - if create_backup: - result.backup_path = _create_backup(config.SESSIONS_FILE) - - try: - sessions.clear_history() - result.message = f"Deleted {result.removed_count} sessions" - except Exception as e: - result.success = False - result.error = str(e) - result.message = f"Failed to delete sessions: {e}" - - return result - - -def reset_config( - dry_run: bool = False, - create_backup: bool = True, -) -> ResetResult: - """Reset user configuration to defaults. - - Risk: Tier 2 (Destructive) - Requires running setup again. - """ - result = ResetResult( - success=True, - action_id="reset_config", - risk_tier=RiskTier.DESTRUCTIVE, - paths=[config.CONFIG_FILE], - message="Configuration reset", - next_steps=["Run 'scc setup' to reconfigure"], - ) - - if not config.CONFIG_FILE.exists(): - result.message = "No configuration to reset" - return result - - result.bytes_freed = _get_size(config.CONFIG_FILE) - - if dry_run: - result.message = "Would reset configuration" - return result - - # Create backup if requested - if create_backup: - result.backup_path = _create_backup(config.CONFIG_FILE) - - try: - config.CONFIG_FILE.unlink() - result.message = "Configuration reset" - except Exception as e: - result.success = False - result.error = str(e) - result.message = f"Failed to reset config: {e}" - - return result - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Factory Reset (Tier 3) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def factory_reset( - dry_run: bool = False, - create_backup: bool = True, - continue_on_error: bool = False, -) -> list[ResetResult]: - """Perform factory reset - remove all SCC data. - - Risk: Tier 3 (Factory Reset) - Complete clean slate. - - Order: Local files first (config, sessions, exceptions, contexts, cache), - containers last. This ensures Docker failures don't block local cleanup. - - Args: - dry_run: Preview only - create_backup: Create backups for Tier 2 operations - continue_on_error: Don't stop on first failure - - Returns: - List of ResetResult for each operation - """ - results: list[ResetResult] = [] - - # Order: local files first, containers last - operations = [ - ("reset_config", lambda: reset_config(dry_run=dry_run, create_backup=create_backup)), - ( - "delete_all_sessions", - lambda: delete_all_sessions(dry_run=dry_run, create_backup=create_backup), - ), - ( - "reset_exceptions", - lambda: reset_exceptions(dry_run=dry_run, create_backup=create_backup), - ), - ("clear_contexts", lambda: clear_contexts(dry_run=dry_run)), - ("clear_cache", lambda: clear_cache(dry_run=dry_run)), - ("prune_containers", lambda: prune_containers(dry_run=dry_run)), - ] - - for op_name, op_func in operations: - try: - result = op_func() - results.append(result) - - if not result.success and not continue_on_error: - # Stop on first failure - break - - except Exception as e: - results.append( - ResetResult( - success=False, - action_id=op_name, - risk_tier=RiskTier.FACTORY_RESET, - error=str(e), - message=f"Failed: {e}", - ) - ) - if not continue_on_error: - break - - return results - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Preview / Plan Operations -# ═══════════════════════════════════════════════════════════════════════════════ - - -def preview_operation(action_id: str, **kwargs: Any) -> MaintenancePreview: - """Get preview of what an operation would do. - - Used for --plan flag and [P]review button. - Fast, compute-only, no side effects. - """ - action_map = { - "clear_cache": (RiskTier.SAFE, "Clear regenerable cache files"), - "cleanup_expired_exceptions": (RiskTier.SAFE, "Remove only expired exceptions"), - "clear_contexts": (RiskTier.CHANGES_STATE, "Clear recent work contexts"), - "prune_containers": (RiskTier.CHANGES_STATE, "Remove stopped Docker containers"), - "prune_sessions": (RiskTier.CHANGES_STATE, "Prune old sessions (keeps recent)"), - "reset_exceptions": (RiskTier.DESTRUCTIVE, "Clear all policy exceptions"), - "delete_all_sessions": (RiskTier.DESTRUCTIVE, "Delete entire session history"), - "reset_config": (RiskTier.DESTRUCTIVE, "Reset configuration (requires setup)"), - "factory_reset": (RiskTier.FACTORY_RESET, "Remove all SCC data"), - } - - if action_id not in action_map: - raise ValueError(f"Unknown action: {action_id}") - - risk_tier, description = action_map[action_id] - - # Get paths affected - paths: list[Path] = [] - item_count = 0 - bytes_estimate = 0 - - if action_id == "clear_cache": - paths = [config.CACHE_DIR] - bytes_estimate = _get_size(config.CACHE_DIR) - elif action_id == "clear_contexts": - ctx_path = contexts._get_contexts_path() - paths = [ctx_path] - item_count = len(contexts.load_recent_contexts()) - bytes_estimate = _get_size(ctx_path) - elif action_id == "prune_sessions" or action_id == "delete_all_sessions": - paths = [config.SESSIONS_FILE] - try: - item_count = len(sessions._load_sessions()) - except Exception: - item_count = 0 - bytes_estimate = _get_size(config.SESSIONS_FILE) - elif action_id == "reset_config": - paths = [config.CONFIG_FILE] - bytes_estimate = _get_size(config.CONFIG_FILE) - elif action_id == "reset_exceptions": - user_store = UserStore() - paths = [user_store.path] - try: - item_count = len(user_store.read().exceptions) - except Exception: - item_count = 0 - bytes_estimate = _get_size(user_store.path) - elif action_id == "factory_reset": - paths = [config.CONFIG_DIR, config.CACHE_DIR] - bytes_estimate = get_total_size() - - backup_will_be_created = risk_tier == RiskTier.DESTRUCTIVE - - return MaintenancePreview( - action_id=action_id, - risk_tier=risk_tier, - paths=paths, - description=description, - item_count=item_count, - bytes_estimate=bytes_estimate, - backup_will_be_created=backup_will_be_created, - parameters=kwargs, - ) +from scc_cli.maintenance import ( + MaintenanceLock, + MaintenanceLockError, + MaintenancePreview, + PathInfo, + ResetResult, + RiskTier, + cleanup_expired_exceptions, + clear_cache, + clear_contexts, + delete_all_sessions, + factory_reset, + get_paths, + get_total_size, + preview_operation, + prune_containers, + prune_sessions, + reset_config, + reset_exceptions, +) + +__all__ = [ + "MaintenanceLock", + "MaintenanceLockError", + "MaintenancePreview", + "PathInfo", + "ResetResult", + "RiskTier", + "cleanup_expired_exceptions", + "clear_cache", + "clear_contexts", + "delete_all_sessions", + "factory_reset", + "get_paths", + "get_total_size", + "preview_operation", + "prune_containers", + "prune_sessions", + "reset_config", + "reset_exceptions", +] diff --git a/src/scc_cli/doctor/__init__.py b/src/scc_cli/doctor/__init__.py index 92f6234..18c1439 100644 --- a/src/scc_cli/doctor/__init__.py +++ b/src/scc_cli/doctor/__init__.py @@ -44,15 +44,15 @@ ) # Import orchestration and rendering functions from render.py +from scc_cli.doctor.core import run_doctor from scc_cli.doctor.render import ( - build_doctor_json_data, is_first_run, quick_check, render_doctor_compact, render_doctor_results, render_quick_status, - run_doctor, ) +from scc_cli.doctor.serialization import build_doctor_json_data # Import types from types.py from scc_cli.doctor.types import CheckResult, DoctorResult, JsonValidationResult diff --git a/src/scc_cli/doctor/core.py b/src/scc_cli/doctor/core.py new file mode 100644 index 0000000..1defec7 --- /dev/null +++ b/src/scc_cli/doctor/core.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +from pathlib import Path + +from .checks import ( + check_config_directory, + check_docker, + check_docker_running, + check_docker_sandbox, + check_git, + check_user_config_valid, + check_workspace_path, + check_wsl2, +) +from .types import DoctorResult + + +def run_doctor(workspace: Path | None = None) -> DoctorResult: + """Run all health checks and return comprehensive results.""" + + result = DoctorResult() + + git_check = check_git() + result.checks.append(git_check) + result.git_ok = git_check.passed + result.git_version = git_check.version + + docker_check = check_docker() + result.checks.append(docker_check) + result.docker_ok = docker_check.passed + result.docker_version = docker_check.version + + if result.docker_ok: + daemon_check = check_docker_running() + result.checks.append(daemon_check) + if not daemon_check.passed: + result.docker_ok = False + + if result.docker_ok: + sandbox_check = check_docker_sandbox() + result.checks.append(sandbox_check) + result.sandbox_ok = sandbox_check.passed + else: + result.sandbox_ok = False + + wsl2_check, is_wsl2 = check_wsl2() + result.checks.append(wsl2_check) + result.wsl2_detected = is_wsl2 + + if workspace: + path_check = check_workspace_path(workspace) + result.checks.append(path_check) + result.windows_path_warning = not path_check.passed and path_check.severity == "warning" + + config_check = check_config_directory() + result.checks.append(config_check) + + from .checks import ( + check_git_version_for_worktrees, + check_worktree_branch_conflicts, + check_worktree_health, + ) + + git_version_wt_check = check_git_version_for_worktrees() + if git_version_wt_check is not None: + result.checks.append(git_version_wt_check) + + worktree_health_check = check_worktree_health() + if worktree_health_check is not None: + result.checks.append(worktree_health_check) + + branch_conflict_check = check_worktree_branch_conflicts() + if branch_conflict_check is not None: + result.checks.append(branch_conflict_check) + + user_config_check = check_user_config_valid() + result.checks.append(user_config_check) + + return result diff --git a/src/scc_cli/doctor/render.py b/src/scc_cli/doctor/render.py index 26cc521..27614a8 100644 --- a/src/scc_cli/doctor/render.py +++ b/src/scc_cli/doctor/render.py @@ -1,7 +1,6 @@ """Orchestration and rendering functions for the doctor module. This module contains: -- run_doctor(): Main orchestrator that runs all health checks - build_doctor_json_data(): JSON serialization for CLI output - render_doctor_results(): Rich terminal UI rendering - render_doctor_compact(): Compact inline status display @@ -12,9 +11,6 @@ from __future__ import annotations -from pathlib import Path -from typing import Any - from rich import box from rich.console import Console from rich.panel import Panel @@ -23,154 +19,9 @@ from scc_cli import __version__ -from .checks import ( - check_config_directory, - check_docker, - check_docker_running, - check_docker_sandbox, - check_git, - check_user_config_valid, - check_workspace_path, - check_wsl2, -) +from .core import run_doctor from .types import DoctorResult -# ═══════════════════════════════════════════════════════════════════════════════ -# JSON Serialization -# ═══════════════════════════════════════════════════════════════════════════════ - - -def build_doctor_json_data(result: DoctorResult) -> dict[str, Any]: - """Build JSON-serializable data from DoctorResult. - - Args: - result: The DoctorResult to convert. - - Returns: - Dictionary suitable for JSON envelope data field. - """ - checks_data = [] - for check in result.checks: - check_dict: dict[str, Any] = { - "name": check.name, - "passed": check.passed, - "message": check.message, - "severity": check.severity, - } - if check.version: - check_dict["version"] = check.version - if check.fix_hint: - check_dict["fix_hint"] = check.fix_hint - if check.fix_url: - check_dict["fix_url"] = check.fix_url - if check.fix_commands: - check_dict["fix_commands"] = check.fix_commands - if check.code_frame: - check_dict["code_frame"] = check.code_frame - checks_data.append(check_dict) - - # Calculate summary stats - total = len(result.checks) - passed = sum(1 for c in result.checks if c.passed) - errors = sum(1 for c in result.checks if not c.passed and c.severity == "error") - warnings = sum(1 for c in result.checks if not c.passed and c.severity == "warning") - - return { - "checks": checks_data, - "summary": { - "total": total, - "passed": passed, - "errors": errors, - "warnings": warnings, - "all_ok": result.all_ok, - }, - } - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Main Doctor Orchestrator -# ═══════════════════════════════════════════════════════════════════════════════ - - -def run_doctor(workspace: Path | None = None) -> DoctorResult: - """Run all health checks and return comprehensive results. - - Args: - workspace: Optional workspace path to check for optimization - - Returns: - DoctorResult with all check results - """ - result = DoctorResult() - - # Git check - git_check = check_git() - result.checks.append(git_check) - result.git_ok = git_check.passed - result.git_version = git_check.version - - # Docker check - docker_check = check_docker() - result.checks.append(docker_check) - result.docker_ok = docker_check.passed - result.docker_version = docker_check.version - - # Docker daemon check (only if Docker is installed) - if result.docker_ok: - daemon_check = check_docker_running() - result.checks.append(daemon_check) - if not daemon_check.passed: - result.docker_ok = False - - # Docker sandbox check (only if Docker is OK) - if result.docker_ok: - sandbox_check = check_docker_sandbox() - result.checks.append(sandbox_check) - result.sandbox_ok = sandbox_check.passed - else: - result.sandbox_ok = False - - # WSL2 check - wsl2_check, is_wsl2 = check_wsl2() - result.checks.append(wsl2_check) - result.wsl2_detected = is_wsl2 - - # Workspace path check (if WSL2 and workspace provided) - if workspace: - path_check = check_workspace_path(workspace) - result.checks.append(path_check) - result.windows_path_warning = not path_check.passed and path_check.severity == "warning" - - # Config directory check - config_check = check_config_directory() - result.checks.append(config_check) - - # Git worktree health checks (may return None if not in a git repo) - from .checks import ( - check_git_version_for_worktrees, - check_worktree_branch_conflicts, - check_worktree_health, - ) - - git_version_wt_check = check_git_version_for_worktrees() - if git_version_wt_check is not None: - result.checks.append(git_version_wt_check) - - worktree_health_check = check_worktree_health() - if worktree_health_check is not None: - result.checks.append(worktree_health_check) - - branch_conflict_check = check_worktree_branch_conflicts() - if branch_conflict_check is not None: - result.checks.append(branch_conflict_check) - - # User config JSON validation check - user_config_check = check_user_config_valid() - result.checks.append(user_config_check) - - return result - - # ═══════════════════════════════════════════════════════════════════════════════ # Rich Terminal UI Rendering # ═══════════════════════════════════════════════════════════════════════════════ diff --git a/src/scc_cli/doctor/serialization.py b/src/scc_cli/doctor/serialization.py new file mode 100644 index 0000000..d2cff49 --- /dev/null +++ b/src/scc_cli/doctor/serialization.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from typing import Any + +from .types import DoctorResult + + +def build_doctor_json_data(result: DoctorResult) -> dict[str, Any]: + """Build JSON-serializable data from DoctorResult.""" + + checks_data = [] + for check in result.checks: + check_dict: dict[str, Any] = { + "name": check.name, + "passed": check.passed, + "message": check.message, + "severity": check.severity, + } + if check.version: + check_dict["version"] = check.version + if check.fix_hint: + check_dict["fix_hint"] = check.fix_hint + if check.fix_url: + check_dict["fix_url"] = check.fix_url + if check.fix_commands: + check_dict["fix_commands"] = check.fix_commands + if check.code_frame: + check_dict["code_frame"] = check.code_frame + checks_data.append(check_dict) + + total = len(result.checks) + passed = sum(1 for c in result.checks if c.passed) + errors = sum(1 for c in result.checks if not c.passed and c.severity == "error") + warnings = sum(1 for c in result.checks if not c.passed and c.severity == "warning") + + return { + "checks": checks_data, + "summary": { + "total": total, + "passed": passed, + "errors": errors, + "warnings": warnings, + "all_ok": result.all_ok, + }, + } diff --git a/src/scc_cli/evaluation/evaluate.py b/src/scc_cli/evaluation/evaluate.py index 24c55c4..c81e7c4 100644 --- a/src/scc_cli/evaluation/evaluate.py +++ b/src/scc_cli/evaluation/evaluate.py @@ -1,8 +1,8 @@ """Bridge function to convert EffectiveConfig to EvaluationResult. Provide the evaluate() function that converts the governance layer models -(profiles.py) to the exception system models (evaluation/models.py) with -proper BlockReason annotations. +(application compute_effective_config) to the exception system models +(evaluation/models.py) with proper BlockReason annotations. This is a pure function with no IO - all input comes from the EffectiveConfig parameter and output is a new EvaluationResult. @@ -20,17 +20,17 @@ from scc_cli.models.exceptions import BlockReason if TYPE_CHECKING: - from scc_cli.profiles import EffectiveConfig + from scc_cli.application.compute_effective_config import EffectiveConfig def evaluate(config: EffectiveConfig) -> EvaluationResult: """Convert EffectiveConfig to EvaluationResult with BlockReason annotations. - This function bridges the governance layer (profiles.py models) to the + This function bridges the governance layer (application models) to the exception system (evaluation/models.py) by converting: - - profiles.BlockedItem -> evaluation.BlockedItem with BlockReason.SECURITY - - profiles.DelegationDenied -> evaluation.DeniedAddition with BlockReason.DELEGATION + - compute_effective_config.BlockedItem -> evaluation.BlockedItem with BlockReason.SECURITY + - compute_effective_config.DelegationDenied -> evaluation.DeniedAddition with BlockReason.DELEGATION Args: config: The EffectiveConfig from the profile merge process diff --git a/src/scc_cli/git.py b/src/scc_cli/git.py index c1e4b79..9fefe68 100644 --- a/src/scc_cli/git.py +++ b/src/scc_cli/git.py @@ -1,15 +1,13 @@ """ Git operations - backward-compatible facade. -This module is a pure re-export facade after Phase 4 refactoring: -- Data functions: services/git/ (core.py, branch.py, worktree.py, hooks.py) -- Rendering functions: ui/git_render.py -- Interactive UI functions: ui/git_interactive.py +This module re-exports the pure git service API from services/git for backward +compatibility (e.g. `from scc_cli.git import WorktreeInfo`). -All symbols are re-exported for backward compatibility - existing imports -like `from scc_cli.git import WorktreeInfo` continue to work. +UI helpers now live in `scc_cli.ui.git_interactive` and `scc_cli.ui.git_render` +and should be imported directly. -NO Rich imports in this module - that's a key acceptance criterion. +No Rich imports are allowed in this module. """ # ═══════════════════════════════════════════════════════════════════════════════ @@ -63,22 +61,3 @@ # Keep _get_worktrees_data as alias for backward compatibility from .services.git.worktree import get_worktrees_data as _get_worktrees_data # noqa: F401 - -# ═══════════════════════════════════════════════════════════════════════════════ -# Re-exports from ui/ for backward compatibility -# ═══════════════════════════════════════════════════════════════════════════════ -# Interactive UI functions (extracted in Phase 4B) -from .ui.git_interactive import ( # noqa: F401 - check_branch_safety, - cleanup_worktree, - clone_repo, - create_worktree, - install_dependencies, - install_hooks, - list_worktrees, -) - -# Pure rendering functions -from .ui.git_render import format_git_status as _format_git_status # noqa: F401 -from .ui.git_render import render_worktrees # noqa: F401 -from .ui.git_render import render_worktrees_table as _render_worktrees_table # noqa: F401 diff --git a/src/scc_cli/json_command.py b/src/scc_cli/json_command.py index c44323a..fdbd020 100644 --- a/src/scc_cli/json_command.py +++ b/src/scc_cli/json_command.py @@ -27,16 +27,8 @@ def team_list( import typer -from .core.errors import ConfigError, PolicyViolationError, PrerequisiteError, SCCError -from .core.exit_codes import ( - EXIT_CANCELLED, - EXIT_CONFIG, - EXIT_ERROR, - EXIT_GOVERNANCE, - EXIT_PREREQ, - EXIT_SUCCESS, - EXIT_VALIDATION, -) +from .core.error_mapping import to_exit_code, to_json_payload +from .core.exit_codes import EXIT_CANCELLED, EXIT_SUCCESS from .json_output import build_envelope from .kinds import Kind from .output_mode import _pretty_mode, json_command_mode, json_output_mode, print_json @@ -45,28 +37,6 @@ def team_list( F = TypeVar("F", bound=Callable[..., Any]) -# ═══════════════════════════════════════════════════════════════════════════════ -# Exception to Exit Code Mapping -# ═══════════════════════════════════════════════════════════════════════════════ - - -def _get_exit_code_for_exception(exc: Exception) -> int: - """Map exception types to exit codes.""" - if isinstance(exc, PolicyViolationError): - return EXIT_GOVERNANCE - if isinstance(exc, PrerequisiteError): - return EXIT_PREREQ - if isinstance(exc, ConfigError): - return EXIT_CONFIG - if isinstance(exc, SCCError): - # Check exit_code attribute if available - return getattr(exc, "exit_code", EXIT_ERROR) - # Check for validation-like errors by name - if "Validation" in type(exc).__name__: - return EXIT_VALIDATION - return EXIT_ERROR - - # ═══════════════════════════════════════════════════════════════════════════════ # JSON Command Decorator # ═══════════════════════════════════════════════════════════════════════════════ @@ -135,25 +105,25 @@ def wrapper(**kwargs: Any) -> Any: raise except KeyboardInterrupt: + payload = to_json_payload(Exception("Cancelled")) envelope = build_envelope( kind, data={}, ok=False, - errors=["Cancelled"], + errors=payload["errors"], ) print_json(envelope) raise typer.Exit(EXIT_CANCELLED) except Exception as e: - # Map exception to exit code - exit_code = _get_exit_code_for_exception(e) + exit_code = to_exit_code(e) + payload = to_json_payload(e) - # Build and print error envelope envelope = build_envelope( kind, data={}, ok=False, - errors=[str(e)], + errors=payload["errors"], ) print_json(envelope) raise typer.Exit(exit_code) diff --git a/src/scc_cli/json_output.py b/src/scc_cli/json_output.py index 9e89b2c..23d531a 100644 --- a/src/scc_cli/json_output.py +++ b/src/scc_cli/json_output.py @@ -15,7 +15,7 @@ from typing import Any from . import __version__ -from .core.errors import SCCError +from .core.error_mapping import to_json_payload from .kinds import Kind # ═══════════════════════════════════════════════════════════════════════════════ @@ -128,20 +128,7 @@ def build_error_envelope(exc: Exception) -> dict[str, Any]: # Generate ISO 8601 timestamp in UTC generated_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") - # Build error data depending on exception type - error_data: dict[str, Any] = { - "error_type": type(exc).__name__, - } - - if isinstance(exc, SCCError): - error_data["user_message"] = exc.user_message - if exc.suggested_action: - error_data["suggested_action"] = exc.suggested_action - if exc.debug_context: - error_data["debug_context"] = exc.debug_context - error_message = exc.user_message - else: - error_message = str(exc) + payload = to_json_payload(exc) return { "apiVersion": API_VERSION, @@ -152,8 +139,8 @@ def build_error_envelope(exc: Exception) -> dict[str, Any]: }, "status": { "ok": False, - "errors": [error_message], + "errors": payload["errors"], "warnings": [], }, - "data": error_data, + "data": payload["data"], } diff --git a/src/scc_cli/maintenance/__init__.py b/src/scc_cli/maintenance/__init__.py new file mode 100644 index 0000000..d55d975 --- /dev/null +++ b/src/scc_cli/maintenance/__init__.py @@ -0,0 +1,48 @@ +"""Maintenance operations and task registry.""" + +from __future__ import annotations + +from .cache_cleanup import ( + cleanup_expired_exceptions, + clear_cache, + clear_contexts, + prune_containers, +) +from .health_checks import get_paths, get_total_size, preview_operation +from .lock import MaintenanceLock, MaintenanceLockError +from .migrations import factory_reset, reset_config, reset_exceptions +from .repair_sessions import delete_all_sessions, prune_sessions +from .tasks import ( + MaintenanceTask, + MaintenanceTaskContext, + get_task, + list_tasks, + run_task, +) +from .types import MaintenancePreview, PathInfo, ResetResult, RiskTier + +__all__ = [ + "RiskTier", + "PathInfo", + "ResetResult", + "MaintenancePreview", + "MaintenanceLock", + "MaintenanceLockError", + "clear_cache", + "cleanup_expired_exceptions", + "clear_contexts", + "prune_containers", + "prune_sessions", + "reset_exceptions", + "delete_all_sessions", + "reset_config", + "factory_reset", + "get_paths", + "get_total_size", + "preview_operation", + "MaintenanceTask", + "MaintenanceTaskContext", + "list_tasks", + "get_task", + "run_task", +] diff --git a/src/scc_cli/maintenance/backups.py b/src/scc_cli/maintenance/backups.py new file mode 100644 index 0000000..918c7f8 --- /dev/null +++ b/src/scc_cli/maintenance/backups.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +import os +import shutil +import stat +import tempfile +from datetime import datetime, timezone +from pathlib import Path + + +def _create_backup(path: Path) -> Path | None: + """Create a timestamped backup of a file. + + Backups are created atomically with 0600 permissions. + + Args: + path: File to backup + + Returns: + Path to backup file, or None if file doesn't exist + """ + if not path.exists(): + return None + + timestamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S") + backup_path = path.with_suffix(f".bak-{timestamp}{path.suffix}") + + # Atomic copy with temp file + backup_dir = path.parent + with tempfile.NamedTemporaryFile(mode="wb", dir=backup_dir, delete=False) as tmp: + tmp_path = Path(tmp.name) + try: + shutil.copy2(path, tmp_path) + os.chmod(tmp_path, stat.S_IRUSR | stat.S_IWUSR) + tmp_path.rename(backup_path) + return backup_path + except Exception: + tmp_path.unlink(missing_ok=True) + raise diff --git a/src/scc_cli/maintenance/cache_cleanup.py b/src/scc_cli/maintenance/cache_cleanup.py new file mode 100644 index 0000000..3d672a1 --- /dev/null +++ b/src/scc_cli/maintenance/cache_cleanup.py @@ -0,0 +1,186 @@ +from __future__ import annotations + +import shutil + +from scc_cli import config, contexts +from scc_cli.stores.exception_store import UserStore + +from .health_checks import _get_size +from .types import ResetResult, RiskTier + + +def clear_cache(dry_run: bool = False) -> ResetResult: + """Clear regenerable cache files. + + Risk: Tier 0 (Safe) - Files regenerate automatically on next use. + """ + cache_dir = config.CACHE_DIR + result = ResetResult( + success=True, + action_id="clear_cache", + risk_tier=RiskTier.SAFE, + paths=[cache_dir], + message="Cache cleared", + ) + + if not cache_dir.exists(): + result.message = "No cache to clear" + return result + + result.bytes_freed = _get_size(cache_dir) + + file_count = 0 + try: + for item in cache_dir.rglob("*"): + if item.is_file(): + file_count += 1 + except OSError: + pass + result.removed_count = file_count + + if dry_run: + result.message = f"Would clear {file_count} cache files" + return result + + try: + shutil.rmtree(cache_dir) + cache_dir.mkdir(parents=True, exist_ok=True) + result.message = f"Cleared {file_count} cache files" + except OSError as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to clear cache: {exc}" + + return result + + +def cleanup_expired_exceptions(dry_run: bool = False) -> ResetResult: + """Remove only expired exceptions. + + Risk: Tier 0 (Safe) - Only removes already-expired items. + """ + result = ResetResult( + success=True, + action_id="cleanup_expired_exceptions", + risk_tier=RiskTier.SAFE, + message="Expired exceptions cleaned up", + ) + + user_store = UserStore() + result.paths = [user_store.path] + + try: + exception_file = user_store.read() + expired_count = sum(1 for exception in exception_file.exceptions if exception.is_expired()) + result.removed_count = expired_count + except Exception: + result.removed_count = 0 + + if dry_run: + result.message = f"Would remove {result.removed_count} expired exceptions" + return result + + if result.removed_count == 0: + result.message = "No expired exceptions to clean up" + return result + + try: + user_store.prune_expired() + result.message = f"Removed {result.removed_count} expired exceptions" + except Exception as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to cleanup: {exc}" + + return result + + +def clear_contexts(dry_run: bool = False) -> ResetResult: + """Clear recent work contexts. + + Risk: Tier 1 (Changes State) - Clears Quick Resume list. + """ + result = ResetResult( + success=True, + action_id="clear_contexts", + risk_tier=RiskTier.CHANGES_STATE, + message="Contexts cleared", + next_steps=["Your Quick Resume list is now empty. New contexts will appear as you work."], + ) + + contexts_path = contexts._get_contexts_path() + result.paths = [contexts_path] + + current_contexts = contexts.load_recent_contexts() + result.removed_count = len(current_contexts) + + if result.removed_count == 0: + result.message = "No contexts to clear" + return result + + if dry_run: + result.message = f"Would clear {result.removed_count} contexts" + return result + + try: + result.bytes_freed = _get_size(contexts_path) + cleared = contexts.clear_contexts() + result.removed_count = cleared + result.message = f"Cleared {cleared} contexts" + except Exception as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to clear contexts: {exc}" + + return result + + +def prune_containers(dry_run: bool = False) -> ResetResult: + """Remove stopped Docker containers. + + Risk: Tier 1 (Changes State) - Only removes stopped containers. + + This delegates to the existing container pruning logic. + """ + result = ResetResult( + success=True, + action_id="prune_containers", + risk_tier=RiskTier.CHANGES_STATE, + message="Containers pruned", + ) + + try: + from scc_cli import docker + + all_containers = docker._list_all_sandbox_containers() + stopped = [ + container for container in all_containers if container.status.lower() != "running" + ] + result.removed_count = len(stopped) + + if result.removed_count == 0: + result.message = "No stopped containers to prune" + return result + + if dry_run: + result.message = f"Would remove {result.removed_count} stopped containers" + return result + + for container in stopped: + container_id = container.id or container.name + if container_id: + try: + docker.remove_container(container_id) + except Exception: + pass + + result.message = f"Removed {result.removed_count} stopped containers" + + except ImportError: + result.message = "Docker not available" + except Exception as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to prune containers: {exc}" + + return result diff --git a/src/scc_cli/maintenance/health_checks.py b/src/scc_cli/maintenance/health_checks.py new file mode 100644 index 0000000..3c01dec --- /dev/null +++ b/src/scc_cli/maintenance/health_checks.py @@ -0,0 +1,185 @@ +from __future__ import annotations + +import stat +from pathlib import Path +from typing import Any + +from scc_cli import config, contexts, sessions +from scc_cli.stores.exception_store import RepoStore, UserStore + +from .types import MaintenancePreview, PathInfo, RiskTier + + +def _get_size(path: Path) -> int: + """Get size of file or directory in bytes.""" + if not path.exists(): + return 0 + if path.is_file(): + return path.stat().st_size + total = 0 + try: + for item in path.rglob("*"): + if item.is_file(): + try: + total += item.stat().st_size + except OSError: + pass + except OSError: + pass + return total + + +def _get_permissions(path: Path) -> str: + """Get permission string for path (rw, r-, --).""" + if not path.exists(): + return "--" + try: + mode = path.stat().st_mode + readable = bool(mode & stat.S_IRUSR) + writable = bool(mode & stat.S_IWUSR) + if readable and writable: + return "rw" + if readable: + return "r-" + return "--" + except OSError: + return "--" + + +def get_paths() -> list[PathInfo]: + """Get all SCC-related paths with their status. + + Returns XDG-aware paths with exists/size/permissions info. + """ + paths: list[PathInfo] = [] + + paths.append( + PathInfo( + name="Config", + path=config.CONFIG_FILE, + exists=config.CONFIG_FILE.exists(), + size_bytes=_get_size(config.CONFIG_FILE), + permissions=_get_permissions(config.CONFIG_FILE), + ) + ) + + paths.append( + PathInfo( + name="Sessions", + path=config.SESSIONS_FILE, + exists=config.SESSIONS_FILE.exists(), + size_bytes=_get_size(config.SESSIONS_FILE), + permissions=_get_permissions(config.SESSIONS_FILE), + ) + ) + + exceptions_path = config.CONFIG_DIR / "exceptions.json" + paths.append( + PathInfo( + name="Exceptions", + path=exceptions_path, + exists=exceptions_path.exists(), + size_bytes=_get_size(exceptions_path), + permissions=_get_permissions(exceptions_path), + ) + ) + + paths.append( + PathInfo( + name="Cache", + path=config.CACHE_DIR, + exists=config.CACHE_DIR.exists(), + size_bytes=_get_size(config.CACHE_DIR), + permissions=_get_permissions(config.CACHE_DIR), + ) + ) + + contexts_path = contexts._get_contexts_path() + paths.append( + PathInfo( + name="Contexts", + path=contexts_path, + exists=contexts_path.exists(), + size_bytes=_get_size(contexts_path), + permissions=_get_permissions(contexts_path), + ) + ) + + return paths + + +def get_total_size() -> int: + """Get total size of all SCC paths in bytes.""" + return sum(path.size_bytes for path in get_paths()) + + +def preview_operation(action_id: str, **kwargs: Any) -> MaintenancePreview: + """Get preview of what an operation would do. + + Used for --plan flag and [P]review button. + Fast, compute-only, no side effects. + """ + from .tasks import get_task + + task = get_task(action_id) + if task is None: + raise ValueError(f"Unknown action: {action_id}") + + risk_tier = task.risk_tier + description = task.description + + paths: list[Path] = [] + item_count = 0 + bytes_estimate = 0 + + if action_id == "clear_cache": + paths = [config.CACHE_DIR] + bytes_estimate = _get_size(config.CACHE_DIR) + elif action_id == "clear_contexts": + ctx_path = contexts._get_contexts_path() + paths = [ctx_path] + item_count = len(contexts.load_recent_contexts()) + bytes_estimate = _get_size(ctx_path) + elif action_id in ("prune_sessions", "delete_all_sessions"): + paths = [config.SESSIONS_FILE] + try: + item_count = len(sessions._load_sessions()) + except Exception: + item_count = 0 + bytes_estimate = _get_size(config.SESSIONS_FILE) + elif action_id == "reset_config": + paths = [config.CONFIG_FILE] + bytes_estimate = _get_size(config.CONFIG_FILE) + elif action_id == "reset_exceptions": + scope = kwargs.get("scope", "all") + repo_root = kwargs.get("repo_root") + repo_root_path = Path(repo_root) if repo_root else None + stores: list[UserStore | RepoStore] = [] + if scope in ("all", "user"): + stores.append(UserStore()) + if scope in ("all", "repo") and repo_root_path: + stores.append(RepoStore(repo_root_path)) + + for store in stores: + paths.append(store.path) + try: + item_count += len(store.read().exceptions) + except Exception: + pass + bytes_estimate += _get_size(store.path) + elif action_id == "factory_reset": + paths = [config.CONFIG_DIR, config.CACHE_DIR] + bytes_estimate = get_total_size() + + backup_will_be_created = risk_tier == RiskTier.DESTRUCTIVE + + return MaintenancePreview( + action_id=action_id, + risk_tier=risk_tier, + paths=paths, + description=description, + item_count=item_count, + bytes_estimate=bytes_estimate, + backup_will_be_created=backup_will_be_created, + parameters=kwargs, + ) diff --git a/src/scc_cli/maintenance/lock.py b/src/scc_cli/maintenance/lock.py new file mode 100644 index 0000000..4c6032b --- /dev/null +++ b/src/scc_cli/maintenance/lock.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +import os +from pathlib import Path +from typing import Any + +from scc_cli import config +from scc_cli.utils.locks import file_lock, lock_path + +LOCK_FILE_NAME = "maintenance.lock" + + +def _get_lock_path() -> Path: + """Get path to maintenance lock file.""" + return config.CONFIG_DIR / LOCK_FILE_NAME + + +def _is_process_running(pid: int) -> bool: + """Check if a process with the given PID is still running.""" + try: + os.kill(pid, 0) # Signal 0 doesn't kill, just checks existence + return True + except OSError: + return False + + +def _get_lock_info(lock_file: Path) -> tuple[int | None, bool]: + """Get lock file info: (PID, is_stale). + + Returns: + Tuple of (PID from lock file, whether the lock appears stale) + """ + try: + if not lock_file.exists(): + return None, False + content = lock_file.read_text().strip() + if not content: + return None, False + pid = int(content) + is_stale = not _is_process_running(pid) + return pid, is_stale + except (ValueError, OSError): + return None, False + + +class MaintenanceLockError(Exception): + """Raised when maintenance is already running in another process.""" + + def __init__(self, message: str, is_stale: bool = False, pid: int | None = None): + super().__init__(message) + self.is_stale = is_stale + self.pid = pid + + +class MaintenanceLock: + """Context manager for maintenance lock. + + Prevents concurrent maintenance operations from CLI and TUI. + Detects stale locks from crashed processes. + + Usage: + with MaintenanceLock(): + # perform maintenance + """ + + def __init__(self, force: bool = False) -> None: + self._lock_path = _get_lock_path() + self._lock_file: Any = None + self._force = force + + def __enter__(self) -> MaintenanceLock: + self._lock_path.parent.mkdir(parents=True, exist_ok=True) + + # Use the existing file_lock utility + lock_file = lock_path("maintenance") + + # Check for stale lock before attempting to acquire + pid, is_stale = _get_lock_info(lock_file) + + # If force is set and lock is stale, remove the lock file + if self._force and is_stale and lock_file.exists(): + try: + lock_file.unlink() + except OSError: + pass + + try: + self._lock_file = file_lock(lock_file) + self._lock_file.__enter__() + except Exception: + # Re-check stale status for error message + pid, is_stale = _get_lock_info(lock_file) + + if is_stale: + raise MaintenanceLockError( + f"Lock file exists from PID {pid} which is no longer running.\n" + "The lock appears stale. Use 'scc reset --force-unlock' to recover.", + is_stale=True, + pid=pid, + ) + raise MaintenanceLockError( + "Maintenance already running in another process. Close other SCC sessions first.", + is_stale=False, + pid=pid, + ) + return self + + def __exit__(self, *args: Any) -> None: + if self._lock_file: + self._lock_file.__exit__(*args) diff --git a/src/scc_cli/maintenance/migrations.py b/src/scc_cli/maintenance/migrations.py new file mode 100644 index 0000000..812c339 --- /dev/null +++ b/src/scc_cli/maintenance/migrations.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Literal + +from scc_cli import config +from scc_cli.stores.exception_store import RepoStore, UserStore + +from .backups import _create_backup +from .cache_cleanup import clear_cache, clear_contexts, prune_containers +from .repair_sessions import delete_all_sessions +from .types import ResetResult, RiskTier + + +def reset_exceptions( + scope: Literal["all", "user", "repo"] = "all", + repo_root: Path | None = None, + dry_run: bool = False, + create_backup: bool = True, +) -> ResetResult: + """Reset exception stores. + + Risk: Tier 2 (Destructive) - Removes policy exceptions. + + Args: + scope: Which stores to reset ("all", "user", "repo") + repo_root: Repo root for repo-scoped exceptions + dry_run: Preview only + create_backup: Create backup before deletion + """ + result = ResetResult( + success=True, + action_id="reset_exceptions", + risk_tier=RiskTier.DESTRUCTIVE, + message="Exceptions reset", + ) + + user_store = UserStore() + repo_store = RepoStore(repo_root) if repo_root else None + + stores_to_reset: list[tuple[str, UserStore | RepoStore]] = [] + if scope in ("all", "user"): + stores_to_reset.append(("user", user_store)) + if scope in ("all", "repo") and repo_store: + stores_to_reset.append(("repo", repo_store)) + + for _store_name, store in stores_to_reset: + result.paths.append(store.path) + if store.path.exists(): + result.removed_count += len(store.read().exceptions) + result.bytes_freed += store.path.stat().st_size if store.path.exists() else 0 + + if result.removed_count == 0: + result.message = "No exceptions to reset" + return result + + if dry_run: + result.message = f"Would reset {result.removed_count} exceptions" + return result + + if create_backup: + for _store_name, store in stores_to_reset: + if store.path.exists(): + backup = _create_backup(store.path) + if backup and result.backup_path is None: + result.backup_path = backup + + try: + for _store_name, store in stores_to_reset: + store.reset() + result.message = f"Reset {result.removed_count} exceptions" + except Exception as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to reset exceptions: {exc}" + + return result + + +def reset_config( + dry_run: bool = False, + create_backup: bool = True, +) -> ResetResult: + """Reset user configuration to defaults. + + Risk: Tier 2 (Destructive) - Requires running setup again. + """ + result = ResetResult( + success=True, + action_id="reset_config", + risk_tier=RiskTier.DESTRUCTIVE, + paths=[config.CONFIG_FILE], + message="Configuration reset", + next_steps=["Run 'scc setup' to reconfigure"], + ) + + if not config.CONFIG_FILE.exists(): + result.message = "No configuration to reset" + return result + + result.bytes_freed = config.CONFIG_FILE.stat().st_size + + if dry_run: + result.message = "Would reset configuration" + return result + + if create_backup: + result.backup_path = _create_backup(config.CONFIG_FILE) + + try: + config.CONFIG_FILE.unlink() + result.message = "Configuration reset" + except Exception as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to reset config: {exc}" + + return result + + +def factory_reset( + dry_run: bool = False, + create_backup: bool = True, + continue_on_error: bool = False, +) -> list[ResetResult]: + """Perform factory reset - remove all SCC data. + + Risk: Tier 3 (Factory Reset) - Complete clean slate. + + Order: Local files first (config, sessions, exceptions, contexts, cache), + containers last. This ensures Docker failures don't block local cleanup. + + Args: + dry_run: Preview only + create_backup: Create backups for Tier 2 operations + continue_on_error: Don't stop on first failure + + Returns: + List of ResetResult for each operation + """ + results: list[ResetResult] = [] + + operations = [ + ("reset_config", lambda: reset_config(dry_run=dry_run, create_backup=create_backup)), + ( + "delete_all_sessions", + lambda: delete_all_sessions(dry_run=dry_run, create_backup=create_backup), + ), + ( + "reset_exceptions", + lambda: reset_exceptions(dry_run=dry_run, create_backup=create_backup), + ), + ("clear_contexts", lambda: clear_contexts(dry_run=dry_run)), + ("clear_cache", lambda: clear_cache(dry_run=dry_run)), + ("prune_containers", lambda: prune_containers(dry_run=dry_run)), + ] + + for op_name, op_func in operations: + try: + result = op_func() + results.append(result) + + if not result.success and not continue_on_error: + break + except Exception as exc: + results.append( + ResetResult( + success=False, + action_id=op_name, + risk_tier=RiskTier.FACTORY_RESET, + error=str(exc), + message=f"Failed: {exc}", + ) + ) + if not continue_on_error: + break + + return results diff --git a/src/scc_cli/maintenance/repair_sessions.py b/src/scc_cli/maintenance/repair_sessions.py new file mode 100644 index 0000000..619ece2 --- /dev/null +++ b/src/scc_cli/maintenance/repair_sessions.py @@ -0,0 +1,154 @@ +from __future__ import annotations + +from datetime import datetime, timedelta, timezone +from typing import Any + +from scc_cli import config, sessions +from scc_cli.utils.locks import file_lock, lock_path + +from .backups import _create_backup +from .health_checks import _get_size +from .types import ResetResult, RiskTier + + +def prune_sessions( + older_than_days: int = 30, + keep_n: int = 20, + team: str | None = None, + dry_run: bool = False, +) -> ResetResult: + """Prune old sessions while keeping recent ones. + + Risk: Tier 1 (Changes State) - Safe prune with defaults. + + Args: + older_than_days: Remove sessions older than this (default: 30) + keep_n: Keep at least this many recent sessions per team (default: 20) + team: Only prune sessions for this team (None = all) + dry_run: Preview only, don't actually delete + """ + result = ResetResult( + success=True, + action_id="prune_sessions", + risk_tier=RiskTier.CHANGES_STATE, + paths=[config.SESSIONS_FILE], + message=f"Pruned sessions older than {older_than_days}d (kept newest {keep_n} per team)", + ) + + try: + lock_file = lock_path("sessions") + with file_lock(lock_file): + all_sessions = sessions._load_sessions() + original_count = len(all_sessions) + + if original_count == 0: + result.message = "No sessions to prune" + return result + + cutoff = datetime.now(timezone.utc) - timedelta(days=older_than_days) + + by_team: dict[str | None, list[dict[str, Any]]] = {} + for session in all_sessions: + session_team = session.get("team") + if team is not None and session_team != team: + by_team.setdefault(session_team, []).append(session) + else: + by_team.setdefault(session_team, []).append(session) + + kept_sessions: list[dict[str, Any]] = [] + for _team, team_sessions in by_team.items(): + team_sessions.sort(key=lambda s: s.get("last_used", ""), reverse=True) + + kept = team_sessions[:keep_n] + remaining = team_sessions[keep_n:] + + for session in remaining: + last_used = session.get("last_used", "") + if last_used: + try: + dt = datetime.fromisoformat(last_used.replace("Z", "+00:00")) + if dt > cutoff: + kept.append(session) + except (ValueError, TypeError): + pass + + kept_sessions.extend(kept) + + result.removed_count = original_count - len(kept_sessions) + + if result.removed_count == 0: + result.message = "No sessions to prune" + return result + + if dry_run: + result.message = ( + f"Would prune {result.removed_count} sessions older than {older_than_days}d " + f"(kept newest {keep_n} per team)" + ) + return result + + result.bytes_freed = _get_size(config.SESSIONS_FILE) + + sessions._save_sessions(kept_sessions) + + new_size = _get_size(config.SESSIONS_FILE) + result.bytes_freed = result.bytes_freed - new_size + + result.message = ( + f"Pruned {result.removed_count} sessions older than {older_than_days}d " + f"(kept newest {keep_n} per team)" + ) + + except Exception as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to prune sessions: {exc}" + + return result + + +def delete_all_sessions( + dry_run: bool = False, + create_backup: bool = True, +) -> ResetResult: + """Delete entire sessions store. + + Risk: Tier 2 (Destructive) - Removes all session history. + """ + result = ResetResult( + success=True, + action_id="delete_all_sessions", + risk_tier=RiskTier.DESTRUCTIVE, + paths=[config.SESSIONS_FILE], + message="All sessions deleted", + next_steps=["Your session history is now empty. New sessions will appear as you work."], + ) + + if not config.SESSIONS_FILE.exists(): + result.message = "No sessions to delete" + return result + + try: + all_sessions = sessions._load_sessions() + result.removed_count = len(all_sessions) + except Exception: + result.removed_count = 0 + + result.bytes_freed = _get_size(config.SESSIONS_FILE) + + if dry_run: + result.message = f"Would delete {result.removed_count} sessions" + return result + + if create_backup: + result.backup_path = _create_backup(config.SESSIONS_FILE) + + try: + sessions.clear_history() + result.message = f"Deleted {result.removed_count} sessions" + except Exception as exc: + result.success = False + result.error = str(exc) + result.message = f"Failed to delete sessions: {exc}" + + return result diff --git a/src/scc_cli/maintenance/tasks.py b/src/scc_cli/maintenance/tasks.py new file mode 100644 index 0000000..881f18c --- /dev/null +++ b/src/scc_cli/maintenance/tasks.py @@ -0,0 +1,154 @@ +from __future__ import annotations + +from collections.abc import Callable +from dataclasses import dataclass +from pathlib import Path +from typing import Literal, TypeAlias + +from .cache_cleanup import ( + cleanup_expired_exceptions, + clear_cache, + clear_contexts, + prune_containers, +) +from .migrations import factory_reset, reset_config, reset_exceptions +from .repair_sessions import delete_all_sessions, prune_sessions +from .types import MaintenancePreview, ResetResult, RiskTier + +MaintenanceTaskResult: TypeAlias = ResetResult | list[ResetResult] +PreconditionResult: TypeAlias = tuple[bool, str | None] +Precondition: TypeAlias = Callable[["MaintenanceTaskContext"], PreconditionResult] + + +@dataclass(frozen=True) +class MaintenanceTaskContext: + """Parameters for running maintenance tasks.""" + + dry_run: bool = False + create_backup: bool = True + continue_on_error: bool = False + exception_scope: Literal["all", "user", "repo"] = "all" + repo_root: Path | None = None + older_than_days: int = 30 + keep_n: int = 20 + team: str | None = None + + +@dataclass(frozen=True) +class MaintenanceTask: + """Maintenance task descriptor used for registry lookups.""" + + id: str + label: str + description: str + risk_tier: RiskTier + run: Callable[[MaintenanceTaskContext], MaintenanceTaskResult] + preview: Callable[[MaintenanceTaskContext], MaintenancePreview] | None = None + preconditions: tuple[Precondition, ...] = () + + +TASKS: tuple[MaintenanceTask, ...] = ( + MaintenanceTask( + id="clear_cache", + label="Clear cache", + description="Clear regenerable cache files", + risk_tier=RiskTier.SAFE, + run=lambda ctx: clear_cache(dry_run=ctx.dry_run), + ), + MaintenanceTask( + id="cleanup_expired_exceptions", + label="Cleanup expired exceptions", + description="Remove only expired exceptions", + risk_tier=RiskTier.SAFE, + run=lambda ctx: cleanup_expired_exceptions(dry_run=ctx.dry_run), + ), + MaintenanceTask( + id="clear_contexts", + label="Clear contexts", + description="Clear recent work contexts", + risk_tier=RiskTier.CHANGES_STATE, + run=lambda ctx: clear_contexts(dry_run=ctx.dry_run), + ), + MaintenanceTask( + id="prune_containers", + label="Prune containers", + description="Remove stopped Docker containers", + risk_tier=RiskTier.CHANGES_STATE, + run=lambda ctx: prune_containers(dry_run=ctx.dry_run), + ), + MaintenanceTask( + id="prune_sessions", + label="Prune sessions (30d, keep 20)", + description="Prune old sessions (keeps recent)", + risk_tier=RiskTier.CHANGES_STATE, + run=lambda ctx: prune_sessions( + older_than_days=ctx.older_than_days, + keep_n=ctx.keep_n, + team=ctx.team, + dry_run=ctx.dry_run, + ), + ), + MaintenanceTask( + id="reset_exceptions", + label="Reset all exceptions", + description="Clear all policy exceptions", + risk_tier=RiskTier.DESTRUCTIVE, + run=lambda ctx: reset_exceptions( + scope=ctx.exception_scope, + repo_root=ctx.repo_root, + dry_run=ctx.dry_run, + create_backup=ctx.create_backup, + ), + ), + MaintenanceTask( + id="delete_all_sessions", + label="Delete all sessions", + description="Delete entire session history", + risk_tier=RiskTier.DESTRUCTIVE, + run=lambda ctx: delete_all_sessions( + dry_run=ctx.dry_run, + create_backup=ctx.create_backup, + ), + ), + MaintenanceTask( + id="reset_config", + label="Reset configuration", + description="Reset configuration (requires setup)", + risk_tier=RiskTier.DESTRUCTIVE, + run=lambda ctx: reset_config( + dry_run=ctx.dry_run, + create_backup=ctx.create_backup, + ), + ), + MaintenanceTask( + id="factory_reset", + label="Factory reset (everything)", + description="Remove all SCC data", + risk_tier=RiskTier.FACTORY_RESET, + run=lambda ctx: factory_reset( + dry_run=ctx.dry_run, + create_backup=ctx.create_backup, + continue_on_error=ctx.continue_on_error, + ), + ), +) + +_TASK_REGISTRY = {task.id: task for task in TASKS} + + +def list_tasks() -> tuple[MaintenanceTask, ...]: + """Return all registered maintenance tasks in display order.""" + return TASKS + + +def get_task(action_id: str) -> MaintenanceTask | None: + """Return a maintenance task by id.""" + return _TASK_REGISTRY.get(action_id) + + +def run_task(action_id: str, context: MaintenanceTaskContext) -> MaintenanceTaskResult: + """Run a maintenance task by id.""" + task = get_task(action_id) + if task is None: + raise ValueError(f"Unknown maintenance task: {action_id}") + return task.run(context) diff --git a/src/scc_cli/maintenance/types.py b/src/scc_cli/maintenance/types.py new file mode 100644 index 0000000..11af6b7 --- /dev/null +++ b/src/scc_cli/maintenance/types.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from enum import Enum +from pathlib import Path +from typing import Any + + +class RiskTier(Enum): + """Risk level for maintenance operations. + + Tier 0: Safe - no confirmation needed + Tier 1: Changes State - Y/N confirmation + Tier 2: Destructive - Y/N + impact list + Tier 3: Factory Reset - type-to-confirm + """ + + SAFE = 0 + CHANGES_STATE = 1 + DESTRUCTIVE = 2 + FACTORY_RESET = 3 + + +@dataclass +class PathInfo: + """Information about a configuration path. + + Attributes: + name: Human-readable name (e.g., "Config", "Sessions") + path: Absolute path to file or directory + exists: Whether the path exists + size_bytes: Size in bytes (0 if doesn't exist) + permissions: Permission string ("rw", "r-", "--") + """ + + name: str + path: Path + exists: bool + size_bytes: int + permissions: str + + @property + def size_human(self) -> str: + """Human-readable size (e.g., '2.1 KB').""" + if self.size_bytes == 0: + return "0 B" + size: float = float(self.size_bytes) + for unit in ["B", "KB", "MB", "GB"]: + if size < 1024: + return f"{size:.1f} {unit}" if size >= 10 else f"{int(size)} {unit}" + size = size / 1024 + return f"{size:.1f} TB" + + +@dataclass +class ResetResult: + """Result of a reset operation. + + All UI should render from these values, never hardcode paths. + """ + + success: bool + action_id: str + risk_tier: RiskTier + paths: list[Path] = field(default_factory=list) + removed_count: int = 0 + bytes_freed: int = 0 + backup_path: Path | None = None + message: str = "" + next_steps: list[str] = field(default_factory=list) + error: str | None = None + + @property + def bytes_freed_human(self) -> str: + """Human-readable bytes freed.""" + if self.bytes_freed == 0: + return "0 B" + size: float = self.bytes_freed + for unit in ["B", "KB", "MB", "GB"]: + if size < 1024: + return f"{size:.1f} {unit}" if size >= 10 else f"{int(size)} {unit}" + size = size / 1024 + return f"{size:.1f} TB" + + +@dataclass +class MaintenancePreview: + """Preview of what a maintenance operation would do. + + Used for --plan flag and [P]review button. + """ + + action_id: str + risk_tier: RiskTier + paths: list[Path] + description: str + item_count: int = 0 + bytes_estimate: int = 0 + backup_will_be_created: bool = False + parameters: dict[str, Any] = field(default_factory=dict) diff --git a/src/scc_cli/marketplace/managed.py b/src/scc_cli/marketplace/managed.py index 4b8457e..7d84295 100644 --- a/src/scc_cli/marketplace/managed.py +++ b/src/scc_cli/marketplace/managed.py @@ -22,6 +22,7 @@ from typing import Any from scc_cli.marketplace.constants import MANAGED_STATE_FILE +from scc_cli.ports.filesystem import Filesystem @dataclass @@ -83,29 +84,46 @@ def from_dict(cls, data: dict[str, Any]) -> ManagedState: ) -def load_managed_state(project_dir: Path) -> ManagedState: +def load_managed_state( + project_dir: Path, + filesystem: Filesystem | None = None, +) -> ManagedState: """Load managed state from .scc-managed.json. Args: project_dir: Project root directory + filesystem: Optional filesystem port for IO Returns: ManagedState with tracking data, or empty state if file doesn't exist """ managed_path = project_dir / ".claude" / MANAGED_STATE_FILE - if not managed_path.exists(): + if filesystem is None: + if not managed_path.exists(): + return ManagedState() + + try: + return ManagedState.from_dict(json.loads(managed_path.read_text())) + except json.JSONDecodeError: + # Corrupted file - return empty state + return ManagedState() + + if not filesystem.exists(managed_path): return ManagedState() try: - data: dict[str, Any] = json.loads(managed_path.read_text()) - return ManagedState.from_dict(data) + return ManagedState.from_dict(json.loads(filesystem.read_text(managed_path))) except json.JSONDecodeError: # Corrupted file - return empty state return ManagedState() -def save_managed_state(project_dir: Path, state: ManagedState) -> None: +def save_managed_state( + project_dir: Path, + state: ManagedState, + filesystem: Filesystem | None = None, +) -> None: """Save managed state to .scc-managed.json. Creates .claude directory if it doesn't exist. @@ -113,23 +131,35 @@ def save_managed_state(project_dir: Path, state: ManagedState) -> None: Args: project_dir: Project root directory state: ManagedState to persist + filesystem: Optional filesystem port for IO """ claude_dir = project_dir / ".claude" - claude_dir.mkdir(parents=True, exist_ok=True) - managed_path = claude_dir / MANAGED_STATE_FILE - managed_path.write_text(json.dumps(state.to_dict(), indent=2)) + if filesystem is None: + claude_dir.mkdir(parents=True, exist_ok=True) + managed_path.write_text(json.dumps(state.to_dict(), indent=2)) + return -def clear_managed_state(project_dir: Path) -> None: + filesystem.mkdir(claude_dir, parents=True, exist_ok=True) + filesystem.write_text(managed_path, json.dumps(state.to_dict(), indent=2)) + + +def clear_managed_state(project_dir: Path, filesystem: Filesystem | None = None) -> None: """Remove managed state file. Used for reset operations. Preserves .claude directory and other files. Args: project_dir: Project root directory + filesystem: Optional filesystem port for IO """ managed_path = project_dir / ".claude" / MANAGED_STATE_FILE - if managed_path.exists(): - managed_path.unlink() + if filesystem is None: + if managed_path.exists(): + managed_path.unlink() + return + + if filesystem.exists(managed_path): + filesystem.unlink(managed_path) diff --git a/src/scc_cli/marketplace/materialize.py b/src/scc_cli/marketplace/materialize.py index 60f5976..92fb084 100644 --- a/src/scc_cli/marketplace/materialize.py +++ b/src/scc_cli/marketplace/materialize.py @@ -38,6 +38,7 @@ MarketplaceSourceGitHub, MarketplaceSourceURL, ) +from scc_cli.ports.remote_fetcher import RemoteFetcher # ───────────────────────────────────────────────────────────────────────────── # Exceptions @@ -396,6 +397,7 @@ def download_and_extract( target_dir: Path, headers: dict[str, str] | None = None, fallback_name: str = "", + fetcher: RemoteFetcher | None = None, ) -> DownloadResult: """Download and extract marketplace from URL. @@ -404,6 +406,7 @@ def download_and_extract( target_dir: Directory to extract into headers: Optional HTTP headers fallback_name: Fallback name if marketplace.json doesn't specify one + fetcher: Optional RemoteFetcher for HTTP downloads Returns: DownloadResult with success status, ETag, and canonical name @@ -411,90 +414,97 @@ def download_and_extract( import tarfile import tempfile - import requests + remote_fetcher = fetcher + if remote_fetcher is None: + from scc_cli.bootstrap import get_default_adapters + + remote_fetcher = get_default_adapters().remote_fetcher try: - # Download archive - response = requests.get(url, headers=headers, timeout=60) - response.raise_for_status() - - etag = response.headers.get("ETag") - - # Save to temp file - with tempfile.NamedTemporaryFile(delete=False, suffix=".tar.gz") as tmp: - tmp.write(response.content) - tmp_path = Path(tmp.name) - - try: - # Clean target directory if exists - if target_dir.exists(): - shutil.rmtree(target_dir) - target_dir.mkdir(parents=True) - - # Extract archive (path-safe) - with tarfile.open(tmp_path, "r:*") as tar: - safe_members: list[tarfile.TarInfo] = [] - for member in tar.getmembers(): - member_path = PurePosixPath(member.name) - windows_member_path = PureWindowsPath(member.name) - if member_path.is_absolute() or windows_member_path.is_absolute(): - return DownloadResult( - success=False, - error=f"Unsafe archive member (absolute path): {member.name}", - ) - if ".." in member_path.parts or ".." in windows_member_path.parts: - return DownloadResult( - success=False, - error=f"Unsafe archive member (path traversal): {member.name}", - ) - if "" in member_path.parts or "" in windows_member_path.parts: - return DownloadResult( - success=False, - error=f"Unsafe archive member (empty path segment): {member.name}", - ) - if "\\" in member.name or windows_member_path.drive: - return DownloadResult( - success=False, - error=f"Unsafe archive member (windows path): {member.name}", - ) - if ( - member.islnk() - or member.issym() - or member.ischr() - or member.isblk() - or member.isfifo() - ): - return DownloadResult( - success=False, - error=f"Unsafe archive member (link/device): {member.name}", - ) - safe_members.append(member) + response = remote_fetcher.get(url, headers=headers, timeout=60) + except Exception as exc: + return DownloadResult( + success=False, + error=str(exc), + ) + + if response.status_code != 200: + return DownloadResult( + success=False, + error=f"HTTP {response.status_code}: Failed to download marketplace", + ) + + etag = response.headers.get("ETag") - tar.extractall(target_dir, members=safe_members) + # Save to temp file + with tempfile.NamedTemporaryFile(delete=False, suffix=".tar.gz") as tmp: + tmp.write(response.content) + tmp_path = Path(tmp.name) - # Discover plugins and canonical name - discovery = _discover_plugins(target_dir, fallback_name=fallback_name) + try: + # Clean target directory if exists + if target_dir.exists(): + shutil.rmtree(target_dir) + target_dir.mkdir(parents=True) + + # Extract archive (path-safe) + with tarfile.open(tmp_path, "r:*") as tar: + safe_members: list[tarfile.TarInfo] = [] + for member in tar.getmembers(): + member_path = PurePosixPath(member.name) + windows_member_path = PureWindowsPath(member.name) + if member_path.is_absolute() or windows_member_path.is_absolute(): + return DownloadResult( + success=False, + error=f"Unsafe archive member (absolute path): {member.name}", + ) + if ".." in member_path.parts or ".." in windows_member_path.parts: + return DownloadResult( + success=False, + error=f"Unsafe archive member (path traversal): {member.name}", + ) + if "" in member_path.parts or "" in windows_member_path.parts: + return DownloadResult( + success=False, + error=f"Unsafe archive member (empty path segment): {member.name}", + ) + if "\\" in member.name or windows_member_path.drive: + return DownloadResult( + success=False, + error=f"Unsafe archive member (windows path): {member.name}", + ) + if ( + member.islnk() + or member.issym() + or member.ischr() + or member.isblk() + or member.isfifo() + ): + return DownloadResult( + success=False, + error=f"Unsafe archive member (link/device): {member.name}", + ) + safe_members.append(member) + + tar.extractall(target_dir, members=safe_members) - if discovery is None: - return DownloadResult( - success=False, - error="Missing .claude-plugin/marketplace.json", - ) + # Discover plugins and canonical name + discovery = _discover_plugins(target_dir, fallback_name=fallback_name) + if discovery is None: return DownloadResult( - success=True, - etag=etag, - plugins=discovery.plugins, - canonical_name=discovery.canonical_name, + success=False, + error="Missing .claude-plugin/marketplace.json", ) - finally: - tmp_path.unlink(missing_ok=True) - except requests.RequestException as e: return DownloadResult( - success=False, - error=str(e), + success=True, + etag=etag, + plugins=discovery.plugins, + canonical_name=discovery.canonical_name, ) + finally: + tmp_path.unlink(missing_ok=True) # ───────────────────────────────────────────────────────────────────────────── @@ -705,6 +715,7 @@ def materialize_url( name: str, source: dict[str, Any] | MarketplaceSourceURL, project_dir: Path, + fetcher: RemoteFetcher | None = None, ) -> MaterializedMarketplace: """Materialize a URL marketplace source. @@ -712,6 +723,7 @@ def materialize_url( name: Marketplace name (key in org config) - the "alias" source: URL source configuration project_dir: Project root directory + fetcher: Optional RemoteFetcher for URL downloads Returns: MaterializedMarketplace with materialization details including canonical_name @@ -742,7 +754,13 @@ def materialize_url( headers = {k: os.path.expandvars(v) for k, v in headers.items()} # Pass name as fallback in case marketplace.json doesn't specify one - result = download_and_extract(url, target_dir, headers=headers, fallback_name=name) + result = download_and_extract( + url, + target_dir, + headers=headers, + fallback_name=name, + fetcher=fetcher, + ) if not result.success: raise MaterializationError(result.error or "Download failed", name) @@ -775,6 +793,7 @@ def materialize_marketplace( source: MarketplaceSource, project_dir: Path, force_refresh: bool = False, + fetcher: RemoteFetcher | None = None, ) -> MaterializedMarketplace: """Materialize a marketplace source to local filesystem. @@ -786,6 +805,7 @@ def materialize_marketplace( source: Marketplace source configuration (discriminated union) project_dir: Project root directory force_refresh: Skip cache freshness check + fetcher: Optional RemoteFetcher for URL sources Returns: MaterializedMarketplace with materialization details @@ -834,7 +854,7 @@ def materialize_marketplace( elif isinstance(source, MarketplaceSourceDirectory): result = materialize_directory(name, source, project_dir) elif isinstance(source, MarketplaceSourceURL): - result = materialize_url(name, source, project_dir) + result = materialize_url(name, source, project_dir, fetcher=fetcher) else: raise MaterializationError(f"Unknown source type: {source.source}", name) diff --git a/src/scc_cli/marketplace/render.py b/src/scc_cli/marketplace/render.py index bcdf1eb..eda47d4 100644 --- a/src/scc_cli/marketplace/render.py +++ b/src/scc_cli/marketplace/render.py @@ -15,9 +15,10 @@ import json from pathlib import Path -from typing import Any +from typing import Any, cast from scc_cli.marketplace.constants import MANAGED_STATE_FILE +from scc_cli.ports.filesystem import Filesystem # ───────────────────────────────────────────────────────────────────────────── # Render Settings @@ -128,33 +129,50 @@ def render_settings( # ───────────────────────────────────────────────────────────────────────────── -def _load_settings(project_dir: Path) -> dict[str, Any]: +def _load_settings(project_dir: Path, filesystem: Filesystem | None = None) -> dict[str, Any]: """Load existing settings.local.json if it exists.""" settings_path = project_dir / ".claude" / "settings.local.json" - if settings_path.exists(): - try: - result: dict[str, Any] = json.loads(settings_path.read_text()) - return result - except json.JSONDecodeError: - return {} - return {} + if filesystem is None: + if settings_path.exists(): + try: + return cast(dict[str, Any], json.loads(settings_path.read_text())) + except json.JSONDecodeError: + return {} + return {} + if not filesystem.exists(settings_path): + return {} -def _load_managed_state(project_dir: Path) -> dict[str, Any]: + try: + return cast(dict[str, Any], json.loads(filesystem.read_text(settings_path))) + except json.JSONDecodeError: + return {} + + +def _load_managed_state(project_dir: Path, filesystem: Filesystem | None = None) -> dict[str, Any]: """Load the SCC managed state tracking file.""" managed_path = project_dir / ".claude" / MANAGED_STATE_FILE - if managed_path.exists(): - try: - result: dict[str, Any] = json.loads(managed_path.read_text()) - return result - except json.JSONDecodeError: - return {} - return {} + if filesystem is None: + if managed_path.exists(): + try: + return cast(dict[str, Any], json.loads(managed_path.read_text())) + except json.JSONDecodeError: + return {} + return {} + + if not filesystem.exists(managed_path): + return {} + + try: + return cast(dict[str, Any], json.loads(filesystem.read_text(managed_path))) + except json.JSONDecodeError: + return {} def merge_settings( project_dir: Path, new_settings: dict[str, Any], + filesystem: Filesystem | None = None, ) -> dict[str, Any]: """Non-destructively merge new settings with existing user settings. @@ -177,8 +195,8 @@ def merge_settings( Returns: Merged settings dict ready to write to settings.local.json """ - existing = _load_settings(project_dir) - managed = _load_managed_state(project_dir) + existing = _load_settings(project_dir, filesystem) + managed = _load_managed_state(project_dir, filesystem) # Get what was previously managed by SCC managed_plugins = set(managed.get("managed_plugins", [])) diff --git a/src/scc_cli/marketplace/sync.py b/src/scc_cli/marketplace/sync.py index 17d4b37..c014127 100644 --- a/src/scc_cli/marketplace/sync.py +++ b/src/scc_cli/marketplace/sync.py @@ -1,62 +1,21 @@ -""" -Marketplace sync orchestration for Claude Code integration. - -This module provides the high-level sync_marketplace_settings() function that -orchestrates the full pipeline: -1. Parse org config -2. Compute effective plugins for team -3. Materialize required marketplaces -4. Render settings to Claude format -5. Merge with existing user settings (non-destructive) -6. Save managed state tracking -7. Write settings.local.json - -This is the main entry point for integrating marketplace functionality -into the start command. -""" +"""Marketplace sync adapter wrapper for Claude Code integration.""" from __future__ import annotations -import json -from datetime import datetime, timezone from pathlib import Path from typing import Any -from scc_cli.marketplace.managed import ManagedState, save_managed_state -from scc_cli.marketplace.materialize import MaterializationError, materialize_marketplace -from scc_cli.marketplace.normalize import matches_pattern -from scc_cli.marketplace.render import check_conflicts, merge_settings, render_settings +from scc_cli.application.sync_marketplace import ( + SyncError, + SyncMarketplaceDependencies, + SyncResult, +) +from scc_cli.application.sync_marketplace import ( + sync_marketplace_settings as sync_marketplace_use_case, +) +from scc_cli.bootstrap import get_default_adapters +from scc_cli.marketplace.materialize import materialize_marketplace from scc_cli.marketplace.resolve import resolve_effective_config -from scc_cli.marketplace.schema import OrganizationConfig, normalize_org_config_data - - -class SyncError(Exception): - """Error during marketplace sync operation.""" - - def __init__(self, message: str, details: dict[str, Any] | None = None) -> None: - self.details = details or {} - super().__init__(message) - - -class SyncResult: - """Result of a marketplace sync operation.""" - - def __init__( - self, - success: bool, - plugins_enabled: list[str] | None = None, - marketplaces_materialized: list[str] | None = None, - warnings: list[str] | None = None, - settings_path: Path | None = None, - rendered_settings: dict[str, Any] | None = None, - ) -> None: - self.success = success - self.plugins_enabled = plugins_enabled or [] - self.marketplaces_materialized = marketplaces_materialized or [] - self.warnings = warnings or [] - self.settings_path = settings_path - # Computed settings dict for container injection (when write_to_workspace=False) - self.rendered_settings = rendered_settings def sync_marketplace_settings( @@ -71,213 +30,29 @@ def sync_marketplace_settings( ) -> SyncResult: """Sync marketplace settings for a project. - Orchestrates the full pipeline: - 1. Parse and validate org config - 2. Compute effective plugins for team - 3. Materialize required marketplaces - 4. Render settings to Claude format - 5. Merge with existing user settings (non-destructive) - 6. Save managed state tracking - 7. Write settings.local.json (unless dry_run or write_to_workspace=False) - - Args: - project_dir: Project root directory - org_config_data: Parsed org config dictionary - team_id: Team profile ID (uses defaults if None) - org_config_url: URL where org config was fetched (for tracking) - force_refresh: Force re-materialization of marketplaces - dry_run: If True, compute but don't write files - write_to_workspace: If False, skip writing settings.local.json - and instead return rendered_settings for container injection. - This prevents host Claude from seeing container-only plugins. - container_path_prefix: Path prefix for marketplace paths in container. - When set (e.g., "/workspace"), paths become absolute container paths - like "/workspace/.claude/.scc-marketplaces/...". Required when - write_to_workspace=False since settings will be in container HOME. - - Returns: - SyncResult with success status and details. When write_to_workspace=False, - rendered_settings contains the computed settings for container injection. - - Raises: - SyncError: On validation or processing errors - TeamNotFoundError: If team_id not found in config + This wrapper builds default dependencies and delegates to the application + use case. See scc_cli.application.sync_marketplace for full behavior. """ - warnings: list[str] = [] - - # ── Step 1: Parse org config ───────────────────────────────────────────── - # Org config is already validated by JSON Schema before caching. - try: - org_config = OrganizationConfig.model_validate(normalize_org_config_data(org_config_data)) - except Exception as e: - raise SyncError(f"Invalid org config: {e}") from e - - # ── Step 2: Resolve effective config (federation-aware) ──────────────────── - if team_id is None: - raise SyncError("team_id is required for marketplace sync") - - # Use resolve_effective_config for federation support - # This handles both inline and federated teams uniformly - effective_config = resolve_effective_config(org_config, team_id=team_id) - - # Check for blocked plugins that user has installed - # First, check if org-enabled plugins were blocked - if effective_config.blocked_plugins: - existing = _load_existing_plugins(project_dir) - conflict_warnings = check_conflicts( - existing_plugins=existing, - blocked_plugins=[ - {"plugin_id": b.plugin_id, "reason": b.reason, "pattern": b.pattern} - for b in effective_config.blocked_plugins - ], - ) - warnings.extend(conflict_warnings) - - # Also check user's existing plugins against security.blocked_plugins patterns - security = org_config.security - if security.blocked_plugins: - existing = _load_existing_plugins(project_dir) - for plugin in existing: - for pattern in security.blocked_plugins: - if matches_pattern(plugin, pattern): - warnings.append( - f"⚠️ Plugin '{plugin}' is blocked by organization policy " - f"(matched pattern: {pattern})" - ) - break # Only one warning per plugin - - # ── Step 3: Materialize required marketplaces ──────────────────────────── - materialized: dict[str, Any] = {} - marketplaces_used = set() - - # Determine which marketplaces are needed - for plugin_ref in effective_config.enabled_plugins: - if "@" in plugin_ref: - marketplace_name = plugin_ref.split("@")[1] - marketplaces_used.add(marketplace_name) - - # Also include any extra marketplaces from the effective result - for marketplace_name in effective_config.extra_marketplaces: - marketplaces_used.add(marketplace_name) - - # Materialize each marketplace - for marketplace_name in marketplaces_used: - # Skip implicit marketplaces (claude-plugins-official) - from scc_cli.marketplace.constants import IMPLICIT_MARKETPLACES - - if marketplace_name in IMPLICIT_MARKETPLACES: - continue - - # Find source configuration from effective marketplaces (includes team sources for federated) - source = effective_config.marketplaces.get(marketplace_name) - if source is None: - warnings.append(f"Marketplace '{marketplace_name}' not defined in effective config") - continue - - try: - result = materialize_marketplace( - name=marketplace_name, - source=source, - project_dir=project_dir, - force_refresh=force_refresh, - ) - materialized[marketplace_name] = { - "relative_path": result.relative_path, - "source_type": result.source_type, - "canonical_name": result.canonical_name, # Critical for alias → canonical translation - } - except MaterializationError as e: - warnings.append(f"Failed to materialize '{marketplace_name}': {e}") - - # ── Step 3b: Check for canonical name collisions ──────────────────────── - # Multiple aliases resolving to the same canonical name is a configuration error - canonical_to_aliases: dict[str, list[str]] = {} - for alias_name, data in materialized.items(): - canonical = data.get("canonical_name", alias_name) - if canonical not in canonical_to_aliases: - canonical_to_aliases[canonical] = [] - canonical_to_aliases[canonical].append(alias_name) - - for canonical, aliases in canonical_to_aliases.items(): - if len(aliases) > 1: - raise SyncError( - f"Canonical name collision: marketplace.json name '{canonical}' " - f"is used by multiple org config entries: {', '.join(aliases)}. " - f"Each marketplace must have a unique canonical name.", - details={"canonical_name": canonical, "conflicting_aliases": aliases}, - ) - - # ── Step 4: Render settings ────────────────────────────────────────────── - effective_dict = { - "enabled": effective_config.enabled_plugins, - "extra_marketplaces": effective_config.extra_marketplaces, - } - # Pass path_prefix for container-only mode (absolute paths in container HOME) - rendered = render_settings(effective_dict, materialized, path_prefix=container_path_prefix) - - # ── Step 5: Merge with existing settings (only if writing to workspace) ─── - # When write_to_workspace=False, we skip merging because settings go to - # container HOME, not the workspace settings.local.json - if write_to_workspace: - merged = merge_settings(project_dir, rendered) - else: - # For container-only mode, use rendered settings directly - # (no merging with workspace settings since we're not writing there) - merged = rendered - - # ── Step 6: Prepare managed state ──────────────────────────────────────── - managed_state = ManagedState( - managed_plugins=list(effective_config.enabled_plugins), - managed_marketplaces=[m.get("relative_path", "") for m in materialized.values()], - last_sync=datetime.now(timezone.utc), - org_config_url=org_config_url, - team_id=team_id, + adapters = get_default_adapters() + dependencies = SyncMarketplaceDependencies( + filesystem=adapters.filesystem, + remote_fetcher=adapters.remote_fetcher, + clock=adapters.clock, + resolve_effective_config=resolve_effective_config, + materialize_marketplace=materialize_marketplace, ) - # ── Step 7: Write files (unless dry_run or write_to_workspace=False) ───── - settings_path = project_dir / ".claude" / "settings.local.json" - - if not dry_run and write_to_workspace: - # Ensure .claude directory exists - claude_dir = project_dir / ".claude" - claude_dir.mkdir(parents=True, exist_ok=True) - - # Write settings to workspace (host-visible) - settings_path.write_text(json.dumps(merged, indent=2)) - - # Save managed state - save_managed_state(project_dir, managed_state) - elif not dry_run and not write_to_workspace: - # Container-only mode: ensure .claude dir exists for marketplaces - # (marketplaces are still materialized to workspace for bind-mount access) - claude_dir = project_dir / ".claude" - claude_dir.mkdir(parents=True, exist_ok=True) - - # Save managed state (for future cleanup) - save_managed_state(project_dir, managed_state) - - return SyncResult( - success=True, - plugins_enabled=list(effective_config.enabled_plugins), - marketplaces_materialized=list(materialized.keys()), - warnings=warnings, - settings_path=settings_path if (not dry_run and write_to_workspace) else None, - # Return rendered settings for container injection when not writing to workspace - rendered_settings=merged if not write_to_workspace else None, + return sync_marketplace_use_case( + project_dir=project_dir, + org_config_data=org_config_data, + team_id=team_id, + org_config_url=org_config_url, + force_refresh=force_refresh, + dry_run=dry_run, + write_to_workspace=write_to_workspace, + container_path_prefix=container_path_prefix, + dependencies=dependencies, ) -def _load_existing_plugins(project_dir: Path) -> list[str]: - """Load existing plugins from settings.local.json.""" - settings_path = project_dir / ".claude" / "settings.local.json" - if not settings_path.exists(): - return [] - - try: - data: dict[str, Any] = json.loads(settings_path.read_text()) - plugins = data.get("enabledPlugins", []) - if isinstance(plugins, list): - return [str(p) for p in plugins] - return [] - except (json.JSONDecodeError, OSError): - return [] +__all__ = ["SyncError", "SyncResult", "sync_marketplace_settings"] diff --git a/src/scc_cli/ports/__init__.py b/src/scc_cli/ports/__init__.py new file mode 100644 index 0000000..1ff193d --- /dev/null +++ b/src/scc_cli/ports/__init__.py @@ -0,0 +1 @@ +"""Protocol interfaces for SCC application ports.""" diff --git a/src/scc_cli/ports/agent_runner.py b/src/scc_cli/ports/agent_runner.py new file mode 100644 index 0000000..c34cf61 --- /dev/null +++ b/src/scc_cli/ports/agent_runner.py @@ -0,0 +1,21 @@ +"""Agent runner port definition.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any, Protocol + +from scc_cli.ports.models import AgentCommand, AgentSettings + + +class AgentRunner(Protocol): + """Abstract agent runner operations.""" + + def build_settings(self, config: dict[str, Any], *, path: Path) -> AgentSettings: + """Render agent settings from a config payload.""" + + def build_command(self, settings: AgentSettings) -> AgentCommand: + """Build the command used to launch the agent.""" + + def describe(self) -> str: + """Return a human-readable description of the runner.""" diff --git a/src/scc_cli/ports/clock.py b/src/scc_cli/ports/clock.py new file mode 100644 index 0000000..c8516c7 --- /dev/null +++ b/src/scc_cli/ports/clock.py @@ -0,0 +1,13 @@ +"""Clock port definition.""" + +from __future__ import annotations + +from datetime import datetime +from typing import Protocol + + +class Clock(Protocol): + """Abstract clock for time retrieval.""" + + def now(self) -> datetime: + """Return the current time.""" diff --git a/src/scc_cli/ports/filesystem.py b/src/scc_cli/ports/filesystem.py new file mode 100644 index 0000000..33a6062 --- /dev/null +++ b/src/scc_cli/ports/filesystem.py @@ -0,0 +1,32 @@ +"""Filesystem port definition for SCC adapters.""" + +from __future__ import annotations + +from collections.abc import Iterable +from pathlib import Path +from typing import Protocol + + +class Filesystem(Protocol): + """Filesystem operations required by application use cases.""" + + def read_text(self, path: Path, *, encoding: str = "utf-8") -> str: + """Read text content from a file.""" + + def write_text(self, path: Path, content: str, *, encoding: str = "utf-8") -> None: + """Write text content to a file.""" + + def write_text_atomic(self, path: Path, content: str, *, encoding: str = "utf-8") -> None: + """Write text content atomically within the target directory.""" + + def exists(self, path: Path) -> bool: + """Return True if the path exists.""" + + def mkdir(self, path: Path, *, parents: bool = False, exist_ok: bool = False) -> None: + """Create a directory.""" + + def unlink(self, path: Path, *, missing_ok: bool = False) -> None: + """Remove a file.""" + + def iterdir(self, path: Path) -> Iterable[Path]: + """Iterate directory entries.""" diff --git a/src/scc_cli/ports/git_client.py b/src/scc_cli/ports/git_client.py new file mode 100644 index 0000000..e82906d --- /dev/null +++ b/src/scc_cli/ports/git_client.py @@ -0,0 +1,34 @@ +"""Git client port definition.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Protocol + + +class GitClient(Protocol): + """Abstract git operations used by application logic.""" + + def check_available(self) -> None: + """Ensure git is installed and available.""" + + def check_installed(self) -> bool: + """Return True if git is available.""" + + def get_version(self) -> str | None: + """Return the git version string.""" + + def is_git_repo(self, path: Path) -> bool: + """Return True if the path is within a git repository.""" + + def init_repo(self, path: Path) -> bool: + """Initialize a git repository.""" + + def create_empty_initial_commit(self, path: Path) -> tuple[bool, str | None]: + """Create an empty initial commit if needed.""" + + def detect_workspace_root(self, start_dir: Path) -> tuple[Path | None, Path]: + """Detect the git workspace root from a starting directory.""" + + def get_current_branch(self, path: Path) -> str | None: + """Return the current branch name.""" diff --git a/src/scc_cli/ports/models.py b/src/scc_cli/ports/models.py new file mode 100644 index 0000000..5048dd1 --- /dev/null +++ b/src/scc_cli/ports/models.py @@ -0,0 +1,80 @@ +"""Domain models used by port protocols.""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime +from enum import Enum +from pathlib import Path +from typing import Any + + +@dataclass(frozen=True) +class MountSpec: + """Describe a filesystem mount for a sandbox runtime.""" + + source: Path + target: Path + read_only: bool = False + + +@dataclass(frozen=True) +class SandboxSpec: + """Specification for launching a sandbox.""" + + image: str + workspace_mount: MountSpec + workdir: Path + env: dict[str, str] = field(default_factory=dict) + network_policy: str | None = None + user: str | None = None + group: str | None = None + extra_mounts: list[MountSpec] = field(default_factory=list) + continue_session: bool = False + force_new: bool = False + agent_settings: AgentSettings | None = None + org_config: dict[str, Any] | None = None + + +@dataclass(frozen=True) +class SandboxHandle: + """Opaque identifier for a sandbox session.""" + + sandbox_id: str + name: str | None = None + + +class SandboxState(str, Enum): + """Lifecycle state for a sandbox session.""" + + CREATED = "created" + RUNNING = "running" + STOPPED = "stopped" + UNKNOWN = "unknown" + + +@dataclass(frozen=True) +class SandboxStatus: + """Status for a sandbox session with timestamps.""" + + state: SandboxState + created_at: datetime | None = None + started_at: datetime | None = None + stopped_at: datetime | None = None + + +@dataclass(frozen=True) +class AgentCommand: + """Command specification for launching an agent.""" + + argv: list[str] + env: dict[str, str] = field(default_factory=dict) + workdir: Path | None = None + + +@dataclass(frozen=True) +class AgentSettings: + """Settings payload and target location for an agent.""" + + content: dict[str, Any] + path: Path diff --git a/src/scc_cli/ports/remote_fetcher.py b/src/scc_cli/ports/remote_fetcher.py new file mode 100644 index 0000000..eac54f0 --- /dev/null +++ b/src/scc_cli/ports/remote_fetcher.py @@ -0,0 +1,29 @@ +"""Remote fetcher port definition.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Protocol + + +@dataclass(frozen=True) +class RemoteResponse: + """Normalized response data for remote fetch operations.""" + + status_code: int + text: str + content: bytes + headers: dict[str, str] + + +class RemoteFetcher(Protocol): + """Abstract HTTP fetcher for remote config.""" + + def get( + self, + url: str, + *, + headers: dict[str, str] | None = None, + timeout: float | None = None, + ) -> RemoteResponse: + """Perform an HTTP GET request.""" diff --git a/src/scc_cli/ports/sandbox_runtime.py b/src/scc_cli/ports/sandbox_runtime.py new file mode 100644 index 0000000..02906f8 --- /dev/null +++ b/src/scc_cli/ports/sandbox_runtime.py @@ -0,0 +1,32 @@ +"""Sandbox runtime port definition.""" + +from __future__ import annotations + +from typing import Protocol + +from scc_cli.ports.models import SandboxHandle, SandboxSpec, SandboxStatus + + +class SandboxRuntime(Protocol): + """Abstract sandbox runtime operations.""" + + def ensure_available(self) -> None: + """Ensure the runtime is available and ready for use.""" + + def run(self, spec: SandboxSpec) -> SandboxHandle: + """Launch a sandbox session for the given spec.""" + + def resume(self, handle: SandboxHandle) -> None: + """Resume a stopped sandbox session.""" + + def stop(self, handle: SandboxHandle) -> None: + """Stop a running sandbox session.""" + + def remove(self, handle: SandboxHandle) -> None: + """Remove a sandbox session.""" + + def list_running(self) -> list[SandboxHandle]: + """List running sandbox sessions.""" + + def status(self, handle: SandboxHandle) -> SandboxStatus: + """Return status details for a sandbox session.""" diff --git a/src/scc_cli/profiles.py b/src/scc_cli/profiles.py index e024345..dbc5be5 100644 --- a/src/scc_cli/profiles.py +++ b/src/scc_cli/profiles.py @@ -9,992 +9,34 @@ - Config inheritance: 3-layer merge (org defaults -> team -> project) - Security boundaries: Blocked items (fnmatch patterns) never allowed - Delegation control: Org controls whether teams can delegate to projects + +Compatibility wrapper for scc_cli.application.profiles. """ from __future__ import annotations -from dataclasses import dataclass, field -from fnmatch import fnmatch -from pathlib import Path -from typing import TYPE_CHECKING, Any, cast -from urllib.parse import urlparse, urlunparse - -from . import config as config_module - -if TYPE_CHECKING: - pass - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Data Classes for Effective Config (v2 schema) -# ═══════════════════════════════════════════════════════════════════════════════ - - -@dataclass -class ConfigDecision: - """Tracks where a config value came from (for scc config explain).""" - - field: str - value: Any - reason: str - source: str # "org.security" | "org.defaults" | "team.X" | "project" - - -@dataclass -class BlockedItem: - """Tracks an item blocked by security pattern.""" - - item: str - blocked_by: str # The pattern that matched - source: str # Always "org.security" - target_type: str = "plugin" # "plugin" | "mcp_server" - - -@dataclass -class DelegationDenied: - """Tracks an addition denied due to delegation rules.""" - - item: str - requested_by: str # "team" | "project" - reason: str - target_type: str = "plugin" # "plugin" | "mcp_server" - - -@dataclass -class MCPServer: - """Represents an MCP server configuration. - - Supports three transport types: - - sse: Server-Sent Events (requires url) - - stdio: Standard I/O (requires command, optional args and env) - - http: HTTP transport (requires url, optional headers) - """ - - name: str - type: str # "sse" | "stdio" | "http" - url: str | None = None - command: str | None = None - args: list[str] | None = None - env: dict[str, str] | None = None - headers: dict[str, str] | None = None - - -@dataclass -class SessionConfig: - """Session configuration.""" - - timeout_hours: int | None = None - auto_resume: bool | None = None - - -@dataclass -class EffectiveConfig: - """The computed effective configuration after 3-layer merge. - - Contains: - - Final resolved values (plugins, mcp_servers, etc.) - - Tracking information for debugging (decisions, blocked_items, denied_additions) - """ - - plugins: set[str] = field(default_factory=set) - mcp_servers: list[MCPServer] = field(default_factory=list) - network_policy: str | None = None - session_config: SessionConfig = field(default_factory=SessionConfig) - - # For scc config explain - decisions: list[ConfigDecision] = field(default_factory=list) - blocked_items: list[BlockedItem] = field(default_factory=list) - denied_additions: list[DelegationDenied] = field(default_factory=list) - - -@dataclass -class StdioValidationResult: - """Result of validating a stdio MCP server configuration. - - stdio servers are the "sharpest knife" - they have elevated privileges: - - Mounted workspace (write access) - - Network access (required for some tools) - - Tokens in environment variables - - This validation implements layered defense: - - Gate 1: Feature gate (org must explicitly enable) - - Gate 2: Absolute path required (prevents ./evil injection) - - Gate 3: Prefix allowlist + commonpath (prevents path traversal) - - Warnings for host-side checks (command runs in container, not host) - """ - - blocked: bool - reason: str = "" - warnings: list[str] = field(default_factory=list) - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Config Inheritance Functions (3-layer merge) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def matches_blocked(item: str, blocked_patterns: list[str]) -> str | None: - """ - Check whether item matches any blocked pattern using fnmatch. - - Use casefold() for case-insensitive matching. This is important because: - - casefold() handles Unicode edge cases (e.g., German ss -> ss) - - Pattern "Malicious-*" should block "malicious-tool" - - Args: - item: The item to check (plugin name, MCP server name/URL, etc.) - blocked_patterns: List of fnmatch patterns - - Returns: - The pattern that matched, or None if no match - """ - normalized_item = item.strip().casefold() - - for pattern in blocked_patterns: - normalized_pattern = pattern.strip().casefold() - if fnmatch(normalized_item, normalized_pattern): - return pattern - return None - - -def is_allowed(item: str, allowed_patterns: list[str] | None) -> bool: - """Check whether item is allowed by an optional allowlist.""" - if allowed_patterns is None: - return True - if not allowed_patterns: - return False - return matches_blocked(item, allowed_patterns) is not None - - -def mcp_candidates(server: dict[str, Any]) -> list[str]: - """Collect candidate strings for MCP allow/block matching.""" - candidates: list[str] = [] - name = server.get("name", "") - if name: - candidates.append(name) - url = server.get("url", "") - if url: - candidates.append(url) - domain = _extract_domain(url) - if domain: - candidates.append(domain) - command = server.get("command", "") - if command: - candidates.append(command) - return candidates - - -def is_mcp_allowed(server: dict[str, Any], allowed_patterns: list[str] | None) -> bool: - """Check whether MCP server is allowed by patterns.""" - if allowed_patterns is None: - return True - if not allowed_patterns: - return False - for candidate in mcp_candidates(server): - if matches_blocked(candidate, allowed_patterns): - return True - return False - - -def validate_stdio_server( - server: dict[str, Any], - org_config: dict[str, Any], -) -> StdioValidationResult: - """ - Validate a stdio MCP server configuration against org security policy. - - stdio servers are the "sharpest knife" - they have elevated privileges: - - Mounted workspace (write access) - - Network access (required for some tools) - - Tokens in environment variables - - Validation gates (in order): - 1. Feature gate: security.allow_stdio_mcp must be true (default: false) - 2. Absolute path: command must be an absolute path (not relative) - 3. Prefix allowlist: if allowed_stdio_prefixes is set, command must be under one - - Host-side checks (existence, executable) generate warnings only because - the command runs inside the container, not on the host. - - Args: - server: MCP server dict with 'name', 'type', 'command' fields - org_config: Organization config dict - - Returns: - StdioValidationResult with blocked=True/False, reason, and warnings - """ - import os - - command = server.get("command", "") - warnings: list[str] = [] - security = org_config.get("security", {}) - - # Gate 1: Feature gate - stdio must be explicitly enabled by org - # Default is False because stdio servers have elevated privileges - if not security.get("allow_stdio_mcp", False): - return StdioValidationResult( - blocked=True, - reason="stdio MCP disabled by org policy", - ) - - # Gate 2: Absolute path required - prevents "./evil" injection attacks - if not os.path.isabs(command): - return StdioValidationResult( - blocked=True, - reason="stdio command must be absolute path", - ) - - # Gate 3: Prefix allowlist with commonpath enforcement - # Uses realpath to resolve symlinks and ".." traversal attempts - prefixes = security.get("allowed_stdio_prefixes", []) - if prefixes: - # Resolve the actual path (handles symlinks and ..) - try: - resolved = os.path.realpath(command) - except OSError: - # If we can't resolve, use the original command - resolved = command - - # Normalize prefixes the same way - normalized_prefixes = [] - for p in prefixes: - try: - # Remove trailing slash for consistent commonpath comparison - normalized_prefixes.append(os.path.realpath(p.rstrip("/"))) - except OSError: - normalized_prefixes.append(p.rstrip("/")) - - # Check if resolved path is under any allowed prefix - allowed = False - for prefix in normalized_prefixes: - try: - # commonpath returns the longest common sub-path - # If it equals the prefix, command is under that prefix - common = os.path.commonpath([resolved, prefix]) - if common == prefix: - allowed = True - break - except ValueError: - # Different drives on Windows, or empty sequence - continue - - if not allowed: - return StdioValidationResult( - blocked=True, - reason=f"Resolved path {resolved} not in allowed prefixes", - ) - - # Host-side checks: WARN only (command runs in container, not host) - # These are informational because filesystem differs between host and container - if not os.path.exists(command): - warnings.append(f"Command not found on host: {command}") - elif not os.access(command, os.X_OK): - warnings.append(f"Command not executable on host: {command}") - - return StdioValidationResult( - blocked=False, - warnings=warnings, - ) - - -def _extract_domain(url: str) -> str: - """Extract domain from URL for pattern matching.""" - parsed = urlparse(url) - return parsed.netloc or url - - -def is_team_delegated_for_plugins(org_config: dict[str, Any], team_name: str | None) -> bool: - """ - Check whether team is allowed to add additional plugins. - - Uses team-name patterns from delegation.teams.allow_additional_plugins. - """ - if not team_name: - return False - - delegation = org_config.get("delegation", {}) - teams_delegation = delegation.get("teams", {}) - allowed_patterns = teams_delegation.get("allow_additional_plugins", []) - - return matches_blocked(team_name, allowed_patterns) is not None - - -def is_team_delegated_for_mcp(org_config: dict[str, Any], team_name: str | None) -> bool: - """ - Check whether team is allowed to add MCP servers. - - Uses team-name patterns from delegation.teams.allow_additional_mcp_servers. - """ - if not team_name: - return False - - delegation = org_config.get("delegation", {}) - teams_delegation = delegation.get("teams", {}) - allowed_patterns = teams_delegation.get("allow_additional_mcp_servers", []) - - return matches_blocked(team_name, allowed_patterns) is not None - - -def is_project_delegated(org_config: dict[str, Any], team_name: str | None) -> tuple[bool, str]: - """ - Check whether project-level additions are allowed. - - TWO-LEVEL CHECK: - 1. Org-level: delegation.projects.inherit_team_delegation must be true - 2. Team-level: profiles..delegation.allow_project_overrides must be true - - If org disables inheritance (inherit_team_delegation: false), team-level - settings are ignored - this is the master switch. - - Returns: - Tuple of (allowed: bool, reason: str) - Reason explains why delegation was denied if allowed is False - """ - if not team_name: - return (False, "No team specified") - - # First check: org-level master switch - delegation = org_config.get("delegation", {}) - projects_delegation = delegation.get("projects", {}) - org_allows = projects_delegation.get("inherit_team_delegation", False) - - if not org_allows: - # Org-level master switch is OFF - team settings are ignored - return (False, "Org disabled project delegation (inherit_team_delegation: false)") - - # Second check: team-level setting - profiles = org_config.get("profiles", {}) - team_config = profiles.get(team_name, {}) - team_delegation = team_config.get("delegation", {}) - team_allows = team_delegation.get("allow_project_overrides", False) - - if not team_allows: - return ( - False, - f"Team '{team_name}' disabled project overrides (allow_project_overrides: false)", - ) - - return (True, "") - - -def compute_effective_config( - org_config: dict[str, Any], - team_name: str | None, - project_config: dict[str, Any] | None = None, - workspace_path: str | Path | None = None, -) -> EffectiveConfig: - """ - Compute effective configuration by merging org defaults → team → project. - - The merge follows these rules: - 1. Start with org defaults - 2. Apply team additions (if delegated) - 3. Apply project additions (if delegated) - 4. Security blocks are NEVER overridable - checked at every layer - - Args: - org_config: Organization config (v2 schema) - team_name: Name of the team profile to apply (optional) - project_config: Optional project-level config (.scc.yaml content) - workspace_path: Optional path to workspace directory containing .scc.yaml. - If provided, takes precedence over project_config. - - Returns: - EffectiveConfig with merged values and tracking information - """ - # Load project config from file if workspace_path provided - if workspace_path is not None: - project_config = config_module.read_project_config(workspace_path) - - result = EffectiveConfig() - - # Get security blocks (never overridable) - security = org_config.get("security", {}) - blocked_plugins = security.get("blocked_plugins", []) - blocked_mcp_servers = security.get("blocked_mcp_servers", []) - - # Get org defaults - defaults = org_config.get("defaults", {}) - default_plugins = defaults.get("enabled_plugins", []) - disabled_plugins = defaults.get("disabled_plugins", []) - allowed_plugins = defaults.get("allowed_plugins") - allowed_mcp_servers = defaults.get("allowed_mcp_servers") - default_network_policy = defaults.get("network_policy") - default_session = defaults.get("session", {}) - - # ───────────────────────────────────────────────────────────────────────── - # Layer 1: Apply org defaults - # ───────────────────────────────────────────────────────────────────────── - - # Add default plugins (checking against security blocks) - for plugin in default_plugins: - blocked_by = matches_blocked(plugin, blocked_plugins) - if blocked_by: - result.blocked_items.append( - BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") - ) - continue - - if matches_blocked(plugin, disabled_plugins): - continue - - result.plugins.add(plugin) - result.decisions.append( - ConfigDecision( - field="plugins", - value=plugin, - reason="Included in organization defaults", - source="org.defaults", - ) - ) - - # Set network policy from defaults - if default_network_policy: - result.network_policy = default_network_policy - result.decisions.append( - ConfigDecision( - field="network_policy", - value=default_network_policy, - reason="Organization default network policy", - source="org.defaults", - ) - ) - - # Set session config from defaults - if default_session.get("timeout_hours") is not None: - result.session_config.timeout_hours = default_session["timeout_hours"] - result.decisions.append( - ConfigDecision( - field="session.timeout_hours", - value=default_session["timeout_hours"], - reason="Organization default session timeout", - source="org.defaults", - ) - ) - if default_session.get("auto_resume") is not None: - result.session_config.auto_resume = default_session["auto_resume"] - - # ───────────────────────────────────────────────────────────────────────── - # Layer 2: Apply team profile additions - # ───────────────────────────────────────────────────────────────────────── - - profiles = org_config.get("profiles", {}) - team_config = profiles.get(team_name, {}) - - # Add team plugins (if delegated) - team_plugins = team_config.get("additional_plugins", []) - team_delegated_plugins = is_team_delegated_for_plugins(org_config, team_name) - - for plugin in team_plugins: - # Security check first - blocked_by = matches_blocked(plugin, blocked_plugins) - if blocked_by: - result.blocked_items.append( - BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") - ) - continue - - # Delegation check - if not team_delegated_plugins: - result.denied_additions.append( - DelegationDenied( - item=plugin, - requested_by="team", - reason=f"Team '{team_name}' not allowed to add plugins", - ) - ) - continue - - # Allowlist check - if not is_allowed(plugin, allowed_plugins): - result.denied_additions.append( - DelegationDenied( - item=plugin, - requested_by="team", - reason="Plugin not allowed by defaults.allowed_plugins", - ) - ) - continue - - result.plugins.add(plugin) - result.decisions.append( - ConfigDecision( - field="plugins", - value=plugin, - reason=f"Added by team profile '{team_name}'", - source=f"team.{team_name}", - ) - ) - - # Add team MCP servers (if delegated) - team_mcp_servers = team_config.get("additional_mcp_servers", []) - team_delegated_mcp = is_team_delegated_for_mcp(org_config, team_name) - - for server_dict in team_mcp_servers: - server_name = server_dict.get("name", "") - server_url = server_dict.get("url", "") - - # Security check - check both name and URL domain - blocked_by = matches_blocked(server_name, blocked_mcp_servers) - if not blocked_by and server_url: - domain = _extract_domain(server_url) - blocked_by = matches_blocked(domain, blocked_mcp_servers) - - if blocked_by: - result.blocked_items.append( - BlockedItem( - item=server_name or server_url, - blocked_by=blocked_by, - source="org.security", - target_type="mcp_server", - ) - ) - continue - - # Delegation check - if not team_delegated_mcp: - result.denied_additions.append( - DelegationDenied( - item=server_name, - requested_by="team", - reason=f"Team '{team_name}' not allowed to add MCP servers", - target_type="mcp_server", - ) - ) - continue - - # Allowlist check - if not is_mcp_allowed(server_dict, allowed_mcp_servers): - result.denied_additions.append( - DelegationDenied( - item=server_name or server_url, - requested_by="team", - reason="MCP server not allowed by defaults.allowed_mcp_servers", - target_type="mcp_server", - ) - ) - continue - - # stdio-type servers require additional security validation - if server_dict.get("type") == "stdio": - stdio_result = validate_stdio_server(server_dict, org_config) - if stdio_result.blocked: - result.blocked_items.append( - BlockedItem( - item=server_name, - blocked_by=stdio_result.reason, - source="org.security", - target_type="mcp_server", - ) - ) - continue - # Warnings are logged inside validate_stdio_server - - mcp_server = MCPServer( - name=server_name, - type=server_dict.get("type", "sse"), - url=server_url or None, - command=server_dict.get("command"), - args=server_dict.get("args"), - ) - result.mcp_servers.append(mcp_server) - result.decisions.append( - ConfigDecision( - field="mcp_servers", - value=server_name, - reason=f"Added by team profile '{team_name}'", - source=f"team.{team_name}", - ) - ) - - # Team session override - team_session = team_config.get("session", {}) - if team_session.get("timeout_hours") is not None: - result.session_config.timeout_hours = team_session["timeout_hours"] - result.decisions.append( - ConfigDecision( - field="session.timeout_hours", - value=team_session["timeout_hours"], - reason=f"Overridden by team profile '{team_name}'", - source=f"team.{team_name}", - ) - ) - - # ───────────────────────────────────────────────────────────────────────── - # Layer 3: Apply project additions (if delegated) - # ───────────────────────────────────────────────────────────────────────── - - if project_config: - project_delegated, delegation_reason = is_project_delegated(org_config, team_name) - - # Add project plugins - project_plugins = project_config.get("additional_plugins", []) - for plugin in project_plugins: - # Security check first - blocked_by = matches_blocked(plugin, blocked_plugins) - if blocked_by: - result.blocked_items.append( - BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") - ) - continue - - # Delegation check - if not project_delegated: - result.denied_additions.append( - DelegationDenied( - item=plugin, - requested_by="project", - reason=delegation_reason, - ) - ) - continue - - # Allowlist check - if not is_allowed(plugin, allowed_plugins): - result.denied_additions.append( - DelegationDenied( - item=plugin, - requested_by="project", - reason="Plugin not allowed by defaults.allowed_plugins", - ) - ) - continue - - result.plugins.add(plugin) - result.decisions.append( - ConfigDecision( - field="plugins", - value=plugin, - reason="Added by project config", - source="project", - ) - ) - - # Add project MCP servers - project_mcp_servers = project_config.get("additional_mcp_servers", []) - for server_dict in project_mcp_servers: - server_name = server_dict.get("name", "") - server_url = server_dict.get("url", "") - - # Security check - blocked_by = matches_blocked(server_name, blocked_mcp_servers) - if not blocked_by and server_url: - domain = _extract_domain(server_url) - blocked_by = matches_blocked(domain, blocked_mcp_servers) - - if blocked_by: - result.blocked_items.append( - BlockedItem( - item=server_name or server_url, - blocked_by=blocked_by, - source="org.security", - target_type="mcp_server", - ) - ) - continue - - # Delegation check - if not project_delegated: - result.denied_additions.append( - DelegationDenied( - item=server_name, - requested_by="project", - reason=delegation_reason, - target_type="mcp_server", - ) - ) - continue - - # Allowlist check - if not is_mcp_allowed(server_dict, allowed_mcp_servers): - result.denied_additions.append( - DelegationDenied( - item=server_name or server_url, - requested_by="project", - reason="MCP server not allowed by defaults.allowed_mcp_servers", - target_type="mcp_server", - ) - ) - continue - - # stdio-type servers require additional security validation - if server_dict.get("type") == "stdio": - stdio_result = validate_stdio_server(server_dict, org_config) - if stdio_result.blocked: - result.blocked_items.append( - BlockedItem( - item=server_name, - blocked_by=stdio_result.reason, - source="org.security", - target_type="mcp_server", - ) - ) - continue - # Warnings are logged inside validate_stdio_server - - mcp_server = MCPServer( - name=server_name, - type=server_dict.get("type", "sse"), - url=server_url or None, - command=server_dict.get("command"), - args=server_dict.get("args"), - ) - result.mcp_servers.append(mcp_server) - result.decisions.append( - ConfigDecision( - field="mcp_servers", - value=server_name, - reason="Added by project config", - source="project", - ) - ) - - # Project session override - project_session = project_config.get("session", {}) - if project_session.get("timeout_hours") is not None: - if project_delegated: - result.session_config.timeout_hours = project_session["timeout_hours"] - result.decisions.append( - ConfigDecision( - field="session.timeout_hours", - value=project_session["timeout_hours"], - reason="Overridden by project config", - source="project", - ) - ) - - return result - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Core Profile Resolution Functions (New Architecture) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def list_profiles(org_config: dict[str, Any]) -> list[dict[str, Any]]: - """ - List all available profiles from org config. - - Return list of profile dicts with name, description, plugin, and marketplace. - """ - profiles = org_config.get("profiles", {}) - result = [] - - for name, info in profiles.items(): - result.append( - { - "name": name, - "description": info.get("description", ""), - "plugin": info.get("plugin"), - "marketplace": info.get("marketplace"), - } - ) - - return result - - -def resolve_profile(org_config: dict[str, Any], profile_name: str) -> dict[str, Any]: - """ - Resolve profile by name, raise ValueError if not found. - - Return profile dict with name and all profile fields. - """ - profiles = org_config.get("profiles", {}) - - if profile_name not in profiles: - available = ", ".join(sorted(profiles.keys())) or "(none)" - raise ValueError(f"Profile '{profile_name}' not found. Available: {available}") - - profile_info = profiles[profile_name] - return {"name": profile_name, **profile_info} - - -def resolve_marketplace(org_config: dict[Any, Any], profile: dict[Any, Any]) -> dict[Any, Any]: - """ - Resolve marketplace for a profile and translate to claude_adapter format. - - This is the SINGLE translation layer between org-config schema and - claude_adapter expected format. All schema changes should be handled here. - - Schema Translation: - org-config (source/owner/repo) → claude_adapter (type/repo combined) - - Args: - org_config: Organization config with marketplaces dict - profile: Profile dict with a "marketplace" field - - Returns: - Marketplace dict normalized for claude_adapter: - - name: marketplace name (from dict key) - - type: "github" | "gitlab" | "https" - - repo: combined "owner/repo" for github - - url: for git/url sources - - ref: translated from "branch" - - Raises: - ValueError: If marketplace not found, invalid source, or missing fields - """ - marketplace_name = profile.get("marketplace") - if not marketplace_name: - raise ValueError(f"Profile '{profile.get('name')}' has no marketplace field") - - # Dict-based lookup - marketplaces: dict[str, dict[Any, Any]] = org_config.get("marketplaces", {}) - marketplace_config = marketplaces.get(marketplace_name) - - if not marketplace_config: - raise ValueError( - f"Marketplace '{marketplace_name}' not found for profile '{profile.get('name')}'" - ) - - # Validate and translate source type - source = marketplace_config.get("source", "") - valid_sources = {"github", "git", "url"} - if source not in valid_sources: - raise ValueError( - f"Marketplace '{marketplace_name}' has invalid source '{source}'. " - f"Valid sources: {', '.join(sorted(valid_sources))}" - ) - - result: dict[str, Any] = {"name": marketplace_name} - - if source == "github": - # GitHub: requires owner + repo, combine into single repo field - owner = marketplace_config.get("owner", "") - repo = marketplace_config.get("repo", "") - if not owner or not repo: - raise ValueError( - f"GitHub marketplace '{marketplace_name}' requires 'owner' and 'repo' fields" - ) - result["type"] = "github" - result["repo"] = f"{owner}/{repo}" - - elif source == "git": - # Generic git: maps to gitlab type - # Supports two patterns: - # 1. Direct URL: {"source": "git", "url": "https://..."} - # 2. Host + owner + repo: {"source": "git", "host": "gitlab.example.org", "owner": "group", "repo": "name"} - url = marketplace_config.get("url", "") - host = marketplace_config.get("host", "") - owner = marketplace_config.get("owner", "") - repo = marketplace_config.get("repo", "") - - result["type"] = "gitlab" - - if url: - # Pattern 1: Direct URL provided - result["url"] = url - elif host and owner and repo: - # Pattern 2: Construct from host/owner/repo - result["host"] = host - result["repo"] = f"{owner}/{repo}" - else: - raise ValueError( - f"Git marketplace '{marketplace_name}' requires either 'url' field " - f"or 'host', 'owner', 'repo' fields" - ) - - elif source == "url": - # HTTPS URL: requires url - url = marketplace_config.get("url", "") - if not url: - raise ValueError(f"URL marketplace '{marketplace_name}' requires 'url' field") - result["type"] = "https" - result["url"] = url - - # Translate branch -> ref (optional) - if marketplace_config.get("branch"): - result["ref"] = marketplace_config["branch"] - - # Preserve optional fields - for field_name in ("host", "auth", "headers", "path"): - if marketplace_config.get(field_name): - result[field_name] = marketplace_config[field_name] - - return result - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Marketplace URL Resolution (HTTPS-only enforcement) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def _normalize_repo_path(repo: str) -> str: - """ - Normalize repo path: strip whitespace, leading slashes, .git suffix. - """ - repo = repo.strip().lstrip("/") - if repo.endswith(".git"): - repo = repo[:-4] - return repo - - -def get_marketplace_url(marketplace: dict[str, Any]) -> str: - """ - Resolve marketplace to HTTPS URL. - - SECURITY: Rejects SSH URLs (git@, ssh://) and HTTP URLs. - Only HTTPS is allowed for marketplace access. - - URL Resolution Logic: - 1. If 'url' is provided, validate and normalize it - 2. Otherwise, construct from 'host' + 'repo' - 3. For github/gitlab types, use default hosts if not specified - - Args: - marketplace: Marketplace config dict with type, url/host, repo - - Returns: - Normalized HTTPS URL string - - Raises: - ValueError: For SSH URLs, HTTP URLs, unsupported schemes, or missing config - """ - # Check for direct URL first - if raw := marketplace.get("url"): - raw = raw.strip() - - # Reject SSH URLs early (git@ format) - if raw.startswith("git@"): - raise ValueError(f"SSH URL not supported: {raw}") - - # Reject ssh:// protocol - if raw.startswith("ssh://"): - raise ValueError(f"SSH URL not supported: {raw}") - - parsed = urlparse(raw) - - # HTTPS only - reject http:// for security - if parsed.scheme == "http": - raise ValueError(f"HTTP not allowed (use HTTPS): {raw}") - - if parsed.scheme != "https": - raise ValueError(f"Unsupported URL scheme: {parsed.scheme!r}") - - # Normalize: remove trailing slash, drop fragments - normalized_path = parsed.path.rstrip("/") - normalized = parsed._replace(path=normalized_path, fragment="") - return cast(str, urlunparse(normalized)) - - # No URL provided - construct from host + repo - host = (marketplace.get("host") or "").strip() - - if not host: - # Use default hosts for known types - defaults = {"github": "github.com", "gitlab": "gitlab.com"} - host = defaults.get(marketplace.get("type") or "") - - if not host: - raise ValueError( - f"Marketplace type '{marketplace.get('type')}' requires 'url' or 'host'" - ) - - # Reject host with path components (ambiguous config) - if "/" in host: - raise ValueError(f"'host' must not include path: {host!r}") - - # Get and normalize repo path - repo = marketplace.get("repo", "") - repo = _normalize_repo_path(repo) - - return f"https://{host}/{repo}" +from scc_cli.application import compute_effective_config as compute_effective_config_module +from scc_cli.application import profiles as profiles_module + +BlockedItem = compute_effective_config_module.BlockedItem +ConfigDecision = compute_effective_config_module.ConfigDecision +DelegationDenied = compute_effective_config_module.DelegationDenied +EffectiveConfig = compute_effective_config_module.EffectiveConfig +MCPServer = compute_effective_config_module.MCPServer +SessionConfig = compute_effective_config_module.SessionConfig +StdioValidationResult = compute_effective_config_module.StdioValidationResult +compute_effective_config = compute_effective_config_module.compute_effective_config +is_allowed = compute_effective_config_module.is_allowed +is_mcp_allowed = compute_effective_config_module.is_mcp_allowed +is_project_delegated = compute_effective_config_module.is_project_delegated +is_team_delegated_for_mcp = compute_effective_config_module.is_team_delegated_for_mcp +is_team_delegated_for_plugins = compute_effective_config_module.is_team_delegated_for_plugins +matches_blocked = compute_effective_config_module.matches_blocked +mcp_candidates = compute_effective_config_module.mcp_candidates +validate_stdio_server = compute_effective_config_module.validate_stdio_server + +list_profiles = profiles_module.list_profiles +resolve_profile = profiles_module.resolve_profile +resolve_marketplace = profiles_module.resolve_marketplace +get_marketplace_url = profiles_module.get_marketplace_url +_normalize_repo_path = profiles_module._normalize_repo_path diff --git a/src/scc_cli/remote.py b/src/scc_cli/remote.py index 1a3af00..cc99d08 100644 --- a/src/scc_cli/remote.py +++ b/src/scc_cli/remote.py @@ -26,7 +26,9 @@ from scc_cli.auth import is_remote_command_allowed from scc_cli.auth import resolve_auth as _resolve_auth_impl +from scc_cli.bootstrap import get_default_adapters from scc_cli.output_mode import print_human +from scc_cli.ports.remote_fetcher import RemoteFetcher from scc_cli.utils.locks import file_lock, lock_path if TYPE_CHECKING: @@ -186,6 +188,7 @@ def fetch_org_config( auth: str | None, etag: str | None = None, auth_header: str | None = None, + fetcher: RemoteFetcher | None = None, ) -> tuple[dict[str, Any] | None, str | None, int]: """Fetch org config from URL with ETag support. @@ -194,6 +197,7 @@ def fetch_org_config( auth: Auth token for header etag: Previous ETag for conditional request auth_header: Custom header name (defaults to Authorization) + fetcher: Optional RemoteFetcher override Returns: Tuple of (config_dict, new_etag, status_code) @@ -211,8 +215,10 @@ def fetch_org_config( if etag: headers["If-None-Match"] = etag + remote_fetcher = fetcher or get_default_adapters().remote_fetcher + try: - response = requests.get(url, headers=headers, timeout=30) + response = remote_fetcher.get(url, headers=headers, timeout=30) status = response.status_code # 304 Not Modified - use cached version @@ -225,7 +231,7 @@ def fetch_org_config( # Parse JSON response try: - config = response.json() + config = json.loads(response.text) except json.JSONDecodeError: return (None, None, -1) # Invalid JSON diff --git a/src/scc_cli/support_bundle.py b/src/scc_cli/support_bundle.py new file mode 100644 index 0000000..ad812c9 --- /dev/null +++ b/src/scc_cli/support_bundle.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import json +import platform +import re +import sys +import zipfile +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from scc_cli import __version__, config +from scc_cli.doctor.core import run_doctor +from scc_cli.doctor.serialization import build_doctor_json_data + +SECRET_KEY_PATTERNS = [ + r"^auth$", + r".*token.*", + r".*api[_-]?key.*", + r".*apikey.*", + r".*password.*", + r".*secret.*", + r"^authorization$", + r".*credential.*", +] + +_SECRET_PATTERNS = [re.compile(pattern, re.IGNORECASE) for pattern in SECRET_KEY_PATTERNS] + + +def _is_secret_key(key: str) -> bool: + return any(pattern.match(key) for pattern in _SECRET_PATTERNS) + + +def redact_secrets(data: dict[str, Any]) -> dict[str, Any]: + """Redact secret values from a dictionary.""" + + result: dict[str, Any] = {} + + for key, value in data.items(): + if _is_secret_key(key) and isinstance(value, str): + result[key] = "[REDACTED]" + elif isinstance(value, dict): + result[key] = redact_secrets(value) + elif isinstance(value, list): + result[key] = [ + redact_secrets(item) if isinstance(item, dict) else item for item in value + ] + else: + result[key] = value + + return result + + +def redact_paths(data: dict[str, Any], redact: bool = True) -> dict[str, Any]: + """Redact home directory paths from a dictionary.""" + + if not redact: + return data + + home = str(Path.home()) + result: dict[str, Any] = {} + + for key, value in data.items(): + if isinstance(value, str) and home in value: + result[key] = value.replace(home, "~") + elif isinstance(value, dict): + result[key] = redact_paths(value, redact=redact) + elif isinstance(value, list): + result[key] = [ + redact_paths(item, redact=redact) + if isinstance(item, dict) + else (item.replace(home, "~") if isinstance(item, str) and home in item else item) + for item in value + ] + else: + result[key] = value + + return result + + +def build_bundle_data( + redact_paths_flag: bool = True, + workspace_path: Path | None = None, +) -> dict[str, Any]: + """Build support bundle data.""" + + system_info = { + "platform": platform.system(), + "platform_version": platform.version(), + "platform_release": platform.release(), + "machine": platform.machine(), + "python_version": sys.version, + "python_implementation": platform.python_implementation(), + } + + generated_at = datetime.now(timezone.utc).isoformat() + + try: + user_config = config.load_user_config() + if isinstance(user_config, dict): + user_config = redact_secrets(user_config) + except Exception: + user_config = {"error": "Failed to load config"} + + try: + org_config = config.load_cached_org_config() + if org_config: + org_config = redact_secrets(org_config) + except Exception: + org_config = {"error": "Failed to load org config"} + + try: + doctor_result = run_doctor(workspace_path) + doctor_data = build_doctor_json_data(doctor_result) + except Exception as exc: + doctor_data = {"error": f"Failed to run doctor: {exc}"} + + bundle_data: dict[str, Any] = { + "generated_at": generated_at, + "cli_version": __version__, + "system": system_info, + "config": user_config, + "org_config": org_config, + "doctor": doctor_data, + } + + if workspace_path: + bundle_data["workspace"] = str(workspace_path) + + if redact_paths_flag: + bundle_data = redact_paths(bundle_data) + + return bundle_data + + +def get_default_bundle_path() -> Path: + """Get default path for support bundle.""" + + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + return Path.cwd() / f"scc-support-bundle-{timestamp}.zip" + + +def create_bundle( + output_path: Path, + redact_paths_flag: bool = True, + workspace_path: Path | None = None, +) -> dict[str, Any]: + """Create a support bundle zip file.""" + + bundle_data = build_bundle_data( + redact_paths_flag=redact_paths_flag, + workspace_path=workspace_path, + ) + + with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as bundle: + manifest_json = json.dumps(bundle_data, indent=2) + bundle.writestr("manifest.json", manifest_json) + + return bundle_data diff --git a/src/scc_cli/ui/dashboard/__init__.py b/src/scc_cli/ui/dashboard/__init__.py index 0a80f1b..467e8ab 100644 --- a/src/scc_cli/ui/dashboard/__init__.py +++ b/src/scc_cli/ui/dashboard/__init__.py @@ -22,6 +22,8 @@ >>> run_dashboard() # Interactive dashboard """ +from scc_cli.application.dashboard import TAB_ORDER, DashboardTab + from .loaders import ( _load_all_tab_data, _load_containers_tab_data, @@ -29,7 +31,7 @@ _load_status_tab_data, _load_worktrees_tab_data, ) -from .models import TAB_ORDER, DashboardState, DashboardTab, TabData +from .models import DashboardState, TabData from .orchestrator import _prepare_for_nested_ui, run_dashboard # Lazy import for Dashboard to avoid circular imports diff --git a/src/scc_cli/ui/dashboard/_dashboard.py b/src/scc_cli/ui/dashboard/_dashboard.py index 35bb04d..afd6193 100644 --- a/src/scc_cli/ui/dashboard/_dashboard.py +++ b/src/scc_cli/ui/dashboard/_dashboard.py @@ -22,6 +22,20 @@ from rich.table import Table from rich.text import Text +from scc_cli.application.dashboard import ( + TAB_ORDER, + ContainerItem, + DashboardTab, + PlaceholderItem, + PlaceholderKind, + SessionItem, + StatusAction, + StatusItem, + WorktreeItem, + placeholder_start_reason, + placeholder_tip, +) + # Import config for standalone mode detection from ... import config as scc_config from ...theme import Indicators @@ -50,7 +64,7 @@ WorktreeActionMenuRequested, ) from ..list_screen import ListItem -from .models import TAB_ORDER, DashboardState, DashboardTab +from .models import DashboardState class Dashboard: @@ -334,7 +348,9 @@ def _render_container_details(self, item: ListItem[Any]) -> RenderableType: table.add_row("Name", Text(item.label, style="bold")) container: ContainerInfo | None = None - if isinstance(item.value, ContainerInfo): + if isinstance(item.value, ContainerItem): + container = item.value.container + elif isinstance(item.value, ContainerInfo): container = item.value elif isinstance(item.value, str): # Legacy fallback when value is container ID @@ -382,7 +398,13 @@ def _render_session_details(self, item: ListItem[Any]) -> RenderableType: Uses the raw session dict stored in item.value for field access. """ - session = item.value + session_source = item.value + if isinstance(session_source, SessionItem): + session = session_source.session + elif isinstance(session_source, dict): + session = session_source + else: + return Text("Session details unavailable", style="dim italic") header = self._build_details_header("Session Details") @@ -435,7 +457,8 @@ def _render_worktree_details(self, item: ListItem[Any]) -> RenderableType: table.add_column("value", overflow="fold") table.add_row("Name", Text(item.label, style="bold")) - table.add_row("Path", item.value) + worktree_path = item.value.path if isinstance(item.value, WorktreeItem) else item.value + table.add_row("Path", worktree_path) # Parse description into fields (branch modified +1 !2 ?3 (current)) if item.description: @@ -468,7 +491,7 @@ def _render_worktree_details(self, item: ListItem[Any]) -> RenderableType: # Commands section commands = Text() commands.append("\nCommands\n", style="dim") - commands.append(f" scc start {item.value}\n", style="cyan") + commands.append(f" scc start {worktree_path}\n", style="cyan") return Group(header, table, commands) @@ -494,33 +517,9 @@ def _format_worktree_change_summary(self, part: str) -> str | None: return " · ".join(summaries) if summaries else None - def _get_placeholder_tip(self, value: str | dict[str, Any]) -> str: - """Get contextual help tip for placeholder items. - - Returns actionable guidance for empty/error states. - - Args: - value: Either a string placeholder key or a dict with "_placeholder" key. - """ - tips: dict[str, str] = { - # Container placeholders (first-time user friendly) - "no_containers": "No containers running. Press n to start or run `scc start `.", - # Session placeholders (first-time user friendly) - "no_sessions": "No sessions yet. Press n to create your first session.", - # Worktree placeholders - updated with actionable shortcuts - "no_worktrees": "No worktrees yet. Press c to create, w for recent, v for status.", - "no_git": "Not a git repository. Press i to init or c to clone.", - # Error placeholders (actionable doctor suggestion) - "error": "Unable to load data. Run `scc doctor` to diagnose.", - "config_error": "Configuration issue detected. Run `scc doctor` to fix it.", - } - - # Extract placeholder key from dict if needed - placeholder_key = value - if isinstance(value, dict): - placeholder_key = value.get("_placeholder", "") - - return tips.get(str(placeholder_key), "No details available for this item.") + def _get_placeholder_tip(self, item: PlaceholderItem) -> str: + """Get contextual help tip for placeholder items.""" + return placeholder_tip(item.kind) def _compute_footer_hints( self, _standalone: bool, show_details: bool @@ -531,33 +530,26 @@ def _compute_footer_hints( primary_action: str | None = None if self.state.active_tab == DashboardTab.STATUS: - if current: - if isinstance(current.value, dict): - if current.value.get("_action") == "resume_last_session": + if current and isinstance(current.value, StatusItem): + match current.value.action: + case StatusAction.RESUME_SESSION: primary_action = "resume" - elif isinstance(current.value, str): - if current.value == "start_session": + case StatusAction.START_SESSION: primary_action = "start" - elif current.value == "team": + case StatusAction.SWITCH_TEAM: primary_action = "switch" - elif current.value in { - "containers", - "sessions", - "worktrees", - "settings", - "profile", - }: + case ( + StatusAction.OPEN_TAB + | StatusAction.OPEN_PROFILE + | StatusAction.OPEN_SETTINGS + ): primary_action = "open" - elif current.value == "statusline_not_installed": + case StatusAction.INSTALL_STATUSLINE: primary_action = "install" + case None: + primary_action = None elif self.state.is_placeholder_selected(): - is_startable = False - if current: - if isinstance(current.value, str): - is_startable = current.value in {"no_containers", "no_sessions"} - elif isinstance(current.value, dict): - is_startable = current.value.get("_startable", False) - if is_startable: + if current and isinstance(current.value, PlaceholderItem) and current.value.startable: primary_action = "start" else: if self.state.active_tab == DashboardTab.SESSIONS: @@ -582,8 +574,11 @@ def _compute_footer_hints( if self.state.active_tab == DashboardTab.WORKTREES and not show_details: is_git_repo = True - if current and isinstance(current.value, str): - is_git_repo = current.value not in {"no_git", "no_worktrees"} + if current and isinstance(current.value, PlaceholderItem): + is_git_repo = current.value.kind not in { + PlaceholderKind.NO_GIT, + PlaceholderKind.NO_WORKTREES, + } hints.append(FooterHint("c", "create" if is_git_repo else "clone")) hints.append(FooterHint("Tab", "tabs")) @@ -687,7 +682,10 @@ def _handle_action(self, action: Action[None]) -> bool | None: self.state.status_message = "Details not available in Status tab" return True if self.state.is_placeholder_selected(): - self.state.status_message = self._get_placeholder_tip(current.value) + if isinstance(current.value, PlaceholderItem): + self.state.status_message = self._get_placeholder_tip(current.value) + else: + self.state.status_message = "No details available for this item" return True self.state.details_open = not self.state.details_open return True @@ -696,56 +694,40 @@ def _handle_action(self, action: Action[None]) -> bool | None: # On Status tab, Enter triggers different actions based on item if self.state.active_tab == DashboardTab.STATUS: current = self.state.list_state.current_item - if current: - if isinstance(current.value, dict): - action = current.value.get("_action") - if action == "resume_last_session": - session = current.value.get("session") - if isinstance(session, dict): - raise SessionResumeRequested( - session=session, - return_to=self.state.active_tab.name, - ) - - # Start session row - if current.value == "start_session": + if current and isinstance(current.value, StatusItem): + status_action = current.value.action + if status_action is StatusAction.RESUME_SESSION and current.value.session: + raise SessionResumeRequested( + session=current.value.session, + return_to=self.state.active_tab.name, + ) + + if status_action is StatusAction.START_SESSION: raise StartRequested( return_to=self.state.active_tab.name, reason="dashboard_start", ) - # Team row: same behavior as 't' key - if current.value == "team": + if status_action is StatusAction.SWITCH_TEAM: if scc_config.is_standalone_mode(): self.state.status_message = ( "Teams require org mode. Run `scc setup` to configure." ) - return True # Refresh to show message + return True raise TeamSwitchRequested() - # Resource rows: drill down to corresponding tab - tab_mapping: dict[str, DashboardTab] = { - "containers": DashboardTab.CONTAINERS, - "sessions": DashboardTab.SESSIONS, - "worktrees": DashboardTab.WORKTREES, - } - target_tab = tab_mapping.get(current.value) - if target_tab: - # Clear filter on drill-down (avoids confusion) + if status_action is StatusAction.OPEN_TAB and current.value.action_tab: self.state.list_state.clear_filter() - self.state = self.state.switch_tab(target_tab) - return True # Refresh to show new tab + self.state = self.state.switch_tab(current.value.action_tab) + return True - # Statusline row: Enter triggers installation - if current.value == "statusline_not_installed": + if status_action is StatusAction.INSTALL_STATUSLINE: raise StatuslineInstallRequested(return_to=self.state.active_tab.name) - # Profile row: Enter opens profile menu - if current.value == "profile": + if status_action is StatusAction.OPEN_PROFILE: raise ProfileMenuRequested(return_to=self.state.active_tab.name) - # Settings row: Enter opens settings and maintenance screen - if current.value == "settings": + if status_action is StatusAction.OPEN_SETTINGS: raise SettingsRequested(return_to=self.state.active_tab.name) else: # Resource tabs handling (Containers, Worktrees, Sessions) @@ -754,85 +736,55 @@ def _handle_action(self, action: Action[None]) -> bool | None: return None if self.state.is_placeholder_selected(): - # Placeholder or empty state: handle appropriately - # Check if this is a startable placeholder - # (containers/sessions empty → user can start a new session) - is_startable = False - reason = "" - - # String placeholders (containers, worktrees) - if isinstance(current.value, str): - startable_strings = {"no_containers", "no_sessions"} - if current.value in startable_strings: - is_startable = True - reason = current.value - - # Dict placeholders (sessions tab uses dicts) - elif isinstance(current.value, dict): - if current.value.get("_startable"): - is_startable = True - reason = current.value.get("_placeholder", "unknown") - - if is_startable: - # Uses .name (stable identifier) not .value (display string) - raise StartRequested( - return_to=self.state.active_tab.name, - reason=reason, - ) - # Non-startable placeholders show a tip - self.state.status_message = self._get_placeholder_tip(current.value) + if isinstance(current.value, PlaceholderItem): + if current.value.startable: + raise StartRequested( + return_to=self.state.active_tab.name, + reason=placeholder_start_reason(current.value), + ) + self.state.status_message = self._get_placeholder_tip(current.value) + return True + self.state.status_message = "No details available for this item" return True - # Primary actions per resource tab - if ( - self.state.active_tab == DashboardTab.SESSIONS - and isinstance(current.value, dict) - and not current.value.get("_placeholder") + if self.state.active_tab == DashboardTab.SESSIONS and isinstance( + current.value, SessionItem ): raise SessionResumeRequested( - session=current.value, + session=current.value.session, return_to=self.state.active_tab.name, ) if self.state.active_tab == DashboardTab.WORKTREES and isinstance( - current.value, str + current.value, WorktreeItem ): raise StartRequested( return_to=self.state.active_tab.name, - reason=f"worktree:{current.value}", + reason=f"worktree:{current.value.path}", ) - if self.state.active_tab == DashboardTab.CONTAINERS: - from ...docker.core import ContainerInfo - - container: ContainerInfo | None = None - if isinstance(current.value, ContainerInfo): - container = current.value - elif isinstance(current.value, str): - container = ContainerInfo( - id=current.value, - name=current.label, - status="", - ) - if container: - raise ContainerActionMenuRequested( - container_id=container.id, - container_name=container.name, - return_to=self.state.active_tab.name, - ) + if self.state.active_tab == DashboardTab.CONTAINERS and isinstance( + current.value, ContainerItem + ): + raise ContainerActionMenuRequested( + container_id=current.value.container.id, + container_name=current.value.container.name, + return_to=self.state.active_tab.name, + ) - if self.state.active_tab == DashboardTab.SESSIONS: - if isinstance(current.value, dict): - raise SessionActionMenuRequested( - session=current.value, - return_to=self.state.active_tab.name, - ) + if self.state.active_tab == DashboardTab.SESSIONS and isinstance( + current.value, SessionItem + ): + raise SessionActionMenuRequested( + session=current.value.session, + return_to=self.state.active_tab.name, + ) if self.state.active_tab == DashboardTab.WORKTREES and isinstance( - current.value, str + current.value, WorktreeItem ): raise WorktreeActionMenuRequested( - worktree_path=current.value, + worktree_path=current.value.path, return_to=self.state.active_tab.name, ) @@ -845,38 +797,28 @@ def _handle_action(self, action: Action[None]) -> bool | None: self.state.status_message = "No item selected" return True - if self.state.active_tab == DashboardTab.CONTAINERS: - from ...docker.core import ContainerInfo - - action_container: ContainerInfo | None = None - if isinstance(current.value, ContainerInfo): - action_container = current.value - elif isinstance(current.value, str): - action_container = ContainerInfo( - id=current.value, - name=current.label, - status="", - ) - if action_container: - raise ContainerActionMenuRequested( - container_id=action_container.id, - container_name=action_container.name, - return_to=self.state.active_tab.name, - ) + if self.state.active_tab == DashboardTab.CONTAINERS and isinstance( + current.value, ContainerItem + ): + raise ContainerActionMenuRequested( + container_id=current.value.container.id, + container_name=current.value.container.name, + return_to=self.state.active_tab.name, + ) if self.state.active_tab == DashboardTab.SESSIONS and isinstance( - current.value, dict + current.value, SessionItem ): raise SessionActionMenuRequested( - session=current.value, + session=current.value.session, return_to=self.state.active_tab.name, ) if self.state.active_tab == DashboardTab.WORKTREES and isinstance( - current.value, str + current.value, WorktreeItem ): raise WorktreeActionMenuRequested( - worktree_path=current.value, + worktree_path=current.value.path, return_to=self.state.active_tab.name, ) @@ -933,24 +875,22 @@ def _handle_action(self, action: Action[None]) -> bool | None: if self.state.active_tab == DashboardTab.STATUS: if not self.state.list_state.filter_query: raise SandboxImportRequested(return_to=self.state.active_tab.name) - # Worktrees tab: initialize git repo (only when not in a git repo) elif self.state.active_tab == DashboardTab.WORKTREES: current = self.state.list_state.current_item - # Only show when not in git repo (placeholder is no_git or no_worktrees) + # Only show when placeholder indicates no git repo is_non_git = ( current - and isinstance(current.value, str) - and current.value + and isinstance(current.value, PlaceholderItem) + and current.value.kind in { - "no_git", - "no_worktrees", + PlaceholderKind.NO_GIT, + PlaceholderKind.NO_WORKTREES, } ) if is_non_git: raise GitInitRequested(return_to=self.state.active_tab.name) - else: - self.state.status_message = "Already in a git repository" - return True + self.state.status_message = "Already in a git repository" + return True elif action.custom_key == "c": # User pressed 'c' - create worktree (or clone if not git) # Only active on Worktrees tab @@ -958,8 +898,11 @@ def _handle_action(self, action: Action[None]) -> bool | None: current = self.state.list_state.current_item # Check if we're in a git repo is_git_repo = True - if current and isinstance(current.value, str): - is_git_repo = current.value not in {"no_git", "no_worktrees"} + if current and isinstance(current.value, PlaceholderItem): + is_git_repo = current.value.kind not in { + PlaceholderKind.NO_GIT, + PlaceholderKind.NO_WORKTREES, + } raise CreateWorktreeRequested( return_to=self.state.active_tab.name, is_git_repo=is_git_repo, @@ -984,7 +927,9 @@ def _handle_action(self, action: Action[None]) -> bool | None: from ...docker.core import ContainerInfo key_container: ContainerInfo | None = None - if isinstance(current.value, ContainerInfo): + if isinstance(current.value, ContainerItem): + key_container = current.value.container + elif isinstance(current.value, ContainerInfo): key_container = current.value elif isinstance(current.value, str): # Legacy fallback when value is container ID diff --git a/src/scc_cli/ui/dashboard/loaders.py b/src/scc_cli/ui/dashboard/loaders.py index 8f19587..a54d1c3 100644 --- a/src/scc_cli/ui/dashboard/loaders.py +++ b/src/scc_cli/ui/dashboard/loaders.py @@ -1,463 +1,58 @@ -"""Data loading functions for dashboard tabs. - -This module contains functions to load data for each dashboard tab: -- Status: System overview (team, organization, counts) -- Containers: Docker containers managed by SCC -- Sessions: Recent Claude sessions -- Worktrees: Git worktrees in current repository - -Each loader function returns a TabData instance ready for display. -Loaders handle errors gracefully, returning placeholder items on failure. -""" +"""Data loading wrappers for dashboard tabs.""" from __future__ import annotations from datetime import datetime -from typing import Any + +from scc_cli.application import dashboard as app_dashboard from ..list_screen import ListItem from .models import DashboardTab, TabData def _load_status_tab_data(refresh_at: datetime | None = None) -> TabData: - """Load Status tab data showing quick actions and context. - - The Status tab displays: - - Primary actions (start session, resume) - - Current team and organization context - - Personal profile status - - Quick access to settings & maintenance - - Diagnostic info (Docker, Sandbox, Statusline) is in `scc doctor`. - - Returns: - TabData with status items. - """ - # Import here to avoid circular imports - import os - from pathlib import Path - - from ... import config, sessions - from ...core.personal_profiles import get_profile_status - from ...docker import core as docker_core - - # Suppress unused import warning - refresh_at kept for API compatibility - _ = refresh_at - - items: list[ListItem[Any]] = [] - - # Start new session (primary action) - items.append( - ListItem( - value="start_session", - label="New session", - description="", - ) - ) - - # Resume last session (quick action) - try: - recent_session = sessions.get_most_recent() - if recent_session: - workspace = recent_session.get("workspace", "") - workspace_name = workspace.split("/")[-1] if workspace else "unknown" - last_used = recent_session.get("last_used") - last_used_display = "" - if last_used: - try: - dt = datetime.fromisoformat(last_used) - last_used_display = sessions.format_relative_time(dt) - except ValueError: - last_used_display = last_used - # Build middot-separated description for scannability - desc_parts = [workspace_name] - if recent_session.get("branch"): - desc_parts.append(str(recent_session.get("branch"))) - if last_used_display: - desc_parts.append(last_used_display) - items.append( - ListItem( - value={"_action": "resume_last_session", "session": recent_session}, - label="Resume last", - description=" · ".join(desc_parts), - ) - ) - except Exception: - pass - - # Load current team info - try: - user_config = config.load_user_config() - team = user_config.get("selected_profile") - org_source = user_config.get("organization_source") - - if team: - items.append( - ListItem( - value="team", - label=f"Team: {team}", - description="", - ) - ) - else: - items.append( - ListItem( - value="team", - label="Team: none", - description="", - ) - ) - - # Profile status (with inline indicators) - # Format: "Profile: saved · ✓ synced" following Team pattern - try: - workspace = Path(os.getcwd()) - profile_status = get_profile_status(workspace) - - if profile_status.exists: - # Build middot-separated status indicators - if profile_status.import_count > 0: - # Imports available takes priority - profile_label = f"Profile: saved · ↓ {profile_status.import_count} importable" - elif profile_status.has_drift: - profile_label = "Profile: saved · ◇ drifted" - else: - profile_label = "Profile: saved · ✓ synced" - items.append( - ListItem( - value="profile", - label=profile_label, - description="", - ) - ) - else: - items.append( - ListItem( - value="profile", - label="Profile: none", - description="", - ) - ) - except Exception: - pass # Don't show if profile check fails - - # Organization/sync status - if org_source and isinstance(org_source, dict): - org_url = org_source.get("url", "") - if org_url: - # Get org name, fallback to domain - org_name = None - try: - org_config = config.load_cached_org_config() - if org_config: - org_name = org_config.get("organization", {}).get("name") - except Exception: - org_name = None - - if not org_name: - # Extract domain as fallback - org_name = org_url.replace("https://", "").replace("http://", "").split("/")[0] - - items.append( - ListItem( - value="organization", - label=f"Organization: {org_name}", - description="", - ) - ) - elif user_config.get("standalone"): - items.append( - ListItem( - value="organization", - label="Mode: standalone", - description="", - ) - ) - - except Exception: - items.append( - ListItem( - value="config_error", - label="Config: error", - description="", - ) - ) - - # Container count (summary - details in Containers tab) - try: - containers = docker_core.list_scc_containers() - running = sum(1 for c in containers if "Up" in c.status) - total = len(containers) - items.append( - ListItem( - value="containers", - label=f"Containers: {running}/{total} running", - description="", - ) - ) - except Exception: - pass # Don't show if Docker unavailable - - # Settings shortcut - items.append( - ListItem( - value="settings", - label="Settings", - description="", - ) - ) - - return TabData( - tab=DashboardTab.STATUS, - title="Status", - items=items, - count_active=len(items), - count_total=len(items), - ) + """Load Status tab data showing quick actions and context.""" + tab_data = app_dashboard.load_status_tab_data(refresh_at=refresh_at) + return _to_tab_data(tab_data) def _load_containers_tab_data() -> TabData: - """Load Containers tab data showing SCC-managed containers. - - Returns: - TabData with container list items. - """ - from ...docker import core as docker_core - from ..formatters import format_container - - items: list[ListItem[Any]] = [] - - try: - containers = docker_core.list_scc_containers() - running_count = 0 - - for container in containers: - is_running = "Up" in container.status if container.status else False - if is_running: - running_count += 1 - - items.append(format_container(container)) - - if not items: - items.append( - ListItem( - value="no_containers", - label="No containers", - description="Press 'n' to start or run `scc start `", - ) - ) - - return TabData( - tab=DashboardTab.CONTAINERS, - title="Containers", - items=items, - count_active=running_count, - count_total=len(containers), - ) - - except Exception: - return TabData( - tab=DashboardTab.CONTAINERS, - title="Containers", - items=[ - ListItem( - value="error", - label="Error", - description="Unable to query Docker", - ) - ], - count_active=0, - count_total=0, - ) + """Load Containers tab data showing SCC-managed containers.""" + return _to_tab_data(app_dashboard.load_containers_tab_data()) def _load_sessions_tab_data() -> TabData: - """Load Sessions tab data showing recent Claude sessions. - - Returns: - TabData with session list items. Each ListItem.value contains - the raw session dict for access in the details pane. - """ - from ... import sessions - - items: list[ListItem[dict[str, Any]]] = [] - - try: - recent = sessions.list_recent(limit=20) - - for session in recent: - name = session.get("name", "Unnamed") - desc_parts = [] - - if session.get("team"): - desc_parts.append(str(session["team"])) - if session.get("branch"): - desc_parts.append(str(session["branch"])) - if session.get("last_used"): - desc_parts.append(str(session["last_used"])) - - # Store full session dict for details pane access - # Use middot separators for scannability - items.append( - ListItem( - value=session, - label=name, - description=" · ".join(desc_parts), - ) - ) - - if not items: - # Placeholder with sentinel dict (startable: True enables Enter action) - items.append( - ListItem( - value={"_placeholder": "no_sessions", "_startable": True}, - label="No sessions", - description="Press Enter to start", - ) - ) - - return TabData( - tab=DashboardTab.SESSIONS, - title="Sessions", - items=items, - count_active=len(recent), - count_total=len(recent), - ) - - except Exception: - return TabData( - tab=DashboardTab.SESSIONS, - title="Sessions", - items=[ - ListItem( - value="error", - label="Error", - description="Unable to load sessions", - ) - ], - count_active=0, - count_total=0, - ) + """Load Sessions tab data showing recent Claude sessions.""" + return _to_tab_data(app_dashboard.load_sessions_tab_data()) def _load_worktrees_tab_data(verbose: bool = False) -> TabData: - """Load Worktrees tab data showing git worktrees. - - Worktrees are loaded from the current working directory if it's a git repo. - - Args: - verbose: If True, fetch git status for each worktree (slower but shows - staged/modified/untracked counts with +N/!N/?N indicators). - - Returns: - TabData with worktree list items. - """ - import os - from pathlib import Path - - from ... import git - - items: list[ListItem[str]] = [] - - try: - cwd = Path(os.getcwd()) - worktrees = git.list_worktrees(cwd) - current_count = 0 - - # If verbose, fetch status for each worktree - if verbose: - for wt in worktrees: - staged, modified, untracked, timed_out = git.get_worktree_status(wt.path) - wt.staged_count = staged - wt.modified_count = modified - wt.untracked_count = untracked - wt.status_timed_out = timed_out - wt.has_changes = (staged + modified + untracked) > 0 + """Load Worktrees tab data showing git worktrees.""" + return _to_tab_data(app_dashboard.load_worktrees_tab_data(verbose=verbose)) - for wt in worktrees: - if wt.is_current: - current_count += 1 - desc_parts = [] - if wt.branch: - desc_parts.append(git.get_display_branch(wt.branch)) - - # Show status markers when verbose - if verbose: - if wt.status_timed_out: - desc_parts.append("status timeout") - else: - status_parts = [] - if wt.staged_count > 0: - status_parts.append(f"+{wt.staged_count}") - if wt.modified_count > 0: - status_parts.append(f"!{wt.modified_count}") - if wt.untracked_count > 0: - status_parts.append(f"?{wt.untracked_count}") - if status_parts: - desc_parts.append(" ".join(status_parts)) - elif not wt.has_changes: - desc_parts.append("clean") - elif wt.has_changes: - desc_parts.append("modified") - - if wt.is_current: - desc_parts.append("(current)") - - items.append( - ListItem( - value=wt.path, - label=Path(wt.path).name, - description=" ".join(desc_parts), - ) - ) - - if not items: - items.append( - ListItem( - value="no_worktrees", - label="No worktrees", - description="Press w for recent · i to init · c to clone", - ) - ) - - return TabData( - tab=DashboardTab.WORKTREES, - title="Worktrees", - items=items, - count_active=current_count, - count_total=len(worktrees), - ) - - except Exception: - return TabData( - tab=DashboardTab.WORKTREES, - title="Worktrees", - items=[ - ListItem( - value="no_git", - label="Not available", - description="Press w for recent · i to init · c to clone", - ) - ], - count_active=0, - count_total=0, - ) +def _load_all_tab_data(verbose_worktrees: bool = False) -> dict[DashboardTab, TabData]: + """Load data for all dashboard tabs.""" + return { + tab: _to_tab_data(tab_data) + for tab, tab_data in app_dashboard.load_all_tab_data(verbose_worktrees).items() + } -def _load_all_tab_data(verbose_worktrees: bool = False) -> dict[DashboardTab, TabData]: - """Load data for all dashboard tabs. +def _to_tab_data(tab_data: app_dashboard.DashboardTabData) -> TabData: + items = [_to_list_item(item) for item in tab_data.items] + return TabData( + tab=tab_data.tab, + title=tab_data.title, + items=items, + count_active=tab_data.count_active, + count_total=tab_data.count_total, + ) - Args: - verbose_worktrees: If True, fetch git status for each worktree - (shows +N/!N/?N indicators but takes longer). - Returns: - Dictionary mapping each tab to its data. - """ - return { - DashboardTab.STATUS: _load_status_tab_data(), - DashboardTab.CONTAINERS: _load_containers_tab_data(), - DashboardTab.SESSIONS: _load_sessions_tab_data(), - DashboardTab.WORKTREES: _load_worktrees_tab_data(verbose=verbose_worktrees), - } +def _to_list_item(item: app_dashboard.DashboardItem) -> ListItem[app_dashboard.DashboardItem]: + return ListItem( + value=item, + label=item.label, + description=item.description, + ) diff --git a/src/scc_cli/ui/dashboard/models.py b/src/scc_cli/ui/dashboard/models.py index bef4cbc..2641de2 100644 --- a/src/scc_cli/ui/dashboard/models.py +++ b/src/scc_cli/ui/dashboard/models.py @@ -1,56 +1,27 @@ """Data models for the dashboard module. -This module contains the core data structures used by the dashboard: -- DashboardTab: Enum for available tabs +This module contains the UI data structures used by the dashboard: - TabData: Content for a single tab - DashboardState: State management for the dashboard -These models are intentionally simple dataclasses with no external dependencies -beyond the UI layer, enabling clean separation and testability. +Dashboard tab identities and view models live in the application layer, keeping +UI models focused on rendering and navigation. """ from __future__ import annotations from collections.abc import Sequence from dataclasses import dataclass -from enum import Enum, auto -from typing import Any -from ..list_screen import ListItem, ListState - - -class DashboardTab(Enum): - """Available dashboard tabs. - - Each tab represents a major resource category in SCC. - Tabs are displayed in definition order (Status first, Worktrees last). - """ - - STATUS = auto() - CONTAINERS = auto() - SESSIONS = auto() - WORKTREES = auto() - - @property - def display_name(self) -> str: - """Human-readable name for display in chrome.""" - names = { - DashboardTab.STATUS: "Status", - DashboardTab.CONTAINERS: "Containers", - DashboardTab.SESSIONS: "Sessions", - DashboardTab.WORKTREES: "Worktrees", - } - return names[self] - - -# Ordered list for tab cycling -TAB_ORDER: tuple[DashboardTab, ...] = ( - DashboardTab.STATUS, - DashboardTab.CONTAINERS, - DashboardTab.SESSIONS, - DashboardTab.WORKTREES, +from scc_cli.application.dashboard import ( + TAB_ORDER, + DashboardItem, + DashboardTab, + PlaceholderItem, ) +from ..list_screen import ListItem, ListState + @dataclass class TabData: @@ -59,17 +30,14 @@ class TabData: Attributes: tab: The tab identifier. title: Display title for the tab content area. - items: List items to display in this tab. Value type varies by tab: - - Containers: ContainerInfo (preferred) or str (container ID) - - Worktrees: str (worktree name) - - Sessions: dict[str, Any] (full session data for details pane) + items: List items to display in this tab. count_active: Number of active items (e.g., running containers). count_total: Total number of items. """ tab: DashboardTab title: str - items: Sequence[ListItem[Any]] + items: Sequence[ListItem[DashboardItem]] count_active: int count_total: int @@ -100,7 +68,7 @@ class DashboardState: active_tab: DashboardTab tabs: dict[DashboardTab, TabData] - list_state: ListState[str] + list_state: ListState[DashboardItem] status_message: str | None = None details_open: bool = False help_visible: bool = False @@ -115,12 +83,7 @@ def current_tab_data(self) -> TabData: def is_placeholder_selected(self) -> bool: """Check if the current selection is a placeholder row. - Placeholder rows represent empty states or errors (e.g., "No containers", - "Error loading sessions") and shouldn't show details. - - Placeholders can be identified by: - - String value matching known placeholder names (containers, worktrees) - - Dict value with "_placeholder" key (sessions) + Placeholder rows represent empty states or errors and shouldn't show details. Returns: True if current item is a placeholder, False otherwise. @@ -129,25 +92,7 @@ def is_placeholder_selected(self) -> bool: if not current: return True # No item = treat as placeholder - # Known placeholder string values from tab data loaders - placeholder_values = { - "no_containers", - "no_sessions", - "no_worktrees", - "no_git", - "error", - "config_error", - } - - # Check string placeholders (must be string type first - dicts are unhashable) - if isinstance(current.value, str) and current.value in placeholder_values: - return True - - # Check dict placeholders (sessions tab uses dicts) - if isinstance(current.value, dict) and "_placeholder" in current.value: - return True - - return False + return isinstance(current.value, PlaceholderItem) def switch_tab(self, tab: DashboardTab) -> DashboardState: """Create new state with different active tab. diff --git a/src/scc_cli/ui/dashboard/orchestrator.py b/src/scc_cli/ui/dashboard/orchestrator.py index 7246afb..4e7d625 100644 --- a/src/scc_cli/ui/dashboard/orchestrator.py +++ b/src/scc_cli/ui/dashboard/orchestrator.py @@ -19,6 +19,8 @@ if TYPE_CHECKING: from rich.console import Console +from scc_cli.application import dashboard as app_dashboard + from ...confirm import Confirm from ..chrome import print_with_layout from ..keys import ( @@ -43,8 +45,8 @@ ) from ..list_screen import ListState from ._dashboard import Dashboard -from .loaders import _load_all_tab_data -from .models import DashboardState, DashboardTab +from .loaders import _to_tab_data +from .models import DashboardState def run_dashboard() -> None: @@ -70,219 +72,263 @@ def run_dashboard() -> None: _show_onboarding_banner() scc_config.mark_onboarding_seen() - # Track which tab to restore after flow (uses .name for stability) - restore_tab: str | None = None - # Toast message to show on next dashboard iteration (e.g., "Start cancelled") - toast_message: str | None = None - # Track verbose worktree status display (persists across reloads) - verbose_worktrees: bool = False + flow_state = app_dashboard.DashboardFlowState() while True: - # Load real data for all tabs (pass verbose flag for worktrees) - tabs = _load_all_tab_data(verbose_worktrees=verbose_worktrees) - - # Determine initial tab (restore previous or default to STATUS) - initial_tab = DashboardTab.STATUS - if restore_tab: - # Find tab by name (stable identifier) - for tab in DashboardTab: - if tab.name == restore_tab: - initial_tab = tab - break - restore_tab = None # Clear after use - + view, flow_state = app_dashboard.build_dashboard_view( + flow_state, + app_dashboard.load_all_tab_data, + ) + tabs = {tab: _to_tab_data(tab_data) for tab, tab_data in view.tabs.items()} state = DashboardState( - active_tab=initial_tab, + active_tab=view.active_tab, tabs=tabs, - list_state=ListState(items=tabs[initial_tab].items), - status_message=toast_message, # Show any pending toast - verbose_worktrees=verbose_worktrees, # Preserve verbose state + list_state=ListState(items=tabs[view.active_tab].items), + status_message=view.status_message, + verbose_worktrees=view.verbose_worktrees, ) - toast_message = None # Clear after use dashboard = Dashboard(state) try: dashboard.run() - break # Normal exit (q or Esc) + break except TeamSwitchRequested: - # User pressed 't' - show team picker then reload dashboard - _handle_team_switch() - # Loop continues to reload dashboard with new team + flow_state, should_exit = _apply_event(flow_state, app_dashboard.TeamSwitchEvent()) + if should_exit: + break except StartRequested as start_req: - # User pressed Enter on startable placeholder - # Execute start flow OUTSIDE Rich Live (critical: avoids nested Live) - restore_tab = start_req.return_to - result = _handle_start_flow(start_req.reason) - - if result is None: - # User pressed q: quit app entirely + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.StartFlowEvent( + return_to=_resolve_tab(start_req.return_to), + reason=start_req.reason, + ), + ) + if should_exit: break - if result is False: - # User pressed Esc: go back to dashboard, show toast - toast_message = "Start cancelled" - # Loop continues to reload dashboard with fresh data - except RefreshRequested as refresh_req: - # User pressed 'r' - just reload data - restore_tab = refresh_req.return_to - # Loop continues with fresh data (no additional action needed) + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.RefreshEvent(return_to=_resolve_tab(refresh_req.return_to)), + ) + if should_exit: + break except SessionResumeRequested as resume_req: - # User pressed Enter on a session item → resume it - restore_tab = resume_req.return_to - success = _handle_session_resume(resume_req.session) - - if not success: - # Resume failed (e.g., missing workspace) - show toast - toast_message = "Session resume failed" - else: - # Successfully launched - exit dashboard - # (container is running, user is now in Claude) + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.SessionResumeEvent( + return_to=_resolve_tab(resume_req.return_to), + session=resume_req.session, + ), + ) + if should_exit: break except StatuslineInstallRequested as statusline_req: - # User pressed 'y' on statusline row - install statusline - restore_tab = statusline_req.return_to - success = _handle_statusline_install() - - if success: - toast_message = "Statusline installed successfully" - else: - toast_message = "Statusline installation failed" - # Loop continues to reload dashboard with fresh data + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.StatuslineInstallEvent( + return_to=_resolve_tab(statusline_req.return_to) + ), + ) + if should_exit: + break except RecentWorkspacesRequested as recent_req: - # User pressed 'w' - show recent workspaces picker - restore_tab = recent_req.return_to - selected_workspace = _handle_recent_workspaces() - - if selected_workspace is None: - # User cancelled or quit - toast_message = "Cancelled" - elif selected_workspace: - # User selected a workspace - start session in it - # For now, just show message; full integration comes later - toast_message = f"Selected: {selected_workspace}" - # Loop continues to reload dashboard + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.RecentWorkspacesEvent(return_to=_resolve_tab(recent_req.return_to)), + ) + if should_exit: + break except GitInitRequested as init_req: - # User pressed 'i' - initialize git repo - restore_tab = init_req.return_to - success = _handle_git_init() - - if success: - toast_message = "Git repository initialized" - else: - toast_message = "Git init cancelled or failed" - # Loop continues to reload dashboard + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.GitInitEvent(return_to=_resolve_tab(init_req.return_to)), + ) + if should_exit: + break except CreateWorktreeRequested as create_req: - # User pressed 'c' - create worktree or clone - restore_tab = create_req.return_to - - if create_req.is_git_repo: - success = _handle_create_worktree() - if success: - toast_message = "Worktree created" - else: - toast_message = "Worktree creation cancelled" - else: - success = _handle_clone() - if success: - toast_message = "Repository cloned" - else: - toast_message = "Clone cancelled" - # Loop continues to reload dashboard + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.CreateWorktreeEvent( + return_to=_resolve_tab(create_req.return_to), + is_git_repo=create_req.is_git_repo, + ), + ) + if should_exit: + break except VerboseToggleRequested as verbose_req: - # User pressed 'v' - toggle verbose worktree status - restore_tab = verbose_req.return_to - verbose_worktrees = verbose_req.verbose - toast_message = "Status on" if verbose_worktrees else "Status off" - # Loop continues with new verbose setting + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.VerboseToggleEvent( + return_to=_resolve_tab(verbose_req.return_to), + verbose=verbose_req.verbose, + ), + ) + if should_exit: + break except SettingsRequested as settings_req: - # User pressed 's' - open settings and maintenance screen - restore_tab = settings_req.return_to - settings_result = _handle_settings() - - if settings_result: - toast_message = settings_result # Success message from settings action - # Loop continues to reload dashboard + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.SettingsEvent(return_to=_resolve_tab(settings_req.return_to)), + ) + if should_exit: + break except ContainerStopRequested as container_req: - restore_tab = container_req.return_to - success, message = _handle_container_stop( - container_req.container_id, - container_req.container_name, - ) - toast_message = ( - message if message else ("Container stopped" if success else "Stop failed") + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.ContainerStopEvent( + return_to=_resolve_tab(container_req.return_to), + container_id=container_req.container_id, + container_name=container_req.container_name, + ), ) + if should_exit: + break except ContainerResumeRequested as container_req: - restore_tab = container_req.return_to - success, message = _handle_container_resume( - container_req.container_id, - container_req.container_name, - ) - toast_message = ( - message if message else ("Container resumed" if success else "Resume failed") + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.ContainerResumeEvent( + return_to=_resolve_tab(container_req.return_to), + container_id=container_req.container_id, + container_name=container_req.container_name, + ), ) + if should_exit: + break except ContainerRemoveRequested as container_req: - restore_tab = container_req.return_to - success, message = _handle_container_remove( - container_req.container_id, - container_req.container_name, - ) - toast_message = ( - message if message else ("Container removed" if success else "Remove failed") + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.ContainerRemoveEvent( + return_to=_resolve_tab(container_req.return_to), + container_id=container_req.container_id, + container_name=container_req.container_name, + ), ) + if should_exit: + break except ProfileMenuRequested as profile_req: - # User pressed 'p' - show profile quick menu - restore_tab = profile_req.return_to - profile_result = _handle_profile_menu() - - if profile_result: - toast_message = profile_result # Success message from profile action + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.ProfileMenuEvent(return_to=_resolve_tab(profile_req.return_to)), + ) + if should_exit: + break except SandboxImportRequested as import_req: - # User pressed 'i' - import sandbox plugins - restore_tab = import_req.return_to - import_result = _handle_sandbox_import() - - if import_result: - toast_message = import_result # Success message from import + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.SandboxImportEvent(return_to=_resolve_tab(import_req.return_to)), + ) + if should_exit: + break except ContainerActionMenuRequested as action_req: - # User triggered container action menu (Enter or Space on container) - restore_tab = action_req.return_to - action_result = _handle_container_action_menu( - action_req.container_id, action_req.container_name + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.ContainerActionMenuEvent( + return_to=_resolve_tab(action_req.return_to), + container_id=action_req.container_id, + container_name=action_req.container_name, + ), ) - - if action_result: - toast_message = action_result + if should_exit: + break except SessionActionMenuRequested as action_req: - # User triggered session action menu (Enter or Space on session) - restore_tab = action_req.return_to - action_result = _handle_session_action_menu(action_req.session) - - if action_result: - toast_message = action_result + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.SessionActionMenuEvent( + return_to=_resolve_tab(action_req.return_to), + session=action_req.session, + ), + ) + if should_exit: + break except WorktreeActionMenuRequested as action_req: - # User triggered worktree action menu (Enter or Space on worktree) - restore_tab = action_req.return_to - action_result = _handle_worktree_action_menu(action_req.worktree_path) + flow_state, should_exit = _apply_event( + flow_state, + app_dashboard.WorktreeActionMenuEvent( + return_to=_resolve_tab(action_req.return_to), + worktree_path=action_req.worktree_path, + ), + ) + if should_exit: + break - if action_result: - toast_message = action_result + +def _resolve_tab(tab_name: str | None) -> app_dashboard.DashboardTab: + if not tab_name: + return app_dashboard.DashboardTab.STATUS + try: + return app_dashboard.DashboardTab[tab_name] + except KeyError: + return app_dashboard.DashboardTab.STATUS + + +def _apply_event( + state: app_dashboard.DashboardFlowState, + event: app_dashboard.DashboardEvent, +) -> tuple[app_dashboard.DashboardFlowState, bool]: + step = app_dashboard.handle_dashboard_event(state, event) + if isinstance(step, app_dashboard.DashboardFlowOutcome): + return step.state, step.exit_dashboard + result = _run_effect(step.effect) + outcome = app_dashboard.apply_dashboard_effect_result(step.state, step.effect, result) + return outcome.state, outcome.exit_dashboard + + +def _run_effect(effect: app_dashboard.DashboardEffect) -> object: + if isinstance(effect, app_dashboard.TeamSwitchEvent): + _handle_team_switch() + return None + if isinstance(effect, app_dashboard.StartFlowEvent): + return _handle_start_flow(effect.reason) + if isinstance(effect, app_dashboard.SessionResumeEvent): + return _handle_session_resume(effect.session) + if isinstance(effect, app_dashboard.StatuslineInstallEvent): + return _handle_statusline_install() + if isinstance(effect, app_dashboard.RecentWorkspacesEvent): + return _handle_recent_workspaces() + if isinstance(effect, app_dashboard.GitInitEvent): + return _handle_git_init() + if isinstance(effect, app_dashboard.CreateWorktreeEvent): + if effect.is_git_repo: + return _handle_create_worktree() + return _handle_clone() + if isinstance(effect, app_dashboard.SettingsEvent): + return _handle_settings() + if isinstance(effect, app_dashboard.ContainerStopEvent): + return _handle_container_stop(effect.container_id, effect.container_name) + if isinstance(effect, app_dashboard.ContainerResumeEvent): + return _handle_container_resume(effect.container_id, effect.container_name) + if isinstance(effect, app_dashboard.ContainerRemoveEvent): + return _handle_container_remove(effect.container_id, effect.container_name) + if isinstance(effect, app_dashboard.ProfileMenuEvent): + return _handle_profile_menu() + if isinstance(effect, app_dashboard.SandboxImportEvent): + return _handle_sandbox_import() + if isinstance(effect, app_dashboard.ContainerActionMenuEvent): + return _handle_container_action_menu(effect.container_id, effect.container_name) + if isinstance(effect, app_dashboard.SessionActionMenuEvent): + return _handle_session_action_menu(effect.session) + if isinstance(effect, app_dashboard.WorktreeActionMenuEvent): + return _handle_worktree_action_menu(effect.worktree_path) + msg = f"Unsupported dashboard effect: {effect}" + raise ValueError(msg) def _prepare_for_nested_ui(console: Console) -> None: @@ -372,27 +418,8 @@ def _handle_team_switch() -> None: ) -def _handle_start_flow(reason: str) -> bool | None: - """Handle start flow request from dashboard. - - Runs the interactive start wizard and launches a sandbox if user completes it. - Executes OUTSIDE Rich Live context (the dashboard has already exited - via the exception unwind before this is called). - - Three-state return contract: - - True: Sandbox launched successfully - - False: User pressed Esc (back to dashboard) - - None: User pressed q (quit app entirely) - - Args: - reason: Why the start flow was triggered. Can be: - - "no_containers", "no_sessions": Empty state triggers (show wizard) - - "worktree:/path/to/worktree": Start session in specific worktree - - Returns: - True if wizard completed successfully, False if user wants to go back, - None if user wants to quit entirely. - """ +def _handle_start_flow(reason: str) -> app_dashboard.StartFlowResult: + """Handle start flow request from dashboard.""" from ...commands.launch import run_start_wizard_flow console = get_err_console() @@ -415,22 +442,12 @@ def _handle_start_flow(reason: str) -> bool | None: # Run the wizard with allow_back=True for dashboard context # Returns: True (success), False (Esc/back), None (q/quit) - return run_start_wizard_flow(skip_quick_resume=skip_quick_resume, allow_back=True) + result = run_start_wizard_flow(skip_quick_resume=skip_quick_resume, allow_back=True) + return app_dashboard.StartFlowResult.from_legacy(result) -def _handle_worktree_start(worktree_path: str) -> bool | None: - """Handle starting a session in a specific worktree. - - Launches a new session directly in the selected worktree, bypassing - the wizard workspace selection since the user already selected a worktree. - - Args: - worktree_path: Absolute path to the worktree directory. - - Returns: - True if session started successfully, False if cancelled, - None if user wants to quit entirely. - """ +def _handle_worktree_start(worktree_path: str) -> app_dashboard.StartFlowResult: + """Handle starting a session in a specific worktree.""" from pathlib import Path from rich.status import Status @@ -453,7 +470,7 @@ def _handle_worktree_start(worktree_path: str) -> bool | None: # Validate workspace exists if not workspace_path.exists(): console.print(f"[red]Worktree no longer exists: {worktree_path}[/red]") - return False + return app_dashboard.StartFlowResult.from_legacy(False) console.print(f"[cyan]Starting session in:[/cyan] {workspace_name}") console.print() @@ -467,7 +484,7 @@ def _handle_worktree_start(worktree_path: str) -> bool | None: resolved_path = _validate_and_resolve_workspace(str(workspace_path)) if resolved_path is None: console.print("[red]Workspace validation failed[/red]") - return False + return app_dashboard.StartFlowResult.from_legacy(False) workspace_path = resolved_path # Get current team from config @@ -500,14 +517,14 @@ def _handle_worktree_start(worktree_path: str) -> bool | None: fresh=False, plugin_settings=plugin_settings, ) - return True + return app_dashboard.StartFlowResult.from_legacy(True) except KeyboardInterrupt: console.print("\n[yellow]Cancelled[/yellow]") - return False + return app_dashboard.StartFlowResult.from_legacy(False) except Exception as e: console.print(f"[red]Error starting session: {e}[/red]") - return False + return app_dashboard.StartFlowResult.from_legacy(False) def _handle_session_resume(session: dict[str, Any]) -> bool: @@ -589,11 +606,11 @@ def _handle_session_resume(session: dict[str, Any]) -> bool: # Show resume info workspace_name = workspace_path.name - console.print(f"[cyan]Resuming session:[/cyan] {workspace_name}") + print_with_layout(console, f"[cyan]Resuming session:[/cyan] {workspace_name}") if team: - console.print(f"[dim]Team: {team}[/dim]") + print_with_layout(console, f"[dim]Team: {team}[/dim]") if current_branch: - console.print(f"[dim]Branch: {current_branch}[/dim]") + print_with_layout(console, f"[dim]Branch: {current_branch}[/dim]") console.print() # Launch sandbox with resume flag @@ -1028,9 +1045,11 @@ def _handle_worktree_action_menu(worktree_path: str) -> str | None: if selected == "start": # Reuse worktree start flow directly result = _handle_worktree_start(worktree_path) - if result is None: + if result.decision is app_dashboard.StartFlowDecision.QUIT: return "Cancelled" - return "Started session" if result else "Start cancelled" + if result.decision is app_dashboard.StartFlowDecision.LAUNCHED: + return "Started session" + return "Start cancelled" if selected == "open_shell": console.print(f"[cyan]cd {worktree_path}[/cyan]") diff --git a/src/scc_cli/ui/settings.py b/src/scc_cli/ui/settings.py index f2cb333..8a2d1e4 100644 --- a/src/scc_cli/ui/settings.py +++ b/src/scc_cli/ui/settings.py @@ -13,9 +13,7 @@ from __future__ import annotations -from dataclasses import dataclass -from enum import Enum, auto -from typing import TYPE_CHECKING +from pathlib import Path import readchar from rich.console import Group, RenderableType @@ -25,181 +23,37 @@ from rich.table import Table from rich.text import Text -from .. import config -from ..config import get_selected_profile -from ..console import get_err_console -from ..core.maintenance import ( - RiskTier, - clear_cache, - clear_contexts, - delete_all_sessions, - factory_reset, - get_paths, - get_total_size, - preview_operation, - prune_containers, - prune_sessions, - reset_config, - reset_exceptions, +from scc_cli.application import settings as app_settings +from scc_cli.application.settings import ( + ConfirmationKind, + DoctorInfo, + PathsInfo, + ProfileDiffInfo, + ProfileSyncMode, + ProfileSyncPathPayload, + ProfileSyncPayload, + ProfileSyncPreview, + ProfileSyncResult, + SettingsAction, + SettingsActionResult, + SettingsActionStatus, + SettingsChangeRequest, + SettingsContext, + SettingsValidationRequest, + SettingsValidationResult, + SupportBundleInfo, + SupportBundlePayload, + VersionInfo, +) +from scc_cli.application.settings import ( + SettingsCategory as Category, ) + +from ..console import get_err_console +from ..maintenance import MaintenancePreview, RiskTier from ..theme import Indicators from .chrome import apply_layout, get_layout_metrics -if TYPE_CHECKING: - from pathlib import Path - - -class Category(Enum): - """Categories for the settings screen.""" - - MAINTENANCE = auto() - PROFILES = auto() - DIAGNOSTICS = auto() - ABOUT = auto() - - -@dataclass -class SettingsAction: - """Represents a settings action with its metadata. - - Attributes: - id: Unique identifier for the action. - label: Display label for the action. - description: Brief description of what the action does. - risk_tier: Risk level (affects confirmation behavior). - category: Which category this action belongs to. - """ - - id: str - label: str - description: str - risk_tier: RiskTier - category: Category - - -# Define all available settings actions -SETTINGS_ACTIONS: list[SettingsAction] = [ - # Maintenance actions (Tier 0 = Safe) - SettingsAction( - id="clear_cache", - label="Clear cache", - description="Remove regenerable cache files", - risk_tier=RiskTier.SAFE, - category=Category.MAINTENANCE, - ), - # Tier 1 = Changes State - SettingsAction( - id="clear_contexts", - label="Clear contexts", - description="Clear recent work contexts", - risk_tier=RiskTier.CHANGES_STATE, - category=Category.MAINTENANCE, - ), - SettingsAction( - id="prune_containers", - label="Prune containers", - description="Remove stopped Docker containers", - risk_tier=RiskTier.CHANGES_STATE, - category=Category.MAINTENANCE, - ), - SettingsAction( - id="prune_sessions", - label="Prune sessions", - description="Remove old sessions (keeps recent)", - risk_tier=RiskTier.CHANGES_STATE, - category=Category.MAINTENANCE, - ), - # Tier 2 = Destructive - SettingsAction( - id="reset_exceptions", - label="Reset exceptions", - description="Clear all policy exceptions", - risk_tier=RiskTier.DESTRUCTIVE, - category=Category.MAINTENANCE, - ), - SettingsAction( - id="delete_sessions", - label="Delete all sessions", - description="Remove entire session history", - risk_tier=RiskTier.DESTRUCTIVE, - category=Category.MAINTENANCE, - ), - SettingsAction( - id="reset_config", - label="Reset configuration", - description="Reset to defaults (requires setup)", - risk_tier=RiskTier.DESTRUCTIVE, - category=Category.MAINTENANCE, - ), - # Tier 3 = Factory Reset - SettingsAction( - id="factory_reset", - label="Factory reset", - description="Remove all SCC data", - risk_tier=RiskTier.FACTORY_RESET, - category=Category.MAINTENANCE, - ), - # Profiles (Tier 0 = Safe for read-only, Tier 1 for state changes) - SettingsAction( - id="profile_save", - label="Save profile", - description="Capture current workspace settings", - risk_tier=RiskTier.SAFE, - category=Category.PROFILES, - ), - SettingsAction( - id="profile_apply", - label="Apply profile", - description="Restore saved settings to workspace", - risk_tier=RiskTier.CHANGES_STATE, - category=Category.PROFILES, - ), - SettingsAction( - id="profile_diff", - label="Show diff", - description="Compare profile vs workspace", - risk_tier=RiskTier.SAFE, - category=Category.PROFILES, - ), - SettingsAction( - id="profile_sync", - label="Sync profiles", - description="Export/import via repo", - risk_tier=RiskTier.SAFE, # Opens picker with internal confirmations - category=Category.PROFILES, - ), - # Diagnostics - SettingsAction( - id="run_doctor", - label="Run doctor", - description="Check prerequisites and system health", - risk_tier=RiskTier.SAFE, - category=Category.DIAGNOSTICS, - ), - SettingsAction( - id="generate_support_bundle", - label="Generate support bundle", - description="Create diagnostic bundle for troubleshooting", - risk_tier=RiskTier.SAFE, - category=Category.DIAGNOSTICS, - ), - # About - SettingsAction( - id="show_paths", - label="Show paths", - description="Show SCC file locations", - risk_tier=RiskTier.SAFE, - category=Category.ABOUT, - ), - SettingsAction( - id="show_version", - label="Show version", - description="Show build info and CLI version", - risk_tier=RiskTier.SAFE, - category=Category.ABOUT, - ), -] - def _get_risk_badge(tier: RiskTier) -> Text: """Get a color-coded risk badge for display. @@ -220,11 +74,6 @@ def _get_risk_badge(tier: RiskTier) -> Text: return Text("UNKNOWN") -def _get_actions_for_category(category: Category) -> list[SettingsAction]: - """Get all actions for a given category.""" - return [a for a in SETTINGS_ACTIONS if a.category == category] - - def _format_bytes(size_bytes: int) -> str: """Format bytes as human-readable string.""" if size_bytes == 0: @@ -252,6 +101,8 @@ def __init__(self, initial_category: Category | None = None) -> None: initial_category: Optional category to start on. Defaults to MAINTENANCE. """ self._console = get_err_console() + self._context = SettingsContext(workspace=Path.cwd()) + self._view_model = app_settings.load_settings_state(self._context) self._active_category = initial_category or Category.MAINTENANCE self._cursor = 0 self._last_result: str | None = None # Last action result (receipt line) @@ -260,6 +111,13 @@ def __init__(self, initial_category: Category | None = None) -> None: self._show_preview = False # Preview panel for Tier 1/2 actions self._live: Live | None = None # Reference to Live context + def _refresh_view_model(self) -> None: + self._view_model = app_settings.load_settings_state(self._context) + + def _actions_for_category(self, category: Category | None = None) -> list[SettingsAction]: + target = category or self._active_category + return list(self._view_model.actions_by_category.get(target, [])) + def run(self) -> str | None: """Run the interactive settings screen. @@ -296,7 +154,7 @@ def _handle_key(self, key: str, live: Live) -> bool | None: Returns: True to refresh, False to exit, None for no-op. """ - actions = _get_actions_for_category(self._active_category) + actions = self._actions_for_category() # Clear last result on navigation (keep visible for one action cycle) if key in (readchar.key.UP, "k", readchar.key.DOWN, "j"): @@ -377,168 +235,222 @@ def _handle_key(self, key: str, live: Live) -> bool | None: return None def _execute_action(self, action: SettingsAction) -> str | None: - """Execute a settings action with appropriate confirmation. - - Returns: - Success message if action was performed, None if cancelled. - """ - # Exit Live context for confirmation prompts + """Execute a settings action with appropriate confirmation.""" self._console.print() - # Tier 0 (Safe) - no confirmation needed - if action.risk_tier == RiskTier.SAFE: - return self._run_action(action) + if action.id == "profile_save": + self._console.print("[bold]Save Personal Profile[/bold]") + self._console.print() + elif action.id == "profile_apply": + self._console.print("[bold]Apply Personal Profile[/bold]") + self._console.print() - # Tier 1-2 - Y/N confirmation with affected paths from data - if action.risk_tier in (RiskTier.CHANGES_STATE, RiskTier.DESTRUCTIVE): - # Get preview data to show affected paths - try: - preview = preview_operation(action.id) - self._console.print(f"[yellow]{action.label}[/yellow]: {action.description}") - if preview.paths: - self._console.print("[dim]Affects:[/dim]") - for path in preview.paths[:3]: # Limit to 3 paths - self._console.print(f" {path}") - if len(preview.paths) > 3: - self._console.print(f" [dim](+{len(preview.paths) - 3} more)[/dim]") - if preview.item_count > 0: - self._console.print(f"[dim]Items:[/dim] {preview.item_count}") - if preview.bytes_estimate > 0: - self._console.print( - f"[dim]Size:[/dim] ~{_format_bytes(preview.bytes_estimate)}" - ) - if preview.backup_will_be_created: - self._console.print("[yellow]Backup will be created[/yellow]") - except Exception: - # Fall back to simple confirmation - self._console.print(f"[yellow]{action.label}[/yellow]: {action.description}") + if action.id == "profile_sync": + return self._profile_sync() - if not Confirm.ask("Proceed?"): - return None - return self._run_action(action) - - # Tier 3 (Factory Reset) - type to confirm with full impact from data - if action.risk_tier == RiskTier.FACTORY_RESET: - try: - preview = preview_operation(action.id) - paths_list = "\n".join(f" {p}" for p in preview.paths) - size_info = ( - f"\nTotal size: ~{_format_bytes(preview.bytes_estimate)}" - if preview.bytes_estimate > 0 - else "" - ) - content = ( - "[bold red]WARNING: Factory Reset[/bold red]\n\n" - "This will remove ALL SCC data:\n" - f"{paths_list}{size_info}\n\n" - "This action cannot be undone." - ) - except Exception: - content = ( - "[bold red]WARNING: Factory Reset[/bold red]\n\n" - "This will remove ALL SCC data including:\n" - " - Configuration files\n" - " - Session history\n" - " - Policy exceptions\n" - " - Cached data\n" - " - Work contexts\n\n" - "This action cannot be undone." - ) + if action.id == "generate_support_bundle": + return self._generate_support_bundle() - self._console.print(Panel(content, border_style="red")) - confirm = Prompt.ask( - "Type [bold red]RESET[/bold red] to confirm", - default="", + validation = app_settings.validate_settings( + SettingsValidationRequest( + action_id=action.id, + workspace=self._context.workspace, ) - if confirm.upper() != "RESET": - self._console.print("[dim]Cancelled[/dim]") - return None - return self._run_action(action) - - return None - - def _run_action(self, action: SettingsAction) -> str | None: - """Execute the actual action and return result message.""" - try: - match action.id: - case "clear_cache": - result = clear_cache() - return f"Cache cleared: {result.bytes_freed_human}" - - case "clear_contexts": - result = clear_contexts() - return f"Cleared {result.removed_count} contexts" - - case "prune_containers": - result = prune_containers(dry_run=False) - return f"Pruned {result.removed_count} containers" + ) + if validation and validation.error: + self._console.print(f"[yellow]{validation.error}[/yellow]") + Prompt.ask("[dim]Press Enter to continue[/dim]", default="") + return None - case "prune_sessions": - result = prune_sessions(older_than_days=30, keep_n=20, dry_run=False) - return f"Pruned {result.removed_count} sessions" + confirmed = self._confirm_action(action, validation) + if not confirmed: + return None - case "reset_exceptions": - result = reset_exceptions(scope="all") - return f"Reset {result.removed_count} exceptions" + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id=action.id, + workspace=self._context.workspace, + confirmed=bool(validation and validation.confirmation), + ) + ) + message = self._handle_action_result(result) + self._refresh_view_model() + return message + + def _confirm_action( + self, + action: SettingsAction, + validation: SettingsValidationResult | None, + ) -> bool: + if not validation or not validation.confirmation: + return True + if validation.confirmation == ConfirmationKind.CONFIRM: + return self._confirm_with_preview(action, validation) + if validation.confirmation == ConfirmationKind.TYPE_TO_CONFIRM: + return self._confirm_factory_reset(validation) + return True + + def _confirm_with_preview( + self, + action: SettingsAction, + validation: SettingsValidationResult, + ) -> bool: + detail = validation.detail + if isinstance(detail, ProfileSyncPreview): + self._render_profile_sync_preview(detail) + return Confirm.ask("Import now?", default=True) + + if validation.message and validation.message.startswith("Create directory?"): + return self._confirm_create_directory(validation.message) + + if isinstance(detail, MaintenancePreview): + self._render_maintenance_preview(action, detail) + else: + self._console.print(f"[yellow]{action.label}[/yellow]: {action.description}") - case "delete_sessions": - result = delete_all_sessions() - return f"Deleted {result.removed_count} sessions" + return Confirm.ask("Proceed?") - case "reset_config": - result = reset_config() - return "Configuration reset. Run 'scc setup' to reconfigure." + def _confirm_create_directory(self, message: str) -> bool: + from rich import box - case "factory_reset": - _results = factory_reset() # Returns list[ResetResult] - return "Factory reset complete. Run 'scc setup' to reconfigure." + path = message.replace("Create directory?", "").strip() + self._console.print() + panel = Panel( + f"[yellow]Path does not exist:[/yellow]\n {path}", + title="[cyan]Create[/cyan] Directory", + border_style="yellow", + box=box.ROUNDED, + padding=(1, 2), + ) + self._console.print(panel) + return Confirm.ask("[cyan]Create directory?[/cyan]", default=True) + + def _render_maintenance_preview( + self, action: SettingsAction, preview: MaintenancePreview + ) -> None: + self._console.print(f"[yellow]{action.label}[/yellow]: {action.description}") + if preview.paths: + self._console.print("[dim]Affects:[/dim]") + for path in preview.paths[:3]: + self._console.print(f" {path}") + if len(preview.paths) > 3: + self._console.print(f" [dim](+{len(preview.paths) - 3} more)[/dim]") + if preview.item_count > 0: + self._console.print(f"[dim]Items:[/dim] {preview.item_count}") + if preview.bytes_estimate > 0: + self._console.print(f"[dim]Size:[/dim] ~{_format_bytes(preview.bytes_estimate)}") + if preview.backup_will_be_created: + self._console.print("[yellow]Backup will be created[/yellow]") + + def _confirm_factory_reset(self, validation: SettingsValidationResult) -> bool: + if isinstance(validation.detail, MaintenancePreview): + paths_list = "\n".join(f" {path}" for path in validation.detail.paths) + size_info = ( + f"\nTotal size: ~{_format_bytes(validation.detail.bytes_estimate)}" + if validation.detail.bytes_estimate > 0 + else "" + ) + content = ( + "[bold red]WARNING: Factory Reset[/bold red]\n\n" + "This will remove ALL SCC data:\n" + f"{paths_list}{size_info}\n\n" + "This action cannot be undone." + ) + else: + content = ( + "[bold red]WARNING: Factory Reset[/bold red]\n\n" + "This will remove ALL SCC data including:\n" + " - Configuration files\n" + " - Session history\n" + " - Policy exceptions\n" + " - Cached data\n" + " - Work contexts\n\n" + "This action cannot be undone." + ) - case "profile_save": - return self._profile_save() + self._console.print(Panel(content, border_style="red")) + phrase = validation.required_phrase or "RESET" + confirm = Prompt.ask( + f"Type [bold red]{phrase}[/bold red] to confirm", + default="", + ) + if confirm.upper() != phrase: + self._console.print("[dim]Cancelled[/dim]") + return False + return True - case "profile_apply": - return self._profile_apply() + def _handle_action_result(self, result: SettingsActionResult) -> str | None: + if result.status == SettingsActionStatus.ERROR: + message = result.message or "Error" + self._console.print(f"[red]Error: {message}[/red]") + return None - case "profile_diff": - return self._profile_diff() + if result.details: + self._render_detail_lines(result) + elif result.message and result.detail is None: + if result.status == SettingsActionStatus.SUCCESS: + self._console.print(f"[green]✓[/green] {result.message}") + elif result.status == SettingsActionStatus.NOOP: + self._console.print(f"[yellow]{result.message}[/yellow]") + + if isinstance(result.detail, PathsInfo): + self._show_paths_info(result.detail) + elif isinstance(result.detail, VersionInfo): + self._show_version_info(result.detail) + elif isinstance(result.detail, ProfileDiffInfo): + self._profile_diff(result.detail) + elif isinstance(result.detail, ProfileSyncResult): + self._render_profile_sync_result(result.detail) + elif isinstance(result.detail, SupportBundleInfo): + self._render_support_bundle_result(result.detail) + elif isinstance(result.detail, DoctorInfo): + self._render_doctor_result(result.detail) - case "profile_sync": - return self._profile_sync() + if result.warnings: + for warning in result.warnings: + self._console.print(f"[yellow]![/yellow] {warning}") - case "run_doctor": - # Run doctor using core function (not Typer command) - from pathlib import Path + if result.needs_ack: + Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - from ..doctor.render import run_doctor as core_run_doctor + return result.message - self._console.print() - _doctor_result = core_run_doctor(workspace=Path.cwd()) - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None # No toast, doctor has its own output + def _render_detail_lines(self, result: SettingsActionResult) -> None: + if result.status == SettingsActionStatus.SUCCESS: + for line in result.details: + self._console.print(f"[green]✓[/green] {line}") + return - case "generate_support_bundle": - return self._generate_support_bundle() + if result.status == SettingsActionStatus.NOOP and result.details: + self._console.print(f"[yellow]{result.details[0]}[/yellow]") + for line in result.details[1:]: + self._console.print(f"[dim]{line}[/dim]") + return - case "show_paths": - self._show_paths_info() - return None # No toast + for line in result.details: + self._console.print(line) - case "show_version": - self._show_version_info() - return None # No toast + def _render_profile_sync_preview(self, preview: ProfileSyncPreview) -> None: + from rich import box - case _: - return None + lines = [f"[cyan]Import preview from {preview.repo_path}[/cyan]", ""] + lines.append(f" {preview.imported} profile(s) will be imported") + if preview.skipped > 0: + lines.append(f" {preview.skipped} profile(s) unchanged") - except Exception as e: - self._console.print(f"[red]Error: {e}[/red]") - return None + self._console.print() + self._console.print( + Panel( + "\n".join(lines), + title="[cyan]Sync[/cyan] Profiles", + border_style="bright_black", + box=box.ROUNDED, + padding=(1, 2), + ) + ) - def _show_paths_info(self) -> None: + def _show_paths_info(self, paths_info: PathsInfo) -> None: """Display SCC file paths information.""" - paths = get_paths() - total = get_total_size() - self._console.print() table = Table(title="SCC File Locations", box=None) table.add_column("Location", style="cyan") @@ -546,35 +458,25 @@ def _show_paths_info(self) -> None: table.add_column("Size", justify="right") table.add_column("Status") - for p in paths: - exists = "✓" if p.exists else "✗" - perms = p.permissions if p.exists else "-" + for path_info in paths_info.paths: + exists = "✓" if path_info.exists else "✗" + perms = path_info.permissions if path_info.exists else "-" table.add_row( - p.name, - str(p.path), - p.size_human if p.exists else "-", + path_info.name, + str(path_info.path), + path_info.size_human if path_info.exists else "-", f"{exists} {perms}", ) table.add_section() - table.add_row("Total", "", str(total), "") + table.add_row("Total", "", str(paths_info.total_size), "") self._console.print(table) self._console.print() - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") def _generate_support_bundle(self) -> str | None: - """Generate a support bundle for troubleshooting. - - Prompts for destination path, shows warning about sensitive data, - and creates the bundle. - - Returns: - Success message with bundle path, or None if cancelled. - """ - from pathlib import Path - - from ..commands.support import create_bundle, get_default_bundle_path + """Generate a support bundle for troubleshooting.""" + from scc_cli.support_bundle import get_default_bundle_path self._console.print() self._console.print("[bold]Generate Support Bundle[/bold]") @@ -585,193 +487,57 @@ def _generate_support_bundle(self) -> str | None: ) self._console.print() - # Get default path default_path = get_default_bundle_path() - - # Prompt for path - path_str = Prompt.ask( - "Save bundle to", - default=str(default_path), - ) + path_str = Prompt.ask("Save bundle to", default=str(default_path)) if not path_str: self._console.print("[dim]Cancelled[/dim]") return None output_path = Path(path_str) - - # Create the bundle self._console.print("[cyan]Generating bundle...[/cyan]") - try: - create_bundle(output_path=output_path) - self._console.print() - self._console.print(f"[green]✓[/green] Bundle created: {output_path}") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return f"Support bundle saved to {output_path.name}" - except Exception as e: - self._console.print(f"[red]Error creating bundle: {e}[/red]") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None - - def _show_version_info(self) -> None: - """Display version information.""" - from importlib.metadata import PackageNotFoundError - from importlib.metadata import version as get_version - - self._console.print() - try: - version = get_version("scc-cli") - except PackageNotFoundError: - version = "unknown" - - self._console.print(f"[bold cyan]SCC CLI[/bold cyan] version {version}") - self._console.print() - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - def _profile_save(self) -> str | None: - """Save current workspace settings as a personal profile.""" - from pathlib import Path - - from ..core.personal_profiles import ( - compute_fingerprints, - load_workspace_mcp, - load_workspace_settings, - save_applied_state, - save_personal_profile, + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="generate_support_bundle", + workspace=self._context.workspace, + payload=SupportBundlePayload(output_path=output_path), + ) ) - - workspace = Path.cwd() - self._console.print() - self._console.print("[bold]Save Personal Profile[/bold]") - self._console.print() - - # Load current workspace settings - settings = load_workspace_settings(workspace) - mcp = load_workspace_mcp(workspace) - - if not settings and not mcp: - self._console.print("[yellow]No workspace settings found to save.[/yellow]") - self._console.print("[dim]Create .claude/settings.local.json or .mcp.json first.[/dim]") + message = self._handle_action_result(result) + if result.status == SettingsActionStatus.ERROR: Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None - - # Save the profile - profile = save_personal_profile(workspace, settings, mcp) - - # Save applied state for drift detection - fingerprints = compute_fingerprints(workspace) - save_applied_state(workspace, profile.profile_id, fingerprints) - - self._console.print(f"[green]✓[/green] Profile saved: {profile.path.name}") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return "Profile saved" + self._refresh_view_model() + return message - def _profile_apply(self) -> str | None: - """Apply saved profile to current workspace.""" - from pathlib import Path - - from ..core.personal_profiles import ( - compute_fingerprints, - load_personal_profile, - load_workspace_mcp, - load_workspace_settings, - merge_personal_mcp, - merge_personal_settings, - save_applied_state, - write_workspace_mcp, - write_workspace_settings, - ) - - workspace = Path.cwd() + def _show_version_info(self, version_info: VersionInfo) -> None: + """Display version information.""" self._console.print() - self._console.print("[bold]Apply Personal Profile[/bold]") + self._console.print(f"[bold cyan]SCC CLI[/bold cyan] version {version_info.version}") self._console.print() - # Load profile - profile = load_personal_profile(workspace) - if not profile: - self._console.print("[yellow]No profile saved for this workspace.[/yellow]") - self._console.print("[dim]Use 'Save profile' first.[/dim]") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None - - # Load current workspace settings - current_settings = load_workspace_settings(workspace) or {} - current_mcp = load_workspace_mcp(workspace) or {} - - # Merge profile into workspace - if profile.settings: - merged_settings = merge_personal_settings(workspace, current_settings, profile.settings) - write_workspace_settings(workspace, merged_settings) - - if profile.mcp: - merged_mcp = merge_personal_mcp(current_mcp, profile.mcp) - write_workspace_mcp(workspace, merged_mcp) - - # Update applied state - fingerprints = compute_fingerprints(workspace) - save_applied_state(workspace, profile.profile_id, fingerprints) - - self._console.print("[green]✓[/green] Profile applied to workspace") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return "Profile applied" - - def _profile_diff(self) -> str | None: + def _profile_diff(self, diff_info: ProfileDiffInfo) -> None: """Show diff between profile and workspace settings with visual overlay.""" - from pathlib import Path - from rich import box - from ..core.personal_profiles import ( - compute_structured_diff, - load_personal_profile, - load_workspace_mcp, - load_workspace_settings, - ) - - workspace = Path.cwd() - - # Load profile - profile = load_personal_profile(workspace) - if not profile: - self._console.print() - self._console.print("[yellow]No profile saved for this workspace.[/yellow]") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None - - # Load current workspace settings - current_settings = load_workspace_settings(workspace) or {} - current_mcp = load_workspace_mcp(workspace) or {} - - # Compute structured diff - diff = compute_structured_diff( - workspace_settings=current_settings, - profile_settings=profile.settings, - workspace_mcp=current_mcp, - profile_mcp=profile.mcp, - ) - + diff = diff_info.diff if diff.is_empty: self._console.print() self._console.print("[green]✓ Profile is in sync with workspace[/green]") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") return None - # Build diff content grouped by section lines: list[str] = [] current_section = "" rendered_lines = 0 - max_lines = 12 # Smart fallback threshold + max_lines = 12 truncated = False - # Status indicators indicators = { "added": "[green]+[/green]", "removed": "[red]−[/red]", "modified": "[yellow]~[/yellow]", } - # Section display names section_names = { "plugins": "plugins", "mcp_servers": "mcp_servers", @@ -779,21 +545,18 @@ def _profile_diff(self) -> str | None: } for item in diff.items: - # Check if we need to truncate if rendered_lines >= max_lines and not truncated: truncated = True break - # Add section header if new section if item.section != current_section: if current_section: - lines.append("") # Blank line between sections + lines.append("") rendered_lines += 1 lines.append(f" [bold]{section_names.get(item.section, item.section)}[/bold]") rendered_lines += 1 current_section = item.section - # Add item with indicator indicator = indicators.get(item.status, " ") modifier = "(modified)" if item.status == "modified" else "" if modifier: @@ -802,7 +565,6 @@ def _profile_diff(self) -> str | None: lines.append(f" {indicator} {item.name}") rendered_lines += 1 - # Add truncation indicator if needed if truncated: remaining = diff.total_count - ( rendered_lines - len(set(i.section for i in diff.items)) @@ -810,14 +572,11 @@ def _profile_diff(self) -> str | None: lines.append("") lines.append(f" [dim]+ {remaining} more items...[/dim]") - # Add footer lines.append("") lines.append(f" [dim]{diff.total_count} difference(s) · Esc close[/dim]") - # Create panel content content = "\n".join(lines) - # Render the diff overlay self._console.print() self._console.print( Panel( @@ -829,7 +588,6 @@ def _profile_diff(self) -> str | None: ) ) - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") return None def _profile_sync(self) -> str | None: @@ -838,10 +596,9 @@ def _profile_sync(self) -> str | None: from .list_screen import ListItem, ListScreen - # Get default/last-used repo path - default_path = self._get_sync_repo_path() + self._refresh_view_model() + default_path = self._view_model.sync_repo_path - # Build picker items: path row + operations items: list[ListItem[str]] = [ ListItem( value="change_path", @@ -892,40 +649,10 @@ def _profile_sync(self) -> str | None: return None - def _get_sync_repo_path(self) -> str: - """Get the default/last-used sync repository path.""" - from .. import config as scc_config - - # Try to get from user config - try: - cfg = scc_config.load_user_config() - last_repo = cfg.get("sync", {}).get("last_repo") - if last_repo: - return str(last_repo) - except Exception: - pass - - # Default path - return "~/dotfiles/scc-profiles" - - def _save_sync_repo_path(self, path: str) -> None: - """Save the sync repository path to user config.""" - from .. import config as scc_config - - try: - cfg = scc_config.load_user_config() - if "sync" not in cfg: - cfg["sync"] = {} - cfg["sync"]["last_repo"] = path - scc_config.save_user_config(cfg) - except Exception: - pass # Non-critical, ignore errors - def _sync_change_path(self, current_path: str) -> str | None: """Handle path editing for sync.""" from rich import box - # Show styled panel for path input self._console.print() panel = Panel( f"[dim]Current:[/dim] {current_path}\n\n" @@ -939,127 +666,78 @@ def _sync_change_path(self, current_path: str) -> str | None: new_path = Prompt.ask("[cyan]Path[/cyan]", default=current_path) if new_path and new_path != current_path: - self._save_sync_repo_path(new_path) - self._console.print(f"\n[green]✓[/green] Path updated to: {new_path}") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=self._context.workspace, + payload=ProfileSyncPathPayload(new_path=new_path), + ) + ) + self._handle_action_result(result) + self._refresh_view_model() - # Return to sync picker with new path return self._profile_sync() def _sync_export(self, repo_path: Path) -> str | None: """Export profiles to repository.""" - - from rich import box - - from ..core.personal_profiles import ( - export_profiles_to_repo, - list_personal_profiles, - ) - - self._console.print() - - # Check if we have profiles to export - profiles = list_personal_profiles() - if not profiles: - self._console.print( - Panel( - "[yellow]✗ No profiles to export[/yellow]\n\n" - "Save a profile first with 'Save profile' action.", - title="[cyan]Sync[/cyan] Profiles", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ) + payload = ProfileSyncPayload(mode=ProfileSyncMode.EXPORT, repo_path=repo_path) + validation = app_settings.validate_settings( + SettingsValidationRequest( + action_id="profile_sync", + workspace=self._context.workspace, + payload=payload, ) + ) + if validation and validation.error: + self._console.print(f"[yellow]{validation.error}[/yellow]") Prompt.ask("[dim]Press Enter to continue[/dim]", default="") return None - # Check if directory exists, offer to create - if not repo_path.exists(): - self._console.print() - self._console.print( - Panel( - f"[yellow]Path does not exist:[/yellow]\n {repo_path}", - title="[cyan]Create[/cyan] Directory", - border_style="yellow", - box=box.ROUNDED, - padding=(1, 2), - ) - ) - create = Confirm.ask("[cyan]Create directory?[/cyan]", default=True) - if not create: + create_dir = False + if ( + validation + and validation.confirmation == ConfirmationKind.CONFIRM + and validation.message + ): + create_dir = self._confirm_create_directory(validation.message) + if not create_dir: return None - repo_path.mkdir(parents=True, exist_ok=True) - self._console.print(f"[green]✓[/green] Created {repo_path}") - # Export self._console.print(f"[dim]Exporting to {repo_path}...[/dim]") - result = export_profiles_to_repo(repo_path, profiles) - - # Show result - lines = [f"[green]✓ Exported {result.exported} profile(s)[/green]"] - for profile in profiles: - lines.append(f" [green]+[/green] {profile.repo_id}") - - if result.warnings: - lines.append("") - for warning in result.warnings: - lines.append(f" [yellow]![/yellow] {warning}") - - # Add hint about local-only operation - lines.append("") - lines.append("[dim]Files written locally · no git commit/push[/dim]") - lines.append("[dim]For git: scc profile export --repo PATH --commit --push[/dim]") - - self._console.print() - self._console.print( - Panel( - "\n".join(lines), - title="[cyan]Sync[/cyan] Profiles", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), + payload = ProfileSyncPayload( + mode=ProfileSyncMode.EXPORT, + repo_path=repo_path, + create_dir=create_dir, + ) + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=self._context.workspace, + payload=payload, ) ) - - # Save path for next time - self._save_sync_repo_path(str(repo_path)) - - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return f"Exported {result.exported} profile(s)" + message = self._handle_action_result(result) + self._refresh_view_model() + return message def _sync_import(self, repo_path: Path) -> str | None: """Import profiles from repository with preview.""" - from rich import box - from ..core.personal_profiles import import_profiles_from_repo - - self._console.print() - - # Check if repo exists - if not repo_path.exists(): - self._console.print( - Panel( - f"[yellow]✗ Path not found[/yellow]\n\n{repo_path}", - title="[cyan]Sync[/cyan] Profiles", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ) - ) - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None - - # Preview (dry-run) self._console.print(f"[dim]Checking {repo_path}...[/dim]") - preview = import_profiles_from_repo(repo_path, dry_run=True) + payload = ProfileSyncPayload(mode=ProfileSyncMode.IMPORT, repo_path=repo_path) + validation = app_settings.validate_settings( + SettingsValidationRequest( + action_id="profile_sync", + workspace=self._context.workspace, + payload=payload, + ) + ) - if preview.imported == 0 and preview.skipped == 0: - self._console.print() + if validation and validation.error: self._console.print( Panel( - "[dim]No profiles found in repository.[/dim]", + f"[yellow]✗ {validation.error}[/yellow]", title="[cyan]Sync[/cyan] Profiles", border_style="bright_black", box=box.ROUNDED, @@ -1069,94 +747,79 @@ def _sync_import(self, repo_path: Path) -> str | None: Prompt.ask("[dim]Press Enter to continue[/dim]", default="") return None - # Show preview and ask for confirmation - lines = [f"[cyan]Import preview from {repo_path}[/cyan]", ""] - lines.append(f" {preview.imported} profile(s) will be imported") - if preview.skipped > 0: - lines.append(f" {preview.skipped} profile(s) unchanged") - - self._console.print() - self._console.print( - Panel( - "\n".join(lines), - title="[cyan]Sync[/cyan] Profiles", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ) - ) - - # Confirm import - if not Confirm.ask("Import now?", default=True): - return None - - # Actually import - result = import_profiles_from_repo(repo_path, dry_run=False) - - # Show result panel - lines = [f"[green]✓ Imported {result.imported} profile(s)[/green]"] - lines.append("") - lines.append("[dim]Profiles copied locally · no git pull[/dim]") - lines.append("[dim]For git: scc profile import --repo PATH --pull[/dim]") + confirmed = True + if validation and isinstance(validation.detail, ProfileSyncPreview): + self._render_profile_sync_preview(validation.detail) + confirmed = Confirm.ask("Import now?", default=True) + if not confirmed: + return None - self._console.print() - self._console.print( - Panel( - "\n".join(lines), - title="[cyan]Sync[/cyan] Profiles", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=self._context.workspace, + payload=payload, + confirmed=confirmed, ) ) - - # Save path for next time - self._save_sync_repo_path(str(repo_path)) - - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return f"Imported {result.imported} profile(s)" + message = self._handle_action_result(result) + self._refresh_view_model() + return message def _sync_full(self, repo_path: Path) -> str | None: """Full sync: import then export.""" + self._console.print(f"[dim]Full sync with {repo_path}...[/dim]") + payload = ProfileSyncPayload(mode=ProfileSyncMode.FULL_SYNC, repo_path=repo_path) + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=self._context.workspace, + payload=payload, + ) + ) + message = self._handle_action_result(result) + self._refresh_view_model() + return message + def _render_profile_sync_result(self, result: ProfileSyncResult) -> None: from rich import box - from ..core.personal_profiles import ( - export_profiles_to_repo, - import_profiles_from_repo, - list_personal_profiles, - ) + lines: list[str] = [] + if result.mode == ProfileSyncMode.EXPORT: + lines.append(f"[green]✓ Exported {result.exported} profile(s)[/green]") + for profile_id in result.profile_ids: + lines.append(f" [green]+[/green] {profile_id}") + if result.warnings: + lines.append("") + for warning in result.warnings: + lines.append(f" [yellow]![/yellow] {warning}") + lines.append("") + lines.append("[dim]Files written locally · no git commit/push[/dim]") + lines.append("[dim]For git: scc profile export --repo PATH --commit --push[/dim]") + + if result.mode == ProfileSyncMode.IMPORT: + lines.append(f"[green]✓ Imported {result.imported} profile(s)[/green]") + if result.warnings: + lines.append("") + for warning in result.warnings: + lines.append(f" [yellow]![/yellow] {warning}") + lines.append("") + lines.append("[dim]Profiles copied locally · no git pull[/dim]") + lines.append("[dim]For git: scc profile import --repo PATH --pull[/dim]") - self._console.print() - self._console.print(f"[dim]Full sync with {repo_path}...[/dim]") + if result.mode == ProfileSyncMode.FULL_SYNC: + lines.append("[green]✓ Sync complete[/green]") + lines.append("") + lines.append(f" Imported: {result.imported} profile(s)") + lines.append(f" Exported: {result.exported} profile(s)") + lines.append("") + lines.append("[dim]Files synced locally · no git operations[/dim]") + lines.append("[dim]For git: scc profile sync --repo PATH --pull --commit --push[/dim]") - # Check if repo exists for import - imported = 0 - if repo_path.exists(): - self._console.print("[dim]Step 1: Importing...[/dim]") - import_result = import_profiles_from_repo(repo_path, dry_run=False) - imported = import_result.imported - else: - self._console.print("[dim]Step 1: Skipped (repo not found)[/dim]") - repo_path.mkdir(parents=True, exist_ok=True) - - # Export - self._console.print("[dim]Step 2: Exporting...[/dim]") - profiles = list_personal_profiles() - exported = 0 - if profiles: - export_result = export_profiles_to_repo(repo_path, profiles) - exported = export_result.exported - - # Show result self._console.print() self._console.print( Panel( - f"[green]✓ Sync complete[/green]\n\n" - f" Imported: {imported} profile(s)\n" - f" Exported: {exported} profile(s)\n\n" - f"[dim]Files synced locally · no git operations[/dim]\n" - f"[dim]For git: scc profile sync --repo PATH --pull --commit --push[/dim]", + "\n".join(lines), title="[cyan]Sync[/cyan] Profiles", border_style="bright_black", box=box.ROUNDED, @@ -1164,11 +827,14 @@ def _sync_full(self, repo_path: Path) -> str | None: ) ) - # Save path for next time - self._save_sync_repo_path(str(repo_path)) + def _render_support_bundle_result(self, info: SupportBundleInfo) -> None: + self._console.print() + self._console.print(f"[green]✓[/green] Bundle created: {info.output_path}") + + def _render_doctor_result(self, info: DoctorInfo) -> None: + from scc_cli.doctor import render_doctor_results - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return f"Synced: {imported} imported, {exported} exported" + render_doctor_results(self._console, info.result) def _render(self) -> RenderableType: """Render the settings screen.""" @@ -1179,23 +845,17 @@ def _render(self) -> RenderableType: else self._console.size.width ) - # Profile header - profile = get_selected_profile() - org_name = None - org_config = config.load_cached_org_config() - if org_config: - org_data = org_config.get("organization", {}) - org_name = org_data.get("name") or org_data.get("id") + header_info = self._view_model.header header = Text() header.append("Profile", style="dim") header.append(": ", style="dim") - header.append(profile or "standalone", style="cyan") - if org_name: + header.append(header_info.profile_name, style="cyan") + if header_info.org_name: header.append(f" {Indicators.get('VERTICAL_LINE')} ", style="dim") header.append("Org", style="dim") header.append(": ", style="dim") - header.append(org_name, style="cyan") + header.append(header_info.org_name, style="cyan") header.append("\n") from rich import box @@ -1214,7 +874,7 @@ def _render(self) -> RenderableType: cat_text.append(cat.name.title() + "\n", style=style) # Render action list for current category - actions = _get_actions_for_category(self._active_category) + actions = self._actions_for_category() action_text = Text() label_width = max((len(action.label) for action in actions), default=0) separator_width = max(18, min(36, label_width + 8)) @@ -1340,9 +1000,15 @@ def _render(self) -> RenderableType: # Preview panel overlay elif self._show_preview and actions: action = actions[self._cursor] - try: - preview = preview_operation(action.id) - preview_text = Text() + validation = app_settings.validate_settings( + SettingsValidationRequest( + action_id=action.id, + workspace=self._context.workspace, + ) + ) + preview_text = Text() + if validation and isinstance(validation.detail, MaintenancePreview): + preview = validation.detail preview_text.append(f"{action.label}\n\n", style="bold") preview_text.append("Risk: ") preview_text.append(_get_risk_badge(preview.risk_tier)) @@ -1350,7 +1016,7 @@ def _render(self) -> RenderableType: if preview.paths: preview_text.append("Affects:\n", style="dim") - for path in preview.paths[:5]: # Limit to 5 paths + for path in preview.paths[:5]: preview_text.append(f" {path}\n") if len(preview.paths) > 5: preview_text.append(f" (+{len(preview.paths) - 5} more)\n", style="dim") @@ -1363,9 +1029,8 @@ def _render(self) -> RenderableType: if preview.backup_will_be_created: preview_text.append("\n[yellow]Backup will be created[/yellow]\n") - - except Exception: - preview_text = Text(f"Unable to preview {action.label}") + else: + preview_text.append(f"Unable to preview {action.label}") preview_panel = Panel( preview_text, diff --git a/src/scc_cli/ui/wizard.py b/src/scc_cli/ui/wizard.py index 6691868..e8ce819 100644 --- a/src/scc_cli/ui/wizard.py +++ b/src/scc_cli/ui/wizard.py @@ -330,11 +330,11 @@ def pick_workspace_source( # Check current directory for project markers and git status # Import here to avoid circular dependencies - from scc_cli import git + from scc_cli.services import git as git_service cwd = Path.cwd() cwd_name = cwd.name or str(cwd) - is_git = git.is_git_repo(cwd) + is_git = git_service.is_git_repo(cwd) # Three-tier logic with git awareness: # 1. Suspicious directory (home, /, tmp) → don't show @@ -569,12 +569,12 @@ def pick_team_repo( if expanded.exists(): return str(expanded) - # Need to clone - import git module here to avoid circular imports - from .. import git + # Need to clone - import here to avoid circular imports + from .git_interactive import clone_repo repo_url = result.get("url", "") if repo_url: - cloned_path = git.clone_repo(repo_url, workspace_base) + cloned_path = clone_repo(repo_url, workspace_base) if cloned_path: return cloned_path diff --git a/tests/contracts/test_agent_runner_contract.py b/tests/contracts/test_agent_runner_contract.py new file mode 100644 index 0000000..59fafc1 --- /dev/null +++ b/tests/contracts/test_agent_runner_contract.py @@ -0,0 +1,21 @@ +"""Contract tests for AgentRunner implementations.""" + +from __future__ import annotations + +from pathlib import Path + +from scc_cli.adapters.claude_agent_runner import ClaudeAgentRunner + + +def test_agent_runner_builds_settings_and_command() -> None: + runner = ClaudeAgentRunner() + payload = {"enabledPlugins": ["tool@official"]} + settings_path = Path("/home/agent/.claude/settings.json") + + settings = runner.build_settings(payload, path=settings_path) + command = runner.build_command(settings) + + assert settings.content == payload + assert settings.path == settings_path + assert command.argv[0] == "claude" + assert runner.describe() diff --git a/tests/contracts/test_clock_contract.py b/tests/contracts/test_clock_contract.py new file mode 100644 index 0000000..4ec7bb0 --- /dev/null +++ b/tests/contracts/test_clock_contract.py @@ -0,0 +1,15 @@ +"""Contract tests for Clock implementations.""" + +from __future__ import annotations + +from datetime import timezone + +from scc_cli.adapters.system_clock import SystemClock + + +def test_system_clock_returns_utc_time() -> None: + clock = SystemClock() + now = clock.now() + + assert now.tzinfo is not None + assert now.tzinfo.utcoffset(now) == timezone.utc.utcoffset(now) diff --git a/tests/contracts/test_filesystem_contract.py b/tests/contracts/test_filesystem_contract.py new file mode 100644 index 0000000..61b0f04 --- /dev/null +++ b/tests/contracts/test_filesystem_contract.py @@ -0,0 +1,58 @@ +"""Contract tests for Filesystem port adapters.""" + +from __future__ import annotations + +from pathlib import Path + +from scc_cli.adapters.local_filesystem import LocalFilesystem + + +def test_write_text_atomic_writes_content(tmp_path: Path) -> None: + filesystem = LocalFilesystem() + target = tmp_path / "settings.json" + + filesystem.write_text_atomic(target, "hello") + + assert target.read_text(encoding="utf-8") == "hello" + + +def test_write_text_atomic_leaves_no_temp_files(tmp_path: Path) -> None: + filesystem = LocalFilesystem() + target = tmp_path / "data.txt" + + filesystem.write_text_atomic(target, "content") + + temp_files = [ + path + for path in target.parent.iterdir() + if path.name.startswith(f".{target.name}.") and path.suffix == ".tmp" + ] + + assert temp_files == [] + + +def test_write_text_atomic_overwrites_existing_file(tmp_path: Path) -> None: + filesystem = LocalFilesystem() + target = tmp_path / "data.txt" + + filesystem.write_text_atomic(target, "first") + filesystem.write_text_atomic(target, "second") + + assert target.read_text(encoding="utf-8") == "second" + + +def test_filesystem_helpers(tmp_path: Path) -> None: + filesystem = LocalFilesystem() + directory = tmp_path / "nested" / "dir" + + filesystem.mkdir(directory, parents=True, exist_ok=True) + assert filesystem.exists(directory) + + file_path = directory / "data.txt" + filesystem.write_text(file_path, "payload") + + assert filesystem.read_text(file_path) == "payload" + assert list(filesystem.iterdir(directory)) == [file_path] + + filesystem.unlink(file_path) + assert not filesystem.exists(file_path) diff --git a/tests/contracts/test_git_client_contract.py b/tests/contracts/test_git_client_contract.py new file mode 100644 index 0000000..39532f1 --- /dev/null +++ b/tests/contracts/test_git_client_contract.py @@ -0,0 +1,40 @@ +"""Contract tests for GitClient implementations.""" + +from __future__ import annotations + +import shutil +import subprocess +from pathlib import Path + +import pytest + +from scc_cli.adapters.local_git_client import LocalGitClient + + +@pytest.mark.skipif(shutil.which("git") is None, reason="git not available") +def test_git_client_detects_repo_and_branch(tmp_path: Path) -> None: + client = LocalGitClient() + repo = tmp_path / "repo" + repo.mkdir() + + assert client.is_git_repo(repo) is False + assert client.init_repo(repo) is True + assert client.is_git_repo(repo) is True + + subdir = repo / "subdir" + subdir.mkdir() + + root, start = client.detect_workspace_root(subdir) + assert root == repo.resolve() + assert start == subdir.resolve() + + subprocess.run(["git", "-C", str(repo), "config", "user.name", "Test"], check=True) + subprocess.run(["git", "-C", str(repo), "config", "user.email", "test@example.com"], check=True) + subprocess.run( + ["git", "-C", str(repo), "commit", "--allow-empty", "-m", "init"], + check=True, + capture_output=True, + ) + + branch = client.get_current_branch(repo) + assert branch diff --git a/tests/contracts/test_remote_fetcher_contract.py b/tests/contracts/test_remote_fetcher_contract.py new file mode 100644 index 0000000..948e35c --- /dev/null +++ b/tests/contracts/test_remote_fetcher_contract.py @@ -0,0 +1,26 @@ +"""Contract tests for RemoteFetcher implementations.""" + +from __future__ import annotations + +import responses + +from scc_cli.adapters.requests_fetcher import RequestsFetcher + + +@responses.activate +def test_requests_fetcher_get_returns_response() -> None: + url = "https://example.com/config.json" + responses.add( + responses.GET, + url, + body='{"ok": true}', + status=200, + headers={"ETag": "abc"}, + ) + + fetcher = RequestsFetcher() + response = fetcher.get(url) + + assert response.status_code == 200 + assert response.text == '{"ok": true}' + assert response.headers.get("ETag") == "abc" diff --git a/tests/contracts/test_sandbox_runtime_contract.py b/tests/contracts/test_sandbox_runtime_contract.py new file mode 100644 index 0000000..707ec91 --- /dev/null +++ b/tests/contracts/test_sandbox_runtime_contract.py @@ -0,0 +1,35 @@ +"""Contract tests for SandboxRuntime implementations.""" + +from __future__ import annotations + +from pathlib import Path + +from scc_cli.ports.models import MountSpec, SandboxSpec, SandboxState +from tests.fakes.fake_sandbox_runtime import FakeSandboxRuntime + + +def _make_spec(tmp_path: Path) -> SandboxSpec: + mount = MountSpec(source=tmp_path, target=tmp_path) + return SandboxSpec(image="sandbox-image", workspace_mount=mount, workdir=tmp_path) + + +def test_sandbox_runtime_lifecycle(tmp_path: Path) -> None: + runtime = FakeSandboxRuntime() + spec = _make_spec(tmp_path) + + handle = runtime.run(spec) + + assert runtime.status(handle).state == SandboxState.RUNNING + assert runtime.list_running() == [handle] + + runtime.stop(handle) + assert runtime.status(handle).state == SandboxState.STOPPED + assert runtime.list_running() == [] + + runtime.resume(handle) + assert runtime.status(handle).state == SandboxState.RUNNING + assert runtime.list_running() == [handle] + + runtime.remove(handle) + assert runtime.status(handle).state == SandboxState.UNKNOWN + assert runtime.list_running() == [] diff --git a/tests/fakes/__init__.py b/tests/fakes/__init__.py new file mode 100644 index 0000000..656d947 --- /dev/null +++ b/tests/fakes/__init__.py @@ -0,0 +1,23 @@ +"""Test fakes for SCC ports.""" + +from __future__ import annotations + +from scc_cli.adapters.local_filesystem import LocalFilesystem +from scc_cli.adapters.local_git_client import LocalGitClient +from scc_cli.adapters.requests_fetcher import RequestsFetcher +from scc_cli.adapters.system_clock import SystemClock +from scc_cli.bootstrap import DefaultAdapters +from tests.fakes.fake_agent_runner import FakeAgentRunner +from tests.fakes.fake_sandbox_runtime import FakeSandboxRuntime + + +def build_fake_adapters() -> DefaultAdapters: + """Return default adapters wired with fakes.""" + return DefaultAdapters( + filesystem=LocalFilesystem(), + git_client=LocalGitClient(), + remote_fetcher=RequestsFetcher(), + clock=SystemClock(), + agent_runner=FakeAgentRunner(), + sandbox_runtime=FakeSandboxRuntime(), + ) diff --git a/tests/fakes/fake_agent_runner.py b/tests/fakes/fake_agent_runner.py new file mode 100644 index 0000000..6aa9d27 --- /dev/null +++ b/tests/fakes/fake_agent_runner.py @@ -0,0 +1,21 @@ +"""Fake AgentRunner for tests.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from scc_cli.ports.models import AgentCommand, AgentSettings + + +class FakeAgentRunner: + """Simple AgentRunner stub for unit tests.""" + + def build_settings(self, config: dict[str, Any], *, path: Path) -> AgentSettings: + return AgentSettings(content=config, path=path) + + def build_command(self, settings: AgentSettings) -> AgentCommand: + return AgentCommand(argv=["fake-agent"], env={}, workdir=settings.path.parent) + + def describe(self) -> str: + return "Fake agent runner" diff --git a/tests/fakes/fake_sandbox_runtime.py b/tests/fakes/fake_sandbox_runtime.py new file mode 100644 index 0000000..9a0f01e --- /dev/null +++ b/tests/fakes/fake_sandbox_runtime.py @@ -0,0 +1,57 @@ +"""Fake SandboxRuntime for contract tests.""" + +from __future__ import annotations + +from dataclasses import dataclass + +from scc_cli.ports.models import SandboxHandle, SandboxSpec, SandboxState, SandboxStatus + + +@dataclass +class _SandboxRecord: + handle: SandboxHandle + status: SandboxStatus + + +class FakeSandboxRuntime: + """In-memory sandbox runtime for tests.""" + + def __init__(self) -> None: + self._records: dict[str, _SandboxRecord] = {} + self._counter = 0 + + def ensure_available(self) -> None: + return None + + def run(self, spec: SandboxSpec) -> SandboxHandle: + self._counter += 1 + handle = SandboxHandle(sandbox_id=f"sandbox-{self._counter}") + status = SandboxStatus(state=SandboxState.RUNNING) + self._records[handle.sandbox_id] = _SandboxRecord(handle=handle, status=status) + return handle + + def resume(self, handle: SandboxHandle) -> None: + record = self._records.get(handle.sandbox_id) + if record: + record.status = SandboxStatus(state=SandboxState.RUNNING) + + def stop(self, handle: SandboxHandle) -> None: + record = self._records.get(handle.sandbox_id) + if record: + record.status = SandboxStatus(state=SandboxState.STOPPED) + + def remove(self, handle: SandboxHandle) -> None: + self._records.pop(handle.sandbox_id, None) + + def list_running(self) -> list[SandboxHandle]: + return [ + record.handle + for record in self._records.values() + if record.status.state == SandboxState.RUNNING + ] + + def status(self, handle: SandboxHandle) -> SandboxStatus: + record = self._records.get(handle.sandbox_id) + if record: + return record.status + return SandboxStatus(state=SandboxState.UNKNOWN) diff --git a/tests/test_application_dashboard.py b/tests/test_application_dashboard.py new file mode 100644 index 0000000..27e8be3 --- /dev/null +++ b/tests/test_application_dashboard.py @@ -0,0 +1,161 @@ +"""Unit tests for application dashboard flow.""" + +from __future__ import annotations + +from scc_cli.application import dashboard as app_dashboard + + +def _empty_tab_data(tab: app_dashboard.DashboardTab) -> app_dashboard.DashboardTabData: + return app_dashboard.DashboardTabData( + tab=tab, + title=tab.display_name, + items=[], + count_active=0, + count_total=0, + ) + + +def test_build_dashboard_view_clears_one_time_state() -> None: + tabs = {tab: _empty_tab_data(tab) for tab in app_dashboard.DashboardTab} + + def loader(verbose: bool) -> dict[app_dashboard.DashboardTab, app_dashboard.DashboardTabData]: + assert verbose is True + return tabs + + state = app_dashboard.DashboardFlowState( + restore_tab=app_dashboard.DashboardTab.WORKTREES, + toast_message="Welcome", + verbose_worktrees=True, + ) + + view, next_state = app_dashboard.build_dashboard_view(state, loader) + + assert view.active_tab is app_dashboard.DashboardTab.WORKTREES + assert view.status_message == "Welcome" + assert next_state.restore_tab is None + assert next_state.toast_message is None + + +def test_start_flow_event_sets_restore_tab() -> None: + state = app_dashboard.DashboardFlowState() + event = app_dashboard.StartFlowEvent( + return_to=app_dashboard.DashboardTab.SESSIONS, + reason="dashboard_start", + ) + + step = app_dashboard.handle_dashboard_event(state, event) + + assert isinstance(step, app_dashboard.DashboardEffectRequest) + assert step.state.restore_tab is app_dashboard.DashboardTab.SESSIONS + + +def test_start_flow_cancel_sets_toast() -> None: + state = app_dashboard.DashboardFlowState() + effect = app_dashboard.StartFlowEvent( + return_to=app_dashboard.DashboardTab.STATUS, + reason="dashboard_start", + ) + + outcome = app_dashboard.apply_dashboard_effect_result( + state, + effect, + app_dashboard.StartFlowResult(decision=app_dashboard.StartFlowDecision.CANCELLED), + ) + + assert outcome.exit_dashboard is False + assert outcome.state.toast_message == "Start cancelled" + + +def test_session_resume_success_exits_dashboard() -> None: + state = app_dashboard.DashboardFlowState() + effect = app_dashboard.SessionResumeEvent( + return_to=app_dashboard.DashboardTab.SESSIONS, + session={"name": "session"}, + ) + + outcome = app_dashboard.apply_dashboard_effect_result(state, effect, True) + + assert outcome.exit_dashboard is True + + +def test_verbose_toggle_updates_state() -> None: + state = app_dashboard.DashboardFlowState() + event = app_dashboard.VerboseToggleEvent( + return_to=app_dashboard.DashboardTab.WORKTREES, + verbose=True, + ) + + step = app_dashboard.handle_dashboard_event(state, event) + + assert isinstance(step, app_dashboard.DashboardFlowOutcome) + assert step.state.verbose_worktrees is True + assert step.state.toast_message == "Status on" + + +def test_refresh_event_sets_restore_tab() -> None: + state = app_dashboard.DashboardFlowState() + event = app_dashboard.RefreshEvent(return_to=app_dashboard.DashboardTab.CONTAINERS) + + step = app_dashboard.handle_dashboard_event(state, event) + + assert isinstance(step, app_dashboard.DashboardFlowOutcome) + assert step.state.restore_tab is app_dashboard.DashboardTab.CONTAINERS + + +def test_statusline_install_effect_sets_message() -> None: + state = app_dashboard.DashboardFlowState() + effect = app_dashboard.StatuslineInstallEvent(return_to=app_dashboard.DashboardTab.STATUS) + + outcome = app_dashboard.apply_dashboard_effect_result(state, effect, True) + + assert outcome.state.toast_message == "Statusline installed successfully" + + outcome = app_dashboard.apply_dashboard_effect_result(state, effect, False) + + assert outcome.state.toast_message == "Statusline installation failed" + + +def test_create_worktree_effect_messages() -> None: + state = app_dashboard.DashboardFlowState() + effect = app_dashboard.CreateWorktreeEvent( + return_to=app_dashboard.DashboardTab.WORKTREES, + is_git_repo=True, + ) + + outcome = app_dashboard.apply_dashboard_effect_result(state, effect, True) + + assert outcome.state.toast_message == "Worktree created" + + outcome = app_dashboard.apply_dashboard_effect_result(state, effect, False) + + assert outcome.state.toast_message == "Worktree creation cancelled" + + clone_effect = app_dashboard.CreateWorktreeEvent( + return_to=app_dashboard.DashboardTab.WORKTREES, + is_git_repo=False, + ) + + outcome = app_dashboard.apply_dashboard_effect_result(state, clone_effect, True) + + assert outcome.state.toast_message == "Repository cloned" + + outcome = app_dashboard.apply_dashboard_effect_result(state, clone_effect, False) + + assert outcome.state.toast_message == "Clone cancelled" + + +def test_container_stop_effect_uses_fallback_message() -> None: + state = app_dashboard.DashboardFlowState() + effect = app_dashboard.ContainerStopEvent( + return_to=app_dashboard.DashboardTab.CONTAINERS, + container_id="abc", + container_name="scc-demo", + ) + + outcome = app_dashboard.apply_dashboard_effect_result(state, effect, (True, None)) + + assert outcome.state.toast_message == "Container stopped" + + outcome = app_dashboard.apply_dashboard_effect_result(state, effect, (False, "Custom")) + + assert outcome.state.toast_message == "Custom" diff --git a/tests/test_application_settings.py b/tests/test_application_settings.py new file mode 100644 index 0000000..125b6e5 --- /dev/null +++ b/tests/test_application_settings.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +from pathlib import Path + +from scc_cli import config +from scc_cli.application import settings as app_settings +from scc_cli.core.personal_profiles import APPLIED_STATE_FILE + + +def test_load_settings_state_uses_saved_sync_path(temp_config_dir: Path, tmp_path: Path) -> None: + config.save_user_config({"sync": {"last_repo": "/tmp/scc-sync"}}) + + view_model = app_settings.load_settings_state(app_settings.SettingsContext(workspace=tmp_path)) + + assert view_model.sync_repo_path == "/tmp/scc-sync" + + +def test_apply_settings_change_updates_sync_path(temp_config_dir: Path, tmp_path: Path) -> None: + request = app_settings.SettingsChangeRequest( + action_id="profile_sync", + workspace=tmp_path, + payload=app_settings.ProfileSyncPathPayload(new_path="/tmp/new-sync"), + ) + + result = app_settings.apply_settings_change(request) + + assert result.status == app_settings.SettingsActionStatus.SUCCESS + + updated = config.load_user_config() + assert updated["sync"]["last_repo"] == "/tmp/new-sync" + + +def test_apply_settings_change_profile_save_writes_profile_and_state( + temp_config_dir: Path, tmp_path: Path +) -> None: + workspace = tmp_path / "workspace" + settings_path = workspace / ".claude" / "settings.local.json" + settings_path.parent.mkdir(parents=True) + settings_path.write_text('{"foo": "bar"}') + + request = app_settings.SettingsChangeRequest( + action_id="profile_save", + workspace=workspace, + ) + + result = app_settings.apply_settings_change(request) + + assert result.status == app_settings.SettingsActionStatus.SUCCESS + profiles_dir = config.CONFIG_DIR / "personal" / "projects" + profiles = list(profiles_dir.glob("*.json")) + assert len(profiles) == 1 + assert (workspace / ".claude" / APPLIED_STATE_FILE).exists() + + +def test_apply_settings_change_profile_sync_export_writes_repo_index( + temp_config_dir: Path, tmp_path: Path +) -> None: + workspace = tmp_path / "workspace" + settings_path = workspace / ".claude" / "settings.local.json" + settings_path.parent.mkdir(parents=True) + settings_path.write_text('{"foo": "bar"}') + + app_settings.apply_settings_change( + app_settings.SettingsChangeRequest( + action_id="profile_save", + workspace=workspace, + ) + ) + + repo_path = tmp_path / "profile-repo" + request = app_settings.SettingsChangeRequest( + action_id="profile_sync", + workspace=workspace, + payload=app_settings.ProfileSyncPayload( + mode=app_settings.ProfileSyncMode.EXPORT, + repo_path=repo_path, + create_dir=True, + ), + ) + + result = app_settings.apply_settings_change(request) + + assert result.status == app_settings.SettingsActionStatus.SUCCESS + profiles_dir = repo_path / ".scc" / "profiles" + index_path = profiles_dir / "index.json" + profile_files = [path for path in profiles_dir.glob("*.json") if path.name != "index.json"] + assert index_path.exists() + assert len(profile_files) == 1 diff --git a/tests/test_application_start_session.py b/tests/test_application_start_session.py new file mode 100644 index 0000000..3d64430 --- /dev/null +++ b/tests/test_application_start_session.py @@ -0,0 +1,205 @@ +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +from scc_cli.application.start_session import ( + StartSessionDependencies, + StartSessionPlan, + StartSessionRequest, + prepare_start_session, + start_session, +) +from scc_cli.application.sync_marketplace import SyncError, SyncResult +from scc_cli.core.constants import AGENT_CONFIG_DIR, SANDBOX_IMAGE +from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.models import MountSpec, SandboxSpec +from tests.fakes.fake_agent_runner import FakeAgentRunner +from tests.fakes.fake_sandbox_runtime import FakeSandboxRuntime + + +class FakeGitClient: + def __init__(self, branch: str | None = "main", is_repo: bool = True) -> None: + self._branch = branch + self._is_repo = is_repo + + def check_available(self) -> None: + return None + + def check_installed(self) -> bool: + return True + + def get_version(self) -> str | None: + return "fake-git" + + def is_git_repo(self, path: Path) -> bool: + return self._is_repo + + def init_repo(self, path: Path) -> bool: + return True + + def create_empty_initial_commit(self, path: Path) -> tuple[bool, str | None]: + return True, None + + def detect_workspace_root(self, start_dir: Path) -> tuple[Path | None, Path]: + return None, start_dir + + def get_current_branch(self, path: Path) -> str | None: + return self._branch + + +def _build_resolver_result(workspace_path: Path) -> ResolverResult: + resolved = workspace_path.resolve() + return ResolverResult( + workspace_root=resolved, + entry_dir=resolved, + mount_root=resolved, + container_workdir=str(resolved), + is_auto_detected=False, + is_suspicious=False, + reason="test", + ) + + +def _build_dependencies(git_client: FakeGitClient) -> StartSessionDependencies: + return StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=git_client, + agent_runner=FakeAgentRunner(), + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + +def test_prepare_start_session_builds_plan_with_sync_result(tmp_path: Path) -> None: + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team="alpha", + session_name="session-1", + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config={ + "defaults": {"network_policy": "restricted"}, + "profiles": {"alpha": {}}, + }, + ) + sync_result = SyncResult(success=True, rendered_settings={"plugins": []}) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient(branch="main")) + + with ( + patch( + "scc_cli.application.start_session.resolve_launch_context", + return_value=resolver_result, + ), + patch( + "scc_cli.application.start_session.sync_marketplace_settings", + return_value=sync_result, + ) as sync_mock, + ): + plan = prepare_start_session(request, dependencies=dependencies) + + sync_mock.assert_called_once() + assert sync_mock.call_args.kwargs["write_to_workspace"] is False + assert sync_mock.call_args.kwargs["container_path_prefix"] == str(workspace_path) + assert plan.sync_result is sync_result + assert plan.sync_error_message is None + assert plan.current_branch == "main" + assert plan.agent_settings is not None + assert plan.agent_settings.content == {"plugins": []} + assert plan.agent_settings.path == Path("/home/agent") / AGENT_CONFIG_DIR / "settings.json" + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.image == SANDBOX_IMAGE + assert plan.sandbox_spec.network_policy == "restricted" + + +def test_prepare_start_session_captures_sync_error(tmp_path: Path) -> None: + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team="alpha", + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config={ + "defaults": {}, + "profiles": {"alpha": {}}, + }, + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with ( + patch( + "scc_cli.application.start_session.resolve_launch_context", + return_value=resolver_result, + ), + patch( + "scc_cli.application.start_session.sync_marketplace_settings", + side_effect=SyncError("sync failed"), + ), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sync_result is None + assert plan.sync_error_message == "sync failed" + assert plan.agent_settings is None + assert plan.sandbox_spec is not None + + +def test_start_session_runs_sandbox_runtime(tmp_path: Path) -> None: + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + resolver_result = _build_resolver_result(workspace_path) + sandbox_spec = SandboxSpec( + image="test-image", + workspace_mount=MountSpec(source=workspace_path, target=workspace_path), + workdir=workspace_path, + ) + plan = StartSessionPlan( + resolver_result=resolver_result, + workspace_path=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + current_branch=None, + effective_config=None, + sync_result=None, + sync_error_message=None, + agent_settings=None, + sandbox_spec=sandbox_spec, + ) + runtime = FakeSandboxRuntime() + dependencies = StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=FakeAgentRunner(), + sandbox_runtime=runtime, + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + handle = start_session(plan, dependencies=dependencies) + + assert handle.sandbox_id == "sandbox-1" diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py new file mode 100644 index 0000000..4486613 --- /dev/null +++ b/tests/test_bootstrap.py @@ -0,0 +1,23 @@ +"""Tests for bootstrap adapter wiring.""" + +from __future__ import annotations + +from scc_cli.adapters.claude_agent_runner import ClaudeAgentRunner +from scc_cli.adapters.docker_sandbox_runtime import DockerSandboxRuntime +from scc_cli.adapters.local_filesystem import LocalFilesystem +from scc_cli.adapters.local_git_client import LocalGitClient +from scc_cli.adapters.requests_fetcher import RequestsFetcher +from scc_cli.adapters.system_clock import SystemClock +from scc_cli.bootstrap import DefaultAdapters, get_default_adapters + + +def test_get_default_adapters_returns_expected_types() -> None: + adapters = get_default_adapters() + + assert isinstance(adapters, DefaultAdapters) + assert isinstance(adapters.filesystem, LocalFilesystem) + assert isinstance(adapters.git_client, LocalGitClient) + assert isinstance(adapters.remote_fetcher, RequestsFetcher) + assert isinstance(adapters.clock, SystemClock) + assert isinstance(adapters.agent_runner, ClaudeAgentRunner) + assert isinstance(adapters.sandbox_runtime, DockerSandboxRuntime) diff --git a/tests/test_cli.py b/tests/test_cli.py index 7ee6f68..c205102 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -24,6 +24,7 @@ SandboxNotAvailableError, ) from scc_cli.core.exit_codes import EXIT_USAGE +from tests.fakes import build_fake_adapters runner = CliRunner() @@ -130,21 +131,13 @@ def test_start_with_install_deps_runs_dependency_install(self, tmp_path): (tmp_path / "package.json").write_text("{}") with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(tmp_path, False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=build_fake_adapters(), ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), - patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), - ), - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), patch("scc_cli.commands.launch.workspace.deps.auto_install_dependencies") as mock_deps, ): mock_deps.return_value = True @@ -155,22 +148,14 @@ def test_start_with_install_deps_runs_dependency_install(self, tmp_path): def test_start_with_offline_uses_cache_only(self, tmp_path): """Should use cached config only when --offline flag set.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.remote.load_org_config") as mock_remote, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(tmp_path, False), - ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=build_fake_adapters(), ), - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.remote.load_org_config") as mock_remote, + patch("scc_cli.commands.launch.workspace.check_branch_safety"), ): mock_remote.return_value = { "schema_version": "1.0.0", @@ -185,21 +170,13 @@ def test_start_with_offline_uses_cache_only(self, tmp_path): def test_start_with_standalone_skips_org_config(self, tmp_path): """Should skip org config when --standalone flag set.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(tmp_path, False), - ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=build_fake_adapters(), ), - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), patch("scc_cli.remote.load_org_config") as mock_remote, ): runner.invoke(app, ["start", str(tmp_path), "--standalone"]) @@ -224,7 +201,7 @@ def test_worktree_with_install_deps_installs_after_create(self, tmp_path): patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), patch("scc_cli.commands.worktree.worktree_commands.git.has_commits", return_value=True), patch( - "scc_cli.commands.worktree.worktree_commands.git.create_worktree", + "scc_cli.commands.worktree.worktree_commands.create_worktree", return_value=worktree_path, ), patch( @@ -308,8 +285,8 @@ class TestStartCommandErrors: def test_start_requires_setup_first(self, tmp_path): """Should prompt for setup when not configured.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=True), - patch("scc_cli.commands.launch.app.setup.maybe_run_setup", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.maybe_run_setup", return_value=False), ): result = runner.invoke(app, ["start", str(tmp_path)]) @@ -318,16 +295,15 @@ def test_start_requires_setup_first(self, tmp_path): def test_start_shows_docker_not_found_error(self, tmp_path): """Should show helpful message when Docker not installed.""" + fake_adapters = build_fake_adapters() + fake_adapters.sandbox_runtime.ensure_available = MagicMock( + side_effect=DockerNotFoundError() + ) with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.app.docker.check_docker_available") as mock_docker, - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(tmp_path, False), - ), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.get_default_adapters", return_value=fake_adapters), ): - mock_docker.side_effect = DockerNotFoundError() result = runner.invoke(app, ["start", str(tmp_path)]) # Should show error message about Docker @@ -335,16 +311,15 @@ def test_start_shows_docker_not_found_error(self, tmp_path): def test_start_shows_docker_version_error(self, tmp_path): """Should show helpful message when Docker version too old.""" + fake_adapters = build_fake_adapters() + fake_adapters.sandbox_runtime.ensure_available = MagicMock( + side_effect=DockerVersionError(current_version="4.0.0") + ) with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.app.docker.check_docker_available") as mock_docker, - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(tmp_path, False), - ), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.get_default_adapters", return_value=fake_adapters), ): - mock_docker.side_effect = DockerVersionError(current_version="4.0.0") result = runner.invoke(app, ["start", str(tmp_path)]) # Should indicate version issue @@ -352,16 +327,15 @@ def test_start_shows_docker_version_error(self, tmp_path): def test_start_shows_sandbox_not_available_error(self, tmp_path): """Should show helpful message when sandbox not available.""" + fake_adapters = build_fake_adapters() + fake_adapters.sandbox_runtime.ensure_available = MagicMock( + side_effect=SandboxNotAvailableError() + ) with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.app.docker.check_docker_available") as mock_docker, - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(tmp_path, False), - ), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.get_default_adapters", return_value=fake_adapters), ): - mock_docker.side_effect = SandboxNotAvailableError() result = runner.invoke(app, ["start", str(tmp_path)]) # Should indicate sandbox not available diff --git a/tests/test_config_explain.py b/tests/test_config_explain.py index 658c3f0..24a98f6 100644 --- a/tests/test_config_explain.py +++ b/tests/test_config_explain.py @@ -18,7 +18,7 @@ from typer.testing import CliRunner from scc_cli import cli -from scc_cli.profiles import ( +from scc_cli.application.compute_effective_config import ( BlockedItem, ConfigDecision, DelegationDenied, @@ -199,7 +199,7 @@ def test_explain_shows_effective_plugins(self, effective_config_basic, mock_org_ ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_basic, ), ): @@ -218,7 +218,7 @@ def test_explain_shows_source_attribution(self, effective_config_basic, mock_org ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_basic, ), ): @@ -238,7 +238,7 @@ def test_explain_shows_session_config(self, effective_config_basic, mock_org_con ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_basic, ), ): @@ -266,7 +266,7 @@ def test_explain_shows_blocked_items(self, effective_config_with_blocked, mock_o ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_with_blocked, ), ): @@ -285,7 +285,7 @@ def test_explain_shows_blocked_pattern(self, effective_config_with_blocked, mock ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_with_blocked, ), ): @@ -313,7 +313,7 @@ def test_explain_shows_denied_additions(self, effective_config_with_denied, mock ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_with_denied, ), ): @@ -332,7 +332,7 @@ def test_explain_shows_denial_reason(self, effective_config_with_denied, mock_or ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_with_denied, ), ): @@ -360,7 +360,7 @@ def test_explain_filter_plugins(self, effective_config_full, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_full, ), ): @@ -380,7 +380,7 @@ def test_explain_filter_session(self, effective_config_full, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_full, ), ): @@ -410,7 +410,7 @@ def test_explain_with_workspace(self, effective_config_basic, mock_org_config, t ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_basic, ) as mock_compute, ): @@ -431,7 +431,7 @@ def test_explain_uses_cwd_by_default(self, effective_config_basic, mock_org_conf ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_basic, ) as mock_compute, ): @@ -526,7 +526,7 @@ def test_explain_shows_mcp_servers(self, effective_config_with_mcp, mock_org_con ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_with_mcp, ), ): @@ -545,7 +545,7 @@ def test_explain_shows_mcp_server_types(self, effective_config_with_mcp, mock_or ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_with_mcp, ), ): @@ -564,7 +564,7 @@ def test_explain_filter_mcp_servers(self, effective_config_with_mcp, mock_org_co ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective_config_with_mcp, ), ): @@ -633,7 +633,7 @@ def test_blocked_items_output_format_plugin(self, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), ): @@ -675,7 +675,7 @@ def test_blocked_items_output_format_mcp_server(self, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), ): @@ -722,7 +722,7 @@ def test_denied_additions_output_format(self, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), ): @@ -763,7 +763,7 @@ def test_denied_additions_shows_local_scope_hint(self, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), ): @@ -823,7 +823,7 @@ def test_active_exceptions_output_format(self, mock_org_config, tmp_path): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), patch("scc_cli.commands.config.UserStore") as mock_user_store, @@ -880,7 +880,7 @@ def test_active_exceptions_shows_scope_badge(self, mock_org_config, tmp_path): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), patch("scc_cli.commands.config.UserStore") as mock_user_store, @@ -934,7 +934,7 @@ def test_expired_exceptions_show_cleanup_hint(self, mock_org_config, tmp_path): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), patch("scc_cli.commands.config.UserStore") as mock_user_store, @@ -1002,7 +1002,7 @@ def test_explain_full_output_order(self, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), ): @@ -1045,7 +1045,7 @@ def test_explain_empty_sections_not_shown(self, mock_org_config): ), patch("scc_cli.commands.config.config.get_selected_profile", return_value="dev"), patch( - "scc_cli.commands.config.profiles.compute_effective_config", + "scc_cli.commands.config.compute_effective_config", return_value=effective, ), ): diff --git a/tests/test_config_inheritance.py b/tests/test_config_inheritance.py index 752963c..9555dc1 100644 --- a/tests/test_config_inheritance.py +++ b/tests/test_config_inheritance.py @@ -365,7 +365,7 @@ class TestComputeEffectiveConfigBasicMerge: def test_org_defaults_only(self, valid_org_config): """With no team or project, should return org defaults.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config result = compute_effective_config( org_config=valid_org_config, team_name=None, project_config=None @@ -380,7 +380,7 @@ def test_org_defaults_only(self, valid_org_config): def test_team_extends_org_defaults(self, valid_org_config): """Team profile should extend org defaults, not replace.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config result = compute_effective_config( org_config=valid_org_config, @@ -399,7 +399,7 @@ def test_team_extends_org_defaults(self, valid_org_config): def test_project_extends_team_when_delegated(self, valid_org_config, project_config): """Project should extend team config when delegation allows.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Ensure delegation is enabled valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True @@ -423,7 +423,7 @@ def test_project_extends_team_when_delegated(self, valid_org_config, project_con def test_minimal_config_uses_defaults(self, minimal_org_config): """Minimal config should use sensible defaults.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config result = compute_effective_config( org_config=minimal_org_config, team_name=None, project_config=None @@ -452,7 +452,7 @@ def test_org_disables_delegation_ignores_team_setting(self, valid_org_config, pr Even if team says allow_project_overrides: true, org's inherit_team_delegation: false should prevent project from adding anything. """ - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Org says: NO delegation to projects (master switch OFF) valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = False @@ -479,7 +479,7 @@ def test_org_disables_delegation_ignores_team_setting(self, valid_org_config, pr def test_org_enables_but_team_disables_delegation(self, valid_org_config, project_config): """When org enables but team disables, project additions should be rejected.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Org says: delegation CAN happen (master switch ON) valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True @@ -504,7 +504,7 @@ def test_org_enables_but_team_disables_delegation(self, valid_org_config, projec def test_both_org_and_team_enable_delegation(self, valid_org_config, project_config): """When both org and team enable, project additions should be allowed.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True valid_org_config["profiles"]["urban-planning"]["delegation"]["allow_project_overrides"] = ( @@ -522,7 +522,7 @@ def test_both_org_and_team_enable_delegation(self, valid_org_config, project_con def test_team_not_in_allowed_list_rejects_additions(self, valid_org_config): """Team trying to add plugins not in org's allowed list should be rejected.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Org allows only specific teams to add MCP servers valid_org_config["delegation"]["teams"]["allow_additional_mcp_servers"] = [ @@ -549,7 +549,7 @@ class TestComputeEffectiveConfigSecurityBlocks: def test_blocked_plugin_rejected_from_org_defaults(self, valid_org_config): """Plugin in org defaults that matches blocked pattern should be rejected.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Add a blocked plugin pattern valid_org_config["security"]["blocked_plugins"] = ["internal-*"] @@ -574,7 +574,7 @@ def test_blocked_plugin_rejected_from_org_defaults(self, valid_org_config): def test_blocked_plugin_rejected_from_team(self, valid_org_config): """Plugin from team profile matching blocked pattern should be rejected.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Block all gis-* plugins valid_org_config["security"]["blocked_plugins"] = ["gis-*"] @@ -590,7 +590,7 @@ def test_blocked_plugin_rejected_from_team(self, valid_org_config): def test_blocked_plugin_rejected_from_project(self, valid_org_config, project_config): """Plugin from project matching blocked pattern should be rejected.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Enable delegation valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True @@ -612,7 +612,7 @@ def test_blocked_plugin_rejected_from_project(self, valid_org_config, project_co def test_blocked_mcp_server_rejected(self, valid_org_config): """MCP server matching blocked pattern should be rejected.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Block all sundsvall.se MCP servers valid_org_config["security"]["blocked_mcp_servers"] = ["*.sundsvall.se"] @@ -629,7 +629,7 @@ def test_blocked_mcp_server_rejected(self, valid_org_config): def test_security_blocks_cannot_be_overridden(self, valid_org_config): """Security blocks apply regardless of delegation settings.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Max delegation enabled valid_org_config["delegation"]["teams"]["allow_additional_plugins"] = ["*"] @@ -653,7 +653,7 @@ class TestComputeEffectiveConfigGlobPatterns: def test_wildcard_star_matches_multiple_chars(self, valid_org_config): """Pattern * should match any characters.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["security"]["blocked_plugins"] = ["test-*-plugin"] valid_org_config["defaults"]["enabled_plugins"] = [ @@ -672,7 +672,7 @@ def test_wildcard_star_matches_multiple_chars(self, valid_org_config): def test_wildcard_question_matches_single_char(self, valid_org_config): """Pattern ? should match exactly one character.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["security"]["blocked_plugins"] = ["test-?-plugin"] valid_org_config["defaults"]["enabled_plugins"] = [ @@ -691,7 +691,7 @@ def test_wildcard_question_matches_single_char(self, valid_org_config): def test_domain_wildcard_pattern(self, valid_org_config): """Domain patterns like *.domain.com should work.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["security"]["blocked_mcp_servers"] = ["*.evil.com"] valid_org_config["defaults"]["allowed_mcp_servers"] = None @@ -712,7 +712,7 @@ def test_domain_wildcard_pattern(self, valid_org_config): def test_exact_match_pattern(self, valid_org_config): """Exact patterns (no wildcards) should match exactly.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["security"]["blocked_plugins"] = ["exact-plugin"] valid_org_config["defaults"]["enabled_plugins"] = [ @@ -735,7 +735,7 @@ class TestComputeEffectiveConfigDecisionTracking: def test_decisions_track_plugin_sources(self, valid_org_config, project_config): """Decisions should track where each plugin came from.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True valid_org_config["profiles"]["urban-planning"]["delegation"]["allow_project_overrides"] = ( @@ -757,7 +757,7 @@ def test_decisions_track_plugin_sources(self, valid_org_config, project_config): def test_blocked_items_tracked_with_pattern(self, valid_org_config): """Blocked items should show which pattern blocked them.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["security"]["blocked_plugins"] = ["gis-*"] @@ -775,7 +775,7 @@ def test_blocked_items_tracked_with_pattern(self, valid_org_config): def test_denied_additions_tracked_with_reason(self, valid_org_config, project_config): """Denied additions should explain why they were denied.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Disable delegation at org level valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = False @@ -1042,7 +1042,7 @@ class TestProjectConfigIntegration: def test_compute_effective_config_loads_project_from_path(self, valid_org_config, tmp_path): """compute_effective_config should load project config from workspace path.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # Enable delegation valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True @@ -1072,7 +1072,7 @@ def test_compute_effective_config_loads_project_from_path(self, valid_org_config def test_compute_effective_config_no_project_file_is_ok(self, valid_org_config, tmp_path): """compute_effective_config should work when no .scc.yaml exists.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config # No .scc.yaml file created @@ -1089,7 +1089,7 @@ def test_compute_effective_config_project_config_dict_still_works( self, valid_org_config, project_config ): """Passing project_config dict directly should still work (backward compat).""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True valid_org_config["profiles"]["urban-planning"]["delegation"]["allow_project_overrides"] = ( @@ -1109,7 +1109,7 @@ def test_compute_effective_config_workspace_path_overrides_project_config( self, valid_org_config, project_config, tmp_path ): """When both workspace_path and project_config provided, workspace_path wins.""" - from scc_cli.profiles import compute_effective_config + from scc_cli.application.compute_effective_config import compute_effective_config valid_org_config["delegation"]["projects"]["inherit_team_delegation"] = True valid_org_config["profiles"]["urban-planning"]["delegation"]["allow_project_overrides"] = ( @@ -1146,8 +1146,8 @@ class TestClaudeAdapterWithEffectiveConfig: def test_build_settings_from_effective_config_plugins(self, valid_org_config): """build_settings_from_effective_config should include effective plugins.""" + from scc_cli.application.compute_effective_config import compute_effective_config from scc_cli.claude_adapter import build_settings_from_effective_config - from scc_cli.profiles import compute_effective_config # Compute effective config effective = compute_effective_config( @@ -1171,8 +1171,8 @@ def test_build_settings_from_effective_config_plugins(self, valid_org_config): def test_build_settings_from_effective_config_mcp_servers(self, valid_org_config): """build_settings_from_effective_config should include MCP servers.""" + from scc_cli.application.compute_effective_config import compute_effective_config from scc_cli.claude_adapter import build_settings_from_effective_config - from scc_cli.profiles import compute_effective_config effective = compute_effective_config( org_config=valid_org_config, @@ -1192,8 +1192,8 @@ def test_build_settings_from_effective_config_mcp_servers(self, valid_org_config def test_build_settings_blocked_plugins_not_included(self, valid_org_config): """Blocked plugins should not appear in Claude settings.""" + from scc_cli.application.compute_effective_config import compute_effective_config from scc_cli.claude_adapter import build_settings_from_effective_config - from scc_cli.profiles import compute_effective_config # Block gis-tools valid_org_config["security"]["blocked_plugins"].append("gis-tools") diff --git a/tests/test_evaluation.py b/tests/test_evaluation.py index 717ac27..3d62dc2 100644 --- a/tests/test_evaluation.py +++ b/tests/test_evaluation.py @@ -746,8 +746,8 @@ class TestEvaluateFunction: def test_evaluate_empty_config(self): """Empty EffectiveConfig produces empty EvaluationResult.""" + from scc_cli.application.compute_effective_config import EffectiveConfig from scc_cli.evaluation import evaluate - from scc_cli.profiles import EffectiveConfig config = EffectiveConfig() result = evaluate(config) @@ -759,9 +759,9 @@ def test_evaluate_empty_config(self): def test_evaluate_blocked_plugin_has_security_reason(self): """Blocked plugins get BlockReason.SECURITY annotation.""" + from scc_cli.application.compute_effective_config import BlockedItem as ProfileBlockedItem + from scc_cli.application.compute_effective_config import EffectiveConfig from scc_cli.evaluation import evaluate - from scc_cli.profiles import BlockedItem as ProfileBlockedItem - from scc_cli.profiles import EffectiveConfig config = EffectiveConfig( blocked_items=[ @@ -785,9 +785,9 @@ def test_evaluate_blocked_plugin_has_security_reason(self): def test_evaluate_blocked_mcp_server_has_security_reason(self): """Blocked MCP servers get BlockReason.SECURITY annotation.""" + from scc_cli.application.compute_effective_config import BlockedItem as ProfileBlockedItem + from scc_cli.application.compute_effective_config import EffectiveConfig from scc_cli.evaluation import evaluate - from scc_cli.profiles import BlockedItem as ProfileBlockedItem - from scc_cli.profiles import EffectiveConfig config = EffectiveConfig( blocked_items=[ @@ -810,8 +810,8 @@ def test_evaluate_blocked_mcp_server_has_security_reason(self): def test_evaluate_denied_plugin_has_delegation_reason(self): """Denied plugins get BlockReason.DELEGATION annotation.""" + from scc_cli.application.compute_effective_config import DelegationDenied, EffectiveConfig from scc_cli.evaluation import evaluate - from scc_cli.profiles import DelegationDenied, EffectiveConfig config = EffectiveConfig( denied_additions=[ @@ -835,8 +835,8 @@ def test_evaluate_denied_plugin_has_delegation_reason(self): def test_evaluate_denied_mcp_server_has_delegation_reason(self): """Denied MCP servers get BlockReason.DELEGATION annotation.""" + from scc_cli.application.compute_effective_config import DelegationDenied, EffectiveConfig from scc_cli.evaluation import evaluate - from scc_cli.profiles import DelegationDenied, EffectiveConfig config = EffectiveConfig( denied_additions=[ @@ -859,14 +859,14 @@ def test_evaluate_denied_mcp_server_has_delegation_reason(self): def test_evaluate_mixed_blocked_and_denied(self): """Config with both blocked and denied items converts correctly.""" - from scc_cli.evaluation import evaluate - from scc_cli.profiles import ( + from scc_cli.application.compute_effective_config import ( BlockedItem as ProfileBlockedItem, ) - from scc_cli.profiles import ( + from scc_cli.application.compute_effective_config import ( DelegationDenied, EffectiveConfig, ) + from scc_cli.evaluation import evaluate config = EffectiveConfig( blocked_items=[ @@ -914,14 +914,14 @@ def test_evaluate_mixed_blocked_and_denied(self): def test_evaluate_preserves_target_types(self): """Target types are correctly preserved during conversion.""" - from scc_cli.evaluation import evaluate - from scc_cli.profiles import ( + from scc_cli.application.compute_effective_config import ( BlockedItem as ProfileBlockedItem, ) - from scc_cli.profiles import ( + from scc_cli.application.compute_effective_config import ( DelegationDenied, EffectiveConfig, ) + from scc_cli.evaluation import evaluate config = EffectiveConfig( blocked_items=[ diff --git a/tests/test_git_safety.py b/tests/test_git_safety.py index 1381965..f3cd15a 100644 --- a/tests/test_git_safety.py +++ b/tests/test_git_safety.py @@ -17,6 +17,7 @@ from scc_cli import git from scc_cli.core.errors import WorktreeCreationError +from scc_cli.ui import check_branch_safety, cleanup_worktree, create_worktree, list_worktrees # ═══════════════════════════════════════════════════════════════════════════════ # Fixtures - Real Git Repos @@ -110,7 +111,7 @@ def test_create_worktree_uses_scc_prefix(self, temp_git_repo): patch("scc_cli.ui.git_interactive._fetch_branch"), patch("scc_cli.ui.git_interactive.install_dependencies"), ): - worktree_path = git.create_worktree( + worktree_path = create_worktree( temp_git_repo, "test-prefix", console=console, @@ -323,7 +324,7 @@ class TestCheckBranchSafety: def test_allows_feature_branch_without_prompt(self, temp_git_repo_on_feature): """Feature branches should pass without any prompts.""" console = MagicMock() - result = git.check_branch_safety(temp_git_repo_on_feature, console) + result = check_branch_safety(temp_git_repo_on_feature, console) assert result is True # Should not have shown any warning panels console.print.assert_not_called() @@ -334,7 +335,7 @@ def test_warns_on_main_branch(self, temp_git_repo_on_main): with patch("scc_cli.ui.git_interactive.prompt_with_layout") as mock_prompt: # User chooses to continue (option 2) mock_prompt.return_value = "2" - result = git.check_branch_safety(temp_git_repo_on_main, console) + result = check_branch_safety(temp_git_repo_on_main, console) assert result is True # Should have printed warning @@ -345,7 +346,7 @@ def test_cancel_on_protected_branch_returns_false(self, temp_git_repo_on_main): console = MagicMock() with patch("scc_cli.ui.git_interactive.prompt_with_layout") as mock_prompt: mock_prompt.return_value = "3" # Cancel - result = git.check_branch_safety(temp_git_repo_on_main, console) + result = check_branch_safety(temp_git_repo_on_main, console) assert result is False @@ -356,7 +357,7 @@ def test_creates_branch_when_user_chooses(self, temp_git_repo_on_main): # First call: choose to create branch # Second call: branch name mock_prompt.side_effect = ["1", "my-new-feature"] - result = git.check_branch_safety(temp_git_repo_on_main, console) + result = check_branch_safety(temp_git_repo_on_main, console) assert result is True # Verify branch was actually created @@ -368,7 +369,7 @@ def test_passes_non_git_directory(self, tmp_path): non_repo = tmp_path / "not-a-repo" non_repo.mkdir() console = MagicMock() - result = git.check_branch_safety(non_repo, console) + result = check_branch_safety(non_repo, console) assert result is True @@ -425,7 +426,7 @@ def test_creates_worktree_directory(self, temp_git_repo): patch("scc_cli.ui.git_interactive._fetch_branch"), # Skip fetch, no remote in temp repo patch("scc_cli.ui.git_interactive.install_dependencies"), # Skip deps install ): - worktree_path = git.create_worktree( + worktree_path = create_worktree( temp_git_repo, "test-feature", console=console, @@ -447,7 +448,7 @@ def test_creates_branch_with_prefix(self, temp_git_repo): patch("scc_cli.ui.git_interactive._fetch_branch"), # Skip fetch, no remote in temp repo patch("scc_cli.ui.git_interactive.install_dependencies"), # Skip deps install ): - worktree_path = git.create_worktree( + worktree_path = create_worktree( temp_git_repo, "my-feature", console=console, @@ -479,7 +480,7 @@ def fake_create_worktree(*_args, **_kwargs) -> None: ) as mock_install, ): with pytest.raises(WorktreeCreationError): - git.create_worktree(temp_git_repo, "fail", console=console) + create_worktree(temp_git_repo, "fail", console=console) assert mock_install.called @@ -520,7 +521,7 @@ def test_warns_about_uncommitted_changes(self, temp_git_repo): # Cleanup with user declining with patch("scc_cli.ui.git_interactive.confirm_with_layout", return_value=False): - result = git.cleanup_worktree( + result = cleanup_worktree( temp_git_repo, "dirty-feature", force=False, @@ -555,7 +556,7 @@ def test_force_deletes_without_confirmation(self, temp_git_repo): # Don't delete branch with patch("scc_cli.ui.git_interactive.confirm_with_layout", return_value=False): - result = git.cleanup_worktree( + result = cleanup_worktree( temp_git_repo, "force-delete", force=True, @@ -569,7 +570,7 @@ def test_force_deletes_without_confirmation(self, temp_git_repo): def test_returns_false_for_nonexistent_worktree(self, temp_git_repo): """Should return False if worktree doesn't exist.""" console = MagicMock() - result = git.cleanup_worktree( + result = cleanup_worktree( temp_git_repo, "nonexistent-worktree", force=False, @@ -588,7 +589,7 @@ class TestListWorktrees: def test_lists_main_worktree(self, temp_git_repo): """Should list at least the main worktree.""" - worktrees = git.list_worktrees(temp_git_repo) + worktrees = list_worktrees(temp_git_repo) assert len(worktrees) >= 1 def test_lists_created_worktree(self, temp_git_repo): @@ -604,7 +605,7 @@ def test_lists_created_worktree(self, temp_git_repo): capture_output=True, ) - worktrees = git.list_worktrees(temp_git_repo) + worktrees = list_worktrees(temp_git_repo) paths = [str(w.path) for w in worktrees] assert any("list-test" in p for p in paths) diff --git a/tests/test_git_safety_critical.py b/tests/test_git_safety_critical.py index f2e892a..c28b664 100644 --- a/tests/test_git_safety_critical.py +++ b/tests/test_git_safety_critical.py @@ -27,6 +27,7 @@ import pytest from scc_cli import git +from scc_cli.ui import cleanup_worktree # ═══════════════════════════════════════════════════════════════════════════════ # Fixtures for Real Git Operations @@ -129,7 +130,7 @@ def test_rmtree_fallback_triggered_when_git_worktree_remove_fails(self, worktree with patch( "scc_cli.ui.git_interactive.confirm_with_layout", return_value=False ): # Don't delete branch - result = git.cleanup_worktree( + result = cleanup_worktree( repo, worktree_setup["worktree_name"], force=True, @@ -160,7 +161,7 @@ def test_rmtree_fallback_does_not_affect_main_repo(self, worktree_setup): (worktree_path / ".git").write_text("corrupted") with patch("scc_cli.ui.git_interactive.confirm_with_layout", return_value=False): - git.cleanup_worktree( + cleanup_worktree( repo, worktree_setup["worktree_name"], force=True, @@ -190,7 +191,7 @@ def test_rmtree_handles_worktree_with_uncommitted_changes_when_forced(self, work # Force cleanup (skips confirmation) with patch("scc_cli.ui.git_interactive.confirm_with_layout", return_value=False): - result = git.cleanup_worktree( + result = cleanup_worktree( worktree_setup["repo"], worktree_setup["worktree_name"], force=True, @@ -206,7 +207,7 @@ def test_cleanup_nonexistent_worktree_returns_false(self, real_git_repo): """Attempting to clean up non-existent worktree should fail gracefully.""" console = MagicMock() - result = git.cleanup_worktree( + result = cleanup_worktree( real_git_repo, "nonexistent-worktree", force=False, diff --git a/tests/test_git_worktree.py b/tests/test_git_worktree.py index 03bf44f..2d085c2 100644 --- a/tests/test_git_worktree.py +++ b/tests/test_git_worktree.py @@ -357,7 +357,7 @@ def test_detects_scc_yaml_project(self, tmp_path): subdir.mkdir() # Mock git not being available - with patch("scc_cli.git.check_git_installed", return_value=False): + with patch("scc_cli.services.git.core.check_git_installed", return_value=False): root, start_cwd = detect_workspace_root(subdir) assert root == project @@ -376,7 +376,7 @@ def test_detects_git_file_worktree(self, tmp_path): subdir.mkdir() # Mock git not being available to test fallback to .git file detection - with patch("scc_cli.git.check_git_installed", return_value=False): + with patch("scc_cli.services.git.core.check_git_installed", return_value=False): root, start_cwd = detect_workspace_root(subdir) assert root == worktree @@ -392,7 +392,7 @@ def test_returns_none_for_non_workspace(self, tmp_path): nested = plain_dir / "deep" / "path" nested.mkdir(parents=True) - with patch("scc_cli.git.check_git_installed", return_value=False): + with patch("scc_cli.services.git.core.check_git_installed", return_value=False): root, start_cwd = detect_workspace_root(nested) assert root is None @@ -430,7 +430,7 @@ def test_scc_yaml_at_parent_dir(self, tmp_path): deep = project / "packages" / "core" / "src" deep.mkdir(parents=True) - with patch("scc_cli.git.check_git_installed", return_value=False): + with patch("scc_cli.services.git.core.check_git_installed", return_value=False): root, start_cwd = detect_workspace_root(deep) assert root == project diff --git a/tests/test_handle_errors.py b/tests/test_handle_errors.py new file mode 100644 index 0000000..34d5a44 --- /dev/null +++ b/tests/test_handle_errors.py @@ -0,0 +1,62 @@ +"""Tests for handle_errors JSON behavior.""" + +from __future__ import annotations + +import json +from collections.abc import Generator + +import pytest +import typer + +from scc_cli.cli_common import handle_errors +from scc_cli.core.errors import ConfigError +from scc_cli.core.exit_codes import EXIT_CANCELLED, EXIT_CONFIG +from scc_cli.output_mode import json_command_mode + + +@pytest.fixture(autouse=True) +def reset_output_mode_state() -> Generator[None, None, None]: + """Reset JSON output state to avoid cross-test leakage.""" + from scc_cli.output_mode import _json_command_mode, _json_mode, _pretty_mode + + _pretty_mode.set(False) + _json_mode.set(False) + _json_command_mode.set(False) + yield + _pretty_mode.set(False) + _json_mode.set(False) + _json_command_mode.set(False) + + +@handle_errors +def _raise_config_error() -> None: + raise ConfigError(user_message="Config boom") + + +@handle_errors +def _raise_keyboard_interrupt() -> None: + raise KeyboardInterrupt + + +def test_handle_errors_config_error_json(capsys: pytest.CaptureFixture[str]) -> None: + with json_command_mode(): + with pytest.raises(typer.Exit) as exc_info: + _raise_config_error() + + assert exc_info.value.exit_code == EXIT_CONFIG + payload = json.loads(capsys.readouterr().out) + + assert payload["kind"] == "Error" + assert payload["status"]["errors"] == ["Config boom"] + + +def test_handle_errors_keyboard_interrupt_json(capsys: pytest.CaptureFixture[str]) -> None: + with json_command_mode(): + with pytest.raises(typer.Exit) as exc_info: + _raise_keyboard_interrupt() + + assert exc_info.value.exit_code == EXIT_CANCELLED + payload = json.loads(capsys.readouterr().out) + + assert payload["kind"] == "Error" + assert payload["status"]["errors"] == ["Operation cancelled by user"] diff --git a/tests/test_import_boundaries.py b/tests/test_import_boundaries.py index 0b9bfe7..c23d396 100644 --- a/tests/test_import_boundaries.py +++ b/tests/test_import_boundaries.py @@ -159,6 +159,68 @@ def test_core_does_not_import_commands(self) -> None: assert result.returncode == 1, f"core/ imports commands/:\n{result.stdout}" +class TestApplicationLayerBoundaries: + """Application layer must not depend on UI or commands.""" + + def test_application_does_not_import_ui_or_commands(self) -> None: + """application/ must not import from ui/ or commands/.""" + application_path = SRC / "application" + if not application_path.exists(): + return + + result = subprocess.run( + [ + "grep", + "-rE", + r"(from scc_cli\.ui|import scc_cli\.ui|from scc_cli\.commands|import scc_cli\.commands)", + str(application_path), + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1, ( + f"application/ imports ui/ or commands/ modules:\n{result.stdout}" + ) + + +class TestAdapterBoundaries: + """Adapter layer must not depend on UI and only bootstrap composes adapters.""" + + def test_adapters_do_not_import_ui(self) -> None: + """adapters/ must not import from ui/.""" + adapters_path = SRC / "adapters" + if not adapters_path.exists(): + return + + result = subprocess.run( + [ + "grep", + "-rE", + r"(from scc_cli\.ui|import scc_cli\.ui|from \.\.ui)", + str(adapters_path), + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1, f"adapters/ imports ui/:\n{result.stdout}" + + def test_only_bootstrap_imports_adapters(self) -> None: + """Only bootstrap.py should import adapters for composition.""" + result = subprocess.run( + [ + "grep", + "-rE", + r"(from scc_cli\.adapters|import scc_cli\.adapters|from \.\.adapters)", + str(SRC), + "--exclude-dir=adapters", + "--exclude=bootstrap.py", + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1, f"Non-bootstrap modules import adapters:\n{result.stdout}" + + class TestGitModuleBoundary: """git.py facade must have no Rich imports after Phase 4 refactoring. @@ -492,3 +554,83 @@ def test_no_characterization_suffix_test_files(self) -> None: f"Characterization tests are temporary refactoring safety nets.\n" f"Convert to proper tests and delete when refactoring is complete." ) + + +class TestPortsBoundary: + """ports/ must not depend on UI or command layers.""" + + def test_ports_no_ui_imports(self) -> None: + """ports/ must not import from ui/.""" + ports_path = SRC / "ports" + if not ports_path.exists(): + return + + result = subprocess.run( + [ + "grep", + "-rE", + r"(from scc_cli\.ui|from \.\.ui|import scc_cli\.ui)", + str(ports_path), + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1, f"ports/ imports ui/:\n{result.stdout}" + + def test_ports_no_commands_imports(self) -> None: + """ports/ must not import from commands/.""" + ports_path = SRC / "ports" + if not ports_path.exists(): + return + + result = subprocess.run( + [ + "grep", + "-rE", + r"(from scc_cli\.commands|from \.\.commands|import scc_cli\.commands)", + str(ports_path), + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1, f"ports/ imports commands/:\n{result.stdout}" + + +class TestAdaptersBoundary: + """adapters/ must not depend on UI or command layers.""" + + def test_adapters_no_ui_imports(self) -> None: + """adapters/ must not import from ui/.""" + adapters_path = SRC / "adapters" + if not adapters_path.exists(): + return + + result = subprocess.run( + [ + "grep", + "-rE", + r"(from scc_cli\.ui|from \.\.ui|import scc_cli\.ui)", + str(adapters_path), + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1, f"adapters/ imports ui/:\n{result.stdout}" + + def test_adapters_no_commands_imports(self) -> None: + """adapters/ must not import from commands/.""" + adapters_path = SRC / "adapters" + if not adapters_path.exists(): + return + + result = subprocess.run( + [ + "grep", + "-rE", + r"(from scc_cli\.commands|from \.\.commands|import scc_cli\.commands)", + str(adapters_path), + ], + capture_output=True, + text=True, + ) + assert result.returncode == 1, f"adapters/ imports commands/:\n{result.stdout}" diff --git a/tests/test_integration.py b/tests/test_integration.py index f1e5563..4f2bdad 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -18,6 +18,7 @@ from typer.testing import CliRunner from scc_cli.cli import app +from tests.fakes import build_fake_adapters runner = CliRunner() @@ -197,7 +198,7 @@ class TestStartWorkflow: def test_start_requires_setup_first(self, full_config_environment, git_workspace): """Start should prompt for setup if not configured.""" - with patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=True): + with patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=True): result = runner.invoke(app, ["start", str(git_workspace)]) # Should indicate setup is needed @@ -205,30 +206,24 @@ def test_start_requires_setup_first(self, full_config_environment, git_workspace def test_start_with_workspace_launches_docker(self, full_config_environment, git_workspace): """Start with workspace should launch Docker sandbox.""" + fake_adapters = build_fake_adapters() + with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(git_workspace, False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), - patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), - ) as mock_get_container, - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), ): runner.invoke(app, ["start", str(git_workspace)]) - # Docker should be called - mock_get_container.assert_called_once() + # Sandbox runtime should be invoked + assert fake_adapters.sandbox_runtime.list_running() def test_start_with_team_resolves_profile( self, full_config_environment, git_workspace, sample_org_config @@ -247,25 +242,17 @@ def test_start_with_team_resolves_profile( ) ) + fake_adapters = build_fake_adapters() + with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config") as mock_load_config, + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config") as mock_load_config, patch("scc_cli.remote.load_org_config", return_value=sample_org_config), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch( - "scc_cli.commands.launch.workspace.git.get_current_branch", return_value="feature-x" - ), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(git_workspace, False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), - patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), - ), - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), ): mock_load_config.return_value = { "organization_source": {"url": "https://gitlab.test.org/config.json"}, @@ -281,20 +268,20 @@ def test_cancel_at_protected_branch_prompt_exits(self, full_config_environment, """Cancelling at protected branch prompt should exit with EXIT_CANCELLED.""" from scc_cli.core.exit_codes import EXIT_CANCELLED + fake_adapters = build_fake_adapters() + with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - # Simulate user cancelling at protected branch prompt - patch("scc_cli.commands.launch.workspace.git.check_branch_safety", return_value=False), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(git_workspace, False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), + # Simulate user cancelling at protected branch prompt + patch("scc_cli.commands.launch.workspace.check_branch_safety", return_value=False), ): result = runner.invoke(app, ["start", str(git_workspace)]) @@ -383,7 +370,7 @@ def test_worktree_creates_branch_and_worktree(self, full_config_environment, git """Worktree command should create git worktree and branch.""" with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), - patch("scc_cli.commands.worktree.worktree_commands.git.create_worktree") as mock_create, + patch("scc_cli.commands.worktree.worktree_commands.create_worktree") as mock_create, patch( "scc_cli.commands.worktree.worktree_commands.Confirm.ask", return_value=False ), # Don't start claude @@ -406,7 +393,7 @@ def test_worktree_with_install_deps(self, full_config_environment, git_workspace with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), patch( - "scc_cli.commands.worktree.worktree_commands.git.create_worktree", + "scc_cli.commands.worktree.worktree_commands.create_worktree", return_value=worktree_path, ), patch( @@ -453,23 +440,17 @@ def test_start_offline_uses_cache_only(self, full_config_environment, git_worksp ) ) + fake_adapters = build_fake_adapters() + with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config") as mock_load_config, + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config") as mock_load_config, patch("scc_cli.remote.load_org_config") as mock_remote, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(git_workspace, False), - ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), ): mock_load_config.return_value = { "organization_source": {"url": "https://gitlab.test.org/config.json"}, @@ -497,26 +478,20 @@ class TestStandaloneWorkflow: def test_start_standalone_skips_org_config(self, full_config_environment, git_workspace): """--standalone should skip org config entirely.""" + fake_adapters = build_fake_adapters() + with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), patch("scc_cli.remote.load_org_config") as mock_remote, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(git_workspace, False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), - patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), - ), - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), ): runner.invoke(app, ["start", str(git_workspace), "--standalone"]) @@ -537,25 +512,19 @@ def test_start_with_install_deps(self, full_config_environment, git_workspace): # Create package.json to trigger npm detection (git_workspace / "package.json").write_text("{}") + fake_adapters = build_fake_adapters() + with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.workspace.git.get_current_branch", return_value="main"), - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(git_workspace, False), - ), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.run"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), patch("scc_cli.commands.launch.workspace.deps.auto_install_dependencies") as mock_deps, ): mock_deps.return_value = True diff --git a/tests/test_json_command.py b/tests/test_json_command.py new file mode 100644 index 0000000..0f7b6f9 --- /dev/null +++ b/tests/test_json_command.py @@ -0,0 +1,130 @@ +"""Tests for json_command decorator behavior. + +These tests lock current JSON envelope and exit code behavior before refactors. +""" + +from __future__ import annotations + +import json +from collections.abc import Callable, Generator +from typing import Any + +import pytest +import typer + +from scc_cli.core.errors import ConfigError, PolicyViolationError +from scc_cli.core.exit_codes import EXIT_CANCELLED, EXIT_CONFIG, EXIT_GOVERNANCE, EXIT_SUCCESS +from scc_cli.json_command import json_command +from scc_cli.kinds import Kind + +JsonCommand = Callable[..., dict[str, Any]] + + +@pytest.fixture(autouse=True) +def reset_output_mode_state() -> Generator[None, None, None]: + """Reset JSON output state to avoid cross-test leakage.""" + from scc_cli.output_mode import _json_command_mode, _json_mode, _pretty_mode + + _pretty_mode.set(False) + _json_mode.set(False) + _json_command_mode.set(False) + yield + _pretty_mode.set(False) + _json_mode.set(False) + _json_command_mode.set(False) + + +def _build_success_command() -> JsonCommand: + @json_command(Kind.TEAM_LIST) + def command(json_output: bool = False, pretty: bool = False) -> dict[str, Any]: + return {"teams": ["alpha", "beta"]} + + return command + + +def _build_config_error_command() -> JsonCommand: + @json_command(Kind.TEAM_LIST) + def command(json_output: bool = False, pretty: bool = False) -> dict[str, Any]: + raise ConfigError() + + return command + + +def _build_policy_error_command() -> JsonCommand: + @json_command(Kind.TEAM_LIST) + def command(json_output: bool = False, pretty: bool = False) -> dict[str, Any]: + raise PolicyViolationError(item="dangerous") + + return command + + +def _build_keyboard_interrupt_command() -> JsonCommand: + @json_command(Kind.TEAM_LIST) + def command(json_output: bool = False, pretty: bool = False) -> dict[str, Any]: + raise KeyboardInterrupt + + return command + + +def test_json_command_success_envelope(capsys: pytest.CaptureFixture[str]) -> None: + command = _build_success_command() + + with pytest.raises(typer.Exit) as exc_info: + command(json_output=True, pretty=False) + + assert exc_info.value.exit_code == EXIT_SUCCESS + payload = json.loads(capsys.readouterr().out) + + assert payload["kind"] == Kind.TEAM_LIST + assert payload["status"]["ok"] is True + assert payload["data"] == {"teams": ["alpha", "beta"]} + + +def test_json_command_non_json_mode_returns_value( + capsys: pytest.CaptureFixture[str], +) -> None: + command = _build_success_command() + + result = command(json_output=False, pretty=False) + + assert result == {"teams": ["alpha", "beta"]} + assert capsys.readouterr().out == "" + + +def test_json_command_config_error_exit_code(capsys: pytest.CaptureFixture[str]) -> None: + command = _build_config_error_command() + + with pytest.raises(typer.Exit) as exc_info: + command(json_output=True, pretty=False) + + assert exc_info.value.exit_code == EXIT_CONFIG + payload = json.loads(capsys.readouterr().out) + + assert payload["status"]["ok"] is False + assert payload["status"]["errors"] == ["Configuration error"] + + +def test_json_command_policy_violation_exit_code(capsys: pytest.CaptureFixture[str]) -> None: + command = _build_policy_error_command() + + with pytest.raises(typer.Exit) as exc_info: + command(json_output=True, pretty=False) + + assert exc_info.value.exit_code == EXIT_GOVERNANCE + payload = json.loads(capsys.readouterr().out) + + assert payload["status"]["ok"] is False + assert "blocked" in payload["status"]["errors"][0] + + +def test_json_command_keyboard_interrupt_exit_code(capsys: pytest.CaptureFixture[str]) -> None: + command = _build_keyboard_interrupt_command() + + with pytest.raises(typer.Exit) as exc_info: + command(json_output=True, pretty=False) + + assert exc_info.value.exit_code == EXIT_CANCELLED + payload = json.loads(capsys.readouterr().out) + + assert payload["status"]["ok"] is False + assert payload["status"]["errors"] == ["Cancelled"] diff --git a/tests/test_maintenance_tasks.py b/tests/test_maintenance_tasks.py new file mode 100644 index 0000000..aba12c3 --- /dev/null +++ b/tests/test_maintenance_tasks.py @@ -0,0 +1,236 @@ +from __future__ import annotations + +from datetime import datetime, timedelta, timezone +from pathlib import Path + +from scc_cli import config, contexts, sessions +from scc_cli.docker.core import ContainerInfo +from scc_cli.maintenance.cache_cleanup import ( + cleanup_expired_exceptions, + clear_cache, + clear_contexts, + prune_containers, +) +from scc_cli.maintenance.migrations import ( + factory_reset, + reset_config, + reset_exceptions, +) +from scc_cli.maintenance.repair_sessions import ( + delete_all_sessions, + prune_sessions, +) +from scc_cli.maintenance.types import ResetResult, RiskTier +from scc_cli.models.exceptions import AllowTargets, Exception, ExceptionFile +from scc_cli.stores.exception_store import RepoStore, UserStore + + +def _make_exception(exc_id: str, expires_at: datetime) -> Exception: + return Exception( + id=exc_id, + created_at=(expires_at - timedelta(days=2)).isoformat(), + expires_at=expires_at.isoformat(), + reason="test", + scope="local", + allow=AllowTargets(plugins=["scc-test"]), + ) + + +def _write_exceptions(store: UserStore | RepoStore, exceptions: list[Exception]) -> None: + store.write(ExceptionFile(exceptions=exceptions)) + + +def test_clear_cache_dry_run_keeps_files(temp_config_dir: Path) -> None: + cache_dir = config.CACHE_DIR + cache_file = cache_dir / "cache.txt" + cache_file.write_text("data") + + result = clear_cache(dry_run=True) + + assert result.removed_count == 1 + assert cache_file.exists() + + +def test_clear_cache_removes_files(temp_config_dir: Path) -> None: + cache_dir = config.CACHE_DIR + cache_file = cache_dir / "cache.txt" + cache_file.write_text("data") + + result = clear_cache(dry_run=False) + + assert result.success is True + assert result.removed_count == 1 + assert cache_dir.exists() + assert not cache_file.exists() + + +def test_cleanup_expired_exceptions_prunes(temp_config_dir: Path) -> None: + now = datetime.now(timezone.utc) + expired = _make_exception("expired", now - timedelta(days=1)) + active = _make_exception("active", now + timedelta(days=1)) + user_store = UserStore() + _write_exceptions(user_store, [expired, active]) + + result = cleanup_expired_exceptions(dry_run=False) + + assert result.removed_count == 1 + remaining = user_store.read() + assert [exc.id for exc in remaining.exceptions] == ["active"] + + +def test_clear_contexts_clears_cache(temp_config_dir: Path, tmp_path: Path) -> None: + workspace = tmp_path / "repo" + context = contexts.WorkContext( + team=None, + repo_root=workspace, + worktree_path=workspace, + worktree_name="main", + ) + contexts.record_context(context) + + result = clear_contexts(dry_run=False) + + assert result.removed_count == 1 + assert contexts.load_recent_contexts(limit=10) == [] + + +def test_prune_containers_removes_stopped(monkeypatch) -> None: + containers = [ + ContainerInfo(id="abc", name="scc-one", status="Exited"), + ContainerInfo(id="def", name="scc-two", status="running"), + ContainerInfo(id="", name="scc-three", status="Stopped"), + ] + removed: list[str] = [] + + from scc_cli import docker + + monkeypatch.setattr(docker, "_list_all_sandbox_containers", lambda: containers) + + def _fake_remove(container_id: str) -> None: + removed.append(container_id) + + monkeypatch.setattr(docker, "remove_container", _fake_remove) + + result = prune_containers(dry_run=False) + + assert result.removed_count == 2 + assert set(removed) == {"abc", "scc-three"} + + +def test_prune_sessions_removes_old_entries(temp_config_dir: Path) -> None: + now = datetime.now(timezone.utc) + sessions._save_sessions( + [ + {"workspace": "one", "last_used": (now - timedelta(days=1)).isoformat()}, + {"workspace": "two", "last_used": (now - timedelta(days=60)).isoformat()}, + {"workspace": "three", "last_used": (now - timedelta(days=45)).isoformat()}, + ] + ) + + result = prune_sessions(older_than_days=30, keep_n=1, dry_run=False) + + assert result.removed_count == 2 + remaining = sessions._load_sessions() + assert [session["workspace"] for session in remaining] == ["one"] + + +def test_delete_all_sessions_creates_backup(temp_config_dir: Path) -> None: + sessions._save_sessions( + [ + {"workspace": "one", "last_used": "2024-01-01T00:00:00+00:00"}, + {"workspace": "two", "last_used": "2024-01-02T00:00:00+00:00"}, + ] + ) + + result = delete_all_sessions(dry_run=False, create_backup=True) + + assert result.removed_count == 2 + assert result.backup_path is not None + assert result.backup_path.exists() + assert sessions._load_sessions() == [] + + +def test_reset_exceptions_resets_user_and_repo(temp_config_dir: Path, tmp_path: Path) -> None: + now = datetime.now(timezone.utc) + user_store = UserStore() + repo_root = tmp_path / "repo" + repo_root.mkdir() + repo_store = RepoStore(repo_root) + + _write_exceptions( + user_store, + [_make_exception("user-1", now + timedelta(days=1))], + ) + _write_exceptions( + repo_store, + [_make_exception("repo-1", now + timedelta(days=2))], + ) + + result = reset_exceptions(scope="all", repo_root=repo_root, create_backup=True) + + assert result.removed_count == 2 + assert result.backup_path is not None + assert result.backup_path.exists() + assert not user_store.path.exists() + assert not repo_store.path.exists() + + +def test_reset_config_creates_backup(temp_config_dir: Path) -> None: + config.CONFIG_FILE.write_text('{"profile": "dev"}') + + result = reset_config(dry_run=False, create_backup=True) + + assert result.success is True + assert result.backup_path is not None + assert result.backup_path.exists() + assert not config.CONFIG_FILE.exists() + + +def test_factory_reset_stops_on_failure(monkeypatch) -> None: + def _make_result(action_id: str, success: bool = True) -> ResetResult: + return ResetResult( + success=success, + action_id=action_id, + risk_tier=RiskTier.FACTORY_RESET, + message="ok", + ) + + monkeypatch.setattr( + "scc_cli.maintenance.migrations.reset_config", + lambda **_: _make_result("reset_config"), + ) + monkeypatch.setattr( + "scc_cli.maintenance.migrations.delete_all_sessions", + lambda **_: _make_result("delete_all_sessions", success=False), + ) + monkeypatch.setattr( + "scc_cli.maintenance.migrations.reset_exceptions", + lambda **_: _make_result("reset_exceptions"), + ) + monkeypatch.setattr( + "scc_cli.maintenance.migrations.clear_contexts", + lambda **_: _make_result("clear_contexts"), + ) + monkeypatch.setattr( + "scc_cli.maintenance.migrations.clear_cache", + lambda **_: _make_result("clear_cache"), + ) + monkeypatch.setattr( + "scc_cli.maintenance.migrations.prune_containers", + lambda **_: _make_result("prune_containers"), + ) + + results = factory_reset() + + assert [result.action_id for result in results] == ["reset_config", "delete_all_sessions"] + + results = factory_reset(continue_on_error=True) + + assert [result.action_id for result in results] == [ + "reset_config", + "delete_all_sessions", + "reset_exceptions", + "clear_contexts", + "clear_cache", + "prune_containers", + ] diff --git a/tests/test_marketplace_sync.py b/tests/test_marketplace_sync.py index 54f0a56..d38eba2 100644 --- a/tests/test_marketplace_sync.py +++ b/tests/test_marketplace_sync.py @@ -8,15 +8,20 @@ from __future__ import annotations import json +from dataclasses import replace from datetime import datetime, timezone from pathlib import Path -from typing import TYPE_CHECKING -from unittest.mock import MagicMock, patch +from typing import Any +from unittest.mock import MagicMock import pytest -if TYPE_CHECKING: - pass +from scc_cli.adapters.local_filesystem import LocalFilesystem +from scc_cli.adapters.system_clock import SystemClock +from scc_cli.application.sync_marketplace import SyncMarketplaceDependencies +from scc_cli.marketplace.materialize import MaterializedMarketplace, materialize_marketplace +from scc_cli.marketplace.resolve import resolve_effective_config +from scc_cli.ports.remote_fetcher import RemoteFetcher def make_org_config_data(**overrides: dict) -> dict: @@ -28,12 +33,40 @@ def make_org_config_data(**overrides: dict) -> dict: return config +def _materialize_with_fetcher( + name: str, + source: Any, + project_dir: Path, + force_refresh: bool = False, + fetcher: RemoteFetcher | None = None, +) -> MaterializedMarketplace: + return materialize_marketplace( + name=name, + source=source, + project_dir=project_dir, + force_refresh=force_refresh, + ) + + +@pytest.fixture +def sync_dependencies() -> SyncMarketplaceDependencies: + remote_fetcher = MagicMock(spec=RemoteFetcher) + remote_fetcher.get.side_effect = AssertionError("Unexpected remote fetch") + return SyncMarketplaceDependencies( + filesystem=LocalFilesystem(), + remote_fetcher=remote_fetcher, + clock=SystemClock(), + resolve_effective_config=resolve_effective_config, + materialize_marketplace=_materialize_with_fetcher, + ) + + class TestSyncError: """Tests for SyncError exception.""" def test_create_with_message(self) -> None: """Should create error with message.""" - from scc_cli.marketplace.sync import SyncError + from scc_cli.application.sync_marketplace import SyncError error = SyncError("Test error") assert str(error) == "Test error" @@ -41,7 +74,7 @@ def test_create_with_message(self) -> None: def test_create_with_details(self) -> None: """Should create error with details dict.""" - from scc_cli.marketplace.sync import SyncError + from scc_cli.application.sync_marketplace import SyncError error = SyncError("Test error", details={"key": "value"}) assert str(error) == "Test error" @@ -53,7 +86,7 @@ class TestSyncResult: def test_create_success_result(self) -> None: """Should create successful result with empty lists.""" - from scc_cli.marketplace.sync import SyncResult + from scc_cli.application.sync_marketplace import SyncResult result = SyncResult(success=True) assert result.success is True @@ -64,7 +97,7 @@ def test_create_success_result(self) -> None: def test_create_with_plugins(self) -> None: """Should create result with enabled plugins.""" - from scc_cli.marketplace.sync import SyncResult + from scc_cli.application.sync_marketplace import SyncResult result = SyncResult( success=True, @@ -74,7 +107,7 @@ def test_create_with_plugins(self) -> None: def test_create_with_marketplaces(self) -> None: """Should create result with materialized marketplaces.""" - from scc_cli.marketplace.sync import SyncResult + from scc_cli.application.sync_marketplace import SyncResult result = SyncResult( success=True, @@ -84,7 +117,7 @@ def test_create_with_marketplaces(self) -> None: def test_create_with_warnings(self) -> None: """Should create result with warnings.""" - from scc_cli.marketplace.sync import SyncResult + from scc_cli.application.sync_marketplace import SyncResult result = SyncResult( success=True, @@ -94,7 +127,7 @@ def test_create_with_warnings(self) -> None: def test_create_with_settings_path(self, tmp_path: Path) -> None: """Should create result with settings path.""" - from scc_cli.marketplace.sync import SyncResult + from scc_cli.application.sync_marketplace import SyncResult settings_path = tmp_path / ".claude" / "settings.local.json" result = SyncResult( @@ -107,20 +140,29 @@ def test_create_with_settings_path(self, tmp_path: Path) -> None: class TestSyncMarketplaceSettingsValidation: """Tests for sync_marketplace_settings input validation.""" - def test_invalid_org_config_raises_sync_error(self, tmp_path: Path) -> None: + def test_invalid_org_config_raises_sync_error( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should raise SyncError for invalid org config.""" - from scc_cli.marketplace.sync import SyncError, sync_marketplace_settings + from scc_cli.application.sync_marketplace import SyncError, sync_marketplace_settings with pytest.raises(SyncError, match="Invalid org config"): sync_marketplace_settings( project_dir=tmp_path, org_config_data={"invalid": "config"}, team_id="test-team", + dependencies=sync_dependencies, ) - def test_none_team_id_raises_sync_error(self, tmp_path: Path) -> None: + def test_none_team_id_raises_sync_error( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should raise SyncError when team_id is None.""" - from scc_cli.marketplace.sync import SyncError, sync_marketplace_settings + from scc_cli.application.sync_marketplace import SyncError, sync_marketplace_settings valid_config = make_org_config_data( profiles={"test-team": {}}, @@ -131,6 +173,7 @@ def test_none_team_id_raises_sync_error(self, tmp_path: Path) -> None: project_dir=tmp_path, org_config_data=valid_config, team_id=None, + dependencies=sync_dependencies, ) @@ -167,35 +210,51 @@ def org_config_with_marketplace(self) -> dict: }, ) - def test_computes_effective_plugins(self, tmp_path: Path, minimal_org_config: dict) -> None: + def test_computes_effective_plugins( + self, + tmp_path: Path, + minimal_org_config: dict, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should compute effective plugins for team.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings result = sync_marketplace_settings( project_dir=tmp_path, org_config_data=minimal_org_config, team_id="test-team", + dependencies=sync_dependencies, ) assert result.success is True assert "plugin-a@claude-plugins-official" in result.plugins_enabled - def test_skips_implicit_marketplaces(self, tmp_path: Path, minimal_org_config: dict) -> None: + def test_skips_implicit_marketplaces( + self, + tmp_path: Path, + minimal_org_config: dict, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should not materialize claude-plugins-official.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings result = sync_marketplace_settings( project_dir=tmp_path, org_config_data=minimal_org_config, team_id="test-team", + dependencies=sync_dependencies, ) # claude-plugins-official should not be materialized assert "claude-plugins-official" not in result.marketplaces_materialized - def test_warns_on_missing_marketplace_source(self, tmp_path: Path) -> None: + def test_warns_on_missing_marketplace_source( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should warn when marketplace source is not found.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings config = make_org_config_data( defaults={ @@ -210,21 +269,21 @@ def test_warns_on_missing_marketplace_source(self, tmp_path: Path) -> None: project_dir=tmp_path, org_config_data=config, team_id="test-team", + dependencies=sync_dependencies, ) assert any("missing-marketplace" in w for w in result.warnings) - @patch("scc_cli.marketplace.sync.materialize_marketplace") def test_materializes_custom_marketplaces( self, - mock_materialize: MagicMock, tmp_path: Path, org_config_with_marketplace: dict, + sync_dependencies: SyncMarketplaceDependencies, ) -> None: """Should materialize custom marketplaces.""" - from scc_cli.marketplace.materialize import MaterializedMarketplace - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings + mock_materialize = MagicMock() mock_materialize.return_value = MaterializedMarketplace( name="internal", canonical_name="internal", @@ -239,43 +298,53 @@ def test_materializes_custom_marketplaces( plugins_available=["my-plugin"], ) + dependencies = replace(sync_dependencies, materialize_marketplace=mock_materialize) result = sync_marketplace_settings( project_dir=tmp_path, org_config_data=org_config_with_marketplace, team_id="test-team", + dependencies=dependencies, ) assert "internal" in result.marketplaces_materialized - @patch("scc_cli.marketplace.sync.materialize_marketplace") def test_warns_on_materialization_error( self, - mock_materialize: MagicMock, tmp_path: Path, org_config_with_marketplace: dict, + sync_dependencies: SyncMarketplaceDependencies, ) -> None: """Should warn when materialization fails.""" + from scc_cli.application.sync_marketplace import sync_marketplace_settings from scc_cli.marketplace.materialize import MaterializationError - from scc_cli.marketplace.sync import sync_marketplace_settings + mock_materialize = MagicMock() mock_materialize.side_effect = MaterializationError("Failed to clone", "internal") + dependencies = replace(sync_dependencies, materialize_marketplace=mock_materialize) result = sync_marketplace_settings( project_dir=tmp_path, org_config_data=org_config_with_marketplace, team_id="test-team", + dependencies=dependencies, ) assert any("Failed to materialize" in w for w in result.warnings) - def test_writes_settings_file(self, tmp_path: Path, minimal_org_config: dict) -> None: + def test_writes_settings_file( + self, + tmp_path: Path, + minimal_org_config: dict, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should write settings.local.json.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings result = sync_marketplace_settings( project_dir=tmp_path, org_config_data=minimal_org_config, team_id="test-team", + dependencies=sync_dependencies, ) settings_path = tmp_path / ".claude" / "settings.local.json" @@ -285,9 +354,14 @@ def test_writes_settings_file(self, tmp_path: Path, minimal_org_config: dict) -> data = json.loads(settings_path.read_text()) assert "enabledPlugins" in data - def test_creates_claude_directory(self, tmp_path: Path, minimal_org_config: dict) -> None: + def test_creates_claude_directory( + self, + tmp_path: Path, + minimal_org_config: dict, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should create .claude directory if missing.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings # Ensure .claude doesn't exist claude_dir = tmp_path / ".claude" @@ -297,20 +371,27 @@ def test_creates_claude_directory(self, tmp_path: Path, minimal_org_config: dict project_dir=tmp_path, org_config_data=minimal_org_config, team_id="test-team", + dependencies=sync_dependencies, ) assert claude_dir.exists() assert claude_dir.is_dir() - def test_saves_managed_state(self, tmp_path: Path, minimal_org_config: dict) -> None: + def test_saves_managed_state( + self, + tmp_path: Path, + minimal_org_config: dict, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should save managed state tracking file.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings sync_marketplace_settings( project_dir=tmp_path, org_config_data=minimal_org_config, team_id="test-team", org_config_url="https://example.com/config.json", + dependencies=sync_dependencies, ) managed_path = tmp_path / ".claude" / ".scc-managed.json" @@ -321,15 +402,21 @@ def test_saves_managed_state(self, tmp_path: Path, minimal_org_config: dict) -> assert data["org_config_url"] == "https://example.com/config.json" assert data["team_id"] == "test-team" - def test_dry_run_does_not_write_files(self, tmp_path: Path, minimal_org_config: dict) -> None: + def test_dry_run_does_not_write_files( + self, + tmp_path: Path, + minimal_org_config: dict, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should not write files when dry_run=True.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings result = sync_marketplace_settings( project_dir=tmp_path, org_config_data=minimal_org_config, team_id="test-team", dry_run=True, + dependencies=sync_dependencies, ) assert result.success is True @@ -338,9 +425,14 @@ def test_dry_run_does_not_write_files(self, tmp_path: Path, minimal_org_config: settings_path = tmp_path / ".claude" / "settings.local.json" assert not settings_path.exists() - def test_preserves_user_customizations(self, tmp_path: Path, minimal_org_config: dict) -> None: + def test_preserves_user_customizations( + self, + tmp_path: Path, + minimal_org_config: dict, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should preserve user-added plugins in settings.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings # Create existing settings with user plugin claude_dir = tmp_path / ".claude" @@ -354,6 +446,7 @@ def test_preserves_user_customizations(self, tmp_path: Path, minimal_org_config: project_dir=tmp_path, org_config_data=minimal_org_config, team_id="test-team", + dependencies=sync_dependencies, ) settings_path = claude_dir / "settings.local.json" @@ -367,9 +460,13 @@ def test_preserves_user_customizations(self, tmp_path: Path, minimal_org_config: class TestBlockedPluginWarnings: """Tests for blocked plugin conflict detection.""" - def test_warns_on_blocked_plugin_conflict(self, tmp_path: Path) -> None: + def test_warns_on_blocked_plugin_conflict( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should warn when user has blocked plugin installed.""" - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings # Create settings with a plugin that will be blocked claude_dir = tmp_path / ".claude" @@ -394,6 +491,7 @@ def test_warns_on_blocked_plugin_conflict(self, tmp_path: Path) -> None: project_dir=tmp_path, org_config_data=config, team_id="test-team", + dependencies=sync_dependencies, ) # Should have a warning about the blocked plugin @@ -403,27 +501,39 @@ def test_warns_on_blocked_plugin_conflict(self, tmp_path: Path) -> None: class TestLoadExistingPlugins: """Tests for _load_existing_plugins helper.""" - def test_returns_empty_when_no_file(self, tmp_path: Path) -> None: + def test_returns_empty_when_no_file( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should return empty list when settings file doesn't exist.""" - from scc_cli.marketplace.sync import _load_existing_plugins + from scc_cli.application.sync_marketplace import _load_existing_plugins - result = _load_existing_plugins(tmp_path) + result = _load_existing_plugins(tmp_path, sync_dependencies.filesystem) assert result == [] - def test_returns_empty_on_invalid_json(self, tmp_path: Path) -> None: + def test_returns_empty_on_invalid_json( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should return empty list on corrupted JSON.""" - from scc_cli.marketplace.sync import _load_existing_plugins + from scc_cli.application.sync_marketplace import _load_existing_plugins claude_dir = tmp_path / ".claude" claude_dir.mkdir() (claude_dir / "settings.local.json").write_text("not valid json") - result = _load_existing_plugins(tmp_path) + result = _load_existing_plugins(tmp_path, sync_dependencies.filesystem) assert result == [] - def test_returns_plugins_list(self, tmp_path: Path) -> None: + def test_returns_plugins_list( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should return plugins from settings file.""" - from scc_cli.marketplace.sync import _load_existing_plugins + from scc_cli.application.sync_marketplace import _load_existing_plugins claude_dir = tmp_path / ".claude" claude_dir.mkdir() @@ -431,32 +541,37 @@ def test_returns_plugins_list(self, tmp_path: Path) -> None: json.dumps({"enabledPlugins": ["p1@m1", "p2@m2"]}) ) - result = _load_existing_plugins(tmp_path) + result = _load_existing_plugins(tmp_path, sync_dependencies.filesystem) assert result == ["p1@m1", "p2@m2"] - def test_returns_empty_on_missing_key(self, tmp_path: Path) -> None: + def test_returns_empty_on_missing_key( + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, + ) -> None: """Should return empty list when enabledPlugins key missing.""" - from scc_cli.marketplace.sync import _load_existing_plugins + from scc_cli.application.sync_marketplace import _load_existing_plugins claude_dir = tmp_path / ".claude" claude_dir.mkdir() (claude_dir / "settings.local.json").write_text(json.dumps({})) - result = _load_existing_plugins(tmp_path) + result = _load_existing_plugins(tmp_path, sync_dependencies.filesystem) assert result == [] class TestForceRefreshBehavior: """Tests for force_refresh parameter handling.""" - @patch("scc_cli.marketplace.sync.materialize_marketplace") def test_passes_force_refresh_to_materialize( - self, mock_materialize: MagicMock, tmp_path: Path + self, + tmp_path: Path, + sync_dependencies: SyncMarketplaceDependencies, ) -> None: """Should pass force_refresh to materialize_marketplace.""" - from scc_cli.marketplace.materialize import MaterializedMarketplace - from scc_cli.marketplace.sync import sync_marketplace_settings + from scc_cli.application.sync_marketplace import sync_marketplace_settings + mock_materialize = MagicMock() mock_materialize.return_value = MaterializedMarketplace( name="internal", canonical_name="internal", @@ -483,11 +598,13 @@ def test_passes_force_refresh_to_materialize( }, ) + dependencies = replace(sync_dependencies, materialize_marketplace=mock_materialize) sync_marketplace_settings( project_dir=tmp_path, org_config_data=config, team_id="test-team", force_refresh=True, + dependencies=dependencies, ) mock_materialize.assert_called_once() diff --git a/tests/test_no_root_sprawl.py b/tests/test_no_root_sprawl.py index 8576de5..22fb1e6 100644 --- a/tests/test_no_root_sprawl.py +++ b/tests/test_no_root_sprawl.py @@ -6,12 +6,16 @@ Target architecture: src/scc_cli/ + application/ - use-case orchestration core/ - domain models, validation, pure transforms - services/ - use-case orchestration + services/ - legacy orchestration (moving into application/) + ports/ - protocol definitions + adapters/ - concrete IO implementations commands/ - CLI wiring ui/ - presentation __init__.py, __main__.py - entry points cli.py or main.py - CLI app setup + bootstrap.py - composition root wiring Current state includes legacy modules that will be refactored into the target structure. These are tracked in ALLOWED_LEGACY and should shrink over time. @@ -28,11 +32,15 @@ "__init__.py", "__main__.py", "__pycache__", + "bootstrap.py", "cli.py", "main.py", # Target packages + "application", "core", "services", + "ports", + "adapters", "commands", "ui", } @@ -46,6 +54,7 @@ "docker", "doctor", "evaluation", + "maintenance", # temporary top-level package pending core/ move "marketplace", "models", "schemas", @@ -77,6 +86,7 @@ "setup.py", "source_resolver.py", "stats.py", + "support_bundle.py", # legacy top-level support bundle helper "subprocess_utils.py", "teams.py", "theme.py", @@ -135,6 +145,8 @@ def test_no_unexpected_top_level_items(self) -> None: "Target architecture:", " core/ - domain models, validation, pure transforms", " services/ - use-case orchestration", + " ports/ - protocol definitions", + " adapters/ - concrete IO implementations", " commands/ - CLI wiring", " ui/ - presentation", ] diff --git a/tests/test_quick_resume_behavior.py b/tests/test_quick_resume_behavior.py index 17f036d..1e96e3c 100644 --- a/tests/test_quick_resume_behavior.py +++ b/tests/test_quick_resume_behavior.py @@ -19,11 +19,11 @@ def test_quick_resume_shows_active_team_in_header() -> None: ) with ( - patch("scc_cli.commands.launch.app.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.app.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.app.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.app.load_recent_contexts", return_value=[context]), - patch("scc_cli.commands.launch.app.pick_context_quick_resume") as mock_picker, + patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), + patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch("scc_cli.commands.launch.flow.pick_context_quick_resume") as mock_picker, ): mock_picker.side_effect = RuntimeError("stop") try: @@ -47,12 +47,12 @@ def test_quick_resume_back_cancels_at_top_level() -> None: ) with ( - patch("scc_cli.commands.launch.app.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.app.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.app.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.app.load_recent_contexts", return_value=[context]), + patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), + patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), patch( - "scc_cli.commands.launch.app.pick_context_quick_resume", + "scc_cli.commands.launch.flow.pick_context_quick_resume", return_value=(QuickResumeResult.BACK, None), ), ): diff --git a/tests/test_session_flags.py b/tests/test_session_flags.py index 42ed957..6501ba6 100644 --- a/tests/test_session_flags.py +++ b/tests/test_session_flags.py @@ -15,6 +15,7 @@ from scc_cli.cli import app from scc_cli.core.exit_codes import EXIT_CANCELLED, EXIT_USAGE +from tests.fakes import build_fake_adapters runner = CliRunner() @@ -72,26 +73,20 @@ def test_resume_auto_selects_recent_session(self, mock_session): """--resume without workspace should use most recent session.""" # Mock session with no team (standalone mode) standalone_session = {**mock_session, "team": None} + fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.app.sessions.list_recent", + "scc_cli.commands.launch.flow.sessions.list_recent", return_value=[standalone_session], ) as mock_list, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker", "run"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.run"), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(mock_session["workspace"], False), - ), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.sandbox.sessions.record_session"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch("scc_cli.commands.launch.flow.sessions.record_session"), patch("os.path.exists", return_value=True), patch("pathlib.Path.exists", return_value=True), ): @@ -107,26 +102,20 @@ def test_resume_short_flag_works(self, mock_session): """-r short flag should work like --resume.""" # Mock session with no team (standalone mode) standalone_session = {**mock_session, "team": None} + fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.app.sessions.list_recent", + "scc_cli.commands.launch.flow.sessions.list_recent", return_value=[standalone_session], ) as mock_list, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker", "run"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.run"), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(mock_session["workspace"], False), - ), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.sandbox.sessions.record_session"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch("scc_cli.commands.launch.flow.sessions.record_session"), patch("os.path.exists", return_value=True), patch("pathlib.Path.exists", return_value=True), ): @@ -138,9 +127,9 @@ def test_resume_short_flag_works(self, mock_session): def test_resume_without_sessions_shows_error(self): """--resume with no sessions should show appropriate error.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.app.sessions.list_recent", return_value=[]), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.sessions.list_recent", return_value=[]), ): # Use --standalone flag to bypass team filtering result = runner.invoke(app, ["start", "--resume", "--standalone"]) @@ -161,29 +150,24 @@ def test_select_shows_session_picker(self, mock_sessions_list, mock_session): # Sessions need team=None for standalone mode filtering standalone_sessions = [{**s, "team": None} for s in mock_sessions_list] standalone_session = {**mock_session, "team": None} + fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.app.is_interactive_allowed", return_value=True), - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.app.sessions.list_recent", return_value=standalone_sessions + "scc_cli.commands.launch.flow.sessions.list_recent", + return_value=standalone_sessions, ), patch( - "scc_cli.commands.launch.app.select_session", return_value=standalone_session + "scc_cli.commands.launch.flow.select_session", return_value=standalone_session ) as mock_picker, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker", "run"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.sandbox.docker.run"), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), - patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(mock_session["workspace"], False), - ), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.sandbox.sessions.record_session"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch("scc_cli.commands.launch.flow.sessions.record_session"), patch("os.path.exists", return_value=True), patch("pathlib.Path.exists", return_value=True), ): @@ -198,29 +182,24 @@ def test_select_short_flag_works(self, mock_sessions_list, mock_session): # Sessions need team=None for standalone mode filtering standalone_sessions = [{**s, "team": None} for s in mock_sessions_list] standalone_session = {**mock_session, "team": None} + fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.app.is_interactive_allowed", return_value=True), - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.app.sessions.list_recent", return_value=standalone_sessions + "scc_cli.commands.launch.flow.sessions.list_recent", + return_value=standalone_sessions, ), patch( - "scc_cli.commands.launch.app.select_session", return_value=standalone_session + "scc_cli.commands.launch.flow.select_session", return_value=standalone_session ) as mock_picker, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker", "run"], False), - ), - patch("scc_cli.commands.launch.sandbox.docker.run"), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(mock_session["workspace"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.sandbox.sessions.record_session"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch("scc_cli.commands.launch.flow.sessions.record_session"), patch("os.path.exists", return_value=True), patch("pathlib.Path.exists", return_value=True), ): @@ -232,10 +211,10 @@ def test_select_short_flag_works(self, mock_sessions_list, mock_session): def test_select_without_sessions_shows_message(self): """--select with no sessions should show appropriate message.""" with ( - patch("scc_cli.commands.launch.app.is_interactive_allowed", return_value=True), - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.app.sessions.list_recent", return_value=[]), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.sessions.list_recent", return_value=[]), ): # Use --standalone flag to bypass team filtering result = runner.invoke(app, ["start", "--select", "--standalone"]) @@ -249,14 +228,15 @@ def test_select_user_cancels_exits_gracefully(self, mock_sessions_list): # Sessions need team=None for standalone mode filtering standalone_sessions = [{**s, "team": None} for s in mock_sessions_list] with ( - patch("scc_cli.commands.launch.app.is_interactive_allowed", return_value=True), - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.app.sessions.list_recent", return_value=standalone_sessions + "scc_cli.commands.launch.flow.sessions.list_recent", + return_value=standalone_sessions, ), patch( - "scc_cli.commands.launch.app.select_session", return_value=None + "scc_cli.commands.launch.flow.select_session", return_value=None ), # User cancelled ): # Use --standalone flag to bypass team filtering @@ -276,33 +256,27 @@ class TestFlagMutualExclusivity: def test_resume_and_select_are_mutually_exclusive(self, mock_session, mock_sessions_list): """Using both --resume and --select should error or pick one.""" + fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.app.is_interactive_allowed", return_value=True), - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), patch( - "scc_cli.commands.launch.app.sessions.get_most_recent", return_value=mock_session - ), - patch( - "scc_cli.commands.launch.app.sessions.list_recent", return_value=mock_sessions_list + "scc_cli.commands.launch.flow.sessions.get_most_recent", return_value=mock_session ), - patch("scc_cli.commands.launch.app.select_session", return_value=mock_session), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker", "run"], False), + "scc_cli.commands.launch.flow.sessions.list_recent", return_value=mock_sessions_list ), - patch("scc_cli.commands.launch.sandbox.docker.run"), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), + patch("scc_cli.commands.launch.flow.select_session", return_value=mock_session), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(mock_session["workspace"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.sandbox.sessions.record_session"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch("scc_cli.commands.launch.flow.sessions.record_session"), patch("os.path.exists", return_value=True), patch("pathlib.Path.exists", return_value=True), ): @@ -326,30 +300,24 @@ class TestSmartWorkspaceDetection: def test_auto_detects_workspace_from_git_repo(self, mock_session): """Running 'scc start' from git repo should auto-detect workspace.""" detected_path = "/home/user/project" + fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), # Smart detection returns detected workspace patch( - "scc_cli.commands.launch.app.git.detect_workspace_root", + "scc_cli.commands.launch.flow.git.detect_workspace_root", return_value=(mock_session["workspace"], detected_path), ) as mock_detect, - patch("scc_cli.commands.launch.app.docker.check_docker_available"), - patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker", "run"], False), - ), - patch("scc_cli.commands.launch.sandbox.docker.run"), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(mock_session["workspace"], False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.sandbox.sessions.record_session"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch("scc_cli.commands.launch.flow.sessions.record_session"), patch("os.path.exists", return_value=True), patch("pathlib.Path.exists", return_value=True), ): @@ -363,18 +331,18 @@ def test_auto_detects_workspace_from_git_repo(self, mock_session): def test_no_detection_non_tty_shows_error(self): """Running 'scc start' in non-git dir without TTY should error.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), # Smart detection returns None (not in a git repo) patch( - "scc_cli.commands.launch.app.git.detect_workspace_root", + "scc_cli.commands.launch.flow.git.detect_workspace_root", return_value=(None, "/home/user/random"), ), # Non-TTY environment - patch("scc_cli.commands.launch.app.is_interactive_allowed", return_value=False), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=False), ): result = runner.invoke(app, ["start"]) @@ -386,13 +354,13 @@ def test_no_detection_non_tty_shows_error(self): def test_non_interactive_flag_requires_workspace(self): """--non-interactive should fail fast when interactive input is needed.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), patch( - "scc_cli.commands.launch.app.is_interactive_allowed", return_value=False + "scc_cli.commands.launch.flow.is_interactive_allowed", return_value=False ) as mock_allowed, ): result = runner.invoke(app, ["start", "--non-interactive"]) @@ -403,23 +371,23 @@ def test_non_interactive_flag_requires_workspace(self): def test_interactive_flag_bypasses_detection(self, mock_sessions_list): """The -i flag should force interactive mode even when workspace can be detected.""" with ( - patch("scc_cli.commands.launch.app.is_interactive_allowed", return_value=True), - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"standalone": True}, ), # Detection would succeed, but should not be called when -i is used patch( - "scc_cli.commands.launch.app.git.detect_workspace_root", + "scc_cli.commands.launch.flow.git.detect_workspace_root", return_value=("/home/user/project", "/home/user/project"), ) as mock_detect, - patch("scc_cli.commands.launch.app.config.is_standalone_mode", return_value=True), - patch("scc_cli.commands.launch.app.config.load_cached_org_config", return_value=None), - patch("scc_cli.commands.launch.app.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.app.load_recent_contexts", return_value=[]), + patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=True), + patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value=None), + patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[]), # User selects workspace via picker - patch("scc_cli.commands.launch.app.pick_workspace_source", return_value=None), + patch("scc_cli.commands.launch.flow.pick_workspace_source", return_value=None), ): result = runner.invoke(app, ["start", "-i"]) @@ -428,32 +396,27 @@ def test_interactive_flag_bypasses_detection(self, mock_sessions_list): # User cancelled (returned None from picker) assert result.exit_code == 0 + @pytest.mark.skip(reason="Phase 3 feature: auto-detection feedback not implemented") def test_detection_feedback_shown_on_success(self, mock_session): """Auto-detected workspace should show brief feedback message.""" - detected_path = "/home/user/my-project" + standalone_sessions = [{**mock_session, "team": None}] + standalone_session = {**mock_session, "team": None} + fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch( - "scc_cli.commands.launch.app.config.load_user_config", - return_value={"standalone": True}, - ), - patch( - "scc_cli.commands.launch.app.git.detect_workspace_root", - return_value=(detected_path, detected_path), - ), - patch("scc_cli.commands.launch.app.docker.check_docker_available"), + patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.sandbox.docker.get_or_create_container", - return_value=(["docker", "run"], False), + "scc_cli.commands.launch.flow.sessions.list_recent", + return_value=standalone_sessions, ), - patch("scc_cli.commands.launch.sandbox.docker.run"), - patch("scc_cli.commands.launch.sandbox.docker.prepare_sandbox_volume_for_credentials"), + patch("scc_cli.commands.launch.flow.select_session", return_value=standalone_session), patch( - "scc_cli.commands.launch.workspace.git.get_workspace_mount_path", - return_value=(detected_path, False), + "scc_cli.commands.launch.flow.get_default_adapters", + return_value=fake_adapters, ), - patch("scc_cli.commands.launch.workspace.git.check_branch_safety"), - patch("scc_cli.commands.launch.sandbox.sessions.record_session"), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch("scc_cli.commands.launch.flow.sessions.record_session"), patch("os.path.exists", return_value=True), patch("pathlib.Path.exists", return_value=True), ): diff --git a/tests/test_start_cancellation.py b/tests/test_start_cancellation.py index e0708b9..a99bade 100644 --- a/tests/test_start_cancellation.py +++ b/tests/test_start_cancellation.py @@ -16,10 +16,10 @@ def test_start_cancelled_exits_130_and_message(): """User cancellation should exit 130 and show a Cancelled message.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( - "scc_cli.commands.launch.app._resolve_session_selection", + "scc_cli.commands.launch.flow._resolve_session_selection", return_value=(None, None, None, None, True, False), ), ): @@ -32,8 +32,8 @@ def test_start_cancelled_exits_130_and_message(): def test_start_offline_without_cache_exits_config(): """--offline with no cache should exit with EXIT_CONFIG and message.""" with ( - patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False), - patch("scc_cli.commands.launch.app.config.load_cached_org_config", return_value=None), + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value=None), ): result = runner.invoke(app, ["start", "--offline"]) diff --git a/tests/test_start_dryrun.py b/tests/test_start_dryrun.py index 4a2e8ca..c57b61d 100644 --- a/tests/test_start_dryrun.py +++ b/tests/test_start_dryrun.py @@ -29,48 +29,14 @@ def test_dry_run_does_not_launch_docker(self, tmp_path, monkeypatch): # Create a minimal workspace (tmp_path / ".git").mkdir() - mock_docker_run = MagicMock() + mock_start_session = MagicMock() - with patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False): - with patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}): + with patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False): + with patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}): with patch( - "scc_cli.commands.launch.app.config.load_cached_org_config", return_value={} + "scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={} ): - with patch("scc_cli.commands.launch.app.docker.run", mock_docker_run): - with patch("scc_cli.commands.launch.app.docker.check_docker_available"): - try: - start( - workspace=str(tmp_path), - team=None, - session_name=None, - resume=False, - select=False, - worktree_name=None, - fresh=False, - install_deps=False, - offline=False, - standalone=False, - dry_run=True, - ) - except click.exceptions.Exit: - pass # Expected exit - - # Docker run should NOT have been called - mock_docker_run.assert_not_called() - - def test_dry_run_shows_workspace_path(self, tmp_path, monkeypatch, capsys): - """--dry-run should display the workspace path.""" - from scc_cli.commands.launch import start - - monkeypatch.chdir(tmp_path) - (tmp_path / ".git").mkdir() - - with patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False): - with patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}): - with patch( - "scc_cli.commands.launch.app.config.load_cached_org_config", return_value={} - ): - with patch("scc_cli.commands.launch.app.docker.check_docker_available"): + with patch("scc_cli.commands.launch.flow.start_session", mock_start_session): try: start( workspace=str(tmp_path), @@ -86,7 +52,39 @@ def test_dry_run_shows_workspace_path(self, tmp_path, monkeypatch, capsys): dry_run=True, ) except click.exceptions.Exit: - pass + pass # Expected exit + + # Sandbox launch should NOT have been called + mock_start_session.assert_not_called() + + def test_dry_run_shows_workspace_path(self, tmp_path, monkeypatch, capsys): + """--dry-run should display the workspace path.""" + from scc_cli.commands.launch import start + + monkeypatch.chdir(tmp_path) + (tmp_path / ".git").mkdir() + + with patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False): + with patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}): + with patch( + "scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={} + ): + try: + start( + workspace=str(tmp_path), + team=None, + session_name=None, + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=False, + dry_run=True, + ) + except click.exceptions.Exit: + pass captured = capsys.readouterr() assert str(tmp_path) in captured.out or "Workspace" in captured.out @@ -109,36 +107,35 @@ def test_dry_run_shows_team_name(self, tmp_path, monkeypatch, capsys): mock_org = {"profiles": {"platform": {"description": "Platform team"}}} - with patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False): + with patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False): with patch( - "scc_cli.commands.launch.app.config.load_user_config", + "scc_cli.commands.launch.flow.config.load_user_config", return_value={"selected_profile": "platform"}, ): with patch( - "scc_cli.commands.launch.app.config.load_cached_org_config", + "scc_cli.commands.launch.flow.config.load_cached_org_config", return_value=mock_org, ): - with patch("scc_cli.commands.launch.app.docker.check_docker_available"): - with patch( - "scc_cli.commands.launch.app.teams.validate_team_profile", - return_value={"valid": True}, - ): - try: - start( - workspace=str(tmp_path), - team="platform", - session_name=None, - resume=False, - select=False, - worktree_name=None, - fresh=False, - install_deps=False, - offline=False, - standalone=False, - dry_run=True, - ) - except click.exceptions.Exit: - pass + with patch( + "scc_cli.commands.launch.flow.teams.validate_team_profile", + return_value={"valid": True}, + ): + try: + start( + workspace=str(tmp_path), + team="platform", + session_name=None, + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=False, + dry_run=True, + ) + except click.exceptions.Exit: + pass captured = capsys.readouterr() assert "platform" in captured.out @@ -159,30 +156,29 @@ def test_dry_run_json_has_correct_kind(self, tmp_path, monkeypatch, capsys): monkeypatch.chdir(tmp_path) (tmp_path / ".git").mkdir() - with patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False): - with patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}): + with patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False): + with patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}): with patch( - "scc_cli.commands.launch.app.config.load_cached_org_config", return_value={} + "scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={} ): - with patch("scc_cli.commands.launch.app.docker.check_docker_available"): - try: - start( - workspace=str(tmp_path), - team=None, - session_name=None, - resume=False, - select=False, - worktree_name=None, - fresh=False, - install_deps=False, - offline=False, - standalone=False, - dry_run=True, - json_output=True, - pretty=False, - ) - except click.exceptions.Exit: - pass + try: + start( + workspace=str(tmp_path), + team=None, + session_name=None, + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=False, + dry_run=True, + json_output=True, + pretty=False, + ) + except click.exceptions.Exit: + pass captured = capsys.readouterr() output = json.loads(captured.out) @@ -195,30 +191,29 @@ def test_dry_run_json_has_envelope_structure(self, tmp_path, monkeypatch, capsys monkeypatch.chdir(tmp_path) (tmp_path / ".git").mkdir() - with patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False): - with patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}): + with patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False): + with patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}): with patch( - "scc_cli.commands.launch.app.config.load_cached_org_config", return_value={} + "scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={} ): - with patch("scc_cli.commands.launch.app.docker.check_docker_available"): - try: - start( - workspace=str(tmp_path), - team=None, - session_name=None, - resume=False, - select=False, - worktree_name=None, - fresh=False, - install_deps=False, - offline=False, - standalone=False, - dry_run=True, - json_output=True, - pretty=False, - ) - except click.exceptions.Exit: - pass + try: + start( + workspace=str(tmp_path), + team=None, + session_name=None, + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=False, + dry_run=True, + json_output=True, + pretty=False, + ) + except click.exceptions.Exit: + pass captured = capsys.readouterr() output = json.loads(captured.out) @@ -317,29 +312,28 @@ def test_dry_run_exits_zero_when_ready(self, tmp_path, monkeypatch): exit_code = None - with patch("scc_cli.commands.launch.app.setup.is_setup_needed", return_value=False): - with patch("scc_cli.commands.launch.app.config.load_user_config", return_value={}): + with patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False): + with patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}): with patch( - "scc_cli.commands.launch.app.config.load_cached_org_config", return_value={} + "scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={} ): - with patch("scc_cli.commands.launch.app.docker.check_docker_available"): - try: - start( - workspace=str(tmp_path), - team=None, - session_name=None, - resume=False, - select=False, - worktree_name=None, - fresh=False, - install_deps=False, - offline=False, - standalone=False, - dry_run=True, - ) - exit_code = 0 # If no exit raised, exit code is 0 - except click.exceptions.Exit as e: - exit_code = e.exit_code + try: + start( + workspace=str(tmp_path), + team=None, + session_name=None, + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=False, + dry_run=True, + ) + exit_code = 0 # If no exit raised, exit code is 0 + except click.exceptions.Exit as e: + exit_code = e.exit_code assert exit_code == 0 diff --git a/tests/test_start_personal_profile.py b/tests/test_start_personal_profile.py index b65ce5d..5c37543 100644 --- a/tests/test_start_personal_profile.py +++ b/tests/test_start_personal_profile.py @@ -2,7 +2,7 @@ from pathlib import Path -from scc_cli.commands.launch import app as launch_app +from scc_cli.commands.launch import flow as launch_flow from scc_cli.core import personal_profiles from scc_cli.marketplace.managed import ManagedState, save_managed_state @@ -27,7 +27,7 @@ def test_apply_personal_profile_applies(tmp_path: Path) -> None: {}, ) - profile_id, applied = launch_app._apply_personal_profile( + profile_id, applied = launch_flow._apply_personal_profile( tmp_path, json_mode=True, non_interactive=True ) diff --git a/tests/test_support_bundle.py b/tests/test_support_bundle.py index 603b707..eec4cc1 100644 --- a/tests/test_support_bundle.py +++ b/tests/test_support_bundle.py @@ -27,7 +27,7 @@ class TestSecretRedaction: def test_redact_secrets_replaces_auth_values(self) -> None: """Auth values should be replaced with [REDACTED].""" - from scc_cli.commands.support import redact_secrets + from scc_cli.support_bundle import redact_secrets data = { "auth": "secret-token-12345", @@ -40,7 +40,7 @@ def test_redact_secrets_replaces_auth_values(self) -> None: def test_redact_secrets_replaces_token_values(self) -> None: """Token values should be replaced with [REDACTED].""" - from scc_cli.commands.support import redact_secrets + from scc_cli.support_bundle import redact_secrets data = { "token": "ghp_abc123xyz", @@ -55,7 +55,7 @@ def test_redact_secrets_replaces_token_values(self) -> None: def test_redact_secrets_replaces_api_key_values(self) -> None: """API key values should be replaced with [REDACTED].""" - from scc_cli.commands.support import redact_secrets + from scc_cli.support_bundle import redact_secrets data = { "api_key": "sk-ant-api03-xxx", @@ -70,7 +70,7 @@ def test_redact_secrets_replaces_api_key_values(self) -> None: def test_redact_secrets_replaces_password_values(self) -> None: """Password values should be replaced with [REDACTED].""" - from scc_cli.commands.support import redact_secrets + from scc_cli.support_bundle import redact_secrets data = { "password": "super-secret", @@ -83,7 +83,7 @@ def test_redact_secrets_replaces_password_values(self) -> None: def test_redact_secrets_handles_nested_dicts(self) -> None: """Nested dictionaries should have secrets redacted.""" - from scc_cli.commands.support import redact_secrets + from scc_cli.support_bundle import redact_secrets data = { "config": { @@ -98,7 +98,7 @@ def test_redact_secrets_handles_nested_dicts(self) -> None: def test_redact_secrets_handles_lists(self) -> None: """Lists containing dicts should have secrets redacted.""" - from scc_cli.commands.support import redact_secrets + from scc_cli.support_bundle import redact_secrets data = { "plugins": [ @@ -114,7 +114,7 @@ def test_redact_secrets_handles_lists(self) -> None: def test_redact_secrets_strips_authorization_headers(self) -> None: """Authorization headers should be stripped.""" - from scc_cli.commands.support import redact_secrets + from scc_cli.support_bundle import redact_secrets data = { "headers": { @@ -138,7 +138,7 @@ class TestPathRedaction: def test_redact_paths_replaces_home_directory(self) -> None: """Home directory paths should be redacted.""" - from scc_cli.commands.support import redact_paths + from scc_cli.support_bundle import redact_paths home = str(Path.home()) data = {"path": f"{home}/projects/my-repo"} @@ -149,7 +149,7 @@ def test_redact_paths_replaces_home_directory(self) -> None: def test_redact_paths_handles_nested_paths(self) -> None: """Nested paths should be redacted.""" - from scc_cli.commands.support import redact_paths + from scc_cli.support_bundle import redact_paths home = str(Path.home()) data = { @@ -163,7 +163,7 @@ def test_redact_paths_handles_nested_paths(self) -> None: def test_redact_paths_preserves_relative_paths(self) -> None: """Relative paths should not be modified.""" - from scc_cli.commands.support import redact_paths + from scc_cli.support_bundle import redact_paths data = {"path": "./relative/path"} result = redact_paths(data) @@ -172,7 +172,7 @@ def test_redact_paths_preserves_relative_paths(self) -> None: def test_redact_paths_disabled_with_flag(self) -> None: """Path redaction can be disabled.""" - from scc_cli.commands.support import redact_paths + from scc_cli.support_bundle import redact_paths home = str(Path.home()) data = {"path": f"{home}/projects/my-repo"} @@ -191,7 +191,7 @@ class TestBundleDataCollection: def test_build_bundle_data_includes_system_info(self) -> None: """Bundle data should include system info.""" - from scc_cli.commands.support import build_bundle_data + from scc_cli.support_bundle import build_bundle_data result = build_bundle_data() @@ -201,7 +201,7 @@ def test_build_bundle_data_includes_system_info(self) -> None: def test_build_bundle_data_includes_cli_version(self) -> None: """Bundle data should include CLI version.""" - from scc_cli.commands.support import build_bundle_data + from scc_cli.support_bundle import build_bundle_data result = build_bundle_data() @@ -209,7 +209,7 @@ def test_build_bundle_data_includes_cli_version(self) -> None: def test_build_bundle_data_includes_timestamp(self) -> None: """Bundle data should include generation timestamp.""" - from scc_cli.commands.support import build_bundle_data + from scc_cli.support_bundle import build_bundle_data result = build_bundle_data() @@ -217,10 +217,10 @@ def test_build_bundle_data_includes_timestamp(self) -> None: def test_build_bundle_data_includes_config(self) -> None: """Bundle data should include config (redacted).""" - from scc_cli.commands.support import build_bundle_data + from scc_cli.support_bundle import build_bundle_data with patch( - "scc_cli.commands.support.config.load_user_config", + "scc_cli.support_bundle.config.load_user_config", return_value={"selected_profile": "test"}, ): result = build_bundle_data() @@ -229,9 +229,9 @@ def test_build_bundle_data_includes_config(self) -> None: def test_build_bundle_data_includes_doctor_output(self) -> None: """Bundle data should include doctor output.""" - from scc_cli.commands.support import build_bundle_data + from scc_cli.support_bundle import build_bundle_data - with patch("scc_cli.commands.support.doctor.run_doctor") as mock_doctor: + with patch("scc_cli.support_bundle.run_doctor") as mock_doctor: from scc_cli.doctor import CheckResult, DoctorResult mock_doctor.return_value = DoctorResult( @@ -254,11 +254,11 @@ class TestBundleFileCreation: def test_create_bundle_creates_zip_file(self, tmp_path: Path) -> None: """create_bundle should create a zip file.""" - from scc_cli.commands.support import create_bundle + from scc_cli.support_bundle import create_bundle output_path = tmp_path / "support-bundle.zip" - with patch("scc_cli.commands.support.build_bundle_data", return_value={"test": "data"}): + with patch("scc_cli.support_bundle.build_bundle_data", return_value={"test": "data"}): create_bundle(output_path) assert output_path.exists() @@ -266,11 +266,11 @@ def test_create_bundle_creates_zip_file(self, tmp_path: Path) -> None: def test_create_bundle_contains_manifest(self, tmp_path: Path) -> None: """Bundle zip should contain manifest.json.""" - from scc_cli.commands.support import create_bundle + from scc_cli.support_bundle import create_bundle output_path = tmp_path / "support-bundle.zip" - with patch("scc_cli.commands.support.build_bundle_data", return_value={"test": "data"}): + with patch("scc_cli.support_bundle.build_bundle_data", return_value={"test": "data"}): create_bundle(output_path) with zipfile.ZipFile(output_path, "r") as zf: @@ -278,7 +278,7 @@ def test_create_bundle_contains_manifest(self, tmp_path: Path) -> None: def test_create_bundle_default_output_path(self) -> None: """create_bundle should use default path if not specified.""" - from scc_cli.commands.support import get_default_bundle_path + from scc_cli.support_bundle import get_default_bundle_path result = get_default_bundle_path() diff --git a/tests/test_ui_dashboard.py b/tests/test_ui_dashboard.py index 4f091c4..0339234 100644 --- a/tests/test_ui_dashboard.py +++ b/tests/test_ui_dashboard.py @@ -15,6 +15,7 @@ from rich.console import Console +from scc_cli.application import dashboard as app_dashboard from scc_cli.ui.dashboard import ( Dashboard, DashboardState, @@ -69,9 +70,15 @@ def test_tab_display_names(self) -> None: class TestTabData: """Test TabData dataclass.""" - def _make_items(self, labels: list[str]) -> list[ListItem[str]]: + def _make_items(self, labels: list[str]) -> list[ListItem[app_dashboard.DashboardItem]]: """Helper to create list items.""" - return [ListItem(value=label, label=label) for label in labels] + return [ + ListItem( + value=app_dashboard.StatusItem(label=label, description=""), + label=label, + ) + for label in labels + ] def test_tab_data_creation(self) -> None: """TabData can be created with required fields.""" @@ -111,7 +118,14 @@ class TestDashboardState: def _make_tab_data(self, tab: DashboardTab, count: int = 2) -> TabData: """Helper to create tab data.""" items = [ - ListItem(value=f"{tab.name}_{i}", label=f"{tab.name} Item {i}") for i in range(count) + ListItem( + value=app_dashboard.StatusItem( + label=f"{tab.name} Item {i}", + description="", + ), + label=f"{tab.name} Item {i}", + ) + for i in range(count) ] return TabData( tab=tab, @@ -210,7 +224,12 @@ def _make_state_with_all_tabs(self) -> DashboardState: """Helper to create state with all tabs.""" tabs = {} for tab in DashboardTab: - items = [ListItem(value=tab.name, label=tab.name)] + items = [ + ListItem( + value=app_dashboard.StatusItem(label=tab.name, description=""), + label=tab.name, + ) + ] tabs[tab] = TabData( tab=tab, title=tab.display_name, @@ -275,7 +294,12 @@ def _make_dashboard(self) -> Dashboard: """Helper to create a dashboard instance.""" tabs = {} for tab in DashboardTab: - items = [ListItem(value=tab.name, label=tab.name)] + items = [ + ListItem( + value=app_dashboard.StatusItem(label=tab.name, description=""), + label=tab.name, + ) + ] tabs[tab] = TabData( tab=tab, title=tab.display_name, @@ -346,8 +370,20 @@ def _make_dashboard(self) -> Dashboard: tabs = {} for tab in DashboardTab: items = [ - ListItem(value=f"{tab.name}_1", label=f"{tab.display_name} Item 1"), - ListItem(value=f"{tab.name}_2", label=f"{tab.display_name} Item 2"), + ListItem( + value=app_dashboard.StatusItem( + label=f"{tab.display_name} Item 1", + description="", + ), + label=f"{tab.display_name} Item 1", + ), + ListItem( + value=app_dashboard.StatusItem( + label=f"{tab.display_name} Item 2", + description="", + ), + label=f"{tab.display_name} Item 2", + ), ] tabs[tab] = TabData( tab=tab, @@ -425,7 +461,15 @@ def test_includes_team_info_when_selected(self) -> None: data = _load_status_tab_data() - team_item = next((item for item in data.items if item.value == "team"), None) + team_item = next( + ( + item + for item in data.items + if isinstance(item.value, app_dashboard.StatusItem) + and item.value.action is app_dashboard.StatusAction.SWITCH_TEAM + ), + None, + ) assert team_item is not None # Team name is in the label using colon syntax assert "production-team" in team_item.label @@ -441,7 +485,15 @@ def test_handles_no_team_selected(self) -> None: data = _load_status_tab_data() - team_item = next((item for item in data.items if item.value == "team"), None) + team_item = next( + ( + item + for item in data.items + if isinstance(item.value, app_dashboard.StatusItem) + and item.value.action is app_dashboard.StatusAction.SWITCH_TEAM + ), + None, + ) assert team_item is not None # Uses colon syntax: "Team: none" assert "none" in team_item.label @@ -463,7 +515,14 @@ def test_includes_container_count(self) -> None: data = _load_status_tab_data() containers_item = next( - (item for item in data.items if item.value == "containers"), None + ( + item + for item in data.items + if isinstance(item.value, app_dashboard.StatusItem) + and item.value.action is app_dashboard.StatusAction.OPEN_TAB + and item.value.action_tab == DashboardTab.CONTAINERS + ), + None, ) assert containers_item is not None # Container count in label using colon syntax: "Containers: 1/2 running" @@ -476,7 +535,15 @@ def test_handles_config_error_gracefully(self) -> None: data = _load_status_tab_data() - error_item = next((item for item in data.items if item.value == "config_error"), None) + error_item = next( + ( + item + for item in data.items + if isinstance(item.value, app_dashboard.StatusItem) + and "Config: error" in item.label + ), + None, + ) assert error_item is not None # Config error in label using colon syntax: "Config: error" assert "error" in error_item.label @@ -509,8 +576,9 @@ def test_lists_containers_with_status(self) -> None: data = _load_containers_tab_data() assert len(data.items) == 1 - # Value is now a ContainerInfo object for full metadata access - assert data.items[0].value.id == "abc123" + container_item = data.items[0].value + assert isinstance(container_item, app_dashboard.ContainerItem) + assert container_item.container.id == "abc123" assert data.items[0].label == "scc-myproject" # Description shows: workspace name · status indicator · time assert "myproject" in data.items[0].description @@ -548,6 +616,9 @@ def test_shows_no_containers_message(self) -> None: data = _load_containers_tab_data() assert len(data.items) == 1 + placeholder = data.items[0].value + assert isinstance(placeholder, app_dashboard.PlaceholderItem) + assert placeholder.kind is app_dashboard.PlaceholderKind.NO_CONTAINERS assert "No containers" in data.items[0].label def test_handles_docker_error_gracefully(self) -> None: @@ -558,7 +629,9 @@ def test_handles_docker_error_gracefully(self) -> None: data = _load_containers_tab_data() assert len(data.items) == 1 - assert data.items[0].value == "error" + placeholder = data.items[0].value + assert isinstance(placeholder, app_dashboard.PlaceholderItem) + assert placeholder.kind is app_dashboard.PlaceholderKind.ERROR assert "Unable to query Docker" in data.items[0].description @@ -602,6 +675,9 @@ def test_shows_no_sessions_message(self) -> None: data = _load_sessions_tab_data() assert len(data.items) == 1 + placeholder = data.items[0].value + assert isinstance(placeholder, app_dashboard.PlaceholderItem) + assert placeholder.kind is app_dashboard.PlaceholderKind.NO_SESSIONS assert "No sessions" in data.items[0].label def test_handles_sessions_error_gracefully(self) -> None: @@ -612,7 +688,9 @@ def test_handles_sessions_error_gracefully(self) -> None: data = _load_sessions_tab_data() assert len(data.items) == 1 - assert data.items[0].value == "error" + placeholder = data.items[0].value + assert isinstance(placeholder, app_dashboard.PlaceholderItem) + assert placeholder.kind is app_dashboard.PlaceholderKind.ERROR class TestLoadWorktreesTabData: @@ -620,7 +698,7 @@ class TestLoadWorktreesTabData: def test_returns_tab_data_with_worktrees_tab(self) -> None: """Returns TabData for WORKTREES tab.""" - with patch("scc_cli.git.list_worktrees") as mock_git: + with patch("scc_cli.services.git.worktree.get_worktrees_data") as mock_git: mock_git.return_value = [] data = _load_worktrees_tab_data() @@ -630,7 +708,7 @@ def test_returns_tab_data_with_worktrees_tab(self) -> None: def test_lists_worktrees_with_branch_info(self) -> None: """Lists worktrees with branch and status.""" - with patch("scc_cli.git.list_worktrees") as mock_git: + with patch("scc_cli.services.git.worktree.get_worktrees_data") as mock_git: worktree = MagicMock() worktree.path = "/home/user/project-main" worktree.branch = "main" @@ -647,7 +725,7 @@ def test_lists_worktrees_with_branch_info(self) -> None: def test_shows_modified_indicator(self) -> None: """Shows modified indicator for worktrees with changes.""" - with patch("scc_cli.git.list_worktrees") as mock_git: + with patch("scc_cli.services.git.worktree.get_worktrees_data") as mock_git: worktree = MagicMock() worktree.path = "/home/user/feature" worktree.branch = "feature/test" @@ -661,22 +739,28 @@ def test_shows_modified_indicator(self) -> None: def test_shows_no_worktrees_message_when_not_git_repo(self) -> None: """Shows message when not in a git repository.""" - with patch("scc_cli.git.list_worktrees") as mock_git: + with patch("scc_cli.services.git.worktree.get_worktrees_data") as mock_git: mock_git.return_value = [] data = _load_worktrees_tab_data() assert len(data.items) == 1 + placeholder = data.items[0].value + assert isinstance(placeholder, app_dashboard.PlaceholderItem) + assert placeholder.kind is app_dashboard.PlaceholderKind.NO_WORKTREES assert "No worktrees" in data.items[0].label def test_handles_git_error_gracefully(self) -> None: """Shows error message when git query fails.""" - with patch("scc_cli.git.list_worktrees") as mock_git: + with patch("scc_cli.services.git.worktree.get_worktrees_data") as mock_git: mock_git.side_effect = Exception("Git error") data = _load_worktrees_tab_data() assert len(data.items) == 1 + placeholder = data.items[0].value + assert isinstance(placeholder, app_dashboard.PlaceholderItem) + assert placeholder.kind is app_dashboard.PlaceholderKind.NO_GIT assert "Not available" in data.items[0].label @@ -685,42 +769,41 @@ class TestLoadAllTabData: def test_returns_dict_with_all_tabs(self) -> None: """Returns data for all dashboard tabs.""" - with patch("scc_cli.ui.dashboard._load_status_tab_data") as mock_status: - with patch("scc_cli.ui.dashboard._load_containers_tab_data") as mock_containers: - with patch("scc_cli.ui.dashboard._load_sessions_tab_data") as mock_sessions: - with patch("scc_cli.ui.dashboard._load_worktrees_tab_data") as mock_worktrees: - mock_status.return_value = TabData( - tab=DashboardTab.STATUS, - title="Status", - items=[], - count_active=0, - count_total=0, - ) - mock_containers.return_value = TabData( - tab=DashboardTab.CONTAINERS, - title="Containers", - items=[], - count_active=0, - count_total=0, - ) - mock_sessions.return_value = TabData( - tab=DashboardTab.SESSIONS, - title="Sessions", - items=[], - count_active=0, - count_total=0, - ) - mock_worktrees.return_value = TabData( - tab=DashboardTab.WORKTREES, - title="Worktrees", - items=[], - count_active=0, - count_total=0, - ) - - data = _load_all_tab_data() - - assert DashboardTab.STATUS in data - assert DashboardTab.CONTAINERS in data - assert DashboardTab.SESSIONS in data - assert DashboardTab.WORKTREES in data + with patch("scc_cli.application.dashboard.load_all_tab_data") as mock_all: + mock_all.return_value = { + DashboardTab.STATUS: app_dashboard.DashboardTabData( + tab=DashboardTab.STATUS, + title="Status", + items=[], + count_active=0, + count_total=0, + ), + DashboardTab.CONTAINERS: app_dashboard.DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=[], + count_active=0, + count_total=0, + ), + DashboardTab.SESSIONS: app_dashboard.DashboardTabData( + tab=DashboardTab.SESSIONS, + title="Sessions", + items=[], + count_active=0, + count_total=0, + ), + DashboardTab.WORKTREES: app_dashboard.DashboardTabData( + tab=DashboardTab.WORKTREES, + title="Worktrees", + items=[], + count_active=0, + count_total=0, + ), + } + + data = _load_all_tab_data() + + assert DashboardTab.STATUS in data + assert DashboardTab.CONTAINERS in data + assert DashboardTab.SESSIONS in data + assert DashboardTab.WORKTREES in data diff --git a/tests/test_ui_integration.py b/tests/test_ui_integration.py index b64b8b9..a597d41 100644 --- a/tests/test_ui_integration.py +++ b/tests/test_ui_integration.py @@ -9,11 +9,14 @@ from __future__ import annotations from io import StringIO +from typing import Any from unittest.mock import MagicMock, patch import pytest from rich.console import Console, RenderableType +from scc_cli.application import dashboard as app_dashboard +from scc_cli.docker.core import ContainerInfo from scc_cli.ui.dashboard import ( Dashboard, DashboardState, @@ -33,6 +36,72 @@ def _render_to_str(renderable: RenderableType) -> str: return console.file.getvalue() # type: ignore[union-attr] +def _status_item( + label: str, + description: str = "", + *, + action: app_dashboard.StatusAction | None = None, + action_tab: DashboardTab | None = None, + session: dict[str, Any] | None = None, +) -> ListItem[app_dashboard.DashboardItem]: + item = app_dashboard.StatusItem( + label=label, + description=description, + action=action, + action_tab=action_tab, + session=session, + ) + return ListItem(value=item, label=label, description=description) + + +def _container_item( + container_id: str, + name: str, + description: str, + *, + status: str = "Up", +) -> ListItem[app_dashboard.DashboardItem]: + container = ContainerInfo(id=container_id, name=name, status=status) + item = app_dashboard.ContainerItem(label=name, description=description, container=container) + return ListItem(value=item, label=name, description=description) + + +def _session_item( + label: str, + description: str, + session: dict[str, object] | None = None, +) -> ListItem[app_dashboard.DashboardItem]: + session_data = session or {"name": label} + item = app_dashboard.SessionItem(label=label, description=description, session=session_data) + return ListItem(value=item, label=label, description=description) + + +def _worktree_item( + label: str, + description: str, + path: str | None = None, +) -> ListItem[app_dashboard.DashboardItem]: + worktree_path = path or label + item = app_dashboard.WorktreeItem(label=label, description=description, path=worktree_path) + return ListItem(value=item, label=label, description=description) + + +def _placeholder_item( + label: str, + description: str, + *, + kind: app_dashboard.PlaceholderKind, + startable: bool = False, +) -> ListItem[app_dashboard.DashboardItem]: + item = app_dashboard.PlaceholderItem( + label=label, + description=description, + kind=kind, + startable=startable, + ) + return ListItem(value=item, label=label, description=description) + + class TestDashboardTabNavigation: """Test dashboard tab switching behavior.""" @@ -44,7 +113,11 @@ def mock_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.STATUS, title="Status", items=[ - ListItem(value="team", label="Team", description="platform"), + _status_item( + "Team", + "platform", + action=app_dashboard.StatusAction.SWITCH_TEAM, + ), ], count_active=1, count_total=1, @@ -53,8 +126,8 @@ def mock_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.CONTAINERS, title="Containers", items=[ - ListItem(value="c1", label="scc-main", description="Up 2h"), - ListItem(value="c2", label="scc-dev", description="Exited"), + _container_item("c1", "scc-main", "Up 2h", status="Up 2h"), + _container_item("c2", "scc-dev", "Exited", status="Exited"), ], count_active=1, count_total=2, @@ -63,7 +136,11 @@ def mock_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.SESSIONS, title="Sessions", items=[ - ListItem(value="s1", label="session-1", description="platform"), + _session_item( + "session-1", + "platform", + session={"name": "session-1", "team": "platform"}, + ), ], count_active=1, count_total=1, @@ -72,7 +149,7 @@ def mock_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.WORKTREES, title="Worktrees", items=[ - ListItem(value="w1", label="main", description="main branch"), + _worktree_item("main", "main branch", path="w1"), ], count_active=0, count_total=1, @@ -172,9 +249,9 @@ def mock_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.STATUS, title="Status", items=[ - ListItem(value="item1", label="Item 1", description="First"), - ListItem(value="item2", label="Item 2", description="Second"), - ListItem(value="item3", label="Item 3", description="Third"), + _status_item("Item 1", "First"), + _status_item("Item 2", "Second"), + _status_item("Item 3", "Third"), ], count_active=3, count_total=3, @@ -248,8 +325,8 @@ def mock_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.STATUS, title="Status", items=[ - ListItem(value="team", label="Team", description="platform"), - ListItem(value="config", label="Config", description="settings"), + _status_item("Team", "platform"), + _status_item("Config", "settings"), ], count_active=2, count_total=2, @@ -332,7 +409,7 @@ def _reset_cli_module(self) -> None: # Remove cached modules to ensure fresh imports and prevent test pollution modules_to_reset = [ "scc_cli.cli", - "scc_cli.commands.launch.app", + "scc_cli.commands.launch.flow", "scc_cli.services.workspace", ] for module in modules_to_reset: @@ -416,7 +493,7 @@ def mock_tab_data(self) -> dict[DashboardTab, TabData]: tab: TabData( tab=tab, title=tab.display_name, - items=[ListItem(value="test", label="Test", description="")], + items=[_status_item("Test", "")], count_active=1, count_total=1, ) @@ -474,7 +551,13 @@ def test_enter_on_team_row_in_standalone_shows_message( mock_tab_data[DashboardTab.STATUS] = TabData( tab=DashboardTab.STATUS, title="Status", - items=[ListItem(value="team", label="Team", description="No team")], + items=[ + _status_item( + "Team", + "No team", + action=app_dashboard.StatusAction.SWITCH_TEAM, + ) + ], count_active=1, count_total=1, ) @@ -545,9 +628,23 @@ def status_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.STATUS, title="Status", items=[ - ListItem(value="team", label="Team", description="platform"), - ListItem(value="containers", label="Containers", description="2/3"), - ListItem(value="sessions", label="Sessions", description="5"), + _status_item( + "Team", + "platform", + action=app_dashboard.StatusAction.SWITCH_TEAM, + ), + _status_item( + "Containers", + "2/3", + action=app_dashboard.StatusAction.OPEN_TAB, + action_tab=DashboardTab.CONTAINERS, + ), + _status_item( + "Sessions", + "5", + action=app_dashboard.StatusAction.OPEN_TAB, + action_tab=DashboardTab.SESSIONS, + ), ], count_active=3, count_total=3, @@ -555,7 +652,7 @@ def status_tab_data(self) -> dict[DashboardTab, TabData]: DashboardTab.CONTAINERS: TabData( tab=DashboardTab.CONTAINERS, title="Containers", - items=[ListItem(value="c1", label="container-1", description="Up")], + items=[_container_item("c1", "container-1", "Up", status="Up")], count_active=1, count_total=1, ), @@ -604,7 +701,9 @@ def test_load_all_tab_data_returns_all_tabs(self) -> None: with patch("scc_cli.config.load_user_config") as mock_config: with patch("scc_cli.sessions.list_recent") as mock_sessions: with patch("scc_cli.docker.core.list_scc_containers") as mock_docker: - with patch("scc_cli.git.list_worktrees") as mock_worktrees: + with patch( + "scc_cli.services.git.worktree.get_worktrees_data" + ) as mock_worktrees: mock_config.return_value = {} mock_sessions.return_value = [] mock_docker.return_value = [] @@ -628,7 +727,13 @@ def resource_tab_data(self) -> dict[DashboardTab, TabData]: DashboardTab.STATUS: TabData( tab=DashboardTab.STATUS, title="Status", - items=[ListItem(value="team", label="Team", description="platform")], + items=[ + _status_item( + "Team", + "platform", + action=app_dashboard.StatusAction.SWITCH_TEAM, + ) + ], count_active=1, count_total=1, ), @@ -636,8 +741,8 @@ def resource_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.CONTAINERS, title="Containers", items=[ - ListItem(value="c1", label="scc-main", description="Up 2h"), - ListItem(value="c2", label="scc-dev", description="Exited"), + _container_item("c1", "scc-main", "Up 2h", status="Up 2h"), + _container_item("c2", "scc-dev", "Exited", status="Exited"), ], count_active=1, count_total=2, @@ -645,14 +750,20 @@ def resource_tab_data(self) -> dict[DashboardTab, TabData]: DashboardTab.SESSIONS: TabData( tab=DashboardTab.SESSIONS, title="Sessions", - items=[ListItem(value="s1", label="session-1", description="platform")], + items=[ + _session_item( + "session-1", + "platform", + session={"name": "session-1", "team": "platform"}, + ) + ], count_active=1, count_total=1, ), DashboardTab.WORKTREES: TabData( tab=DashboardTab.WORKTREES, title="Worktrees", - items=[ListItem(value="w1", label="main", description="main branch")], + items=[_worktree_item("main", "main branch", path="w1")], count_active=0, count_total=1, ), @@ -665,7 +776,13 @@ def placeholder_tab_data(self) -> dict[DashboardTab, TabData]: DashboardTab.STATUS: TabData( tab=DashboardTab.STATUS, title="Status", - items=[ListItem(value="team", label="Team", description="platform")], + items=[ + _status_item( + "Team", + "platform", + action=app_dashboard.StatusAction.SWITCH_TEAM, + ) + ], count_active=1, count_total=1, ), @@ -673,10 +790,11 @@ def placeholder_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.CONTAINERS, title="Containers", items=[ - ListItem( - value="no_containers", - label="No containers", - description="Run 'scc start' to create one", + _placeholder_item( + "No containers", + "Run 'scc start' to create one", + kind=app_dashboard.PlaceholderKind.NO_CONTAINERS, + startable=True, ) ], count_active=0, @@ -686,7 +804,11 @@ def placeholder_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.SESSIONS, title="Sessions", items=[ - ListItem(value="error", label="Error", description="Unable to load sessions") + _placeholder_item( + "Error", + "Unable to load sessions", + kind=app_dashboard.PlaceholderKind.ERROR, + ) ], count_active=0, count_total=0, @@ -695,10 +817,10 @@ def placeholder_tab_data(self) -> dict[DashboardTab, TabData]: tab=DashboardTab.WORKTREES, title="Worktrees", items=[ - ListItem( - value="no_git", - label="Not available", - description="Not in a git repository", + _placeholder_item( + "Not available", + "Not in a git repository", + kind=app_dashboard.PlaceholderKind.NO_GIT, ) ], count_active=0, @@ -733,15 +855,15 @@ def test_enter_on_session_tab_raises_resume( """Enter on Sessions tab raises SessionResumeRequested (primary action is resume).""" from scc_cli.ui.keys import SessionResumeRequested - # Update Sessions tab to have a dict value (required for session resume) + # Update Sessions tab to include a resumable session item resource_tab_data[DashboardTab.SESSIONS] = TabData( tab=DashboardTab.SESSIONS, title="Sessions", items=[ - ListItem( - value={"id": "s1", "name": "session-1"}, - label="session-1", - description="platform", + _session_item( + "session-1", + "platform", + session={"id": "s1", "name": "session-1"}, ) ], count_active=1, @@ -817,7 +939,13 @@ def test_enter_on_startable_placeholder_raises_start_requested( def test_enter_on_non_startable_placeholder_shows_tip(self) -> None: """Enter on non-startable placeholder (no_worktrees, no_git) shows tip message.""" # Create a worktree tab with no_worktrees placeholder - worktree_items = [ListItem(value="no_worktrees", label="No worktrees")] + worktree_items = [ + _placeholder_item( + "No worktrees", + "", + kind=app_dashboard.PlaceholderKind.NO_WORKTREES, + ) + ] tab_data = { DashboardTab.STATUS: TabData( tab=DashboardTab.STATUS, title="Status", items=[], count_active=0, count_total=0 @@ -865,15 +993,22 @@ def test_is_placeholder_selected_detects_placeholders( self, placeholder_tab_data: dict[DashboardTab, TabData] ) -> None: """is_placeholder_selected() returns True for all known placeholder values.""" - placeholder_values = ["no_containers", "no_sessions", "no_worktrees", "no_git", "error"] + placeholder_kinds = [ + app_dashboard.PlaceholderKind.NO_CONTAINERS, + app_dashboard.PlaceholderKind.NO_SESSIONS, + app_dashboard.PlaceholderKind.NO_WORKTREES, + app_dashboard.PlaceholderKind.NO_GIT, + app_dashboard.PlaceholderKind.ERROR, + ] - for placeholder in placeholder_values: + for kind in placeholder_kinds: + item = _placeholder_item("Test", "", kind=kind) state = DashboardState( active_tab=DashboardTab.CONTAINERS, tabs=placeholder_tab_data, - list_state=ListState(items=[ListItem(value=placeholder, label="Test")]), + list_state=ListState(items=[item]), ) - assert state.is_placeholder_selected() is True, f"Failed for {placeholder}" + assert state.is_placeholder_selected() is True, f"Failed for {kind}" def test_is_placeholder_selected_false_for_real_items( self, resource_tab_data: dict[DashboardTab, TabData] @@ -983,8 +1118,13 @@ def test_startable_placeholder_shows_enter_start_hint( ) -> None: """Startable placeholder shows 'Enter start' in footer hints.""" # Create container data with startable placeholder - placeholder_items: list[ListItem[str]] = [ - ListItem(value="no_containers", label="No containers", description="Start one"), + placeholder_items = [ + _placeholder_item( + "No containers", + "Start one", + kind=app_dashboard.PlaceholderKind.NO_CONTAINERS, + startable=True, + ) ] tab_data = dict(resource_tab_data) tab_data[DashboardTab.CONTAINERS] = TabData( @@ -1039,9 +1179,9 @@ def test_filter_changes_selection_updates_details( """Filter changes affect selection which updates details (regression test).""" # Add more items to Containers for meaningful filtering containers_items = [ - ListItem(value="c1", label="scc-main", description="Up 2h"), - ListItem(value="c2", label="scc-dev", description="Exited"), - ListItem(value="c3", label="other-container", description="Up 1h"), + _container_item("c1", "scc-main", "Up 2h", status="Up 2h"), + _container_item("c2", "scc-dev", "Exited", status="Exited"), + _container_item("c3", "other-container", "Up 1h", status="Up 1h"), ] resource_tab_data[DashboardTab.CONTAINERS] = TabData( tab=DashboardTab.CONTAINERS, diff --git a/tests/test_ui_keys.py b/tests/test_ui_keys.py index fa18d23..ba63d0c 100644 --- a/tests/test_ui_keys.py +++ b/tests/test_ui_keys.py @@ -358,11 +358,13 @@ def test_dashboard_handles_team_switch(self) -> None: """Dashboard handles TEAM_SWITCH by raising TeamSwitchRequested.""" from unittest.mock import patch + from scc_cli.application import dashboard as app_dashboard from scc_cli.ui.dashboard import Dashboard, DashboardState, DashboardTab, TabData from scc_cli.ui.list_screen import ListItem, ListState # Create minimal dashboard state - items: list[ListItem[str]] = [ListItem(value="test", label="Test")] + status_item = app_dashboard.StatusItem(label="Test", description="") + items = [ListItem(value=status_item, label="Test", description="")] tab_data = TabData( tab=DashboardTab.STATUS, title="Status", diff --git a/tests/test_ui_wizard.py b/tests/test_ui_wizard.py index 5432fe1..5a04845 100644 --- a/tests/test_ui_wizard.py +++ b/tests/test_ui_wizard.py @@ -300,7 +300,7 @@ def test_cwd_option_shown_for_non_workspace_with_warning(self, tmp_path: Path) - # But it's also not suspicious (not home, /, tmp, etc.) # So the option SHOULD appear with a warning with patch("scc_cli.ui.wizard.Path.cwd", return_value=tmp_path): - with patch("scc_cli.git.is_git_repo", return_value=False): + with patch("scc_cli.services.git.is_git_repo", return_value=False): with patch("scc_cli.ui.wizard._run_single_select_picker") as mock_picker: mock_picker.return_value = None pick_workspace_source() diff --git a/tests/test_worktree_cli.py b/tests/test_worktree_cli.py index a5aaf2f..5014ddd 100644 --- a/tests/test_worktree_cli.py +++ b/tests/test_worktree_cli.py @@ -16,7 +16,8 @@ import click import pytest -from scc_cli.git import WorktreeInfo, render_worktrees +from scc_cli.git import WorktreeInfo +from scc_cli.ui import render_worktrees # ═══════════════════════════════════════════════════════════════════════════════ # Tests for Worktree CLI Structure @@ -62,14 +63,14 @@ def test_worktree_app_has_remove_command(self) -> None: class TestWorktreeCreate: """Test scc worktree create command.""" - def test_create_calls_git_create_worktree(self, tmp_path: Path) -> None: - """create should call git.create_worktree with correct args.""" + def test_create_calls_ui_create_worktree(self, tmp_path: Path) -> None: + """create should call ui.create_worktree with correct args.""" from scc_cli.commands.worktree import worktree_create_cmd with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), patch("scc_cli.commands.worktree.worktree_commands.git.has_commits", return_value=True), - patch("scc_cli.commands.worktree.worktree_commands.git.create_worktree") as mock_create, + patch("scc_cli.commands.worktree.worktree_commands.create_worktree") as mock_create, patch("scc_cli.commands.worktree.worktree_commands.Confirm.ask", return_value=False), ): mock_create.return_value = tmp_path / "worktrees" / "feature" @@ -89,13 +90,13 @@ def test_create_calls_git_create_worktree(self, tmp_path: Path) -> None: assert call_args[0][1] == "feature" def test_create_with_base_branch(self, tmp_path: Path) -> None: - """create with --base should pass branch to git.create_worktree.""" + """create with --base should pass branch to ui.create_worktree.""" from scc_cli.commands.worktree import worktree_create_cmd with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), patch("scc_cli.commands.worktree.worktree_commands.git.has_commits", return_value=True), - patch("scc_cli.commands.worktree.worktree_commands.git.create_worktree") as mock_create, + patch("scc_cli.commands.worktree.worktree_commands.create_worktree") as mock_create, patch("scc_cli.commands.worktree.worktree_commands.Confirm.ask", return_value=False), ): mock_create.return_value = tmp_path / "worktrees" / "feature" @@ -143,13 +144,13 @@ def test_create_raises_for_non_repo(self, tmp_path: Path) -> None: class TestWorktreeList: """Test scc worktree list command.""" - def test_list_calls_git_list_worktrees(self, tmp_path: Path) -> None: - """list should call git.list_worktrees.""" + def test_list_calls_ui_list_worktrees(self, tmp_path: Path) -> None: + """list should call ui.list_worktrees.""" from scc_cli.commands.worktree import worktree_list_cmd with ( - patch("scc_cli.commands.worktree.worktree_commands.git.list_worktrees") as mock_list, - patch("scc_cli.commands.worktree.worktree_commands.git.render_worktrees"), + patch("scc_cli.commands.worktree.worktree_commands.list_worktrees") as mock_list, + patch("scc_cli.commands.worktree.worktree_commands.render_worktrees"), ): mock_list.return_value = [ WorktreeInfo(path=str(tmp_path), branch="main", status="clean") @@ -170,8 +171,8 @@ def test_list_json_has_correct_kind(self, tmp_path: Path, capsys) -> None: from scc_cli.commands.worktree import worktree_list_cmd with ( - patch("scc_cli.commands.worktree.worktree_commands.git.list_worktrees") as mock_list, - patch("scc_cli.commands.worktree.worktree_commands.git.render_worktrees"), + patch("scc_cli.commands.worktree.worktree_commands.list_worktrees") as mock_list, + patch("scc_cli.commands.worktree.worktree_commands.render_worktrees"), ): mock_list.return_value = [ WorktreeInfo(path=str(tmp_path), branch="main", status="clean") @@ -200,8 +201,8 @@ def test_list_json_contains_worktrees(self, tmp_path: Path, capsys) -> None: ] with ( - patch("scc_cli.commands.worktree.worktree_commands.git.list_worktrees") as mock_list, - patch("scc_cli.commands.worktree.worktree_commands.git.render_worktrees"), + patch("scc_cli.commands.worktree.worktree_commands.list_worktrees") as mock_list, + patch("scc_cli.commands.worktree.worktree_commands.render_worktrees"), ): mock_list.return_value = worktrees try: @@ -223,8 +224,8 @@ def test_list_json_empty_worktrees(self, tmp_path: Path, capsys) -> None: from scc_cli.commands.worktree import worktree_list_cmd with ( - patch("scc_cli.commands.worktree.worktree_commands.git.list_worktrees") as mock_list, - patch("scc_cli.commands.worktree.worktree_commands.git.render_worktrees"), + patch("scc_cli.commands.worktree.worktree_commands.list_worktrees") as mock_list, + patch("scc_cli.commands.worktree.worktree_commands.render_worktrees"), ): mock_list.return_value = [] try: @@ -262,13 +263,11 @@ def test_render_worktrees_detached_branch_shows_label(self) -> None: class TestWorktreeRemove: """Test scc worktree remove command.""" - def test_remove_calls_git_cleanup_worktree(self, tmp_path: Path) -> None: - """remove should call git.cleanup_worktree with correct args.""" + def test_remove_calls_ui_cleanup_worktree(self, tmp_path: Path) -> None: + """remove should call ui.cleanup_worktree with correct args.""" from scc_cli.commands.worktree import worktree_remove_cmd - with patch( - "scc_cli.commands.worktree.worktree_commands.git.cleanup_worktree" - ) as mock_cleanup: + with patch("scc_cli.commands.worktree.worktree_commands.cleanup_worktree") as mock_cleanup: mock_cleanup.return_value = True try: worktree_remove_cmd( @@ -287,9 +286,7 @@ def test_remove_with_force_flag(self, tmp_path: Path) -> None: """remove with --force should pass force=True to cleanup.""" from scc_cli.commands.worktree import worktree_remove_cmd - with patch( - "scc_cli.commands.worktree.worktree_commands.git.cleanup_worktree" - ) as mock_cleanup: + with patch("scc_cli.commands.worktree.worktree_commands.cleanup_worktree") as mock_cleanup: mock_cleanup.return_value = True try: worktree_remove_cmd( @@ -559,9 +556,7 @@ def test_select_no_worktrees_has_actionable_error(self, tmp_path: Path, capsys) with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), - patch( - "scc_cli.commands.worktree.worktree_commands.git.list_worktrees", return_value=[] - ), + patch("scc_cli.commands.worktree.worktree_commands.list_worktrees", return_value=[]), patch( "scc_cli.commands.worktree.worktree_commands.git.list_branches_without_worktrees", return_value=[], @@ -601,7 +596,7 @@ def test_list_json_output_is_valid(self, tmp_path: Path) -> None: with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), patch( - "scc_cli.commands.worktree.worktree_commands.git.list_worktrees", + "scc_cli.commands.worktree.worktree_commands.list_worktrees", return_value=worktrees, ), ): @@ -627,7 +622,7 @@ def test_list_json_contains_worktree_data(self, tmp_path: Path) -> None: with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), patch( - "scc_cli.commands.worktree.worktree_commands.git.list_worktrees", + "scc_cli.commands.worktree.worktree_commands.list_worktrees", return_value=worktrees, ), ): @@ -662,9 +657,7 @@ def test_select_no_worktrees_exits(self, tmp_path: Path) -> None: with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), - patch( - "scc_cli.commands.worktree.worktree_commands.git.list_worktrees", return_value=[] - ), + patch("scc_cli.commands.worktree.worktree_commands.list_worktrees", return_value=[]), ): with pytest.raises(click.exceptions.Exit) as exc_info: worktree_select_cmd(workspace=str(tmp_path), branches=False) @@ -827,13 +820,13 @@ class TestWorktreeListVerbose: This prevents the flag from becoming a no-op during refactoring. """ - def test_list_passes_verbose_to_git(self, tmp_path: Path) -> None: - """list --verbose should pass verbose=True to git.list_worktrees.""" + def test_list_passes_verbose_to_ui(self, tmp_path: Path) -> None: + """list --verbose should pass verbose=True to ui.list_worktrees.""" from scc_cli.commands.worktree import worktree_list_cmd with ( - patch("scc_cli.commands.worktree.worktree_commands.git.list_worktrees") as mock_list, - patch("scc_cli.commands.worktree.worktree_commands.git.render_worktrees"), + patch("scc_cli.commands.worktree.worktree_commands.list_worktrees") as mock_list, + patch("scc_cli.commands.worktree.worktree_commands.render_worktrees"), ): mock_list.return_value = [ WorktreeInfo(path=str(tmp_path), branch="main", status="clean") @@ -858,7 +851,7 @@ def test_verbose_triggers_get_worktree_status(self, tmp_path: Path) -> None: This is the critical contract test that ensures -v flag actually fetches git status instead of becoming a silent no-op. """ - from scc_cli.git import list_worktrees + from scc_cli.ui import list_worktrees # Create a mock worktree with a path mock_worktree_path = str(tmp_path) @@ -890,7 +883,7 @@ def test_verbose_false_skips_status_check(self, tmp_path: Path) -> None: This verifies the performance benefit of the non-verbose path. """ - from scc_cli.git import list_worktrees + from scc_cli.ui import list_worktrees with ( patch("scc_cli.ui.git_interactive.get_worktrees_data") as mock_get_data, @@ -1139,7 +1132,7 @@ def test_enter_caret_uses_main_worktree(self, tmp_path: Path) -> None: return_value="main", ), patch( - "scc_cli.commands.worktree.worktree_commands.git.list_worktrees", + "scc_cli.commands.worktree.worktree_commands.list_worktrees", return_value=[main_worktree], ), patch("subprocess.run") as mock_run, @@ -1165,7 +1158,7 @@ def test_enter_no_target_would_show_picker(self, tmp_path: Path) -> None: with ( patch("scc_cli.commands.worktree.worktree_commands.git.is_git_repo", return_value=True), patch( - "scc_cli.commands.worktree.worktree_commands.git.list_worktrees", + "scc_cli.commands.worktree.worktree_commands.list_worktrees", return_value=[worktree], ), patch("scc_cli.commands.worktree.worktree_commands.pick_worktree") as mock_picker, @@ -1280,7 +1273,7 @@ def test_non_git_repo_interactive_accepts_init(self, tmp_path: Path, capsys) -> ), # User accepts init patch("scc_cli.commands.worktree.worktree_commands.git.init_repo", return_value=True), patch("scc_cli.commands.worktree.worktree_commands.git.has_commits", return_value=True), - patch("scc_cli.commands.worktree.worktree_commands.git.create_worktree") as mock_create, + patch("scc_cli.commands.worktree.worktree_commands.create_worktree") as mock_create, ): mock_create.return_value = tmp_path / "feature-x" try: diff --git a/tests/test_worktree_guidance.py b/tests/test_worktree_guidance.py index 53ab845..06ff530 100644 --- a/tests/test_worktree_guidance.py +++ b/tests/test_worktree_guidance.py @@ -13,7 +13,7 @@ def test_warns_when_in_main_repo() -> None: with ( patch("scc_cli.commands.launch.render.git.is_git_repo", return_value=True), patch("scc_cli.commands.launch.render.git.is_worktree", return_value=False), - patch("scc_cli.commands.launch.render.print_human") as mock_print, + patch("scc_cli.commands.launch.render.print_with_layout") as mock_print, ): warn_if_non_worktree(workspace, json_mode=False) @@ -27,7 +27,7 @@ def test_no_warning_for_worktree() -> None: with ( patch("scc_cli.commands.launch.render.git.is_git_repo", return_value=True), patch("scc_cli.commands.launch.render.git.is_worktree", return_value=True), - patch("scc_cli.commands.launch.render.print_human") as mock_print, + patch("scc_cli.commands.launch.render.print_with_layout") as mock_print, ): warn_if_non_worktree(workspace, json_mode=False) @@ -40,7 +40,7 @@ def test_no_warning_for_non_repo() -> None: with ( patch("scc_cli.commands.launch.render.git.is_git_repo", return_value=False), - patch("scc_cli.commands.launch.render.print_human") as mock_print, + patch("scc_cli.commands.launch.render.print_with_layout") as mock_print, ): warn_if_non_worktree(workspace, json_mode=False) diff --git a/uv.lock b/uv.lock index a2ef102..47f5e44 100644 --- a/uv.lock +++ b/uv.lock @@ -951,7 +951,7 @@ wheels = [ [[package]] name = "scc-cli" -version = "1.6.3" +version = "1.6.4" source = { editable = "." } dependencies = [ { name = "jsonschema" },