From 989eaaa6b9a36a8aec5a423e815e21de2dbffa78 Mon Sep 17 00:00:00 2001 From: Andrew Lee Date: Mon, 19 Jan 2026 12:28:40 -0800 Subject: [PATCH 1/2] Add release infrastructure (#72) From 5f8e68e45c461beb072b7d16c660002257dc3791 Mon Sep 17 00:00:00 2001 From: Andrew Lee Date: Mon, 19 Jan 2026 09:52:38 -0800 Subject: [PATCH 2/2] Add sandbox CLI and integrate with TUI - Add full sandbox CLI (amux sandbox run/ls/rm/update, amux claude/codex/etc) - Integrate sandbox runtime selection into TUI (local vs sandbox per worktree) - Add Daytona provider for remote sandbox management - Add credential persistence, workspace sync, and agent installation - Add settings sync, health checks, and doctor commands - Add worktree runtime metadata and selection dialog --- TEST_PLAN.md | 655 ++++++++++++++++++++ cmd/amux/main.go | 7 + go.mod | 13 +- go.sum | 32 +- internal/app/app_core.go | 4 + internal/app/app_init.go | 7 +- internal/app/harness.go | 4 +- internal/app/keybindings.go | 2 + internal/app/runtime_provider.go | 86 +++ internal/app/sandbox_manager.go | 346 +++++++++++ internal/cli/aliases.go | 185 ++++++ internal/cli/auth.go | 224 +++++++ internal/cli/cli.go | 120 ++++ internal/cli/common.go | 59 ++ internal/cli/completions.go | 150 +++++ internal/cli/doctor_enhanced.go | 223 +++++++ internal/cli/doctor_explain.go | 269 ++++++++ internal/cli/doctor_logs.go | 90 +++ internal/cli/helpers.go | 95 +++ internal/cli/sandbox.go | 24 + internal/cli/sandbox_auth.go | 151 +++++ internal/cli/sandbox_list.go | 392 ++++++++++++ internal/cli/sandbox_manage.go | 241 +++++++ internal/cli/sandbox_run.go | 398 ++++++++++++ internal/cli/settings.go | 412 ++++++++++++ internal/cli/setup.go | 116 ++++ internal/cli/snapshot.go | 241 +++++++ internal/cli/spinner.go | 111 ++++ internal/cli/spinner_test.go | 103 +++ internal/cli/status.go | 296 +++++++++ internal/cli/wizard.go | 294 +++++++++ internal/data/workspace.go | 4 +- internal/daytona/artifacts.go | 20 + internal/daytona/client.go | 301 
+++++++++ internal/daytona/filesystem.go | 204 ++++++ internal/daytona/http.go | 9 + internal/daytona/image.go | 46 ++ internal/daytona/process.go | 57 ++ internal/daytona/sandbox.go | 238 +++++++ internal/daytona/snapshot.go | 117 ++++ internal/daytona/toolbox_client.go | 155 +++++ internal/daytona/types.go | 121 ++++ internal/daytona/util.go | 13 + internal/daytona/volume.go | 77 +++ internal/git/operations.go | 6 + internal/git/status.go | 75 +++ internal/pty/terminal.go | 33 +- internal/sandbox/ARCHITECTURE.md | 186 ++++++ internal/sandbox/agent.go | 241 +++++++ internal/sandbox/agent_daytona.go | 349 +++++++++++ internal/sandbox/agent_install.go | 337 ++++++++++ internal/sandbox/agent_install_test.go | 191 ++++++ internal/sandbox/agent_test.go | 323 ++++++++++ internal/sandbox/computer.go | 253 ++++++++ internal/sandbox/config.go | 230 +++++++ internal/sandbox/credentials.go | 354 +++++++++++ internal/sandbox/credentials_test.go | 156 +++++ internal/sandbox/errors.go | 307 +++++++++ internal/sandbox/errors_test.go | 173 ++++++ internal/sandbox/exec_helpers.go | 25 + internal/sandbox/file_transfer.go | 41 ++ internal/sandbox/health.go | 448 +++++++++++++ internal/sandbox/logger.go | 333 ++++++++++ internal/sandbox/metadata.go | 171 +++++ internal/sandbox/metadata_test.go | 83 +++ internal/sandbox/plugins.go | 140 +++++ internal/sandbox/plugins_impl.go | 407 ++++++++++++ internal/sandbox/plugins_test.go | 236 +++++++ internal/sandbox/preflight.go | 34 + internal/sandbox/preflight_enhanced.go | 490 +++++++++++++++ internal/sandbox/provider.go | 345 +++++++++++ internal/sandbox/provider_daytona.go | 384 ++++++++++++ internal/sandbox/providers.go | 45 ++ internal/sandbox/retry.go | 329 ++++++++++ internal/sandbox/retry_test.go | 289 +++++++++ internal/sandbox/settings.go | 209 +++++++ internal/sandbox/settings_sync.go | 391 ++++++++++++ internal/sandbox/shell.go | 297 +++++++++ internal/sandbox/shell_test.go | 339 ++++++++++ internal/sandbox/snapshot.go | 104 ++++ 
internal/sandbox/ssh.go | 49 ++ internal/sandbox/sync.go | 434 +++++++++++++ internal/sandbox/sync_incremental.go | 450 ++++++++++++++ internal/sandbox/testing.go | 252 ++++++++ internal/ui/center/model.go | 47 +- internal/ui/center/model_input_lifecycle.go | 2 +- internal/ui/center/model_lifecycle.go | 11 +- internal/ui/center/model_tabs.go | 4 +- internal/ui/center/model_tabs_actions.go | 2 +- internal/ui/center/model_tabs_restore.go | 2 +- internal/ui/center/model_tabs_session.go | 10 +- internal/ui/center/model_tabs_viewer.go | 4 +- internal/ui/center/perf_test.go | 2 +- internal/ui/center/selection_test.go | 4 +- internal/ui/common/icons.go | 2 +- internal/ui/sidebar/terminal.go | 7 + 96 files changed, 16310 insertions(+), 38 deletions(-) create mode 100644 TEST_PLAN.md create mode 100644 internal/app/runtime_provider.go create mode 100644 internal/app/sandbox_manager.go create mode 100644 internal/cli/aliases.go create mode 100644 internal/cli/auth.go create mode 100644 internal/cli/cli.go create mode 100644 internal/cli/common.go create mode 100644 internal/cli/completions.go create mode 100644 internal/cli/doctor_enhanced.go create mode 100644 internal/cli/doctor_explain.go create mode 100644 internal/cli/doctor_logs.go create mode 100644 internal/cli/helpers.go create mode 100644 internal/cli/sandbox.go create mode 100644 internal/cli/sandbox_auth.go create mode 100644 internal/cli/sandbox_list.go create mode 100644 internal/cli/sandbox_manage.go create mode 100644 internal/cli/sandbox_run.go create mode 100644 internal/cli/settings.go create mode 100644 internal/cli/setup.go create mode 100644 internal/cli/snapshot.go create mode 100644 internal/cli/spinner.go create mode 100644 internal/cli/spinner_test.go create mode 100644 internal/cli/status.go create mode 100644 internal/cli/wizard.go create mode 100644 internal/daytona/artifacts.go create mode 100644 internal/daytona/client.go create mode 100644 internal/daytona/filesystem.go create mode 100644 
internal/daytona/http.go create mode 100644 internal/daytona/image.go create mode 100644 internal/daytona/process.go create mode 100644 internal/daytona/sandbox.go create mode 100644 internal/daytona/snapshot.go create mode 100644 internal/daytona/toolbox_client.go create mode 100644 internal/daytona/types.go create mode 100644 internal/daytona/util.go create mode 100644 internal/daytona/volume.go create mode 100644 internal/sandbox/ARCHITECTURE.md create mode 100644 internal/sandbox/agent.go create mode 100644 internal/sandbox/agent_daytona.go create mode 100644 internal/sandbox/agent_install.go create mode 100644 internal/sandbox/agent_install_test.go create mode 100644 internal/sandbox/agent_test.go create mode 100644 internal/sandbox/computer.go create mode 100644 internal/sandbox/config.go create mode 100644 internal/sandbox/credentials.go create mode 100644 internal/sandbox/credentials_test.go create mode 100644 internal/sandbox/errors.go create mode 100644 internal/sandbox/errors_test.go create mode 100644 internal/sandbox/exec_helpers.go create mode 100644 internal/sandbox/file_transfer.go create mode 100644 internal/sandbox/health.go create mode 100644 internal/sandbox/logger.go create mode 100644 internal/sandbox/metadata.go create mode 100644 internal/sandbox/metadata_test.go create mode 100644 internal/sandbox/plugins.go create mode 100644 internal/sandbox/plugins_impl.go create mode 100644 internal/sandbox/plugins_test.go create mode 100644 internal/sandbox/preflight.go create mode 100644 internal/sandbox/preflight_enhanced.go create mode 100644 internal/sandbox/provider.go create mode 100644 internal/sandbox/provider_daytona.go create mode 100644 internal/sandbox/providers.go create mode 100644 internal/sandbox/retry.go create mode 100644 internal/sandbox/retry_test.go create mode 100644 internal/sandbox/settings.go create mode 100644 internal/sandbox/settings_sync.go create mode 100644 internal/sandbox/shell.go create mode 100644 
internal/sandbox/shell_test.go create mode 100644 internal/sandbox/snapshot.go create mode 100644 internal/sandbox/ssh.go create mode 100644 internal/sandbox/sync.go create mode 100644 internal/sandbox/sync_incremental.go create mode 100644 internal/sandbox/testing.go diff --git a/TEST_PLAN.md b/TEST_PLAN.md new file mode 100644 index 00000000..efa8f095 --- /dev/null +++ b/TEST_PLAN.md @@ -0,0 +1,655 @@ +# Amux Manual Test Plan & Documentation + +This document serves as both a test plan and a reference for amux - a tool for +running AI coding agents in ephemeral Daytona sandboxes with persistent +credentials and CLI caches. + +--- + +## Table of Contents + +1. [Overview](#overview) +2. [Core Concepts](#core-concepts) + - [Fresh Sandboxes Per Run](#fresh-sandboxes-per-run) + - [Worktree ID](#worktree-id) + - [Persistent Credentials & CLI Cache](#persistent-credentials--cli-cache) + - [Worktree Runtime (TUI)](#worktree-runtime-tui) + - [Config + Metadata Files](#config--metadata-files) +3. [User Stories + DX Decisions](#user-stories--dx-decisions) +4. [Daytona Setup & Authentication](#daytona-setup--authentication) +5. [Sandbox Lifecycle](#sandbox-lifecycle) +6. [Workspace Sync System](#workspace-sync-system) +7. [Coding Agents Reference](#coding-agents-reference) +8. [Test Procedures](#test-procedures) +9. [Troubleshooting](#troubleshooting) + +--- + +## Overview + +### What is Amux? + +Amux runs AI coding agents in disposable Daytona sandboxes. Each run: + +1. Creates a new Daytona sandbox +2. Syncs your local workspace to the sandbox +3. Mounts a persistent volume for credentials and CLI downloads +4. Runs the agent interactively via SSH or exec +5. Deletes the sandbox on exit (unless `--keep`) + +### Why Fresh Sandboxes? 
+ +- **Clean environments**: no drift across runs +- **Easy cleanup**: sandboxes are deleted by default +- **Persistent creds**: you still login once, thanks to a persistent volume +- **Multi-agent**: run different agents without local installs + +--- + +## Core Concepts + +### Fresh Sandboxes Per Run + +Amux creates a **new sandbox per run**. Use `--keep` only when you want to leave +one running for debugging or reuse. + +### Worktree ID + +**Worktree ID** (`amux.worktreeId`): +- Computed from: SHA256 hash of absolute working directory path (first 16 hex chars) +- Purpose: isolates workspace files per directory +- Used for: workspace path in sandbox (`~/.amux/workspaces/{worktreeId}/repo/`) + +**Example**: +``` +/home/user/project-a -> worktreeId: x1y2z3w4v5u6t7s8 +/home/user/project-b -> worktreeId: p9o8i7u6y5t4r3e2 + +# Inside the sandbox: +~/.amux/workspaces/x1y2z3w4v5u6t7s8/repo/ -> project-a files +~/.amux/workspaces/p9o8i7u6y5t4r3e2/repo/ -> project-b files +``` + +### Persistent Credentials & CLI Cache + +Credentials and CLI installs are stored on a persistent volume (default +`amux-persist`) mounted at `/amux`, then symlinked into the sandbox home +directory. + +Persisted paths include: +- `~/.claude/`, `~/.codex/`, `~/.gemini/`, `~/.factory/` +- `~/.config/`, `~/.local/`, `~/.npm/`, `~/.npmrc` + +This means: +- You authenticate once +- CLI downloads stay cached +- New sandboxes start fast + +Resetting persistence is done by **rotating to a new volume** (see +`amux sandbox reset`). Old volumes are kept in Daytona for manual cleanup. + +### Worktree Runtime (TUI) + +Each worktree can run in **Local** or **Sandbox** mode: +- **Local**: agents + terminal run on your machine. +- **Sandbox**: agents + terminal run in a shared Daytona sandbox for that worktree. + +Runtime selection is stored in the per-worktree metadata file and drives +which environment the TUI uses. 
+ +### Config + Metadata Files + +**Local config file**: `~/.amux/config.json` +```json +{ + "daytonaApiKey": "...", + "daytonaApiUrl": "https://api.daytona.io", + "defaultSnapshotName": "amux-agents", + "persistenceVolumeName": "amux-persist", + "settingsSync": { + "enabled": false, + "claude": true, + "codex": true + } +} +``` + +**Global sandbox metadata**: `~/.amux/sandbox.json` +```json +{ + "sandboxes": { + "worktree-id-here": { + "sandboxId": "sandbox-uuid-here", + "agent": "claude", + "provider": "daytona", + "createdAt": "2026-01-13T10:30:00Z", + "worktreeId": "...", + "project": "my-repo" + } + } +} +``` + +**Worktree metadata**: `~/.amux/worktrees-metadata//worktree.json` +```json +{ + "name": "feature-branch", + "branch": "feature-branch", + "runtime": "sandbox", + "assistant": "claude" +} +``` + +--- + +## User Stories + DX Decisions + +### 1) First-time setup +**Story:** New user wants to run Claude in a sandbox with minimal friction. +**Flow:** +- `amux setup` prompts for API key and optionally creates a snapshot. +- `amux sandbox run claude` starts a new sandbox and handles login once. +**DX Decision:** Login is only required on first use; credentials persist via +`/amux` volume. + +### 2) Daily workflow (targeted repo change) +**Story:** Developer runs one short task and exits. +**Flow:** +- `amux sandbox run ` creates a fresh sandbox each time. +- Workspace sync happens automatically. +- Sandbox is deleted on exit. +**DX Decision:** New sandbox per run is the default to keep environments clean. + +### 3) Quick agent switch +**Story:** Developer uses multiple agents in the same repo. +**Flow:** +- `amux sandbox run codex`, then `amux sandbox run claude`. +- Shared persistence keeps CLIs installed and credentials available. +**DX Decision:** Persistence is shared across agents so switching is instant. + +### 4) Parallel sandboxes across repos +**Story:** Developer jumps between multiple repos quickly. 
+**Flow:** +- Each repo spins a new sandbox per run. +- Worktree ID isolates file paths inside each sandbox. +**DX Decision:** Isolation is handled per run; no shared sandbox state. + +### 5) Long-running task +**Story:** Developer wants to keep a sandbox alive. +**Flow:** +- `amux sandbox run --keep` +- `amux status` shows the sandbox and `amux ssh` attaches. +**DX Decision:** `--keep` enables longer sessions, but is opt-in. + +### 6) Preview URL + logs (web apps) +**Story:** Developer runs a Next.js dev server and wants a browser URL + logs. +**Flow:** +- `amux sandbox run claude --preview 3000 --record` +- Inside the agent, run `npm run dev` (listening on `0.0.0.0:3000`). +- `amux sandbox logs -f` tails the latest recorded session log. +**DX Decision:** Preview URLs are one flag away, and log recording is opt-in but +persistent so you can tail from another terminal (even after exit via a +short-lived log reader sandbox). + +### 7) Credential reset or clean slate +**Story:** Developer wants to clear all agent logins and cached CLIs. +**Flow:** +- `amux sandbox reset` rotates to a new persistence volume. +- New sandboxes start clean; old volume remains in Daytona. +**DX Decision:** Reset is fast and safe (no destructive delete), while still +allowing a full wipe by deleting old volumes in Daytona UI. + +### 8) Settings sync +**Story:** Developer wants local preferences inside the sandbox. +**Flow:** +- `amux settings sync --enable --claude --git` +- `amux sandbox run claude --sync-settings` +**DX Decision:** Settings sync is explicit and opt-in to avoid surprises. + +### 9) TUI integration (worktree-level runtime) +**Story:** Developer wants to choose "Sandbox" in the TUI and run agents. +**Flow:** +- Worktree runtime is set to **Sandbox** or **Local** (worktree-level). +- When Sandbox is selected, the TUI creates or attaches to a single sandbox + for that worktree. +- All agent tabs and the bottom-right terminal share the same sandbox. 
+- Persistence volume is mounted, so agents are ready instantly. +**DX Decision:** Keep CLI semantics simple so the TUI can reuse them directly, +while sharing a single sandbox per worktree in the TUI. + +### 10) TUI-only users (current) +**Story:** Developer never uses the CLI and configures everything in TUI. +**Flow:** +- Switch worktree runtime to **Sandbox** in TUI. +- Complete setup wizard (Daytona API key). +- Open multiple agent tabs (e.g., Claude + Codex) in the same sandbox. +- Use the shared sandbox terminal for shell commands. +- Switch worktree back to Local to sync changes down. +**DX Decision:** TUI writes to the same config as CLI, so setup is one-time. + +### 11) CLI-only users (current) +**Story:** Developer never uses the TUI and relies entirely on CLI. +**Flow:** +- `amux setup` or `amux auth login` +- `amux sandbox run ` for per-run sandboxes +- `amux sandbox update`, `amux sandbox reset`, `amux sandbox logs` +**DX Decision:** CLI remains the source of truth for automation and scripts. + +### 12) TUI + CLI users (current) +**Story:** Developer uses CLI for setup or automation and TUI for daily runs. +**Flow:** +- CLI creates/updates `~/.amux/config.json`. +- TUI reads the same config and uses Sandbox mode without re-setup. +- CLI commands (e.g., `amux sandbox logs`) can inspect the same sandboxes. +**DX Decision:** One shared config; both front-ends stay in sync. + +--- + +## Daytona Setup & Authentication + +### First-Time Setup + +#### Step 1: Get Your API Key + +1. Go to your Daytona dashboard +2. Navigate to Settings > API Keys +3. Create a new API key +4. Copy the key (you won't see it again) + +#### Step 2: Configure Amux + +**Option A: Interactive Setup (Recommended)** +```bash +./amux setup +``` + +**Option B: Environment Variable** +```bash +export DAYTONA_API_KEY=your-api-key-here +``` + +**Option C: Direct Login** +```bash +./amux auth login +# Enter your Daytona API key when prompted +``` + +**Option D: TUI Setup** +- Open the TUI. 
+- Activate a worktree and set runtime to **Sandbox**. +- Enter your Daytona API key when prompted. + +#### Step 3: Verify Setup + +```bash +./amux doctor +``` + +Expected output: +``` +Running diagnostics... +OK: All checks passed +``` + +--- + +## Sandbox Lifecycle + +### Create + Run (default) + +```bash +amux sandbox run claude +``` + +- Creates a new sandbox +- Syncs your workspace +- Runs Claude +- Deletes the sandbox on exit + +### Keep a Sandbox + +```bash +amux sandbox run claude --keep +``` + +- Keeps the sandbox after the session exits +- Useful for debugging or long-running tasks + +### Status / SSH / Exec + +```bash +amux status +amux ssh +amux exec -- ls -la +``` + +### List / Remove + +```bash +amux sandbox ls +amux sandbox rm [sandbox-id] +``` + +--- + +## Workspace Sync System + +Amux syncs your local workspace to the sandbox so agents can access and modify +files. + +**Sync Methods:** +1. **Full Sync** (default first time) + - Creates a tarball of your workspace + - Uploads and extracts in the sandbox + - Respects `.amuxignore` patterns + +2. **Incremental Sync** (subsequent runs) + - Computes file hashes and timestamps + - Only transfers changed files + - Much faster for large repos + +**Skip sync:** +```bash +amux sandbox run claude --no-sync +``` + +--- + +## Coding Agents Reference + +This section is the per-agent walkthrough and test checklist. + +### Common amux behavior (all agents) +- `amux sandbox run ` installs the CLI if missing, then runs it. +- Credentials and CLI caches persist via the `/amux` volume (see persistence section). +- Pass API keys into the sandbox with `--env KEY=...` (host env vars are not forwarded). +- Auto-login runs only when: credentials mode is not `none` **and** no agent args + are passed after `--`. +- `amux sandbox update ` forces a reinstall/update in the sandbox. 
+ +### Claude Code (Anthropic) +**Install (amux):** `curl -fsSL https://claude.ai/install.sh | bash` + +**First-time auth options:** +- `amux sandbox run claude`, then complete the CLI login flow. +- API key auth: `--env ANTHROPIC_API_KEY=...`, `--env ANTHROPIC_AUTH_TOKEN=...`, + or `--env CLAUDE_API_KEY=...`. + - You can re-authenticate inside Claude Code with `/login`. + +**amux behavior:** +- No auto-login command is run; Claude prompts interactively. +- Credential detection file: `~/.claude/.credentials.json` (persisted). + +**Updates:** +- Claude auto-updates on startup. +- amux only installs if missing; `amux sandbox update claude` forces reinstall. + - Manual update inside sandbox: `claude update`. + +**Test checklist:** +- First run prompts login; second run does not. +- `--env ANTHROPIC_API_KEY=...` (or token) skips login. +- `amux sandbox update claude` reinstalls. + +### Codex CLI (OpenAI) +**Install (amux):** `npm install -g @openai/codex@latest` + +**First-time auth options:** +- amux auto-runs `codex login` when no `OPENAI_API_KEY` is passed via `--env`. +- Device auth is the default; set `AMUX_CODEX_DEVICE_AUTH=0` to disable. +- API key auth: `--env OPENAI_API_KEY=...` (stored in `~/.codex/auth.json`). + - Codex also supports ChatGPT account login on first run. + +**amux behavior:** +- Codex TUI2 is auto-enabled unless `AMUX_CODEX_TUI2=0` or you already pass TUI2 flags. +- Credential detection file: `~/.codex/auth.json` (persisted). + +**Updates:** +- Codex does not auto-update; amux re-checks roughly every 24 hours. +- `--update` or `amux sandbox update codex` forces reinstall immediately. + - Manual update inside sandbox: `npm i -g @openai/codex@latest` (or `codex --upgrade`). + +**Test checklist:** +- Auto-login runs on first launch without API key. +- `--env OPENAI_API_KEY=...` skips login. +- `AMUX_CODEX_DEVICE_AUTH=0` disables device auth. +- `AMUX_CODEX_TUI2=0` disables auto TUI2 flags. +- Update path triggers npm reinstall. 
+ +### OpenCode (Open Source) +**Install (amux):** `curl -fsSL https://opencode.ai/install | bash` + +**First-time auth options:** +- amux auto-runs `opencode auth login` when no credentials are present. +- OpenCode reads providers from env or `.env`. + +**amux behavior:** +- Credential detection file: `~/.local/share/opencode/auth.json` (persisted). +- amux does not infer auth from env vars; to skip login, use `--credentials none`. + +**Updates:** +- amux only installs if missing; `amux sandbox update opencode` forces reinstall. + +**Test checklist:** +- Auto-login runs on first launch. +- `--credentials none` skips auto-login even without creds. +- Persisted auth prevents re-login. +- Update path forces reinstall. + +### Amp (Sourcegraph) +**Install (amux):** `curl -fsSL https://ampcode.com/install.sh | bash` + +**First-time auth options:** +- amux auto-runs `amp login` when no credentials are present (and no `AMP_API_KEY` passed). +- API key auth: `--env AMP_API_KEY=...` (token from ampcode.com/settings). + - Running `amp` directly also prompts for login on first run. + +**amux behavior:** +- Credential detection file: `~/.config/amp/secrets.json` (persisted). +- If `AMP_API_KEY` is set, auto-login is skipped. + +**Updates:** +- amux does not reinstall on every run. +- `amux sandbox update amp` forces reinstall. + +**Test checklist:** +- Auto-login runs on first launch. +- `--env AMP_API_KEY=...` skips login. +- Update path forces reinstall. + +### Gemini CLI (Google) +**Install (amux):** `npm install -g @google/gemini-cli@latest` + +**First-time auth options:** +- Run `amux sandbox run gemini` and complete the CLI sign-in flow. +- API key auth: `--env GEMINI_API_KEY=...` or `--env GOOGLE_API_KEY=...`. 
+- Vertex AI auth: set `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION`, plus one of: + - ADC (`gcloud auth application-default login`) + - Service account JSON via `GOOGLE_APPLICATION_CREDENTIALS=...` + - Google Cloud API key via `GOOGLE_API_KEY=...` + +**amux behavior:** +- No auto-login command is run; Gemini prompts interactively. +- Credential detection file: `~/.gemini/oauth_creds.json` (persisted). + +**Updates:** +- amux only installs if missing; `amux sandbox update gemini` forces reinstall. + +**Test checklist:** +- First run prompts sign-in; second run does not. +- `--env GEMINI_API_KEY=...` (or `GOOGLE_API_KEY`) skips login. +- Update path forces reinstall. + +### Droid (Factory) +**Install (amux):** `curl -fsSL https://app.factory.ai/cli | sh` + +**First-time auth options:** +- Run `amux sandbox run droid` and complete onboarding if prompted. +- The CLI supports `/login` for interactive login. +- API key auth: `--env FACTORY_API_KEY=...` for headless usage. + +**amux behavior:** +- No auto-login command is run. +- Credential detection file: `~/.factory/config.json` (persisted). + +**Updates:** +- amux only installs if missing; `amux sandbox update droid` forces reinstall. + +**Test checklist:** +- First run prompts onboarding; second run does not. +- `--env FACTORY_API_KEY=...` skips login. +- Update path forces reinstall. + +### Shell +Built-in bash shell (no auth, no updates). + +--- + +## Test Procedures + +### 1. Smoke Test (Fresh Run) + +```bash +amux sandbox run shell +``` + +- Expect sandbox creation spinner +- Expect workspace sync +- Expect shell prompt in sandbox +- Exit and confirm sandbox deletion + +### 2. Credential Persistence + +1. Run Claude and login once: + ```bash + amux sandbox run claude + ``` +2. Exit. +3. Run again: + ```bash + amux sandbox run claude + ``` +4. Expect **no login prompt** (credentials persisted via volume). + +### 3. CLI Cache Persistence + +1. Run Codex once: + ```bash + amux sandbox run codex + ``` +2. Exit. 
+3. Run Codex again: + ```bash + amux sandbox run codex + ``` +4. Expect no lengthy reinstall (CLI should be cached in `/amux`). + +### 4. `--keep` Workflow + +```bash +amux sandbox run shell --keep +amux status +amux ssh +amux sandbox rm --project +``` + +- Status should show a running sandbox +- SSH should connect +- rm should delete sandbox + metadata + +### 5. Workspace Sync Round-Trip + +1. Run: + ```bash + amux sandbox run shell + ``` +2. Modify a file inside the sandbox. +3. Exit. +4. Verify changes were synced back locally. + +### 6. Settings Sync + +```bash +amux settings sync --enable --claude --git +amux sandbox run claude --sync-settings +``` + +- Confirm settings files are copied into sandbox +- Ensure secrets are filtered + +### 7. Preview + Logs + +```bash +amux sandbox run claude --preview 3000 --record +# In a second terminal: +amux sandbox logs -f +``` + +- Preview URL should print (and open if not suppressed) +- Logs should stream once the server starts +- After exit, `amux sandbox logs` should still work (spins a log reader sandbox) + +### 8. Snapshot (Optional) + +```bash +amux snapshot create --agents claude,codex +amux sandbox run claude --snapshot amux-agents +``` + +- Expect faster startup +- Verify agents are preinstalled + +### 9. Reset Persistence + +```bash +amux sandbox reset +amux sandbox run claude +``` + +- Expect a fresh login prompt +- New sandboxes should use the new volume + +### 10. CLI-only Users (Current) + +```bash +amux setup +amux sandbox run claude +amux sandbox update codex +amux sandbox logs -f +``` + +- CLI should fully configure and run sandboxes without any TUI usage +- All features should work without TUI state + +### 11. TUI-only Users (Current) + +TUI flow: +1. Switch worktree runtime to **Sandbox**. +2. Complete setup wizard (Daytona API key). +3. Open multiple agent tabs in the same worktree. +4. Use the shared sandbox terminal for shell commands. +5. Switch worktree back to Local to sync changes down. 
+ +- All sandbox config is set in TUI +- All agent tabs share the same sandbox +- Terminal reflects sandbox filesystem + +### 12. TUI + CLI Users (Current) + +Hybrid flow: +1. Run `amux setup` in CLI. +2. Open TUI and set worktree runtime to **Sandbox**. +3. Create multiple agent tabs (shared sandbox). +4. Use CLI to inspect the same sandbox (`amux status`, `amux sandbox logs`). + +- TUI and CLI read/write the same `~/.amux/config.json` +- No duplicate setup required + +--- + +## Troubleshooting + +- **API key missing**: run `amux auth login` or set `DAYTONA_API_KEY` +- **SSH not found**: install OpenSSH client locally +- **Sandbox not found**: run `amux sandbox run ` to create one +- **Credentials missing after run**: check `/amux` mount and symlinks +- **Sync issues**: try `--no-sync` and inspect `.amuxignore` diff --git a/cmd/amux/main.go b/cmd/amux/main.go index 387e4c70..dfe184a4 100644 --- a/cmd/amux/main.go +++ b/cmd/amux/main.go @@ -19,6 +19,7 @@ import ( tea "charm.land/bubbletea/v2" "github.com/andyrewlee/amux/internal/app" + "github.com/andyrewlee/amux/internal/cli" "github.com/andyrewlee/amux/internal/logging" "github.com/andyrewlee/amux/internal/safego" ) @@ -36,6 +37,12 @@ func main() { fmt.Printf("amux %s (commit: %s, built: %s)\n", version, commit, date) os.Exit(0) } + + // Handle CLI subcommands + if len(os.Args) > 1 { + os.Exit(cli.Run(os.Args[1:])) + } + // Initialize logging home, _ := os.UserHomeDir() logDir := filepath.Join(home, ".amux", "logs") diff --git a/go.mod b/go.mod index 54e5ebb4..be52a9b9 100644 --- a/go.mod +++ b/go.mod @@ -10,23 +10,34 @@ require ( github.com/charmbracelet/x/ansi v0.11.3 github.com/creack/pty v1.1.21 github.com/fsnotify/fsnotify v1.9.0 + github.com/gorilla/websocket v1.5.0 + github.com/lrstanley/bubblezone v1.0.0 github.com/mattn/go-runewidth v0.0.19 github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 + github.com/spf13/cobra v1.9.1 + golang.org/x/term v0.39.0 ) require ( + github.com/Masterminds/semver/v3 
v3.2.1 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/charmbracelet/colorprofile v0.4.1 // indirect github.com/charmbracelet/ultraviolet v0.0.0-20251212194010-b927aa605560 // indirect + github.com/charmbracelet/x/cellbuf v0.0.14 // indirect github.com/charmbracelet/x/term v0.2.2 // indirect github.com/charmbracelet/x/termios v0.1.1 // indirect github.com/charmbracelet/x/windows v0.2.2 // indirect github.com/clipperhouse/displaywidth v0.6.1 // indirect github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect + github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/lucasb-eyer/go-colorful v1.3.0 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/text v0.24.0 // indirect ) diff --git a/go.sum b/go.sum index b22ed997..28b49fe2 100644 --- a/go.sum +++ b/go.sum @@ -4,16 +4,22 @@ charm.land/bubbletea/v2 v2.0.0-rc.2.0.20251216153312-819e2e89c62e h1:tXwTmgGpwZT charm.land/bubbletea/v2 v2.0.0-rc.2.0.20251216153312-819e2e89c62e/go.mod h1:pDM18flq3Z4njKZPA3zCvyVSSIJbMcoqlE82BdGUtL8= charm.land/lipgloss/v2 v2.0.0-beta.3.0.20251205162909-7869489d8971 h1:xZFcNsJMiIDbFtWRyDmkKNk1sjojfaom4Zoe0cyH/8c= charm.land/lipgloss/v2 v2.0.0-beta.3.0.20251205162909-7869489d8971/go.mod h1:i61Y3FmdbcBNSKa+pKB3DaE4uVQmBLMs/xlvRyHcXAE= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard 
v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY= github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/charmbracelet/colorprofile v0.4.1 h1:a1lO03qTrSIRaK8c3JRxJDZOvhvIeSco3ej+ngLk1kk= github.com/charmbracelet/colorprofile v0.4.1/go.mod h1:U1d9Dljmdf9DLegaJ0nGZNJvoXAhayhmidOdcBwAvKk= github.com/charmbracelet/ultraviolet v0.0.0-20251212194010-b927aa605560 h1:j3PW2hypGoPKBy3ooKzW0TFxaxhyHK3NbkLLn4KeRFc= github.com/charmbracelet/ultraviolet v0.0.0-20251212194010-b927aa605560/go.mod h1:VWATWLRwYP06VYCEur7FsNR2B1xAo7Y+xl1PTbd1ePc= github.com/charmbracelet/x/ansi v0.11.3 h1:6DcVaqWI82BBVM/atTyq6yBoRLZFBsnoDoX9GCu2YOI= github.com/charmbracelet/x/ansi v0.11.3/go.mod h1:yI7Zslym9tCJcedxz5+WBq+eUGMJT0bM06Fqy1/Y4dI= +github.com/charmbracelet/x/cellbuf v0.0.14 h1:lfjq/R7q0YSSU3XLvB0BkLWRxE9bKVKZ6n9HaAeFR24= +github.com/charmbracelet/x/cellbuf v0.0.14/go.mod h1:5//lJr5H8Z1kP1BC93BOGYXuGvIV3qjQXOGGpATT2s0= github.com/charmbracelet/x/exp/golden v0.0.0-20250806222409-83e3a29d542f h1:pk6gmGpCE7F3FcjaOEKYriCvpmIN4+6OS/RD0vm4uIA= github.com/charmbracelet/x/exp/golden v0.0.0-20250806222409-83e3a29d542f/go.mod h1:IfZAMTHB6XkZSeXUqriemErjAWCCzT0LwjKFYCZyw0I= github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk= @@ -28,12 +34,21 @@ github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfa github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cpuguy83/go-md2man/v2 
v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/lrstanley/bubblezone v1.0.0 h1:bIpUaBilD42rAQwlg/4u5aTqVAt6DSRKYZuSdmkr8UA= +github.com/lrstanley/bubblezone v1.0.0/go.mod h1:kcTekA8HE/0Ll2bWzqHlhA2c513KDNLW7uDfDP4Mly8= github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= @@ -44,11 +59,24 @@ github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELU github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/app/app_core.go b/internal/app/app_core.go index b91af815..e634a99c 100644 --- a/internal/app/app_core.go +++ b/internal/app/app_core.go @@ -30,6 +30,8 @@ const ( DialogDeleteWorkspace = "delete_workspace" DialogRemoveProject = "remove_project" DialogSelectAssistant = "select_assistant" + DialogSelectRuntime = "select_runtime" + DialogSandboxAPIKey = "sandbox_api_key" DialogQuit = "quit" DialogCleanupTmux = "cleanup_tmux" ) @@ -47,6 +49,8 @@ type App struct { gitStatus GitStatusService tmuxService *tmuxService updateService UpdateService + sandboxManager *SandboxManager + runtimeProvider *RuntimeAgentProvider // State projects []data.Project diff --git a/internal/app/app_init.go b/internal/app/app_init.go index 91ca717d..f2d403d2 100644 --- a/internal/app/app_init.go +++ b/internal/app/app_init.go @@ -42,6 +42,8 @@ func New(version, commit, date string) (*App, error) { workspaces := data.NewWorkspaceStore(cfg.Paths.MetadataRoot) scripts := process.NewScriptRunner(cfg.PortStart, cfg.PortRangeSize) workspaceService := newWorkspaceService(registry, workspaces, scripts, cfg.Paths.WorkspacesRoot) + sandboxManager := NewSandboxManager(cfg) + runtimeProvider := NewRuntimeAgentProvider(cfg, sandboxManager) // Create status manager (callback will be nil, we use it for caching only) statusManager := git.NewStatusManager(nil) @@ -89,6 +91,8 @@ func New(version, commit, date string) (*App, error) { gitStatus: gitStatus, tmuxService: tmuxSvc, updateService: updateSvc, + sandboxManager: sandboxManager, + runtimeProvider: runtimeProvider, fileWatcher: fileWatcher, fileWatcherCh: fileWatcherCh, fileWatcherErr: fileWatcherErr, @@ -97,7 +101,7 @@ func New(version, commit, date string) (*App, error) { stateWatcherErr: stateWatcherErr, layout: layout.NewManager(), dashboard: dashboard.New(), - center: center.New(cfg), + center: center.New(cfg, runtimeProvider), sidebar: sidebar.NewTabbedSidebar(), sidebarTerminal: 
sidebar.NewTerminalModel(), helpOverlay: common.NewHelpOverlay(), @@ -138,6 +142,7 @@ func New(version, commit, date string) (*App, error) { app.center.SetStyles(app.styles) app.toast.SetStyles(app.styles) app.helpOverlay.SetStyles(app.styles) + app.sidebarTerminal.SetTerminalFactory(runtimeProvider.CreateTerminalForWorkspace) app.setKeymapHintsEnabled(cfg.UI.ShowKeymapHints) // Propagate tmux config to components app.center.SetTmuxConfig(tmuxOpts.ServerName, tmuxOpts.ConfigPath) diff --git a/internal/app/harness.go b/internal/app/harness.go index f1c9f1a4..c568a93a 100644 --- a/internal/app/harness.go +++ b/internal/app/harness.go @@ -94,7 +94,7 @@ func newMonitorHarness(cfg *config.Config, opts HarnessOptions) *Harness { } func newCenterHarness(cfg *config.Config, opts HarnessOptions) *Harness { - centerModel := center.New(cfg) + centerModel := center.New(cfg, nil) centerModel.SetShowKeymapHints(opts.ShowKeymapHints) dash := dashboard.New() @@ -167,7 +167,7 @@ func newCenterHarness(cfg *config.Config, opts HarnessOptions) *Harness { } func newSidebarHarness(cfg *config.Config, opts HarnessOptions) *Harness { - centerModel := center.New(cfg) + centerModel := center.New(cfg, nil) centerModel.SetShowKeymapHints(opts.ShowKeymapHints) dash := dashboard.New() diff --git a/internal/app/keybindings.go b/internal/app/keybindings.go index d2972107..b053ce81 100644 --- a/internal/app/keybindings.go +++ b/internal/app/keybindings.go @@ -23,6 +23,8 @@ type KeyMap struct { NewAgentTab key.Binding NewTerminalTab key.Binding Help key.Binding + Monitor key.Binding + CopyMode key.Binding // Dashboard Enter key.Binding diff --git a/internal/app/runtime_provider.go b/internal/app/runtime_provider.go new file mode 100644 index 00000000..2da632cb --- /dev/null +++ b/internal/app/runtime_provider.go @@ -0,0 +1,86 @@ +package app + +import ( + "fmt" + "os" + + "github.com/andyrewlee/amux/internal/config" + "github.com/andyrewlee/amux/internal/data" + 
"github.com/andyrewlee/amux/internal/pty" + "github.com/andyrewlee/amux/internal/tmux" +) + +// RuntimeAgentProvider routes agent creation based on workspace runtime. +type RuntimeAgentProvider struct { + local *pty.AgentManager + sandbox *SandboxManager +} + +func NewRuntimeAgentProvider(cfg *config.Config, sandboxManager *SandboxManager) *RuntimeAgentProvider { + return &RuntimeAgentProvider{ + local: pty.NewAgentManager(cfg), + sandbox: sandboxManager, + } +} + +func (p *RuntimeAgentProvider) CreateAgent(wt *data.Workspace, agentType pty.AgentType, rows, cols uint16) (*pty.Agent, error) { + if wt != nil && data.NormalizeRuntime(wt.Runtime) == data.RuntimeCloudSandbox { + return p.sandbox.CreateAgent(wt, agentType, rows, cols) + } + return p.local.CreateAgent(wt, agentType, "", rows, cols) +} + +func (p *RuntimeAgentProvider) CreateAgentWithTags(wt *data.Workspace, agentType pty.AgentType, sessionName string, rows, cols uint16, tags tmux.SessionTags) (*pty.Agent, error) { + if wt != nil && data.NormalizeRuntime(wt.Runtime) == data.RuntimeCloudSandbox { + return p.sandbox.CreateAgent(wt, agentType, rows, cols) + } + return p.local.CreateAgentWithTags(wt, agentType, sessionName, rows, cols, tags) +} + +func (p *RuntimeAgentProvider) CreateViewer(wt *data.Workspace, command string, rows, cols uint16) (*pty.Agent, error) { + if wt != nil && data.NormalizeRuntime(wt.Runtime) == data.RuntimeCloudSandbox { + return p.sandbox.CreateViewer(wt, command, rows, cols) + } + return p.local.CreateViewer(wt, command, "", rows, cols) +} + +func (p *RuntimeAgentProvider) CreateViewerWithTags(wt *data.Workspace, command string, sessionName string, rows, cols uint16, tags tmux.SessionTags) (*pty.Agent, error) { + if wt != nil && data.NormalizeRuntime(wt.Runtime) == data.RuntimeCloudSandbox { + return p.sandbox.CreateViewer(wt, command, rows, cols) + } + return p.local.CreateViewerWithTags(wt, command, sessionName, rows, cols, tags) +} + +func (p *RuntimeAgentProvider) 
CloseAgent(agent *pty.Agent) error { + if agent == nil { + return nil + } + // Local manager will remove it from its list; sandbox manager doesn't track agents. + if agent.Workspace != nil && data.NormalizeRuntime(agent.Workspace.Runtime) == data.RuntimeCloudSandbox { + if agent.Terminal != nil { + return agent.Terminal.Close() + } + return nil + } + return p.local.CloseAgent(agent) +} + +func (p *RuntimeAgentProvider) CloseAll() { + p.local.CloseAll() +} + +// CreateTerminalForWorkspace returns a shell terminal based on runtime. +func (p *RuntimeAgentProvider) CreateTerminalForWorkspace(wt *data.Workspace) (*pty.Terminal, error) { + if wt == nil { + return nil, fmt.Errorf("workspace is required") + } + if data.NormalizeRuntime(wt.Runtime) == data.RuntimeCloudSandbox { + return p.sandbox.CreateShell(wt) + } + + shell := os.Getenv("SHELL") + if shell == "" { + shell = "/bin/bash" + } + return pty.New(shell, wt.Root, nil) +} diff --git a/internal/app/sandbox_manager.go b/internal/app/sandbox_manager.go new file mode 100644 index 00000000..cbddd064 --- /dev/null +++ b/internal/app/sandbox_manager.go @@ -0,0 +1,346 @@ +package app + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/andyrewlee/amux/internal/config" + "github.com/andyrewlee/amux/internal/data" + "github.com/andyrewlee/amux/internal/git" + "github.com/andyrewlee/amux/internal/logging" + "github.com/andyrewlee/amux/internal/pty" + "github.com/andyrewlee/amux/internal/sandbox" +) + +const defaultSandboxAutoStopMinutes int32 = 30 + +type sandboxSession struct { + sandbox sandbox.RemoteSandbox + provider sandbox.Provider + providerName string + worktreeID string + workspacePath string + synced bool + credentialsReady bool +} + +// SandboxManager coordinates per-worktree sandbox sessions for the TUI. 
+type SandboxManager struct { + cfg *config.Config + mu sync.Mutex + sessions map[string]*sandboxSession +} + +func NewSandboxManager(cfg *config.Config) *SandboxManager { + return &SandboxManager{ + cfg: cfg, + sessions: make(map[string]*sandboxSession), + } +} + +func (m *SandboxManager) ensureProvider(wt *data.Workspace) (sandbox.Provider, string, sandbox.Config, error) { + cfg, err := sandbox.LoadConfig() + if err != nil { + return nil, "", cfg, err + } + if sandbox.ResolveAPIKey(cfg) == "" { + return nil, "", cfg, sandbox.NewSandboxError( + sandbox.ErrCodeConfig, + "auth", + errors.New("Daytona API key not found"), + ) + } + provider, name, err := sandbox.ResolveProvider(cfg, wt.Root, "") + if err != nil { + return nil, name, cfg, err + } + return provider, name, cfg, nil +} + +func (m *SandboxManager) sessionFor(worktreeID string) *sandboxSession { + m.mu.Lock() + defer m.mu.Unlock() + return m.sessions[worktreeID] +} + +func (m *SandboxManager) storeSession(session *sandboxSession) { + if session == nil { + return + } + m.mu.Lock() + defer m.mu.Unlock() + m.sessions[session.worktreeID] = session +} + +func (m *SandboxManager) attachSession(wt *data.Workspace) (*sandboxSession, error) { + if wt == nil { + return nil, fmt.Errorf("workspace is required") + } + worktreeID := sandbox.ComputeWorktreeID(wt.Root) + if existing := m.sessionFor(worktreeID); existing != nil { + return existing, nil + } + + provider, providerName, _, err := m.ensureProvider(wt) + if err != nil { + return nil, err + } + + meta, err := sandbox.LoadSandboxMeta(wt.Root, providerName) + if err != nil || meta == nil || meta.SandboxID == "" { + return nil, nil + } + + sb, err := provider.GetSandbox(context.Background(), meta.SandboxID) + if err != nil { + return nil, nil + } + if err := sb.Start(context.Background()); err != nil { + return nil, err + } + if err := sb.WaitReady(context.Background(), 60*time.Second); err != nil { + logging.Warn("Sandbox attach wait ready: %v", err) + } + + 
workspacePath := sandbox.GetWorktreeRepoPath(sb, sandbox.SyncOptions{ + Cwd: wt.Root, + WorktreeID: worktreeID, + }) + + session := &sandboxSession{ + sandbox: sb, + provider: provider, + providerName: providerName, + worktreeID: worktreeID, + workspacePath: workspacePath, + synced: true, + } + + // Ensure persistence wiring for credentials. + if err := sandbox.SetupCredentials(sb, sandbox.CredentialsConfig{ + Mode: "auto", + SettingsSyncMode: "auto", + }, false); err != nil { + return nil, err + } + session.credentialsReady = true + m.storeSession(session) + return session, nil +} + +func (m *SandboxManager) ensureSession(wt *data.Workspace, agent sandbox.Agent) (*sandboxSession, error) { + if wt == nil { + return nil, fmt.Errorf("workspace is required") + } + worktreeID := sandbox.ComputeWorktreeID(wt.Root) + if existing := m.sessionFor(worktreeID); existing != nil { + return existing, nil + } + + session, err := m.attachSession(wt) + if err != nil { + return nil, err + } + if session != nil { + return session, nil + } + + provider, providerName, cfg, err := m.ensureProvider(wt) + if err != nil { + return nil, err + } + + if !sandbox.IsValidAgent(agent.String()) { + agent = sandbox.AgentShell + } + + sb, _, err := sandbox.CreateSandboxSession(provider, wt.Root, sandbox.SandboxConfig{ + Agent: agent, + EnvVars: nil, + Volumes: nil, + AutoStopInterval: defaultSandboxAutoStopMinutes, + Snapshot: sandbox.ResolveSnapshotID(cfg), + Ephemeral: false, + PersistenceVolumeName: sandbox.ResolvePersistenceVolumeName(cfg), + }) + if err != nil { + return nil, err + } + + workspacePath := sandbox.GetWorktreeRepoPath(sb, sandbox.SyncOptions{ + Cwd: wt.Root, + WorktreeID: worktreeID, + }) + + session = &sandboxSession{ + sandbox: sb, + provider: provider, + providerName: providerName, + worktreeID: worktreeID, + workspacePath: workspacePath, + } + + if err := sandbox.UploadWorkspace(sb, sandbox.SyncOptions{ + Cwd: wt.Root, + WorktreeID: worktreeID, + }, false); err != nil { + 
return nil, err + } + session.synced = true + + if err := sandbox.SetupCredentials(sb, sandbox.CredentialsConfig{ + Mode: "auto", + SettingsSyncMode: "auto", + }, false); err != nil { + return nil, err + } + session.credentialsReady = true + + m.storeSession(session) + return session, nil +} + +func (m *SandboxManager) CreateAgent(wt *data.Workspace, agentType pty.AgentType, rows, cols uint16) (*pty.Agent, error) { + agent := sandbox.Agent(agentType) + if !sandbox.IsValidAgent(agent.String()) { + return nil, fmt.Errorf("unknown agent type: %s", agentType) + } + + session, err := m.ensureSession(wt, agent) + if err != nil { + return nil, err + } + + if err := sandbox.EnsureAgentInstalled(session.sandbox, agent, false, false); err != nil { + return nil, err + } + + env := map[string]string{ + "WORKTREE_ROOT": session.workspacePath, + "WORKTREE_NAME": wt.Name, + } + remoteCommand, err := sandbox.BuildAgentRemoteCommand(session.sandbox, sandbox.AgentConfig{ + Agent: agent, + WorkspacePath: session.workspacePath, + Env: env, + }) + if err != nil { + return nil, err + } + + cmd, cleanup, err := sandbox.BuildSSHCommand(session.sandbox, remoteCommand) + if err != nil { + return nil, err + } + + term, err := pty.NewWithCmd(cmd, cleanup) + if err != nil { + return nil, err + } + + // Set initial size if provided + if rows > 0 && cols > 0 { + _ = term.SetSize(rows, cols) + } + + assistantCfg := m.cfg.Assistants[string(agentType)] + return &pty.Agent{ + Type: agentType, + Terminal: term, + Workspace: wt, + Config: assistantCfg, + }, nil +} + +func (m *SandboxManager) CreateViewer(wt *data.Workspace, command string, rows, cols uint16) (*pty.Agent, error) { + session, err := m.ensureSession(wt, sandbox.AgentShell) + if err != nil { + return nil, err + } + + remoteCommand := buildRemoteCommand(session.workspacePath, command, map[string]string{ + "WORKTREE_ROOT": session.workspacePath, + "WORKTREE_NAME": wt.Name, + }) + cmd, cleanup, err := sandbox.BuildSSHCommand(session.sandbox, 
remoteCommand) + if err != nil { + return nil, err + } + + term, err := pty.NewWithCmd(cmd, cleanup) + if err != nil { + return nil, err + } + + // Set initial size if provided + if rows > 0 && cols > 0 { + _ = term.SetSize(rows, cols) + } + + return &pty.Agent{ + Type: pty.AgentType("viewer"), + Terminal: term, + Workspace: wt, + }, nil +} + +func (m *SandboxManager) CreateShell(wt *data.Workspace) (*pty.Terminal, error) { + session, err := m.ensureSession(wt, sandbox.AgentShell) + if err != nil { + return nil, err + } + remoteCommand := buildRemoteCommand(session.workspacePath, "exec bash -i", map[string]string{ + "WORKTREE_ROOT": session.workspacePath, + "WORKTREE_NAME": wt.Name, + }) + cmd, cleanup, err := sandbox.BuildSSHCommand(session.sandbox, remoteCommand) + if err != nil { + return nil, err + } + return pty.NewWithCmd(cmd, cleanup) +} + +func (m *SandboxManager) SyncToLocal(wt *data.Workspace) error { + session, err := m.attachSession(wt) + if err != nil || session == nil { + return err + } + return sandbox.DownloadWorkspace(session.sandbox, sandbox.SyncOptions{ + Cwd: wt.Root, + WorktreeID: session.worktreeID, + }, false) +} + +func (m *SandboxManager) GitStatus(wt *data.Workspace) (*git.StatusResult, error) { + session, err := m.ensureSession(wt, sandbox.AgentShell) + if err != nil { + return nil, err + } + resp, err := session.sandbox.Exec(context.Background(), "git status --short", &sandbox.ExecOptions{ + Cwd: session.workspacePath, + }) + if err != nil { + return nil, err + } + if resp.ExitCode != 0 { + return nil, fmt.Errorf("git status failed: %s", resp.Stdout) + } + return git.ParseStatus(resp.Stdout), nil +} + +func buildRemoteCommand(workspacePath string, command string, env map[string]string) string { + parts := make([]string, 0, 3) + if len(env) > 0 { + parts = append(parts, sandbox.BuildEnvExports(env)...) 
+ } + if workspacePath != "" { + parts = append(parts, fmt.Sprintf("cd %s", sandbox.ShellQuote(workspacePath))) + } + parts = append(parts, command) + script := strings.Join(parts, "\n") + return fmt.Sprintf("bash -lc %s", sandbox.ShellQuote(script)) +} diff --git a/internal/cli/aliases.go b/internal/cli/aliases.go new file mode 100644 index 00000000..908d62fc --- /dev/null +++ b/internal/cli/aliases.go @@ -0,0 +1,185 @@ +package cli + +import ( + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +// buildAgentAliasCommand creates a top-level alias for `amux sandbox run `. +// This allows users to simply run `amux claude` instead of `amux sandbox run claude`. +func buildAgentAliasCommand(agent string, description string) *cobra.Command { + var envVars []string + var volumes []string + var credentials string + var snapshot string + var noSync bool + var autoStop int32 + var update bool + var keep bool + var syncSettings bool + var noSyncSettings bool + var previewPort int + var previewNoOpen bool + var recordLogs bool + + cmd := &cobra.Command{ + Use: agent + " [-- agent-args...]", + Short: description, + Long: description + "\n\nThis is a shortcut for `amux sandbox run " + agent + "`.", + RunE: func(cmd *cobra.Command, args []string) error { + return runAgentAlias( + agent, + envVars, + volumes, + credentials, + snapshot, + noSync, + autoStop, + update, + keep, + cmd.Flags().Changed("keep"), + syncSettings, + noSyncSettings, + previewPort, + cmd.Flags().Changed("preview"), + previewNoOpen, + recordLogs, + args, + ) + }, + } + + cmd.Flags().StringArrayVarP(&envVars, "env", "e", []string{}, "Environment variable (repeatable)") + cmd.Flags().StringArrayVarP(&volumes, "volume", "v", []string{}, "Volume mount (repeatable)") + cmd.Flags().StringVar(&credentials, "credentials", "auto", "Credentials mode (sandbox|none|auto)") + cmd.Flags().StringVar(&snapshot, "snapshot", "", "Use a specific snapshot") + 
cmd.Flags().BoolVar(&noSync, "no-sync", false, "Skip workspace sync") + cmd.Flags().Int32Var(&autoStop, "auto-stop", 30, "Auto-stop interval in minutes (0 to disable)") + cmd.Flags().BoolVarP(&update, "update", "u", false, "Update agent to latest version") + cmd.Flags().BoolVarP(&Verbose, "verbose", "V", false, "Enable verbose output") + cmd.Flags().BoolVar(&keep, "keep", false, "Keep sandbox after the session exits") + cmd.Flags().BoolVar(&syncSettings, "sync-settings", false, "Sync local settings files to sandbox") + cmd.Flags().BoolVar(&noSyncSettings, "no-sync-settings", false, "Skip settings sync even if enabled globally") + cmd.Flags().IntVar(&previewPort, "preview", 0, "Open a preview URL for the given port (implies --keep unless --keep=false)") + cmd.Flags().BoolVar(&previewNoOpen, "no-open", false, "Do not open the preview URL automatically") + cmd.Flags().BoolVar(&recordLogs, "record", false, "Record the session output to persistent logs") + + return cmd +} + +func runAgentAlias(agentName string, envVars, volumes []string, credentials string, snapshotID string, noSync bool, autoStop int32, forceUpdate bool, keep bool, keepExplicit bool, syncSettings, noSyncSettings bool, previewPort int, previewExplicit bool, previewNoOpen bool, recordLogs bool, passthroughArgs []string) error { + agent := sandbox.Agent(agentName) + + cwd, err := os.Getwd() + if err != nil { + return err + } + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + + // Parse environment variables + envMap := map[string]string{} + for _, e := range envVars { + parts := strings.SplitN(e, "=", 2) + if len(parts) == 2 { + envMap[parts[0]] = parts[1] + } + } + + // Parse volume specs + volumeSpecs := []sandbox.VolumeSpec{} + for _, v := range volumes { + spec, err := parseVolumeSpec(v) + if err != nil { + return err + } + volumeSpecs = append(volumeSpecs, spec) + } + + // Parse credentials mode + credMode := strings.ToLower(credentials) + switch credMode { + case "sandbox", 
"none", "auto", "": + if credMode == "" { + credMode = "auto" + } + default: + return fmt.Errorf("invalid credentials mode: use sandbox, none, or auto") + } + if credMode == "auto" { + if agent == sandbox.AgentShell { + credMode = "none" + } else { + credMode = "sandbox" + } + } + + // Handle Codex TUI2 auto-enable + agentArgs := passthroughArgs + if agent == sandbox.AgentCodex && getenvFallback("AMUX_CODEX_TUI2") != "0" { + hasTui2Flag := false + for _, arg := range agentArgs { + if strings.Contains(arg, "tui2") || strings.Contains(arg, "features.tui2") { + hasTui2Flag = true + break + } + } + if !hasTui2Flag { + agentArgs = append([]string{"--enable", "tui2"}, agentArgs...) + } + } + + // Resolve snapshot + if snapshotID == "" { + snapshotID = sandbox.ResolveSnapshotID(cfg) + } + if Verbose && snapshotID != "" { + fmt.Printf("Using snapshot: %s\n", snapshotID) + } + + if previewExplicit && (previewPort < 1 || previewPort > 65535) { + return fmt.Errorf("preview port must be between 1 and 65535") + } + if previewPort != 0 && !keepExplicit { + keep = true + fmt.Println("Preview enabled; keeping sandbox after exit. 
Use --keep=false to override.") + } + + syncEnabled := !noSync + + // Determine settings sync mode based on flags + settingsSyncMode := "auto" // default: use global config + if syncSettings { + settingsSyncMode = "force" + } else if noSyncSettings { + settingsSyncMode = "skip" + } + + // Use the shared runAgent function for consistent behavior + return runAgent(runAgentParams{ + agent: agent, + cwd: cwd, + envMap: envMap, + volumeSpecs: volumeSpecs, + credMode: credMode, + autoStop: autoStop, + snapshotID: snapshotID, + syncEnabled: syncEnabled, + forceUpdate: forceUpdate, + agentArgs: agentArgs, + keepSandbox: keep, + settingsSyncMode: settingsSyncMode, + persistenceVolumeName: sandbox.ResolvePersistenceVolumeName(cfg), + previewPort: previewPort, + previewNoOpen: previewNoOpen, + recordLogs: recordLogs, + }) +} diff --git a/internal/cli/auth.go b/internal/cli/auth.go new file mode 100644 index 00000000..5c79f2c0 --- /dev/null +++ b/internal/cli/auth.go @@ -0,0 +1,224 @@ +package cli + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func buildAuthCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "auth [provider]", + Short: "Authentication commands", + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + action := args[0] + provider := "" + if len(args) > 1 { + provider = args[1] + } + + switch action { + case "login": + if provider != "" { + switch provider { + case "gh", "github": + return runGhAuthLogin() + default: + return fmt.Errorf("unknown provider: use gh") + } + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + apiKey, err := promptInput("Daytona API key: ") + if err != nil { + return err + } + if apiKey == "" { + return fmt.Errorf("no API key provided") + } + cfg.DaytonaAPIKey = apiKey + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + fmt.Println("Saved credentials to 
~/.amux/config.json") + fmt.Println() + fmt.Println("Note: Agent authentication (Claude, Codex, etc.) happens inside the sandbox") + fmt.Println("via OAuth/browser login on first run - no API keys needed here.") + return nil + case "status": + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + showAll := len(args) > 1 && args[1] == "--all" + + fmt.Println("amux auth status") + fmt.Println(strings.Repeat("─", 50)) + fmt.Println() + + // Daytona API key + if sandbox.ResolveAPIKey(cfg) != "" { + fmt.Println("✓ Daytona API key configured") + } else { + fmt.Println("✗ Daytona API key not set") + fmt.Println(" Run: amux auth login") + } + + if showAll { + fmt.Println() + fmt.Println("Agent authentication (Claude, Codex, Gemini, etc.):") + fmt.Println(" Agents authenticate via OAuth/browser login inside the sandbox.") + fmt.Println(" Credentials persist across sandboxes for future sessions.") + fmt.Println(" Optional: pass API keys via --env flag to skip OAuth.") + } else { + fmt.Println() + fmt.Println("Run `amux auth status --all` for more details") + } + + fmt.Println() + fmt.Println(strings.Repeat("─", 50)) + return nil + case "logout": + if err := sandbox.ClearConfigKeys(); err != nil { + return err + } + fmt.Println("Removed saved credentials from ~/.amux/config.json") + fmt.Println("If you use env vars, unset AMUX_DAYTONA_API_KEY") + return nil + default: + return fmt.Errorf("unknown action: use login, logout, or status") + } + }, + } + return cmd +} + +func runGhAuthLogin() error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + + // Load existing sandbox metadata - require sandbox to exist + meta, err := sandbox.LoadSandboxMeta(cwd, providerInstance.Name()) + if err != nil { + return err + } + if meta == nil { + return fmt.Errorf("no sandbox exists - run 
`amux sandbox run ` first to create one") + } + + sb, err := providerInstance.GetSandbox(context.Background(), meta.SandboxID) + if err != nil { + return fmt.Errorf("sandbox not found - run `amux sandbox run ` to create one") + } + + // Ensure sandbox is started + if sb.State() != sandbox.StateStarted { + fmt.Fprintln(os.Stderr, "Starting sandbox...") + if err := sb.Start(context.Background()); err != nil { + return fmt.Errorf("failed to start sandbox: %w", err) + } + if err := sb.WaitReady(context.Background(), 60*time.Second); err != nil { + fmt.Fprintf(os.Stderr, "Warning: sandbox may not be fully ready: %v\n", err) + } + } + + prevShellRaw := os.Getenv("AMUX_SHELL_RAW") + os.Setenv("AMUX_SHELL_RAW", "0") + defer func() { + if prevShellRaw == "" { + _ = os.Unsetenv("AMUX_SHELL_RAW") + } else { + _ = os.Setenv("AMUX_SHELL_RAW", prevShellRaw) + } + }() + + if err := sandbox.SetupCredentials(sb, sandbox.CredentialsConfig{Mode: "sandbox", Agent: sandbox.AgentShell}, false); err != nil { + return err + } + + if !ensureGhCli(sb) { + return fmt.Errorf("GitHub CLI is required for device login") + } + + status, _ := sb.Exec(context.Background(), `bash -lc "gh auth status -h github.com >/dev/null 2>&1"`, nil) + if status != nil && status.ExitCode == 0 { + fmt.Println("GitHub is already authenticated on this sandbox") + return nil + } + + fmt.Println("\namux GitHub login") + fmt.Println("1. A one-time device code will appear below") + fmt.Println("2. Open https://github.com/login/device locally") + fmt.Println("3. 
Paste the code, finish the login, then return here")
	fmt.Println("If prompted, choose GitHub.com + HTTPS")
	fmt.Println("Tip: if you see \"Press Enter\", just hit Enter")

	homeDir := resolveSandboxHome(sb)
	// Shell script executed inside the sandbox: prints instructions, runs the
	// gh device-flow login, then verifies the credential actually landed.
	script := strings.Join([]string{
		"echo ''",
		"echo 'GitHub device login starting'",
		"echo 'Open https://github.com/login/device on your local machine'",
		"echo 'Paste the one-time code shown below'",
		"echo ''",
		"gh auth login --hostname github.com --git-protocol https --device --skip-ssh-key",
		"gh auth setup-git",
		"if gh auth status -h github.com >/dev/null 2>&1; then",
		"  echo ''",
		"  echo 'GitHub auth saved on this sandbox'",
		"else",
		"  echo ''",
		"  echo 'GitHub auth not confirmed - run `amux auth login gh` again'",
		"fi",
	}, "\n")

	// BROWSER=echo prevents gh from trying to open a browser inside the
	// sandbox; the user opens the device URL on their local machine instead.
	raw := false
	exitCode, err := sandbox.RunAgentInteractive(sb, sandbox.AgentConfig{
		Agent:         sandbox.AgentShell,
		WorkspacePath: homeDir,
		Args:          []string{"-lc", script},
		Env:           map[string]string{"BROWSER": "echo"},
		RawMode:       &raw,
	})
	if err != nil {
		return err
	}
	if exitCode != 0 {
		return fmt.Errorf("GitHub auth session exited with code %d", exitCode)
	}
	return nil
}

// ensureGhCli reports whether the gh CLI is available on the sandbox,
// attempting a best-effort install via the image's package manager
// (apt/apk/yum/dnf) when it is missing.
func ensureGhCli(sb sandbox.RemoteSandbox) bool {
	// Best-effort probe: an exec failure is treated the same as "gh missing".
	check, _ := sb.Exec(context.Background(), "command -v gh", nil)
	if check != nil && check.ExitCode == 0 {
		return true
	}
	fmt.Println("GitHub CLI not found, attempting install...")
	// Tries each known package manager in turn; the `|| sudo ...` variants
	// cover images where the sandbox user is not root.
	installCmd := `bash -lc "if command -v apt-get >/dev/null 2>&1; then (apt-get update -y || sudo apt-get update -y) >/dev/null 2>&1; (apt-get install -y gh || sudo apt-get install -y gh) >/dev/null 2>&1; elif command -v apk >/dev/null 2>&1; then (apk add --no-cache github-cli) >/dev/null 2>&1; elif command -v yum >/dev/null 2>&1; then (yum install -y gh || sudo yum install -y gh) >/dev/null 2>&1; elif command -v dnf >/dev/null 2>&1; then (dnf install -y gh || sudo dnf install -y gh) >/dev/null 2>&1; else exit 1; fi"`
	resp, _ := sb.Exec(context.Background(), installCmd, nil)
	if resp != nil && resp.ExitCode == 0 {
		return true
	}
	fmt.Println("Failed to install GitHub CLI - install gh manually and run `gh auth login` inside a sandbox shell")
	return false
}

package cli

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// Run executes the AMUX CLI. It returns a process exit code.
func Run(args []string) int {
	root := buildRootCommand()
	root.SetArgs(args)
	if err := root.Execute(); err != nil {
		// exitError carries an explicit exit code (e.g. an agent's own exit
		// status) that must be propagated to the shell unchanged.
		if exitErr, ok := err.(exitError); ok {
			return exitErr.code
		}
		fmt.Fprintln(os.Stderr, err)
		return 1
	}
	return 0
}

// buildRootCommand assembles the amux root command: core subcommands,
// quick-access commands, per-agent aliases, and shell completions.
func buildRootCommand() *cobra.Command {
	root := &cobra.Command{
		Use:   "amux",
		Short: "Daytona-powered sandbox CLI for Claude Code, Codex, OpenCode, Amp, Gemini, and Droid",
		Long: `amux - Run AI coding agents in cloud sandboxes

Quick start:
  amux claude       Run Claude Code in a cloud sandbox
  amux codex        Run Codex in a cloud sandbox
  amux shell        Run a shell in a cloud sandbox

Management:
  amux status       Check sandbox status
  amux ls           List all sandboxes
  amux rm [id]      Remove a sandbox
  amux ssh          SSH into the sandbox

Setup:
  amux setup        Initial setup (validate credentials)
  amux doctor       Diagnose issues
  amux auth login   Configure Daytona API key`,
		SilenceUsage:  true,
		SilenceErrors: true,
	}
	root.Version = "0.1.0"
	root.SetHelpCommand(&cobra.Command{Hidden: true})
	root.CompletionOptions.DisableDefaultCmd = true

	// Core commands
	root.AddCommand(buildSetupCommand())
	root.AddCommand(buildEnhancedDoctorCommand())
	root.AddCommand(buildSnapshotCommand())
	root.AddCommand(buildAuthCommand())
	root.AddCommand(buildSandboxCommand())
	root.AddCommand(buildSettingsCommand())

	// Quick access commands
	root.AddCommand(buildStatusCommand())
	root.AddCommand(buildSSHCommand())
	root.AddCommand(buildExecCommand())

	// Documentation and help commands
	root.AddCommand(buildCompletionCommand())
	root.AddCommand(buildExplainCommand())
	root.AddCommand(buildLogsCommand())

	// Agent aliases - shortcuts for `amux sandbox run <agent>`
	root.AddCommand(buildAgentAliasCommand("claude", "Run Claude Code in a sandbox"))
	root.AddCommand(buildAgentAliasCommand("codex", "Run Codex in a sandbox"))
	root.AddCommand(buildAgentAliasCommand("opencode", "Run OpenCode in a sandbox"))
	root.AddCommand(buildAgentAliasCommand("amp", "Run Amp in a sandbox"))
	root.AddCommand(buildAgentAliasCommand("gemini", "Run Gemini CLI in a sandbox"))
	root.AddCommand(buildAgentAliasCommand("droid", "Run Droid in a sandbox"))
	root.AddCommand(buildAgentAliasCommand("shell", "Run a shell in a sandbox"))

	// Command aliases for convenience
	root.AddCommand(buildLsAlias())
	root.AddCommand(buildRmAlias())

	// Register shell completions for commands
	registerCompletions(root)

	return root
}

// buildLsAlias creates an alias for `amux sandbox ls`
func buildLsAlias() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "ls",
		Short: "List all amux sandboxes (alias for `sandbox ls`)",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Delegate to sandbox ls
			sandboxCmd := buildSandboxLsCommand()
			return sandboxCmd.RunE(sandboxCmd, args)
		},
	}
	return cmd
}

// buildRmAlias creates an alias for `amux sandbox rm`
func buildRmAlias() *cobra.Command {
	var project bool
	cmd := &cobra.Command{
		Use:   "rm [id]",
		Short: "Remove a sandbox (alias for `sandbox rm`)",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Delegate to sandbox rm
			sandboxCmd := buildSandboxRmCommand()
			// Forward the --project flag value onto the delegate's flag set.
			if project {
				_ = sandboxCmd.Flags().Set("project", "true")
			}
			return sandboxCmd.RunE(sandboxCmd, args)
		},
	}
	cmd.Flags().BoolVar(&project, "project", false, "Remove sandbox for current project")
	return cmd
}
index 00000000..5a277ab7 --- /dev/null +++ b/internal/cli/common.go @@ -0,0 +1,59 @@ +package cli + +import ( + "bufio" + "context" + "fmt" + "os" + "strings" + + "golang.org/x/term" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func promptInput(label string) (string, error) { + reader := bufio.NewReader(os.Stdin) + fmt.Print(label) + text, err := reader.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(text), nil +} + +func ensureDaytonaAPIKey() error { + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + if sandbox.ResolveAPIKey(cfg) != "" { + return nil + } + if !term.IsTerminal(int(os.Stdin.Fd())) { + return fmt.Errorf("Daytona API key not found. Set AMUX_DAYTONA_API_KEY or run `amux auth login`.") + } + apiKey, err := promptInput("Daytona API key: ") + if err != nil { + return err + } + if apiKey == "" { + return fmt.Errorf("no API key provided") + } + cfg.DaytonaAPIKey = apiKey + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + fmt.Println("Saved Daytona API key to ~/.amux/config.json") + return nil +} + +func resolveSandboxHome(sb sandbox.RemoteSandbox) string { + resp, err := sb.Exec(context.Background(), `sh -lc "USER_NAME=$(id -un 2>/dev/null || echo daytona); HOME_DIR=$(getent passwd \"$USER_NAME\" 2>/dev/null | cut -d: -f6 || true); if [ -z \"$HOME_DIR\" ]; then HOME_DIR=/home/$USER_NAME; fi; printf \"%s\" \"$HOME_DIR\""`, nil) + if err == nil { + if resp.Stdout != "" { + return strings.TrimSpace(resp.Stdout) + } + } + return "/home/daytona" +} diff --git a/internal/cli/completions.go b/internal/cli/completions.go new file mode 100644 index 00000000..e7bf5971 --- /dev/null +++ b/internal/cli/completions.go @@ -0,0 +1,150 @@ +package cli + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +) + +// buildCompletionCommand creates the completion command for shell completions. 
+func buildCompletionCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate shell completion scripts", + Long: `Generate shell completion scripts for amux. + +To load completions: + +Bash: + $ source <(amux completion bash) + + # To load completions for each session, execute once: + # Linux: + $ amux completion bash > /etc/bash_completion.d/amux + # macOS: + $ amux completion bash > $(brew --prefix)/etc/bash_completion.d/amux + +Zsh: + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ amux completion zsh > "${fpath[1]}/_amux" + + # You will need to start a new shell for this setup to take effect. + +Fish: + $ amux completion fish | source + + # To load completions for each session, execute once: + $ amux completion fish > ~/.config/fish/completions/amux.fish + +PowerShell: + PS> amux completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> amux completion powershell > amux.ps1 + # and source this file from your PowerShell profile. +`, + DisableFlagsInUseLine: true, + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), + RunE: func(cmd *cobra.Command, args []string) error { + switch args[0] { + case "bash": + return cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + return cmd.Root().GenZshCompletion(os.Stdout) + case "fish": + return cmd.Root().GenFishCompletion(os.Stdout, true) + case "powershell": + return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + default: + return fmt.Errorf("unknown shell: %s", args[0]) + } + }, + } + + return cmd +} + +// registerCompletions adds custom completions to commands. 
+func registerCompletions(root *cobra.Command) { + // Agent name completions + agentNames := []string{"claude", "codex", "opencode", "amp", "gemini", "droid", "shell"} + + // Find sandbox run command and add completions + for _, cmd := range root.Commands() { + if cmd.Name() == "sandbox" { + for _, subCmd := range cmd.Commands() { + if subCmd.Name() == "run" { + subCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if len(args) == 0 { + return agentNames, cobra.ShellCompDirectiveNoFileComp + } + return nil, cobra.ShellCompDirectiveNoFileComp + } + } + if subCmd.Name() == "update" { + subCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if len(args) == 0 { + return agentNames[:len(agentNames)-1], cobra.ShellCompDirectiveNoFileComp // Exclude "shell" + } + return nil, cobra.ShellCompDirectiveNoFileComp + } + } + } + } + + // Add completions for explain command + if cmd.Name() == "explain" { + cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if len(args) == 0 { + topics := []string{"credentials", "sync", "agents", "snapshots", "settings", "architecture"} + return topics, cobra.ShellCompDirectiveNoFileComp + } + return nil, cobra.ShellCompDirectiveNoFileComp + } + } + } + + // Add completions for snapshot commands + // These would need to fetch from the API, so we'll use dynamic completion + registerSnapshotCompletions(root) +} + +// registerSnapshotCompletions adds snapshot name completions. 
+func registerSnapshotCompletions(root *cobra.Command) { + // Find snapshot command + for _, cmd := range root.Commands() { + if cmd.Name() == "snapshot" { + for _, subCmd := range cmd.Commands() { + if subCmd.Name() == "rm" || subCmd.Name() == "show" { + subCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + // Return empty - would need API call to list snapshots + // This is a placeholder for future implementation + return nil, cobra.ShellCompDirectiveNoFileComp + } + } + } + } + } +} + +// FlagCompletionFunc returns completions for flag values. +func FlagCompletionFunc(flagName string) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { + switch flagName { + case "credentials": + return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"sandbox", "none", "auto"}, cobra.ShellCompDirectiveNoFileComp + } + case "agent": + return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"claude", "codex", "opencode", "amp", "gemini", "droid"}, cobra.ShellCompDirectiveNoFileComp + } + default: + return nil + } +} diff --git a/internal/cli/doctor_enhanced.go b/internal/cli/doctor_enhanced.go new file mode 100644 index 00000000..c718da8b --- /dev/null +++ b/internal/cli/doctor_enhanced.go @@ -0,0 +1,223 @@ +package cli + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +// buildEnhancedDoctorCommand creates the enhanced doctor command. +func buildEnhancedDoctorCommand() *cobra.Command { + var deep bool + var fix bool + var agent string + + cmd := &cobra.Command{ + Use: "doctor", + Short: "Diagnose and fix common issues", + Long: `Run diagnostic checks to identify and fix common issues. + +By default, runs quick local checks. 
Use --deep for comprehensive sandbox checks.`, + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + if deep { + return runDeepDoctor(ctx, agent, fix) + } + return runQuickDoctor(ctx, fix) + }, + } + + cmd.Flags().BoolVar(&deep, "deep", false, "Run comprehensive sandbox health checks") + cmd.Flags().BoolVar(&fix, "fix", false, "Attempt to automatically fix issues") + cmd.Flags().StringVar(&agent, "agent", "claude", "Agent to check (for --deep)") + + return cmd +} + +// runQuickDoctor performs quick local checks. +func runQuickDoctor(ctx context.Context, fix bool) error { + fmt.Println("\033[1mRunning diagnostics...\033[0m") + fmt.Println() + + report, err := sandbox.RunEnhancedPreflight(ctx, true) + if err != nil { + return err + } + + fmt.Println() + if report.Passed { + fmt.Println("\033[32m✓ All checks passed\033[0m") + } else { + fmt.Println("\033[31m✗ Some checks failed\033[0m") + + if fix { + fmt.Println() + fmt.Println("Attempting fixes...") + // Run fixes for known issues + for _, errMsg := range report.Errors { + if strings.Contains(errMsg, "api_key") { + fmt.Println(" Run `amux setup` to configure your API key") + } + if strings.Contains(errMsg, "ssh") { + fmt.Println(" Install OpenSSH client for your platform") + } + } + } + } + + return nil +} + +// runDeepDoctor performs comprehensive sandbox health checks. 
func runDeepDoctor(ctx context.Context, agentName string, fix bool) error {
	fmt.Println("\033[1mRunning deep diagnostics...\033[0m")
	fmt.Println()

	// First run quick checks; deep checks are pointless if the basics fail.
	report, err := sandbox.RunEnhancedPreflight(ctx, true)
	if err != nil {
		return err
	}

	if !report.Passed {
		fmt.Println()
		fmt.Println("\033[31m✗ Basic checks failed - fix these first\033[0m")
		return fmt.Errorf("preflight checks failed")
	}

	fmt.Println()
	fmt.Println("\033[1mChecking sandbox health...\033[0m")
	fmt.Println()

	// Create a throwaway (ephemeral) sandbox purely for diagnostics.
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}

	cfg, err := sandbox.LoadConfig()
	if err != nil {
		return err
	}
	providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "")
	if err != nil {
		return err
	}

	snapshotID := sandbox.ResolveSnapshotID(cfg)

	spinner := NewSpinner("Connecting to sandbox")
	spinner.Start()

	sb, _, err := sandbox.CreateSandboxSession(providerInstance, cwd, sandbox.SandboxConfig{
		Agent:                 sandbox.Agent(agentName),
		Snapshot:              snapshotID,
		Ephemeral:             true,
		PersistenceVolumeName: sandbox.ResolvePersistenceVolumeName(cfg),
	})

	if err != nil {
		spinner.StopWithMessage("✗ Could not connect to sandbox")
		return err
	}
	spinner.StopWithMessage("✓ Connected to sandbox")

	// Tear the diagnostic sandbox down on every exit path. A fresh context is
	// used so cleanup still runs after the outer ctx's 2-minute deadline.
	cleanup := func() {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		_ = sb.Stop(ctx)
		_ = providerInstance.DeleteSandbox(ctx, sb.ID())
		_ = sandbox.RemoveSandboxMetaByID(sb.ID())
	}
	defer cleanup()

	// Mount credentials so the health checks see the real agent setup;
	// settings sync is skipped since this sandbox is throwaway.
	if err := sandbox.SetupCredentials(sb, sandbox.CredentialsConfig{
		Mode:             "sandbox",
		Agent:            sandbox.Agent(agentName),
		SettingsSyncMode: "skip",
	}, false); err != nil {
		return err
	}

	// Get Daytona client for health checks
	client, err := sandbox.GetDaytonaClient()
	if err != nil {
		return err
	}

	// Run health checks
	health, err := sandbox.NewSandboxHealth(client, sb, sandbox.Agent(agentName))
	if err != nil {
		return err
	}
	health.SetVerbose(true)

	fmt.Println()
	fmt.Println("\033[1mSandbox Health Checks:\033[0m")
	fmt.Println()

	healthReport := health.Check(ctx)
	fmt.Print(sandbox.FormatReport(healthReport))

	// Attempt repairs if requested, then re-check to show the improved state.
	if fix && healthReport.Overall != sandbox.HealthStatusHealthy {
		fmt.Println()
		fmt.Println("\033[1mAttempting repairs...\033[0m")

		if err := health.Repair(ctx); err != nil {
			fmt.Printf("\033[31m✗ Some repairs failed: %v\033[0m\n", err)
		} else {
			fmt.Println("\033[32m✓ Repairs completed\033[0m")

			// Re-check health
			fmt.Println()
			fmt.Println("Re-checking health...")
			newReport := health.Check(ctx)
			fmt.Print(sandbox.FormatReport(newReport))
		}
	}

	fmt.Println()

	// Show per-agent credential status
	fmt.Println("\033[1mCredential Status:\033[0m")
	fmt.Println()

	credentials := sandbox.CheckAllAgentCredentials(sb)
	for _, cred := range credentials {
		icon := "\033[31m✗\033[0m"
		status := "not configured"
		if cred.HasCredential {
			icon = "\033[32m✓\033[0m"
			status = "configured"
		}
		fmt.Printf("  %s %s: %s\n", icon, cred.Agent, status)
	}

	// GitHub CLI auth is reported separately from agent credentials.
	if sandbox.HasGitHubCredentials(sb) {
		fmt.Printf("  \033[32m✓\033[0m GitHub CLI: authenticated\n")
	} else {
		fmt.Printf("  \033[33m!\033[0m GitHub CLI: not authenticated\n")
	}

	fmt.Println()

	// Show tips
	if healthReport.Overall != sandbox.HealthStatusHealthy {
		fmt.Println("\033[1mTips:\033[0m")
		fmt.Println("  - Run `amux doctor --deep --fix` to attempt automatic repairs")
		fmt.Println("  - Run `amux sandbox rm --project` and try again for a fresh start")
		fmt.Println()
	}

	return nil
}

package cli

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

// buildExplainCommand creates the explain command for
learning about amux concepts. +func buildExplainCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "explain ", + Short: "Explain amux concepts and architecture", + Long: `Get detailed explanations of how amux works. + +Available topics: + credentials How credentials are stored and persisted + sync How workspace syncing works + agents Supported AI coding agents + snapshots Using snapshots for faster startup + settings Settings sync configuration + architecture Overall system architecture`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return explainTopic(args[0]) + }, + } + + return cmd +} + +func explainTopic(topic string) error { + explanations := map[string]string{ + "credentials": ` +╭─────────────────────────────────────────────────────────────────╮ +│ CREDENTIALS PERSISTENCE │ +╰─────────────────────────────────────────────────────────────────╯ + +Credentials are stored on a persistent volume mounted at /amux and +symlinked into the sandbox home directory. They persist across sandboxes. + +Storage locations in sandbox's home directory (backed by /amux/home): +├── ~/.claude/ # Claude CLI credentials +│ └── .credentials.json +├── ~/.codex/ # Codex CLI credentials +│ └── auth.json +├── ~/.config/codex/ # Codex config +├── ~/.local/share/opencode/ # OpenCode credentials +├── ~/.config/amp/ # Amp config +├── ~/.local/share/amp/ # Amp data +├── ~/.gemini/ # Gemini CLI credentials +├── ~/.factory/ # Droid credentials +├── ~/.config/gh/ # GitHub CLI credentials +└── ~/.gitconfig # Git configuration + +How it works: +1. amux mounts a persistent volume at /amux for each sandbox +2. It symlinks credential + cache dirs (e.g., ~/.config, ~/.local, ~/.claude) +3. When the agent authenticates via OAuth, credentials are saved there +4. New sandboxes reuse the same volume, so credentials and CLI installs persist +5. 
To reset, delete the amux-persist volume in Daytona + +Commands: + amux auth status --all # Check all credential status + amux doctor --deep # Verify credential directories +`, + + "sync": ` +╭─────────────────────────────────────────────────────────────────╮ +│ WORKSPACE SYNCING │ +╰─────────────────────────────────────────────────────────────────╯ + +amux syncs your local workspace to the sandbox so agents can access +and modify your files. + +Sync Methods: +1. Full Sync (default first time) + - Creates a tarball of your workspace + - Uploads and extracts in the sandbox + - Respects .amuxignore patterns + +2. Incremental Sync (subsequent runs) + - Computes file hashes and timestamps + - Only transfers changed files + - Much faster for large workspaces + +Ignored by default: + - .git/ + - node_modules/ + - __pycache__/ + - .env files + - Build artifacts + +Custom ignores (.amuxignore): + # Add patterns like .gitignore + *.log + dist/ + .cache/ + +Commands: + amux claude # Syncs workspace automatically + amux claude --no-sync # Skip sync (use existing files) +`, + + "agents": ` +╭─────────────────────────────────────────────────────────────────╮ +│ SUPPORTED AGENTS │ +╰─────────────────────────────────────────────────────────────────╯ + +amux supports multiple AI coding agents: + +┌──────────┬─────────────────────┬─────────────────────────────────┐ +│ Agent │ Provider │ Installation │ +├──────────┼─────────────────────┼─────────────────────────────────┤ +│ claude │ Anthropic │ npm install -g @anthropic-ai/ │ +│ │ │ claude-code │ +├──────────┼─────────────────────┼─────────────────────────────────┤ +│ codex │ OpenAI │ npm install -g @openai/codex │ +├──────────┼─────────────────────┼─────────────────────────────────┤ +│ gemini │ Google │ npm install -g @google/ │ +│ │ │ gemini-cli │ +├──────────┼─────────────────────┼─────────────────────────────────┤ +│ opencode │ Open Source │ curl opencode.ai/install | bash │ 
+├──────────┼─────────────────────┼─────────────────────────────────┤ +│ amp │ Sourcegraph │ curl ampcode.com/install.sh | │ +│ │ │ bash │ +├──────────┼─────────────────────┼─────────────────────────────────┤ +│ droid │ Factory │ curl app.factory.ai/cli | sh │ +├──────────┼─────────────────────┼─────────────────────────────────┤ +│ shell │ - │ Built-in bash shell │ +└──────────┴─────────────────────┴─────────────────────────────────┘ + +Commands: + amux claude # Run Claude Code + amux codex # Run Codex + amux sandbox run # Run any agent + amux sandbox update # Update agents to latest version +`, + + "snapshots": ` +╭─────────────────────────────────────────────────────────────────╮ +│ SNAPSHOTS │ +╰─────────────────────────────────────────────────────────────────╯ + +Snapshots are pre-built sandbox images that include installed agents +and dependencies. They make sandbox startup much faster. + +Benefits: + - Instant startup (vs 30+ seconds for fresh install) + - Consistent environment across sessions + - Pre-configured tools and settings + +Creating a snapshot: + amux snapshot create --name my-snapshot + amux snapshot create --agents claude,codex # With specific agents + +Using a snapshot: + amux claude --snapshot my-snapshot + amux config set defaultSnapshot my-snapshot # Use by default + +Listing snapshots: + amux snapshot ls + +Commands: + amux snapshot create # Create a new snapshot + amux snapshot ls # List all snapshots + amux snapshot rm # Delete a snapshot +`, + + "settings": ` +╭─────────────────────────────────────────────────────────────────╮ +│ SETTINGS SYNC │ +╰─────────────────────────────────────────────────────────────────╯ + +Settings sync copies your local preferences (NOT credentials) to +the sandbox. This is opt-in and requires explicit consent. 
+ +What gets synced: + ✓ Claude settings (~/.claude/settings.json) + - Model preferences + - Feature flags + - Permission settings + + ✓ Git config (~/.gitconfig - safe keys only) + - user.name, user.email + - Aliases + - Editor preferences + - NOT credentials or tokens + +What does NOT get synced: + ✗ API keys + ✗ Tokens + ✗ Passwords + ✗ Private keys + ✗ Credential helpers + +Enabling settings sync: + amux settings sync --enable --claude --git + +Checking status: + amux settings status + +Disabling: + amux settings sync --disable +`, + + "architecture": ` +╭─────────────────────────────────────────────────────────────────╮ +│ ARCHITECTURE │ +╰─────────────────────────────────────────────────────────────────╯ + +amux creates a fresh Daytona sandbox per run and mounts a persistent volume: + +┌─────────────────────────────────────────────────────────────────┐ +│ Your Machine │ +├─────────────────────────────────────────────────────────────────┤ +│ amux CLI │ +│ ├─ Preflight checks │ +│ ├─ Workspace sync │ +│ └─ SSH connection │ +└─────────────────────────────────────────────────────────────────┘ + │ + │ HTTPS API + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Daytona │ +├─────────────────────────────────────────────────────────────────┤ +│ ┌─────────────────────────────────────────────────────────────┐│ +│ │ Ephemeral Sandbox ││ +│ │ ┌─────────────────────────────────────────────────────────┐││ +│ │ │ /amux (persistent volume) │││ +│ │ │ └── /amux/home/... 
(credentials + CLI caches) │││ +│ │ └─────────────────────────────────────────────────────────┘││ +│ │ ┌─────────────────────────────────────────────────────────┐││ +│ │ │ /workspace/{worktreeID}/ │││ +│ │ │ (per-project workspace isolation) │││ +│ │ └─────────────────────────────────────────────────────────┘││ +│ │ ┌─────────────────────────────────────────────────────────┐││ +│ │ │ Agent (claude/codex/opencode/amp/gemini/droid) │││ +│ │ └─────────────────────────────────────────────────────────┘││ +│ └─────────────────────────────────────────────────────────────┘│ +└─────────────────────────────────────────────────────────────────┘ + +Key Components: + - Provider Interface: Daytona provider (additional providers removed) + - Persistence Manager: Mounts volume + home directory symlinks + - Sync Engine: Uploads/downloads workspace files + - Agent Plugins: Modular agent installation and configuration +`, + } + + explanation, ok := explanations[strings.ToLower(topic)] + if !ok { + fmt.Println("Unknown topic:", topic) + fmt.Println() + fmt.Println("Available topics:") + fmt.Println(" credentials How credentials are stored and persisted") + fmt.Println(" sync How workspace syncing works") + fmt.Println(" agents Supported AI coding agents") + fmt.Println(" snapshots Using snapshots for faster startup") + fmt.Println(" settings Settings sync configuration") + fmt.Println(" architecture Overall system architecture") + return nil + } + + fmt.Print(explanation) + return nil +} diff --git a/internal/cli/doctor_logs.go b/internal/cli/doctor_logs.go new file mode 100644 index 00000000..a3ae0227 --- /dev/null +++ b/internal/cli/doctor_logs.go @@ -0,0 +1,90 @@ +package cli + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +// buildLogsCommand creates the logs command for viewing sandbox output. 
+func buildLogsCommand() *cobra.Command { + var follow bool + var lines int + + cmd := &cobra.Command{ + Use: "logs", + Short: "View sandbox logs and output", + Long: "View logs and output from the current workspace's sandbox.", + RunE: func(cmd *cobra.Command, args []string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + + meta, err := sandbox.LoadSandboxMeta(cwd, providerInstance.Name()) + if err != nil { + return err + } + if meta == nil { + return fmt.Errorf("no sandbox exists for this project - run `amux sandbox run ` first") + } + + sb, err := providerInstance.GetSandbox(context.Background(), meta.SandboxID) + if err != nil { + return fmt.Errorf("sandbox not found - run `amux sandbox run ` to create one") + } + + if sb.State() != sandbox.StateStarted { + fmt.Fprintln(os.Stderr, "Starting sandbox...") + if err := sb.Start(context.Background()); err != nil { + return fmt.Errorf("failed to start sandbox: %w", err) + } + if err := sb.WaitReady(context.Background(), 60*time.Second); err != nil { + fmt.Fprintf(os.Stderr, "Warning: sandbox may not be fully ready: %v\n", err) + } + } + + // Get logs from sandbox + logCmd := fmt.Sprintf("journalctl --no-pager -n %d", lines) + if follow { + logCmd = "journalctl -f" + } + + resp, err := sb.Exec(context.Background(), logCmd, nil) + if err != nil { + // Fallback to dmesg + resp, err = sb.Exec(context.Background(), fmt.Sprintf("dmesg | tail -n %d", lines), nil) + if err != nil { + return fmt.Errorf("could not retrieve logs: %w", err) + } + } + + if resp.Stdout != "" { + fmt.Print(resp.Stdout) + } else { + fmt.Println("No logs available") + } + + return nil + }, + } + + cmd.Flags().BoolVarP(&follow, "follow", "f", false, "Follow log output") + cmd.Flags().IntVarP(&lines, "lines", "n", 100, "Number of lines to show") + + return 
cmd +} diff --git a/internal/cli/helpers.go b/internal/cli/helpers.go new file mode 100644 index 00000000..b7f74511 --- /dev/null +++ b/internal/cli/helpers.go @@ -0,0 +1,95 @@ +package cli + +import ( + "fmt" + "os" + "os/exec" + "runtime" + "strings" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func parseVolumeSpec(spec string) (sandbox.VolumeSpec, error) { + parts := strings.SplitN(spec, ":", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return sandbox.VolumeSpec{}, fmt.Errorf("invalid volume spec %q. Use name:/path", spec) + } + return sandbox.VolumeSpec{Name: parts[0], MountPath: parts[1]}, nil +} + +func buildVncURL(previewURL string) string { + if previewURL == "" { + return "" + } + trimmed := strings.TrimRight(previewURL, "/") + parts := strings.SplitN(trimmed, "?", 2) + urlBase := parts[0] + query := "" + if len(parts) == 2 { + query = parts[1] + } + vnc := urlBase + "/vnc.html" + if query == "" { + return vnc + } + return vnc + "?" + query +} + +func tryOpenURL(url string) bool { + if url == "" { + return false + } + var cmd *exec.Cmd + switch runtime.GOOS { + case "darwin": + cmd = exec.Command("open", url) + case "windows": + cmd = exec.Command("cmd", "/c", "start", "", url) + default: + cmd = exec.Command("xdg-open", url) + } + cmd.Stdout = nil + cmd.Stderr = nil + cmd.Stdin = nil + if err := cmd.Start(); err != nil { + return false + } + return true +} + +func getAgentArgs(argv []string, agent string) []string { + agentIndex := -1 + for i := 0; i < len(argv)-2; i++ { + if argv[i] == "sandbox" && argv[i+1] == "run" && argv[i+2] == agent { + agentIndex = i + 2 + break + } + } + if agentIndex == -1 { + return nil + } + passthrough := -1 + for i := agentIndex + 1; i < len(argv); i++ { + if argv[i] == "--" { + passthrough = i + break + } + } + if passthrough == -1 { + return nil + } + if passthrough+1 >= len(argv) { + return nil + } + return argv[passthrough+1:] +} + +func getenvFallback(keys ...string) string { + for _, key 
:= range keys {
		if val, ok := os.LookupEnv(key); ok && val != "" {
			return val
		}
	}
	return ""
}

package cli

import (
	"github.com/spf13/cobra"
)

// Verbose controls whether verbose output is enabled.
var Verbose bool

// buildSandboxCommand creates the `amux sandbox` command group and attaches
// all sandbox-management subcommands.
func buildSandboxCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "sandbox",
		Short: "Manage sandboxes",
	}
	cmd.AddCommand(buildSandboxRunCommand())
	cmd.AddCommand(buildSandboxUpdateCommand())
	cmd.AddCommand(buildSandboxPreviewCommand())
	cmd.AddCommand(buildSandboxLogsCommand())
	cmd.AddCommand(buildSandboxDesktopCommand())
	cmd.AddCommand(buildSandboxLsCommand())
	cmd.AddCommand(buildSandboxRmCommand())
	cmd.AddCommand(buildSandboxResetCommand())
	return cmd
}

package cli

import (
	"fmt"

	"github.com/andyrewlee/amux/internal/sandbox"
)

// checkNeedsLogin determines if an agent needs login based on stored credentials
func checkNeedsLogin(sb sandbox.RemoteSandbox, agent sandbox.Agent, envMap map[string]string) bool {
	// Check if credentials already exist on the sandbox
	credStatus := sandbox.CheckAgentCredentials(sb, agent)
	if credStatus.HasCredential {
		return false
	}

	// Check if API key is provided via environment; an API key makes an
	// interactive login unnecessary.
	switch agent {
	case sandbox.AgentClaude:
		if envMap["ANTHROPIC_API_KEY"] != "" || envMap["CLAUDE_API_KEY"] != "" || envMap["ANTHROPIC_AUTH_TOKEN"] != "" {
			return false
		}
	case sandbox.AgentCodex:
		if envMap["OPENAI_API_KEY"] != "" {
			return false
		}
	case sandbox.AgentGemini:
		if envMap["GEMINI_API_KEY"] != "" || envMap["GOOGLE_API_KEY"] != "" || envMap["GOOGLE_APPLICATION_CREDENTIALS"] != "" {
			return false
		}
	case sandbox.AgentDroid:
		if envMap["FACTORY_API_KEY"] != "" {
			return false
		}
	case sandbox.AgentAmp:
		if envMap["AMP_API_KEY"] != "" {
			return false
		}
	}

	// Agents that need an explicit up-front login flow; the others handle
	// authentication themselves on first run.
	switch agent {
	case sandbox.AgentCodex, sandbox.AgentOpenCode, sandbox.AgentAmp:
		return true
	}

	return false
}

// handleAgentLogin runs the login flow for agents that need it
func handleAgentLogin(sb sandbox.RemoteSandbox, agent sandbox.Agent, workspacePath string, envMap map[string]string) (int, error) {
	fmt.Printf("\n%s requires authentication (first run)\n", agent)
	fmt.Println("Credentials will persist for future sessions.")
	fmt.Println()

	var loginArgs []string
	switch agent {
	case sandbox.AgentCodex:
		loginArgs = []string{"login"}
		// Device auth is the default; AMUX_CODEX_DEVICE_AUTH=0 opts out.
		if getenvFallback("AMUX_CODEX_DEVICE_AUTH") != "0" {
			loginArgs = append(loginArgs, "--device-auth")
		}
	case sandbox.AgentOpenCode:
		loginArgs = []string{"auth", "login"}
	case sandbox.AgentAmp:
		loginArgs = []string{"login"}
	default:
		// Agent handles its own authentication; nothing to do here.
		return 0, nil
	}

	raw := false
	exitCode, err := sandbox.RunAgentInteractive(sb, sandbox.AgentConfig{
		Agent:         agent,
		WorkspacePath: workspacePath,
		Args:          loginArgs,
		Env:           envMap,
		RawMode:       &raw,
	})
	if err != nil {
		return 1, err
	}

	if exitCode == 0 {
		fmt.Println("\n✓ Authentication complete")
	}

	return exitCode, nil
}

// handleAgentExit handles post-exit tasks (workspace download, exit tips)
func handleAgentExit(sb sandbox.RemoteSandbox, agent sandbox.Agent, exitCode int, syncEnabled bool, cwd string) error {
	// Show tips for exit code 127 (command not found)
	if exitCode == 127 {
		showAgentTips(agent)
	}

	// Show exit code if non-zero
	if exitCode != 0 && exitCode != 127 {
		fmt.Printf("\nExited with code %d\n", exitCode)
	}

	// Sync workspace back so local files reflect the agent's changes.
	if syncEnabled {
		worktreeID := sandbox.ComputeWorktreeID(cwd)
		if Verbose {
			fmt.Println("\nSyncing changes...")
			if err := sandbox.DownloadWorkspace(sb, sandbox.SyncOptions{Cwd: cwd, WorktreeID: worktreeID}, Verbose); err != nil {
				return err
			}
			fmt.Println("Done")
		} else {
			spinner := NewSpinner("Syncing changes")
			spinner.Start()
			if err := sandbox.DownloadWorkspace(sb, sandbox.SyncOptions{Cwd: cwd, WorktreeID: worktreeID}, false); err != nil {
				spinner.StopWithMessage("✗ Sync failed")
				return err
			}
			spinner.StopWithMessage("✓ Changes synced")
		}
	}

	// Propagate the agent's exit status to the caller via exitError.
	if exitCode != 0 {
		return exitError{code: exitCode}
	}
	return nil
}

// showAgentTips displays helpful tips when an agent fails to start
func showAgentTips(agent sandbox.Agent) {
	fmt.Println()
	switch agent {
	case sandbox.AgentClaude:
		fmt.Println("Tip: Claude requires authentication. Run `claude` and complete login,")
		fmt.Println("     or pass --env ANTHROPIC_API_KEY=...")
	case sandbox.AgentCodex:
		fmt.Println("Tip: Codex requires OpenAI credentials. Login will start automatically,")
		fmt.Println("     or pass --env OPENAI_API_KEY=...")
	case sandbox.AgentOpenCode:
		fmt.Println("Tip: OpenCode requires authentication. Login will start automatically,")
		fmt.Println("     or pass API keys via --env")
	case sandbox.AgentAmp:
		fmt.Println("Tip: Amp requires authentication. Login will start automatically,")
		fmt.Println("     or pass --env AMP_API_KEY=...")
	case sandbox.AgentGemini:
		fmt.Println("Tip: Gemini requires authentication. Choose a login method in the CLI,")
		fmt.Println("     or pass --env GEMINI_API_KEY=...")
	case sandbox.AgentDroid:
		fmt.Println("Tip: Droid requires authentication. Run `/login` inside Droid,")
		fmt.Println("     or pass --env FACTORY_API_KEY=...")
	}
}

package cli

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/cobra"

	"github.com/andyrewlee/amux/internal/sandbox"
)

// buildSandboxPreviewCommand creates `amux sandbox preview <port>`, which
// resolves the current project's sandbox and opens a browser preview URL
// for the given port.
func buildSandboxPreviewCommand() *cobra.Command {
	var noOpen bool

	cmd := &cobra.Command{
		Use:   "preview <port>",
		Short: "Open a browser preview for a sandbox port",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			port, err := strconv.Atoi(args[0])
			if err != nil || port <= 0 || port > 65535 {
				return fmt.Errorf("port must be a number between 1 and 65535")
			}
			cwd, err := os.Getwd()
			if err != nil {
				return err
			}
			cfg, err := sandbox.LoadConfig()
			if err != nil {
				return err
			}
			providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "")
			if err != nil {
				return err
			}
			// Fail early when the provider cannot mint preview URLs at all.
			if !providerInstance.SupportsFeature(sandbox.FeaturePreviewURLs) {
				return fmt.Errorf("preview URLs are not supported by the selected provider")
			}
			fmt.Printf("Preparing preview for port %d...\n", port)
			sb, _, err := resolveCurrentSandbox(providerInstance, cwd)
			if err != nil {
				return err
			}
			url, err := sb.GetPreviewURL(context.Background(), port)
			if err != nil {
				return err
			}
			if url == "" {
				return fmt.Errorf("unable to construct a preview URL")
			}
			fmt.Printf("Preview URL: %s\n", url)
			if !noOpen {
				if !tryOpenURL(url) {
					fmt.Println("Open the URL in your browser.")
				}
			}
			fmt.Printf("Tip: Ensure your app listens on 0.0.0.0:%d inside the sandbox.\n", port)
			return nil
		},
	}
	cmd.Flags().BoolVar(&noOpen, "no-open", false, "Do not open the URL automatically")
	return cmd
}

func buildSandboxLogsCommand() *cobra.Command {
	var follow bool
	var
lines int + var list bool + var file string + var spin bool + + cmd := &cobra.Command{ + Use: "logs", + Short: "View recorded agent logs for this workspace", + Long: "View recorded agent session output stored on the persistent volume.", + RunE: func(cmd *cobra.Command, args []string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + if list && file != "" { + return fmt.Errorf("cannot use --list with --file") + } + + worktreeID := sandbox.ComputeWorktreeID(cwd) + logDir := fmt.Sprintf("/amux/logs/%s", worktreeID) + resolveSandbox := func() (sandbox.RemoteSandbox, func(), error) { + meta, err := sandbox.LoadSandboxMeta(cwd, providerInstance.Name()) + if err != nil { + return nil, nil, err + } + if meta != nil { + sb, err := providerInstance.GetSandbox(context.Background(), meta.SandboxID) + if err == nil { + if err := sb.Start(context.Background()); err == nil { + if err := sb.WaitReady(context.Background(), 60*time.Second); err != nil && Verbose { + fmt.Fprintf(os.Stderr, "Warning: sandbox may not be fully ready: %v\n", err) + } + return sb, nil, nil + } + } + } + if !spin { + return nil, nil, fmt.Errorf("no running sandbox found; re-run with --spin to start a log reader sandbox") + } + if !providerInstance.SupportsFeature(sandbox.FeatureVolumes) { + return nil, nil, fmt.Errorf("persistent logs require volume support from the provider") + } + volumeMgr := providerInstance.Volumes() + if volumeMgr == nil { + return nil, nil, fmt.Errorf("volume manager is not available") + } + volumeName := sandbox.ResolvePersistenceVolumeName(cfg) + volume, err := volumeMgr.GetOrCreate(context.Background(), volumeName) + if err != nil { + return nil, nil, err + } + if _, err := volumeMgr.WaitReady(context.Background(), volumeName, 0); err != nil { + return nil, nil, err + } + labels := 
map[string]string{ + "amux.provider": providerInstance.Name(), + "amux.worktreeId": worktreeID, + "amux.purpose": "logs", + } + sb, err := providerInstance.CreateSandbox(context.Background(), sandbox.SandboxCreateConfig{ + Agent: sandbox.AgentShell, + Labels: labels, + Volumes: []sandbox.VolumeMount{{VolumeID: volume.ID, MountPath: "/amux"}}, + AutoStopMinutes: 15, + AutoDeleteMinutes: 20, + Ephemeral: true, + }) + if err != nil { + return nil, nil, err + } + if err := sb.WaitReady(context.Background(), 60*time.Second); err != nil { + return nil, nil, err + } + cleanup := func() { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + _ = sb.Stop(ctx) + _ = providerInstance.DeleteSandbox(ctx, sb.ID()) + } + fmt.Println("Started a short-lived log reader sandbox.") + return sb, cleanup, nil + } + + sb, cleanup, err := resolveSandbox() + if err != nil { + return err + } + if cleanup != nil { + defer cleanup() + } + + if list { + resp, err := sb.Exec(context.Background(), fmt.Sprintf("ls -t %s/*.log 2>/dev/null", sandbox.ShellQuote(logDir)), nil) + if err != nil { + return fmt.Errorf("could not list logs: %w", err) + } + if strings.TrimSpace(resp.Stdout) == "" { + fmt.Println("No logs found.") + return nil + } + fmt.Print(resp.Stdout) + return nil + } + + logPath := strings.TrimSpace(file) + if logPath == "" { + listCmd := fmt.Sprintf("ls -t %s/*.log 2>/dev/null | head -n 1", sandbox.ShellQuote(logDir)) + resp, err := sb.Exec(context.Background(), listCmd, nil) + if err != nil { + return fmt.Errorf("could not list logs: %w", err) + } + logPath = strings.TrimSpace(resp.Stdout) + if logPath == "" { + return fmt.Errorf("no recorded logs found for this workspace; run `amux sandbox run --record`") + } + } else if !strings.Contains(logPath, "/") { + logPath = fmt.Sprintf("%s/%s", logDir, logPath) + } + + if follow { + fmt.Printf("Tailing %s (Ctrl+C to stop)\n", logPath) + _, err := sb.ExecInteractive(context.Background(), + 
fmt.Sprintf("tail -n %d -f %s", lines, sandbox.ShellQuote(logPath)), + os.Stdin, os.Stdout, os.Stderr, nil) + return err + } + + resp, err := sb.Exec(context.Background(), fmt.Sprintf("tail -n %d %s", lines, sandbox.ShellQuote(logPath)), nil) + if err != nil { + return err + } + if resp.Stdout != "" { + fmt.Print(resp.Stdout) + } + return nil + }, + } + + cmd.Flags().BoolVarP(&follow, "follow", "f", false, "Follow log output") + cmd.Flags().IntVarP(&lines, "lines", "n", 200, "Number of lines to show") + cmd.Flags().BoolVar(&list, "list", false, "List recorded logs for this workspace") + cmd.Flags().StringVar(&file, "file", "", "Show a specific log file (basename or full path)") + cmd.Flags().BoolVar(&spin, "spin", true, "Start a short-lived sandbox if none is running") + return cmd +} + +func buildSandboxDesktopCommand() *cobra.Command { + var port string + var noOpen bool + + cmd := &cobra.Command{ + Use: "desktop", + Short: "Open a remote desktop (VNC) for the sandbox", + RunE: func(cmd *cobra.Command, args []string) error { + p, err := strconv.Atoi(port) + if err != nil || p <= 0 || p > 65535 { + return fmt.Errorf("port must be a number between 1 and 65535") + } + cwd, err := os.Getwd() + if err != nil { + return err + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + if !providerInstance.SupportsFeature(sandbox.FeatureDesktop) { + return fmt.Errorf("desktop is not supported by the selected provider") + } + fmt.Println("Checking desktop status...") + sb, _, err := resolveCurrentSandbox(providerInstance, cwd) + if err != nil { + return err + } + desktop, ok := sb.(sandbox.DesktopAccess) + if !ok { + return fmt.Errorf("desktop is not available for this provider") + } + status, err := desktop.DesktopStatus(context.Background()) + if err != nil { + return fmt.Errorf("desktop is not available in this sandbox image. 
Tip: use a desktop-enabled base image and rebuild your snapshot") + } + if status == nil || status.Status != "active" { + fmt.Println("Starting desktop...") + if err := desktop.StartDesktop(context.Background()); err != nil { + return fmt.Errorf("failed to start desktop services. Tip: your snapshot may be missing VNC dependencies (xvfb/novnc)") + } + time.Sleep(5 * time.Second) + status, err = desktop.DesktopStatus(context.Background()) + if err != nil { + return err + } + } + if status == nil || status.Status != "active" { + return fmt.Errorf("desktop failed to start (status: %s)", func() string { + if status == nil { + return "unknown" + } + return status.Status + }()) + } + url, err := sb.GetPreviewURL(context.Background(), p) + if err != nil { + return err + } + url = buildVncURL(url) + if url == "" { + return fmt.Errorf("unable to construct the desktop URL") + } + fmt.Printf("Desktop URL: %s\n", url) + if !noOpen { + if !tryOpenURL(url) { + fmt.Println("Open the URL in your browser.") + } + } + fmt.Println("Tip: If the page is blank, wait a few seconds and refresh.") + return nil + }, + } + cmd.Flags().StringVar(&port, "port", "6080", "VNC port (default: 6080)") + cmd.Flags().BoolVar(&noOpen, "no-open", false, "Do not open the URL automatically") + return cmd +} + +// SandboxListItem represents a single sandbox in JSON output +type SandboxListItem struct { + ID string `json:"id"` + State string `json:"state"` + Agent string `json:"agent"` + Project string `json:"project,omitempty"` +} + +func buildSandboxLsCommand() *cobra.Command { + var jsonOutput bool + cmd := &cobra.Command{ + Use: "ls", + Short: "List all amux sandboxes", + RunE: func(cmd *cobra.Command, args []string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + sandboxes, err := 
sandbox.ListAmuxSandboxes(providerInstance) + if err != nil { + return err + } + + if jsonOutput { + items := make([]SandboxListItem, 0, len(sandboxes)) + for _, sb := range sandboxes { + item := SandboxListItem{ + ID: sb.ID(), + State: string(sb.State()), + Agent: "unknown", + } + labels := sb.Labels() + if labels != nil { + if val, ok := labels["amux.agent"]; ok { + item.Agent = val + } + if val, ok := labels["amux.project"]; ok { + item.Project = val + } + } + items = append(items, item) + } + data, _ := json.MarshalIndent(items, "", " ") + fmt.Println(string(data)) + return nil + } + + if len(sandboxes) == 0 { + fmt.Println("No sandboxes found") + return nil + } + fmt.Printf("%-12s %-10s %-10s %s\n", "ID", "STATE", "AGENT", "PROJECT") + fmt.Println(strings.Repeat("─", 60)) + for _, sb := range sandboxes { + agent := "unknown" + project := "unknown" + labels := sb.Labels() + if labels != nil { + if val, ok := labels["amux.agent"]; ok { + agent = val + } + if val, ok := labels["amux.project"]; ok { + project = val + } + } + id := sb.ID() + if len(id) > 12 { + id = id[:12] + } + fmt.Printf("%-12s %-10s %-10s %s\n", id, sb.State(), agent, project) + } + return nil + }, + } + cmd.Flags().BoolVar(&jsonOutput, "json", false, "Output in JSON format") + return cmd +} diff --git a/internal/cli/sandbox_manage.go b/internal/cli/sandbox_manage.go new file mode 100644 index 00000000..348bc73d --- /dev/null +++ b/internal/cli/sandbox_manage.go @@ -0,0 +1,241 @@ +package cli + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + "golang.org/x/term" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func buildSandboxUpdateCommand() *cobra.Command { + var all bool + + cmd := &cobra.Command{ + Use: "update [agent]", + Short: "Update agent CLIs to latest versions", + Long: `Update agent CLIs to their latest versions in the current project sandbox. + +If no agent is specified, updates the default agent (claude). 
+Use --all to update all supported agents.`, + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + + // Get current sandbox + spinner := NewSpinner("Connecting to sandbox") + spinner.Start() + sb, _, err := resolveCurrentSandbox(providerInstance, cwd) + if err != nil { + spinner.StopWithMessage("✗ Failed to connect") + return err + } + spinner.StopWithMessage("✓ Connected") + + if all { + // Update all agents + fmt.Println("Updating all agents...") + if err := sandbox.UpdateAllAgents(sb, true); err != nil { + return err + } + fmt.Println("✓ All agents updated") + } else { + // Update specific agent + agentName := "claude" + if len(args) > 0 { + agentName = args[0] + } + if !sandbox.IsValidAgent(agentName) { + return fmt.Errorf("invalid agent: use claude, codex, opencode, amp, gemini, or droid") + } + agent := sandbox.Agent(agentName) + + spinner := NewSpinner(fmt.Sprintf("Updating %s", agent)) + spinner.Start() + if err := sandbox.UpdateAgent(sb, agent, false); err != nil { + spinner.StopWithMessage(fmt.Sprintf("✗ Failed to update %s", agent)) + return err + } + spinner.StopWithMessage(fmt.Sprintf("✓ %s updated", agent)) + } + + return nil + }, + } + + cmd.Flags().BoolVar(&all, "all", false, "Update all agents") + + return cmd +} + +func buildSandboxRmCommand() *cobra.Command { + var project bool + cmd := &cobra.Command{ + Use: "rm [id]", + Short: "Remove a sandbox", + RunE: func(cmd *cobra.Command, args []string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + if project { + if err := 
sandbox.RemoveSandbox(providerInstance, cwd, ""); err != nil { + return err + } + fmt.Println("Removed sandbox for current project") + return nil + } + if len(args) == 0 { + return fmt.Errorf("provide a sandbox ID or use --project to remove the current project sandbox") + } + if err := sandbox.RemoveSandbox(providerInstance, cwd, args[0]); err != nil { + return err + } + fmt.Printf("Removed sandbox %s\n", args[0]) + return nil + }, + } + cmd.Flags().BoolVar(&project, "project", false, "Remove sandbox for current project") + return cmd +} + +func buildSandboxResetCommand() *cobra.Command { + var name string + var yes bool + + cmd := &cobra.Command{ + Use: "reset", + Short: "Reset persistent sandbox data (credentials and CLI caches)", + Long: `Reset persistent sandbox data by switching to a new persistence volume. + +This does NOT delete the old volume; it simply rotates to a new one so future +sandboxes start clean without requiring manual Daytona cleanup.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + current := sandbox.ResolvePersistenceVolumeName(cfg) + + next := strings.TrimSpace(name) + if next == "" { + next = fmt.Sprintf("amux-persist-%s", time.Now().UTC().Format("20060102-150405")) + } + if next == current { + return fmt.Errorf("new persistence volume name matches current: %s", current) + } + + if !yes { + fmt.Println("This will switch to a fresh persistence volume.") + fmt.Printf("Current volume: %s\n", current) + fmt.Printf("New volume: %s\n", next) + fmt.Println("You will need to re-authenticate agents in the new sandbox.") + if !confirmChoice("Continue? [y/N]: ") { + fmt.Println("Canceled.") + return nil + } + } + + cfg.PersistenceVolumeName = next + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + + // Best-effort: create the new volume now so first run is fast. 
+ cwd, err := os.Getwd() + if err == nil { + if provider, _, err := sandbox.ResolveProvider(cfg, cwd, ""); err == nil { + if provider.SupportsFeature(sandbox.FeatureVolumes) && provider.Volumes() != nil { + if _, err := provider.Volumes().GetOrCreate(context.Background(), next); err == nil { + _, _ = provider.Volumes().WaitReady(context.Background(), next, 0) + } + } + } + } + + fmt.Println("Persistence reset complete.") + fmt.Printf("New volume: %s\n", next) + fmt.Printf("Old volume retained: %s\n", current) + fmt.Println("To delete old volumes, use the Daytona UI.") + return nil + }, + } + + cmd.Flags().StringVar(&name, "name", "", "Explicit name for the new persistence volume") + cmd.Flags().BoolVarP(&yes, "yes", "y", false, "Skip confirmation prompt") + + return cmd +} + +func confirmChoice(prompt string) bool { + if !term.IsTerminal(int(os.Stdin.Fd())) { + return false + } + fmt.Print(prompt) + var resp string + if _, err := fmt.Fscanln(os.Stdin, &resp); err != nil { + return false + } + resp = strings.TrimSpace(strings.ToLower(resp)) + return resp == "y" || resp == "yes" +} + +func resolveCurrentSandbox(provider sandbox.Provider, cwd string) (sandbox.RemoteSandbox, bool, error) { + if provider == nil { + return nil, false, fmt.Errorf("provider is required") + } + meta, err := sandbox.LoadSandboxMeta(cwd, provider.Name()) + if err != nil { + return nil, false, err + } + if meta != nil { + sb, err := provider.GetSandbox(context.Background(), meta.SandboxID) + if err == nil { + if err := sb.Start(context.Background()); err == nil { + if waitErr := sb.WaitReady(context.Background(), 60*time.Second); waitErr != nil { + if Verbose { + fmt.Fprintf(os.Stderr, "Warning: sandbox may not be fully ready: %v\n", waitErr) + } + } + return sb, true, nil + } + } + fmt.Fprintln(os.Stderr, "Existing sandbox not found. 
Run `amux sandbox run ` to create one.") + } + return nil, false, fmt.Errorf("no sandbox for this project - run `amux sandbox run ` first") +} + +// exitError lets commands return a specific exit code without printing an error. +type exitError struct { + code int +} + +func (e exitError) Error() string { + return fmt.Sprintf("exit with code %d", e.code) +} diff --git a/internal/cli/sandbox_run.go b/internal/cli/sandbox_run.go new file mode 100644 index 00000000..c3b9a363 --- /dev/null +++ b/internal/cli/sandbox_run.go @@ -0,0 +1,398 @@ +package cli + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func buildSandboxRunCommand() *cobra.Command { + var envVars []string + var volumes []string + var credentials string + var snapshot string + var noSync bool + var autoStop int32 + var update bool + var keep bool + var syncSettings bool + var noSyncSettings bool + var previewPort int + var previewNoOpen bool + var recordLogs bool + + cmd := &cobra.Command{ + Use: "run ", + Short: "Run Claude Code, Codex, OpenCode, Amp, Gemini CLI, Droid, or a shell in a sandbox", + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + agentName := args[0] + if !sandbox.IsValidAgent(agentName) { + return fmt.Errorf("invalid agent: use claude, codex, opencode, amp, gemini, droid, or shell") + } + agent := sandbox.Agent(agentName) + + cwd := os.Getenv("INIT_CWD") + if cwd == "" { + var err error + cwd, err = os.Getwd() + if err != nil { + return err + } + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + + // Parse credentials mode + credMode := strings.ToLower(credentials) + switch credMode { + case "sandbox", "none", "auto", "": + if credMode == "" { + credMode = "auto" + } + default: + return fmt.Errorf("invalid credentials mode: use sandbox, none, or auto") + } + if credMode == "auto" { + if agent == sandbox.AgentShell { + credMode = 
"none" + } else { + credMode = "sandbox" + } + } + + // Parse environment variables + envMap := map[string]string{} + for _, env := range envVars { + parts := strings.SplitN(env, "=", 2) + if len(parts) == 2 && parts[0] != "" { + envMap[parts[0]] = parts[1] + } + } + + // Parse volume specs + volumeSpecs := []sandbox.VolumeSpec{} + for _, spec := range volumes { + vol, err := parseVolumeSpec(spec) + if err != nil { + return err + } + volumeSpecs = append(volumeSpecs, vol) + } + + // Parse agent args + syncEnabled := !noSync + userArgs := getAgentArgs(os.Args, agentName) + agentArgs := append([]string{}, userArgs...) + if agent == sandbox.AgentCodex && getenvFallback("AMUX_CODEX_TUI2") != "0" { + hasTui2Flag := false + for i := 0; i < len(agentArgs); i++ { + arg := agentArgs[i] + if (arg == "--enable" || arg == "--disable") && i+1 < len(agentArgs) && agentArgs[i+1] == "tui2" { + hasTui2Flag = true + break + } + if arg == "-c" && i+1 < len(agentArgs) && strings.HasPrefix(agentArgs[i+1], "features.tui2") { + hasTui2Flag = true + break + } + } + if !hasTui2Flag { + agentArgs = append([]string{"--enable", "tui2"}, agentArgs...) + } + } + + // Resolve snapshot + snapshotID := snapshot + if snapshotID == "" { + snapshotID = sandbox.ResolveSnapshotID(cfg) + } + if Verbose && snapshotID != "" { + fmt.Printf("Using snapshot: %s\n", snapshotID) + } + + previewExplicit := cmd.Flags().Changed("preview") + if previewExplicit && (previewPort < 1 || previewPort > 65535) { + return fmt.Errorf("preview port must be between 1 and 65535") + } + keepExplicit := cmd.Flags().Changed("keep") + if previewPort != 0 && !keepExplicit { + keep = true + fmt.Println("Preview enabled; keeping sandbox after exit. 
Use --keep=false to override.") + } + + // Determine settings sync mode based on flags + settingsSyncMode := "auto" // default: use global config + if syncSettings { + settingsSyncMode = "force" + } else if noSyncSettings { + settingsSyncMode = "skip" + } + + // Run the agent with clean output + return runAgent(runAgentParams{ + agent: agent, + cwd: cwd, + envMap: envMap, + volumeSpecs: volumeSpecs, + credMode: credMode, + autoStop: autoStop, + snapshotID: snapshotID, + syncEnabled: syncEnabled, + forceUpdate: update, + agentArgs: agentArgs, + keepSandbox: keep, + settingsSyncMode: settingsSyncMode, + persistenceVolumeName: sandbox.ResolvePersistenceVolumeName(cfg), + previewPort: previewPort, + previewNoOpen: previewNoOpen, + recordLogs: recordLogs, + }) + }, + } + + cmd.Flags().StringArrayVarP(&envVars, "env", "e", []string{}, "Environment variable (repeatable)") + cmd.Flags().StringArrayVarP(&volumes, "volume", "v", []string{}, "Volume mount (repeatable)") + cmd.Flags().StringVarP(&credentials, "credentials", "c", "auto", "Credentials mode (sandbox|none|auto)") + cmd.Flags().StringVarP(&snapshot, "snapshot", "s", "", "Use a specific snapshot") + cmd.Flags().BoolVar(&noSync, "no-sync", false, "Skip workspace sync") + cmd.Flags().Int32VarP(&autoStop, "auto-stop", "a", 30, "Auto-stop interval in minutes (0 to disable)") + cmd.Flags().BoolVarP(&update, "update", "u", false, "Update agent to latest version") + cmd.Flags().BoolVarP(&Verbose, "verbose", "V", false, "Enable verbose output") + cmd.Flags().BoolVar(&keep, "keep", false, "Keep sandbox after the session exits") + cmd.Flags().BoolVar(&syncSettings, "sync-settings", false, "Sync local settings files to sandbox") + cmd.Flags().BoolVar(&noSyncSettings, "no-sync-settings", false, "Skip settings sync even if enabled globally") + cmd.Flags().IntVar(&previewPort, "preview", 0, "Open a preview URL for the given port (implies --keep unless --keep=false)") + cmd.Flags().BoolVar(&previewNoOpen, "no-open", false, "Do not 
open the preview URL automatically") + cmd.Flags().BoolVar(&recordLogs, "record", false, "Record the session output to persistent logs") + + return cmd +} + +type runAgentParams struct { + agent sandbox.Agent + cwd string + envMap map[string]string + volumeSpecs []sandbox.VolumeSpec + credMode string + autoStop int32 + snapshotID string + syncEnabled bool + forceUpdate bool + agentArgs []string + keepSandbox bool + settingsSyncMode string // "auto" (use global config), "force" (always sync), "skip" (never sync) + persistenceVolumeName string + previewPort int + previewNoOpen bool + recordLogs bool +} + +// runAgent is the core logic for running an agent in a sandbox. +// It provides a clean, minimal output experience similar to `docker run`. +func runAgent(p runAgentParams) error { + var sb sandbox.RemoteSandbox + var err error + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + provider, _, err := sandbox.ResolveProvider(cfg, p.cwd, "") + if err != nil { + return err + } + if err := sandbox.RunPreflight(); err != nil { + return err + } + if p.previewPort != 0 && !provider.SupportsFeature(sandbox.FeaturePreviewURLs) { + return fmt.Errorf("preview URLs are not supported by the selected provider") + } + + // Step 1: Create sandbox + if Verbose { + fmt.Printf("Starting %s sandbox...\n", p.agent) + sb, _, err = sandbox.CreateSandboxSession(provider, p.cwd, sandbox.SandboxConfig{ + Agent: p.agent, + EnvVars: p.envMap, + Volumes: p.volumeSpecs, + CredentialsMode: p.credMode, + AutoStopInterval: p.autoStop, + Snapshot: p.snapshotID, + Ephemeral: !p.keepSandbox, + PersistenceVolumeName: p.persistenceVolumeName, + }) + } else { + spinner := NewSpinner(fmt.Sprintf("Starting %s sandbox", p.agent)) + spinner.Start() + sb, _, err = sandbox.CreateSandboxSession(provider, p.cwd, sandbox.SandboxConfig{ + Agent: p.agent, + EnvVars: p.envMap, + Volumes: p.volumeSpecs, + CredentialsMode: p.credMode, + AutoStopInterval: p.autoStop, + Snapshot: p.snapshotID, + 
Ephemeral: !p.keepSandbox, + PersistenceVolumeName: p.persistenceVolumeName, + }) + if err != nil { + spinner.StopWithMessage("✗ Failed to start sandbox") + } else { + spinner.StopWithMessage("✓ Sandbox ready") + } + } + if err != nil { + return err + } + + if Verbose { + fmt.Println("Sandbox ID: " + sb.ID()) + } + + cleanup := func() { + if p.keepSandbox || sb == nil { + return + } + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + _ = sb.Stop(ctx) + _ = provider.DeleteSandbox(ctx, sb.ID()) + _ = sandbox.RemoveSandboxMetaByID(sb.ID()) + } + if !p.keepSandbox { + defer cleanup() + } + + worktreeID := sandbox.ComputeWorktreeID(p.cwd) + workspacePath := sandbox.GetWorktreeRepoPath(sb, sandbox.SyncOptions{Cwd: p.cwd, WorktreeID: worktreeID}) + logDir := fmt.Sprintf("/amux/logs/%s", worktreeID) + recordPath := "" + + // Step 2: Sync workspace + if p.syncEnabled { + if Verbose { + fmt.Println("Syncing workspace...") + if err := sandbox.UploadWorkspace(sb, sandbox.SyncOptions{Cwd: p.cwd, WorktreeID: worktreeID}, Verbose); err != nil { + return err + } + } else { + spinner := NewSpinner("Syncing workspace") + spinner.Start() + syncErr := sandbox.UploadWorkspace(sb, sandbox.SyncOptions{Cwd: p.cwd, WorktreeID: worktreeID}, false) + if syncErr != nil { + spinner.StopWithMessage("✗ Sync failed") + return syncErr + } + spinner.StopWithMessage("✓ Workspace synced") + } + } else { + _, _ = sb.Exec(context.Background(), fmt.Sprintf("mkdir -p %s", workspacePath), nil) + } + + // Step 3: Setup credentials + // Determine spinner message based on update flag + setupMsg := "Setting up environment" + if p.forceUpdate { + setupMsg = "Updating agent" + } + + if Verbose { + fmt.Println(setupMsg + "...") + if err := sandbox.SetupCredentials(sb, sandbox.CredentialsConfig{Mode: p.credMode, Agent: p.agent, SettingsSyncMode: p.settingsSyncMode}, Verbose); err != nil { + return err + } + if err := sandbox.EnsureAgentInstalled(sb, p.agent, Verbose, 
p.forceUpdate); err != nil { + return err + } + } else { + spinner := NewSpinner(setupMsg) + spinner.Start() + if err := sandbox.SetupCredentials(sb, sandbox.CredentialsConfig{Mode: p.credMode, Agent: p.agent, SettingsSyncMode: p.settingsSyncMode}, false); err != nil { + spinner.StopWithMessage("✗ Setup failed") + return err + } + if err := sandbox.EnsureAgentInstalled(sb, p.agent, false, p.forceUpdate); err != nil { + spinner.StopWithMessage("✗ Agent install failed") + return err + } + if p.forceUpdate { + spinner.StopWithMessage("✓ Updated") + } else { + spinner.StopWithMessage("✓ Ready") + } + } + + if p.recordLogs { + if _, err := sb.Exec(context.Background(), sandbox.SafeCommands.MkdirP(logDir), nil); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to create log directory: %v\n", err) + } else { + timestamp := time.Now().UTC().Format("20060102-150405") + recordPath = fmt.Sprintf("%s/%s-%s.log", logDir, timestamp, p.agent) + fmt.Printf("Recording session to %s\n", recordPath) + fmt.Println("Tip: Use `amux sandbox logs` to view from another terminal.") + } + } + + if p.previewPort != 0 { + url, err := sb.GetPreviewURL(context.Background(), p.previewPort) + if err != nil { + return err + } + if url == "" { + return fmt.Errorf("unable to construct a preview URL") + } + fmt.Printf("Preview URL: %s\n", url) + if !p.previewNoOpen { + if !tryOpenURL(url) { + fmt.Println("Open the URL in your browser.") + } + } + fmt.Printf("Tip: Ensure your app listens on 0.0.0.0:%d inside the sandbox.\n", p.previewPort) + if !p.keepSandbox { + fmt.Println("Tip: Use --keep to leave the sandbox running for preview.") + } + } + + // Step 4: Check credentials and handle login if needed + needsLogin := false + if p.credMode != "none" && len(p.agentArgs) == 0 { + needsLogin = checkNeedsLogin(sb, p.agent, p.envMap) + } + + var exitCode int + + if needsLogin { + // Handle first-time login + exitCode, err = handleAgentLogin(sb, p.agent, workspacePath, p.envMap) + if err != nil { + 
return err + } + if exitCode != 0 { + return handleAgentExit(sb, p.agent, exitCode, p.syncEnabled, p.cwd) + } + } + + // Step 5: Run the agent + fmt.Println() // Clean line before agent starts + exitCode, err = sandbox.RunAgentInteractive(sb, sandbox.AgentConfig{ + Agent: p.agent, + WorkspacePath: workspacePath, + Args: p.agentArgs, + Env: p.envMap, + RecordPath: recordPath, + }) + if err != nil { + return err + } + + // Handle agent exit + return handleAgentExit(sb, p.agent, exitCode, p.syncEnabled, p.cwd) +} diff --git a/internal/cli/settings.go b/internal/cli/settings.go new file mode 100644 index 00000000..ffc87af9 --- /dev/null +++ b/internal/cli/settings.go @@ -0,0 +1,412 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func buildSettingsCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "settings", + Short: "Manage local settings sync to sandboxes", + } + + cmd.AddCommand(buildSettingsSyncCommand()) + cmd.AddCommand(buildSettingsStatusCommand()) + cmd.AddCommand(buildSettingsShowCommand()) + + return cmd +} + +func buildSettingsSyncCommand() *cobra.Command { + var enable bool + var disable bool + var claude bool + var codex bool + var git bool + var all bool + + cmd := &cobra.Command{ + Use: "sync", + Short: "Configure which local settings to sync to sandboxes", + Long: `Configure which local settings files to sync to cloud sandboxes. + +Settings sync is opt-in and requires explicit consent. When enabled, amux will +copy your local configuration files (like ~/.claude/settings.json) to the +sandbox so your preferences are available in cloud sessions. + +IMPORTANT: Settings sync only copies non-sensitive configuration. API keys, +tokens, and credentials are automatically filtered out. 
+ +Examples: + amux settings sync --enable --claude # Enable Claude settings sync + amux settings sync --enable --all # Enable all detected settings + amux settings sync --disable # Disable all settings sync + amux settings sync # Show current sync status`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + + syncCfg := cfg.SettingsSync + + // Handle disable flag + if disable { + syncCfg.Enabled = false + syncCfg.Files = nil + syncCfg.Claude = false + syncCfg.Codex = false + syncCfg.Git = false + syncCfg.Shell = false + cfg.SettingsSync = syncCfg + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + fmt.Println("Settings sync disabled") + return nil + } + + // Handle enable flag + if enable { + // Detect existing settings files + detected := sandbox.DetectExistingSettings() + + fmt.Println("amux settings sync") + fmt.Println(strings.Repeat("─", 50)) + fmt.Println() + + // Show detected files + if len(detected) > 0 { + fmt.Println("Detected settings files:") + for _, s := range detected { + fmt.Printf(" ~/%s (%s)\n", s.HomePath, s.Description) + } + fmt.Println() + } else { + fmt.Println("No settings files detected locally.") + fmt.Println() + return nil + } + + // Determine which files to sync based on flags + var filesToSync []string + + if all { + // Sync all detected files + for _, s := range detected { + filesToSync = append(filesToSync, "~/"+s.HomePath) + } + } else { + // Sync only specified agents + for _, s := range detected { + shouldSync := false + switch s.Agent { + case "claude": + shouldSync = claude + case "codex": + shouldSync = codex + case "git": + shouldSync = git + } + if shouldSync { + filesToSync = append(filesToSync, "~/"+s.HomePath) + } + } + + // If no specific flags given, show help + if len(filesToSync) == 0 && !claude && !codex && !git { + fmt.Println("Specify which settings to sync:") + fmt.Println(" --all Sync all detected files") + fmt.Println(" 
--claude Sync Claude settings") + fmt.Println(" --codex Sync Codex settings") + fmt.Println(" --git Sync git config") + fmt.Println() + fmt.Println("Example: amux settings sync --enable --all") + return nil + } + } + + if len(filesToSync) == 0 { + fmt.Println("No matching settings files to sync.") + return nil + } + + // Show what will be synced + fmt.Println("Will sync these files to sandbox:") + for _, f := range filesToSync { + note := "" + if strings.Contains(f, ".gitconfig") { + note = " (safe keys only)" + } + fmt.Printf(" %s%s\n", f, note) + } + fmt.Println() + fmt.Println("Note: API keys and tokens are automatically filtered out.") + fmt.Println() + + // Save config with explicit file list + syncCfg.Enabled = true + syncCfg.Files = filesToSync + // Also set legacy flags for backwards compatibility + for _, f := range filesToSync { + if strings.Contains(f, ".claude") { + syncCfg.Claude = true + } + if strings.Contains(f, ".codex") { + syncCfg.Codex = true + } + if strings.Contains(f, ".gitconfig") { + syncCfg.Git = true + } + } + + cfg.SettingsSync = syncCfg + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + + fmt.Println("✓ Settings sync enabled") + fmt.Println() + fmt.Println("Files will sync on next `amux claude/codex/...` run.") + fmt.Println("To disable: amux settings sync --disable") + return nil + } + + // Show current status if no flags + return showSettingsSyncStatus(syncCfg) + }, + } + + cmd.Flags().BoolVar(&enable, "enable", false, "Enable settings sync") + cmd.Flags().BoolVar(&disable, "disable", false, "Disable settings sync") + cmd.Flags().BoolVar(&claude, "claude", false, "Sync Claude settings") + cmd.Flags().BoolVar(&codex, "codex", false, "Sync Codex settings") + cmd.Flags().BoolVar(&git, "git", false, "Sync git config (safe keys only)") + cmd.Flags().BoolVar(&all, "all", false, "Sync all detected settings") + + return cmd +} + +func buildSettingsStatusCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "status", + 
Short: "Show local settings files and sync status", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + + fmt.Println("amux settings status") + fmt.Println(strings.Repeat("─", 50)) + fmt.Println() + + // Show sync status + syncCfg := cfg.SettingsSync + if syncCfg.Enabled { + fmt.Println("Settings sync: enabled") + } else { + fmt.Println("Settings sync: disabled") + } + fmt.Println() + + // Show configured files if using explicit file list + if syncCfg.Enabled && len(syncCfg.Files) > 0 { + fmt.Println("Configured to sync:") + for _, f := range syncCfg.Files { + fmt.Printf(" %s\n", f) + } + fmt.Println() + } + + // Show all detected local settings files + fmt.Println("Local files detected:") + detected := sandbox.DetectLocalSettings() + + for _, s := range detected { + if s.Exists { + syncing := isSyncing(s.HomePath, syncCfg) + status := formatFileSize(s.Size) + if syncing { + status += " (syncing)" + } + fmt.Printf(" ~/%s (%s)\n", s.HomePath, status) + } + } + + // Show files that don't exist + var notFound []string + for _, s := range detected { + if !s.Exists { + notFound = append(notFound, s.HomePath) + } + } + if len(notFound) > 0 { + fmt.Println() + fmt.Println("Not found:") + for _, f := range notFound { + fmt.Printf(" ~/%s\n", f) + } + } + + fmt.Println() + fmt.Println(strings.Repeat("─", 50)) + + if !syncCfg.Enabled { + fmt.Println("Run `amux settings sync --enable --all` to sync settings") + } + + return nil + }, + } + + return cmd +} + +// isSyncing checks if a file path is configured for syncing +func isSyncing(homePath string, cfg sandbox.SettingsSyncConfig) bool { + if !cfg.Enabled { + return false + } + + // Check explicit Files list first + if len(cfg.Files) > 0 { + for _, f := range cfg.Files { + // Normalize path for comparison + normalized := strings.TrimPrefix(f, "~/") + if normalized == homePath { + return true + } + } + return false + } + + // Fall back to legacy flags + 
// formatFileSize renders a byte count in a human-readable unit.
// Sizes below 1 KiB are shown in whole bytes; larger sizes are shown
// with one decimal in KB, MB, or GB. (The previous version stopped at
// KB, which made multi-megabyte files read as e.g. "5120.0 KB".)
func formatFileSize(size int64) string {
	switch {
	case size < 1<<10:
		return fmt.Sprintf("%d B", size)
	case size < 1<<20:
		return fmt.Sprintf("%.1f KB", float64(size)/(1<<10))
	case size < 1<<30:
		return fmt.Sprintf("%.1f MB", float64(size)/(1<<20))
	default:
		return fmt.Sprintf("%.1f GB", float64(size)/(1<<30))
	}
}
disabled.") + fmt.Println() + fmt.Println("Enable with: amux settings sync --enable --all") + return nil + } + + // Get the files that would sync + detected := sandbox.DetectLocalSettings() + var willSync []sandbox.DetectedSetting + + for _, s := range detected { + if s.Exists && isSyncing(s.HomePath, syncCfg) { + willSync = append(willSync, s) + } + } + + if len(willSync) == 0 { + fmt.Println("No settings files would sync.") + fmt.Println() + fmt.Println("Either no files are configured or they don't exist locally.") + return nil + } + + fmt.Println("Would sync to sandbox:") + fmt.Println() + for _, s := range willSync { + note := "" + if strings.Contains(s.HomePath, ".gitconfig") { + note = " (filtered: user.*, core.*, alias.*)" + } + fmt.Printf(" ~/%s → ~/%s%s\n", s.HomePath, s.HomePath, note) + } + fmt.Println() + fmt.Println(strings.Repeat("─", 50)) + + return nil + }, + } + + return cmd +} diff --git a/internal/cli/setup.go b/internal/cli/setup.go new file mode 100644 index 00000000..526468e4 --- /dev/null +++ b/internal/cli/setup.go @@ -0,0 +1,116 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func buildSetupCommand() *cobra.Command { + var agents string + var baseImage string + var snapshotName string + var createSnapshot bool + var withGh bool + + cmd := &cobra.Command{ + Use: "setup", + Short: "Quick setup: validate credentials (optionally build a snapshot)", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Println("amux setup") + fmt.Println(strings.Repeat("─", 50)) + fmt.Println() + + if err := ensureDaytonaAPIKey(); err != nil { + return err + } + fmt.Println("✓ Daytona API key configured") + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + + client, err := sandbox.GetDaytonaClient() + if err != nil { + return err + } + + if createSnapshot { + parsedAgents, err := sandbox.ParseAgentList(agents) + if err != nil { + return err + } + if 
baseImage == "" { + baseImage = sandbox.DefaultSnapshotBaseImage + } + name := snapshotName + if name == "" { + name = sandbox.BuildSnapshotName("amux") + } + fmt.Println("\nBuilding snapshot (this can take a few minutes)...") + fmt.Printf("Creating snapshot %q with agents: %s\n", name, joinAgents(parsedAgents)) + snap, err := sandbox.CreateSnapshot(client, name, parsedAgents, baseImage, func(chunk string) { + fmt.Println(chunk) + }) + if err != nil { + return err + } + cfg.DefaultSnapshotName = snap.Name + cfg.SnapshotAgents = agentsToStrings(parsedAgents) + cfg.SnapshotBaseImage = baseImage + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + fmt.Printf("✓ Saved default snapshot: %s\n", snap.Name) + } + + if withGh { + if err := runGhAuthLogin(); err != nil { + return err + } + } + + fmt.Println() + fmt.Println(strings.Repeat("─", 50)) + fmt.Println("Setup complete!") + fmt.Println() + fmt.Println("Next steps:") + fmt.Println(" amux claude # Run Claude Code") + fmt.Println(" amux doctor # Verify setup") + if !createSnapshot { + fmt.Println() + fmt.Println("Optional:") + fmt.Println(" amux setup --create-snapshot --agents claude,codex") + } + return nil + }, + } + + cmd.Flags().StringVar(&agents, "agents", "", "Agents to preinstall (claude,codex,opencode,amp,gemini,droid)") + cmd.Flags().StringVar(&baseImage, "base-image", sandbox.DefaultSnapshotBaseImage, "Base image for the snapshot") + cmd.Flags().StringVar(&snapshotName, "snapshot-name", "", "Snapshot name") + cmd.Flags().BoolVar(&createSnapshot, "create-snapshot", false, "Build a snapshot with preinstalled agents") + cmd.Flags().BoolVar(&withGh, "with-gh", false, "Run GitHub CLI login helper") + + return cmd +} + +func agentsToStrings(agents []sandbox.Agent) []string { + out := make([]string, 0, len(agents)) + for _, agent := range agents { + out = append(out, agent.String()) + } + return out +} + +func joinAgents(agents []sandbox.Agent) string { + parts := make([]string, 0, len(agents)) + for 
_, agent := range agents { + parts = append(parts, agent.String()) + } + return strings.Join(parts, ", ") +} diff --git a/internal/cli/snapshot.go b/internal/cli/snapshot.go new file mode 100644 index 00000000..57adad57 --- /dev/null +++ b/internal/cli/snapshot.go @@ -0,0 +1,241 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/andyrewlee/amux/internal/sandbox" +) + +func buildSnapshotCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "snapshot", + Short: "Manage snapshots", + } + cmd.AddCommand(buildSnapshotCreateCommand()) + cmd.AddCommand(buildSnapshotUpdateCommand()) + cmd.AddCommand(buildSnapshotListCommand()) + return cmd +} + +func buildSnapshotCreateCommand() *cobra.Command { + var agents string + var baseImage string + var name string + var setDefault bool + + cmd := &cobra.Command{ + Use: "create", + Short: "Create a snapshot with preinstalled agent CLIs", + RunE: func(cmd *cobra.Command, args []string) error { + if err := ensureDaytonaAPIKey(); err != nil { + return err + } + client, err := sandbox.GetDaytonaClient() + if err != nil { + return err + } + agentsList, err := sandbox.ParseAgentList(agents) + if err != nil { + return err + } + if baseImage == "" { + baseImage = sandbox.DefaultSnapshotBaseImage + } + if name == "" { + name = sandbox.BuildSnapshotName("amux") + } + fmt.Printf("Creating snapshot \"%s\"...\n", name) + snap, err := sandbox.CreateSnapshot(client, name, agentsList, baseImage, func(chunk string) { + fmt.Println(chunk) + }) + if err != nil { + return err + } + fmt.Printf("Snapshot created: %s\n", snap.Name) + if setDefault { + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + cfg.DefaultSnapshotName = snap.Name + cfg.SnapshotAgents = agentsToStrings(agentsList) + cfg.SnapshotBaseImage = baseImage + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + fmt.Printf("Saved default snapshot: %s\n", snap.Name) + fmt.Println("New sandboxes will use this 
snapshot.") + } + return nil + }, + } + + cmd.Flags().StringVar(&agents, "agents", "", "Comma-separated agents to preinstall (claude,codex,opencode,amp,gemini,droid)") + cmd.Flags().StringVar(&baseImage, "base-image", sandbox.DefaultSnapshotBaseImage, "Base image for the snapshot") + cmd.Flags().StringVar(&name, "name", "", "Snapshot name (optional)") + cmd.Flags().BoolVar(&setDefault, "set-default", false, "Save snapshot as default") + + return cmd +} + +func buildSnapshotUpdateCommand() *cobra.Command { + var addAgents string + var removeAgents string + var baseImage string + + cmd := &cobra.Command{ + Use: "update", + Short: "Rebuild the default snapshot with additional agents", + RunE: func(cmd *cobra.Command, args []string) error { + if err := ensureDaytonaAPIKey(); err != nil { + return err + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + current := cfg.SnapshotAgents + if len(current) == 0 { + return fmt.Errorf("no snapshot agents configured. Run `amux snapshot create` first") + } + currentAgents, err := sandbox.ParseAgentList(strings.Join(current, ",")) + if err != nil { + return err + } + + addList := []sandbox.Agent{} + if addAgents != "" { + addList, err = sandbox.ParseAgentList(addAgents) + if err != nil { + return err + } + } + removeList := []sandbox.Agent{} + if removeAgents != "" { + removeList, err = sandbox.ParseAgentList(removeAgents) + if err != nil { + return err + } + } + + next := filterAgents(currentAgents, removeList) + next = appendMissingAgents(next, addList) + if len(next) == 0 { + return fmt.Errorf("snapshot must include at least one agent") + } + if baseImage == "" { + baseImage = cfg.SnapshotBaseImage + } + if baseImage == "" { + baseImage = sandbox.DefaultSnapshotBaseImage + } + name := sandbox.BuildSnapshotName("amux") + + client, err := sandbox.GetDaytonaClient() + if err != nil { + return err + } + fmt.Printf("Creating snapshot \"%s\" with agents: %s\n", name, joinAgents(next)) + snap, err := 
sandbox.CreateSnapshot(client, name, next, baseImage, func(chunk string) { + fmt.Println(chunk) + }) + if err != nil { + return err + } + + cfg.DefaultSnapshotName = snap.Name + cfg.SnapshotAgents = agentsToStrings(next) + cfg.SnapshotBaseImage = baseImage + if err := sandbox.SaveConfig(cfg); err != nil { + return err + } + fmt.Printf("Updated default snapshot: %s\n", snap.Name) + fmt.Println("New sandboxes will use this snapshot.") + return nil + }, + } + + cmd.Flags().StringVar(&addAgents, "add", "", "Comma-separated agents to add (claude,codex,opencode,amp,gemini,droid)") + cmd.Flags().StringVar(&removeAgents, "remove", "", "Comma-separated agents to remove (claude,codex,opencode,amp,gemini,droid)") + cmd.Flags().StringVar(&baseImage, "base-image", "", "Override base image for the new snapshot") + + return cmd +} + +func filterAgents(current []sandbox.Agent, remove []sandbox.Agent) []sandbox.Agent { + removeSet := map[sandbox.Agent]bool{} + for _, agent := range remove { + removeSet[agent] = true + } + out := []sandbox.Agent{} + for _, agent := range current { + if !removeSet[agent] { + out = append(out, agent) + } + } + return out +} + +func appendMissingAgents(current []sandbox.Agent, add []sandbox.Agent) []sandbox.Agent { + set := map[sandbox.Agent]bool{} + for _, agent := range current { + set[agent] = true + } + for _, agent := range add { + if !set[agent] { + current = append(current, agent) + set[agent] = true + } + } + return current +} + +func buildSnapshotListCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "List available snapshots", + Aliases: []string{"ls"}, + RunE: func(cmd *cobra.Command, args []string) error { + if err := ensureDaytonaAPIKey(); err != nil { + return err + } + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + defaultSnapshot := sandbox.ResolveSnapshotID(cfg) + + client, err := sandbox.GetDaytonaClient() + if err != nil { + return err + } + snapshots, err := client.Snapshot.List() + if 
// spinnerFrames holds the braille animation frames, drawn in order.
var spinnerFrames = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}

// Spinner renders a lightweight terminal spinner for long-running
// operations. Construct one with NewSpinner; the zero value is unusable.
type Spinner struct {
	mu       sync.Mutex
	message  string
	active   bool
	done     chan struct{}
	stopOnce sync.Once
}

// NewSpinner returns a spinner that will display message while running.
func NewSpinner(message string) *Spinner {
	return &Spinner{message: message, done: make(chan struct{})}
}

// Start launches the animation goroutine. Starting an already-running
// spinner is a no-op.
func (s *Spinner) Start() {
	s.mu.Lock()
	alreadyRunning := s.active
	s.active = true
	s.mu.Unlock()
	if alreadyRunning {
		return
	}

	go s.animate()
}

// animate redraws the current frame every 80ms until done is closed.
// It checks the active flag under the lock so nothing is printed after
// a stop has cleared the line.
func (s *Spinner) animate() {
	ticker := time.NewTicker(80 * time.Millisecond)
	defer ticker.Stop()

	for frame := 0; ; frame++ {
		select {
		case <-s.done:
			return
		case <-ticker.C:
			s.mu.Lock()
			if s.active {
				fmt.Printf("\r%s %s", spinnerFrames[frame%len(spinnerFrames)], s.message)
			}
			s.mu.Unlock()
		}
	}
}

// Stop stops the spinner and clears the line.
func (s *Spinner) Stop() {
	s.finish("")
}

// StopWithMessage stops the spinner and prints a final message.
func (s *Spinner) StopWithMessage(message string) {
	s.finish(message)
}

// finish performs the shutdown; sync.Once guards the channel close so
// repeated (even concurrent) stops never panic on a double close.
func (s *Spinner) finish(message string) {
	s.mu.Lock()
	wasRunning := s.active
	s.active = false
	s.mu.Unlock()

	s.stopOnce.Do(func() { close(s.done) })

	switch {
	case wasRunning && message != "":
		// Clear the animation line, then print the final message.
		fmt.Printf("\r\033[K%s\n", message)
	case wasRunning:
		fmt.Print("\r\033[K")
	case message != "":
		// Never animated, but the caller still wants the message shown.
		fmt.Println(message)
	}
}

// UpdateMessage swaps the text shown next to the spinner while it runs.
func (s *Spinner) UpdateMessage(message string) {
	s.mu.Lock()
	s.message = message
	s.mu.Unlock()
}

// WithSpinner runs fn behind a spinner, printing "✓ <message>" on
// success or "✗ <message> failed" on error, and returns fn's error.
func WithSpinner(message string, fn func() error) error {
	sp := NewSpinner(message)
	sp.Start()
	if err := fn(); err != nil {
		sp.StopWithMessage(fmt.Sprintf("✗ %s failed", message))
		return err
	}
	sp.StopWithMessage(fmt.Sprintf("✓ %s", message))
	return nil
}
spinner.StopWithMessage("done again") + spinner.Stop() +} + +func TestSpinner_StopAndStopWithMessage(t *testing.T) { + // Test mixing Stop and StopWithMessage + spinner := NewSpinner("test") + spinner.Start() + time.Sleep(10 * time.Millisecond) + + spinner.Stop() + spinner.StopWithMessage("message") +} + +func TestSpinner_StopWithoutStart(t *testing.T) { + // Test stopping a spinner that was never started + spinner := NewSpinner("test") + spinner.Stop() // Should not panic + spinner.StopWithMessage("done") +} + +func TestSpinner_ConcurrentStop(t *testing.T) { + // Test concurrent stops don't cause race or panic + spinner := NewSpinner("test") + spinner.Start() + time.Sleep(10 * time.Millisecond) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + spinner.Stop() + }() + } + wg.Wait() +} + +func TestSpinner_UpdateMessage(t *testing.T) { + spinner := NewSpinner("initial") + spinner.Start() + time.Sleep(10 * time.Millisecond) + + spinner.UpdateMessage("updated") + time.Sleep(10 * time.Millisecond) + + spinner.Stop() +} + +func TestWithSpinner_Success(t *testing.T) { + err := WithSpinner("test operation", func() error { + time.Sleep(10 * time.Millisecond) + return nil + }) + if err != nil { + t.Errorf("expected nil error, got %v", err) + } +} + +func TestWithSpinner_Error(t *testing.T) { + expectedErr := &testError{msg: "test error"} + err := WithSpinner("test operation", func() error { + return expectedErr + }) + if err != expectedErr { + t.Errorf("expected %v, got %v", expectedErr, err) + } +} + +type testError struct { + msg string +} + +func (e *testError) Error() string { + return e.msg +} diff --git a/internal/cli/status.go b/internal/cli/status.go new file mode 100644 index 00000000..ebffef85 --- /dev/null +++ b/internal/cli/status.go @@ -0,0 +1,296 @@ +package cli + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + 
"github.com/andyrewlee/amux/internal/sandbox" +) + +// StatusOutput represents the JSON output for status command +type StatusOutput struct { + SandboxID string `json:"sandbox_id"` + State string `json:"state"` + Agent string `json:"agent"` + CPUCores float32 `json:"cpu_cores,omitempty"` + MemoryGB float32 `json:"memory_gb,omitempty"` + Provider string `json:"provider"` + PersistenceVolume string `json:"persistence_volume,omitempty"` + Exists bool `json:"exists"` +} + +func buildStatusCommand() *cobra.Command { + var jsonOutput bool + cmd := &cobra.Command{ + Use: "status", + Short: "Show current project sandbox status", + RunE: func(cmd *cobra.Command, args []string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, providerName, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + + meta, err := sandbox.LoadSandboxMeta(cwd, providerInstance.Name()) + if err != nil { + return err + } + if meta == nil { + if jsonOutput { + output := StatusOutput{ + Exists: false, + Provider: providerName, + PersistenceVolume: sandbox.ResolvePersistenceVolumeName(cfg), + } + data, _ := json.MarshalIndent(output, "", " ") + fmt.Println(string(data)) + return nil + } + fmt.Println("No sandbox for this project") + fmt.Println("Run `amux sandbox run ` to create one") + return nil + } + + sb, err := providerInstance.GetSandbox(context.Background(), meta.SandboxID) + if err != nil { + if jsonOutput { + output := StatusOutput{ + SandboxID: meta.SandboxID, + Agent: string(meta.Agent), + Exists: false, + Provider: providerName, + PersistenceVolume: sandbox.ResolvePersistenceVolumeName(cfg), + } + data, _ := json.MarshalIndent(output, "", " ") + fmt.Println(string(data)) + return nil + } + fmt.Println("Sandbox not found (may have been deleted)") + fmt.Printf(" Sandbox ID: %s\n", meta.SandboxID) + fmt.Printf(" Last agent: %s\n", meta.Agent) + 
fmt.Println("\nRun `amux sandbox run ` to create a new one") + return nil + } + + if jsonOutput { + output := StatusOutput{ + SandboxID: sb.ID(), + State: string(sb.State()), + Agent: string(meta.Agent), + Provider: providerName, + Exists: true, + PersistenceVolume: sandbox.ResolvePersistenceVolumeName(cfg), + } + if resources, ok := sb.(sandbox.SandboxResources); ok { + output.CPUCores = resources.CPUCores() + output.MemoryGB = resources.MemoryGB() + } + data, _ := json.MarshalIndent(output, "", " ") + fmt.Println(string(data)) + return nil + } + + fmt.Println("amux sandbox status") + fmt.Println(strings.Repeat("─", 50)) + fmt.Println() + fmt.Printf(" Sandbox ID: %s\n", sb.ID()) + fmt.Printf(" State: %s\n", stateWithColor(string(sb.State()))) + fmt.Printf(" Agent: %s\n", meta.Agent) + fmt.Printf(" Persistence: %s\n", sandbox.ResolvePersistenceVolumeName(cfg)) + if resources, ok := sb.(sandbox.SandboxResources); ok { + fmt.Printf(" Resources: %.1f CPU, %.1f GiB RAM\n", resources.CPUCores(), resources.MemoryGB()) + } + + if sb.State() == sandbox.StateStarted { + fmt.Println() + fmt.Println(" Ready for:") + fmt.Printf(" amux ssh # raw shell access\n") + fmt.Printf(" amux exec # run a command\n") + fmt.Printf(" amux sandbox run %s # interactive session\n", meta.Agent) + } else if sb.State() == sandbox.StateStopped { + fmt.Println() + fmt.Println(" Sandbox is stopped. 
// stateWithColor annotates the well-known sandbox states with a short
// human-readable hint; unrecognized states pass through unchanged.
func stateWithColor(state string) string {
	suffixes := map[string]string{
		"started": " (running)",
		"pending": " (starting...)",
	}
	return state + suffixes[state]
}
sandbox.AgentShell, + WorkspacePath: workspacePath, + Args: []string{}, + Env: map[string]string{}, + }) + if err != nil { + return err + } + if exitCode != 0 { + return exitError{code: exitCode} + } + return nil + }, + } + return cmd +} + +func buildExecCommand() *cobra.Command { + var workdir string + + cmd := &cobra.Command{ + Use: "exec [args...]", + Short: "Execute a command in the current project sandbox", + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + cfg, err := sandbox.LoadConfig() + if err != nil { + return err + } + providerInstance, _, err := sandbox.ResolveProvider(cfg, cwd, "") + if err != nil { + return err + } + meta, err := sandbox.LoadSandboxMeta(cwd, providerInstance.Name()) + if err != nil { + return err + } + if meta == nil { + return fmt.Errorf("no sandbox for this project - run `amux sandbox run ` first") + } + + sb, err := providerInstance.GetSandbox(context.Background(), meta.SandboxID) + if err != nil { + return fmt.Errorf("sandbox not found - run `amux sandbox run ` to create one") + } + + if sb.State() != sandbox.StateStarted { + fmt.Fprintln(os.Stderr, "Starting sandbox...") + if err := sb.Start(context.Background()); err != nil { + return fmt.Errorf("failed to start sandbox: %w", err) + } + if err := sb.WaitReady(context.Background(), 60*time.Second); err != nil { + fmt.Fprintf(os.Stderr, "Warning: sandbox may not be fully ready: %v\n", err) + } + } + + execPath := workdir + if execPath == "" { + worktreeID := sandbox.ComputeWorktreeID(cwd) + execPath = sandbox.GetWorktreeRepoPath(sb, sandbox.SyncOptions{Cwd: cwd, WorktreeID: worktreeID}) + } + + // Build command string + cmdStr := strings.Join(args, " ") + fullCmd := fmt.Sprintf("cd %s && %s", quoteShell(execPath), cmdStr) + + resp, err := sb.Exec(context.Background(), fullCmd, nil) + if err != nil { + return err + } + + // Print output + if resp.Stdout != "" { + 
// quoteShell wraps s in single quotes for safe POSIX shell
// interpolation, escaping embedded single quotes with the '\'' idiom.
func quoteShell(s string) string {
	var b strings.Builder
	b.WriteByte('\'')
	for _, r := range s {
		if r == '\'' {
			b.WriteString(`'\''`)
		} else {
			b.WriteRune(r)
		}
	}
	b.WriteByte('\'')
	return b.String()
}
// RunFirstRunWizard guides new users through initial setup.
// It walks interactively through: Daytona API key detection/entry, agent
// selection, settings sync opt-in, and optional prebuilt snapshot creation.
// It only collects answers; the caller applies them via ApplyWizardConfig.
// Returns an error if the user declines to configure an API key.
func RunFirstRunWizard() (*WizardConfig, error) {
	config := &WizardConfig{}

	fmt.Println()
	fmt.Println("\033[1m" + "Welcome to amux!" + "\033[0m")
	fmt.Println("Let's set up your sandbox environment.")
	fmt.Println()

	// Step 1: Check if Daytona API key is set
	if os.Getenv("DAYTONA_API_KEY") == "" {
		fmt.Println("\033[33m!\033[0m Daytona API key not found.")
		fmt.Println()
		fmt.Println("To get an API key:")
		fmt.Println(" 1. Go to https://app.daytona.io/settings")
		fmt.Println(" 2. Create a new API key")
		fmt.Println(" 3. Run: export DAYTONA_API_KEY=your-key")
		fmt.Println()

		if !confirm("Do you have a Daytona API key ready to configure?") {
			fmt.Println()
			fmt.Println("You can run `amux setup` later to configure your API key.")
			return nil, fmt.Errorf("setup cancelled")
		}

		apiKey := prompt("Enter your Daytona API key:")
		if apiKey == "" {
			return nil, fmt.Errorf("API key is required")
		}

		// Save API key to shell profile. A save failure is not fatal: the
		// user is shown the line to add manually instead.
		if confirm("Save API key to your shell profile?") {
			if err := appendToShellProfile(fmt.Sprintf("export DAYTONA_API_KEY=%s", apiKey)); err != nil {
				fmt.Printf("\033[33m!\033[0m Could not save to profile: %v\n", err)
				fmt.Println(" Add this to your shell profile manually:")
				fmt.Printf(" export DAYTONA_API_KEY=%s\n", apiKey)
			} else {
				fmt.Println("\033[32m✓\033[0m API key saved to shell profile")
			}
		}

		// Set for current session so the rest of this process can use it.
		os.Setenv("DAYTONA_API_KEY", apiKey)
	} else {
		fmt.Println("\033[32m✓\033[0m Daytona API key configured")
	}

	fmt.Println()

	// Step 2: Select agents (the [1/3] label counts only the questions
	// after API-key setup).
	fmt.Println("\033[1m[1/3] Which agents do you use?\033[0m")
	fmt.Println("Select the AI coding agents you want to use in sandboxes.")
	fmt.Println()

	agents := []struct {
		Name        string
		Description string
	}{
		{"claude", "Claude Code (Anthropic)"},
		{"codex", "Codex CLI (OpenAI)"},
		{"gemini", "Gemini CLI (Google)"},
		{"opencode", "OpenCode (open source)"},
		{"amp", "Amp (Sourcegraph)"},
		{"droid", "Droid (Factory)"},
	}

	selectedAgents := []string{}
	for _, agent := range agents {
		if confirm(fmt.Sprintf(" %s - %s?", agent.Name, agent.Description)) {
			selectedAgents = append(selectedAgents, agent.Name)
		}
	}

	if len(selectedAgents) == 0 {
		selectedAgents = []string{"claude"} // Default
		fmt.Println(" Defaulting to Claude Code")
	}

	config.Agents = selectedAgents
	fmt.Println()

	// Step 3: Settings sync — per-area questions are only asked for
	// settings that actually exist on the local machine.
	fmt.Println("\033[1m[2/3] Sync local settings to sandbox?\033[0m")
	fmt.Println("This copies your preferences (NOT credentials) to the sandbox.")
	fmt.Println()

	config.SyncSettings = confirm("Enable settings sync?")
	if config.SyncSettings {
		// Check which settings exist locally
		status := sandbox.GetLocalSettingsStatus()

		if status[sandbox.AgentClaude] {
			config.SyncClaude = confirm(" Sync Claude settings (~/.claude/settings.json)?")
		}

		if status["git"] {
			config.SyncGit = confirm(" Sync Git config (~/.gitconfig - name, email, aliases only)?")
		}
	}

	fmt.Println()

	// Step 4: Prebuilt snapshot (optional, advanced)
	fmt.Println("\033[1m[3/3] Create a prebuilt snapshot?\033[0m")
	fmt.Println("Pre-installing agents in a snapshot makes startup faster.")
	fmt.Println("This takes a few minutes but only needs to be done once.")
	fmt.Println()

	config.CreatePrebuilt = confirm("Create prebuilt snapshot with selected agents?")

	fmt.Println()
	return config, nil
}
See all commands") + fmt.Println() + + return nil +} + +// ShouldRunWizard checks if the first-run wizard should be shown. +func ShouldRunWizard() bool { + cfg, err := sandbox.LoadConfig() + if err != nil { + return true // Config doesn't exist + } + return !cfg.FirstRunComplete +} + +// confirm asks a yes/no question and returns the answer. +func confirm(question string) bool { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("%s [y/N]: ", question) + + answer, _ := reader.ReadString('\n') + answer = strings.TrimSpace(strings.ToLower(answer)) + + return answer == "y" || answer == "yes" +} + +// prompt asks for text input and returns the answer. +func prompt(question string) string { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("%s ", question) + + answer, _ := reader.ReadString('\n') + return strings.TrimSpace(answer) +} + +// promptWithDefault asks for text input with a default value. +// Kept for future use by setup wizard enhancements. +func promptWithDefault(question, defaultVal string) string { //nolint:unused + reader := bufio.NewReader(os.Stdin) + fmt.Printf("%s [%s]: ", question, defaultVal) + + answer, _ := reader.ReadString('\n') + answer = strings.TrimSpace(answer) + + if answer == "" { + return defaultVal + } + return answer +} + +// selectOne presents a list of options and returns the selected one. +// Kept for future use by setup wizard enhancements. +func selectOne(question string, options []string) string { //nolint:unused + fmt.Println(question) + for i, opt := range options { + fmt.Printf(" %d. %s\n", i+1, opt) + } + + reader := bufio.NewReader(os.Stdin) + for { + fmt.Print("Enter number: ") + answer, _ := reader.ReadString('\n') + answer = strings.TrimSpace(answer) + + var idx int + if _, err := fmt.Sscanf(answer, "%d", &idx); err == nil { + if idx >= 1 && idx <= len(options) { + return options[idx-1] + } + } + fmt.Println("Invalid selection, try again.") + } +} + +// appendToShellProfile appends a line to the user's shell profile. 
// appendToShellProfile appends a line to the user's shell profile.
// The target file is chosen from $SHELL: ~/.zshrc for zsh, ~/.bash_profile
// (preferred, macOS convention) or ~/.bashrc for bash, the fish config for
// fish, and ~/.profile otherwise. The file is created if absent.
func appendToShellProfile(line string) error {
	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}

	// Determine shell profile
	shell := os.Getenv("SHELL")
	var profilePath string

	switch {
	case strings.Contains(shell, "zsh"):
		profilePath = filepath.Join(home, ".zshrc")
	case strings.Contains(shell, "bash"):
		// Check for .bash_profile first (macOS), then .bashrc
		if _, err := os.Stat(filepath.Join(home, ".bash_profile")); err == nil {
			profilePath = filepath.Join(home, ".bash_profile")
		} else {
			profilePath = filepath.Join(home, ".bashrc")
		}
	case strings.Contains(shell, "fish"):
		profilePath = filepath.Join(home, ".config", "fish", "config.fish")
	default:
		profilePath = filepath.Join(home, ".profile")
	}

	// Append the line (0o644: user-writable, world-readable profile).
	f, err := os.OpenFile(profilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()

	// Add newline before and after for safety
	_, err = f.WriteString(fmt.Sprintf("\n# Added by amux setup\n%s\n", line))
	return err
}

// PrintWelcomeBanner prints a welcome banner for new users.
// Fixed: the tagline previously read "Computeres" (typo); padding is
// adjusted so the box edges stay aligned.
func PrintWelcomeBanner() {
	fmt.Println()
	fmt.Println(" \033[1;36m╭─────────────────────────────────────────╮\033[0m")
	fmt.Println(" \033[1;36m│\033[0m                                         \033[1;36m│\033[0m")
	fmt.Println(" \033[1;36m│\033[0m  \033[1mamux\033[0m - AI Coding Agents in Computers   \033[1;36m│\033[0m")
	fmt.Println(" \033[1;36m│\033[0m                                         \033[1;36m│\033[0m")
	fmt.Println(" \033[1;36m│\033[0m   Claude · Codex · Gemini · and more    \033[1;36m│\033[0m")
	fmt.Println(" \033[1;36m│\033[0m                                         \033[1;36m│\033[0m")
	fmt.Println(" \033[1;36m╰─────────────────────────────────────────╯\033[0m")
	fmt.Println()
}
+func ParseArtifacts(output string) ExecutionArtifacts { + lines := strings.Split(output, "\n") + filtered := make([]string, 0, len(lines)) + + for _, line := range lines { + if strings.HasPrefix(line, artifactPrefix) { + continue + } + filtered = append(filtered, line) + } + + return ExecutionArtifacts{Stdout: strings.Join(filtered, "\n")} +} diff --git a/internal/daytona/client.go b/internal/daytona/client.go new file mode 100644 index 00000000..923ecbb1 --- /dev/null +++ b/internal/daytona/client.go @@ -0,0 +1,301 @@ +package daytona + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +const defaultAPIURL = "https://app.daytona.io/api" + +// Daytona is the main client for interacting with the API. +type Daytona struct { + apiKey string + apiURL string + target string + + headers http.Header + client *http.Client + + proxyToolboxOnce sync.Once + proxyToolboxURL string + proxyToolboxErr error + + Volume *VolumeService + Snapshot *SnapshotService +} + +// NewDaytona creates a new client. 
+func NewDaytona(cfg *DaytonaConfig) (*Daytona, error) { + if cfg == nil || cfg.APIKey == "" { + return nil, errors.New("API key is required") + } + apiURL := cfg.APIURL + if apiURL == "" { + apiURL = defaultAPIURL + } + client := &Daytona{ + apiKey: cfg.APIKey, + apiURL: strings.TrimRight(apiURL, "/"), + target: cfg.Target, + client: &http.Client{Timeout: 24 * time.Hour}, + } + + headers := make(http.Header) + headers.Set("Authorization", fmt.Sprintf("Bearer %s", cfg.APIKey)) + headers.Set("X-Daytona-Source", "amux") + client.headers = headers + + client.Volume = &VolumeService{client: client} + client.Snapshot = &SnapshotService{client: client} + return client, nil +} + +func (d *Daytona) getProxyToolboxURL(ctx context.Context) (string, error) { + d.proxyToolboxOnce.Do(func() { + var resp struct { + ProxyToolboxURL string `json:"proxyToolboxUrl"` + } + if err := d.doJSON(ctx, http.MethodGet, "/config", nil, &resp); err != nil { + d.proxyToolboxErr = err + return + } + if resp.ProxyToolboxURL == "" { + d.proxyToolboxErr = errors.New("proxy toolbox URL not available") + return + } + d.proxyToolboxURL = strings.TrimRight(resp.ProxyToolboxURL, "/") + }) + if d.proxyToolboxErr != nil { + return "", d.proxyToolboxErr + } + return d.proxyToolboxURL, nil +} + +func (d *Daytona) endpoint(path string) string { + return d.apiURL + path +} + +func (d *Daytona) doJSON(ctx context.Context, method, path string, payload any, out any) error { + var body *bytes.Reader + if payload != nil { + data, err := json.Marshal(payload) + if err != nil { + return err + } + body = bytes.NewReader(data) + } else { + body = bytes.NewReader(nil) + } + + req, err := http.NewRequestWithContext(ctx, method, d.endpoint(path), body) + if err != nil { + return err + } + for k, vals := range d.headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + if payload != nil { + req.Header.Set("Content-Type", "application/json") + } + if out != nil { + req.Header.Set("Accept", "application/json") 
+ } + + resp, err := d.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 400 { + return parseAPIError(resp) + } + if out != nil { + decoder := json.NewDecoder(resp.Body) + if err := decoder.Decode(out); err != nil { + return err + } + } + return nil +} + +func (d *Daytona) doRequest(ctx context.Context, method, path string, query url.Values, payload any, out any) error { + full := d.endpoint(path) + if len(query) > 0 { + full += "?" + query.Encode() + } + var body *bytes.Reader + if payload != nil { + data, err := json.Marshal(payload) + if err != nil { + return err + } + body = bytes.NewReader(data) + } else { + body = bytes.NewReader(nil) + } + if ctx == nil { + ctx = context.Background() + } + req, err := http.NewRequestWithContext(ctx, method, full, body) + if err != nil { + return err + } + for k, vals := range d.headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + if payload != nil { + req.Header.Set("Content-Type", "application/json") + } + if out != nil { + req.Header.Set("Accept", "application/json") + } + resp, err := d.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 400 { + return parseAPIError(resp) + } + if out != nil { + decoder := json.NewDecoder(resp.Body) + if err := decoder.Decode(out); err != nil { + return err + } + } + return nil +} + +// Create creates a sandbox. 
+func (d *Daytona) Create(params *CreateSandboxParams, opts *CreateOptions) (*Sandbox, error) { + if params == nil { + params = &CreateSandboxParams{Language: "python"} + } + labels := map[string]string{} + for k, v := range params.Labels { + labels[k] = v + } + if params.Language != "" { + labels["code-toolbox-language"] = params.Language + } + + payload := map[string]any{} + if params.Name != "" { + payload["name"] = params.Name + } + if params.Snapshot != "" { + payload["snapshot"] = params.Snapshot + } + if len(params.EnvVars) > 0 { + payload["env"] = params.EnvVars + } + if len(labels) > 0 { + payload["labels"] = labels + } + if params.AutoStopInterval != 0 { + payload["autoStopInterval"] = params.AutoStopInterval + } + if params.AutoDeleteInterval != 0 { + payload["autoDeleteInterval"] = params.AutoDeleteInterval + } + if params.AutoArchiveInterval != 0 { + payload["autoArchiveInterval"] = params.AutoArchiveInterval + } + if params.Ephemeral { + payload["ephemeral"] = true + } + if len(params.Volumes) > 0 { + payload["volumes"] = params.Volumes + } + if d.target != "" { + payload["target"] = d.target + } + + ctx := context.Background() + if opts != nil && opts.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.Timeout) + defer cancel() + } + + var dto sandboxDTO + if err := d.doRequest(ctx, http.MethodPost, "/sandbox", nil, payload, &dto); err != nil { + return nil, err + } + return newSandboxFromDTO(&dto, d), nil +} + +// Get retrieves a sandbox by ID. +func (d *Daytona) Get(id string) (*Sandbox, error) { + var dto sandboxDTO + if err := d.doJSON(context.Background(), http.MethodGet, "/sandbox/"+url.PathEscape(id), nil, &dto); err != nil { + return nil, err + } + return newSandboxFromDTO(&dto, d), nil +} + +// List returns all sandboxes. 
+func (d *Daytona) List() ([]*Sandbox, error) { + var items []sandboxDTO + if err := d.doJSON(context.Background(), http.MethodGet, "/sandbox", nil, &items); err != nil { + return nil, err + } + out := make([]*Sandbox, 0, len(items)) + for i := range items { + out = append(out, newSandboxFromDTO(&items[i], d)) + } + return out, nil +} + +// Delete deletes a sandbox. +func (d *Daytona) Delete(sandbox *Sandbox) error { + if sandbox == nil { + return errors.New("sandbox is required") + } + return d.doJSON(context.Background(), http.MethodDelete, "/sandbox/"+url.PathEscape(sandbox.ID), nil, nil) +} + +// Stop stops a sandbox. +func (d *Daytona) Stop(sandbox *Sandbox, timeout time.Duration) error { + if sandbox == nil { + return errors.New("sandbox is required") + } + return sandbox.Stop(timeout) +} + +func parseAPIError(resp *http.Response) error { + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(resp.Body) + msg := strings.TrimSpace(buf.String()) + if msg == "" { + msg = resp.Status + } + return &APIError{StatusCode: resp.StatusCode, Message: msg} +} + +// APIError represents a non-2xx response. +type APIError struct { + StatusCode int + Message string +} + +func (e *APIError) Error() string { + return fmt.Sprintf("api error (%d): %s", e.StatusCode, e.Message) +} + +func isNotFound(err error) bool { + apiErr, ok := err.(*APIError) + return ok && apiErr.StatusCode == http.StatusNotFound +} diff --git a/internal/daytona/filesystem.go b/internal/daytona/filesystem.go new file mode 100644 index 00000000..50b8438b --- /dev/null +++ b/internal/daytona/filesystem.go @@ -0,0 +1,204 @@ +package daytona + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "os" + "path/filepath" + "strings" + "time" +) + +// FileSystem provides file operations in a sandbox. 
+type FileSystem struct { + toolbox func() (*toolboxClient, error) +} + +type fileUpload struct { + source any + destination string +} + +type fileDownloadRequest struct { + source string + destination string +} + +type fileDownloadResponse struct { + source string + path string + data []byte + err string +} + +// UploadFile uploads a single file or buffer to the sandbox. +func (fs *FileSystem) UploadFile(src any, remotePath string, timeout time.Duration) error { + return fs.uploadFiles([]fileUpload{{source: src, destination: remotePath}}, timeout) +} + +// DownloadFile downloads a file from the sandbox and returns its bytes. +func (fs *FileSystem) DownloadFile(remotePath string, timeout time.Duration) ([]byte, error) { + results, err := fs.downloadFiles([]fileDownloadRequest{{source: remotePath}}, timeout) + if err != nil { + return nil, err + } + if len(results) == 0 { + return nil, fmt.Errorf("no data received for this file") + } + if results[0].err != "" { + return nil, errors.New(results[0].err) + } + return results[0].data, nil +} + +// DownloadFileTo downloads a file from the sandbox to a local path. 
// DownloadFileTo downloads a file from the sandbox to a local path.
func (fs *FileSystem) DownloadFileTo(remotePath, localPath string, timeout time.Duration) error {
	results, err := fs.downloadFiles([]fileDownloadRequest{{source: remotePath, destination: localPath}}, timeout)
	if err != nil {
		return err
	}
	if len(results) == 0 {
		return fmt.Errorf("no data received for this file")
	}
	if results[0].err != "" {
		return errors.New(results[0].err)
	}
	return nil
}

// downloadFiles fetches the requested remote paths in a single round trip
// via the toolbox bulk-download endpoint, which replies with a multipart
// body. For each request: when destination is set the part is streamed to
// that local path (parent directories created); otherwise the bytes are
// buffered into the response's data field. Per-file failures are reported
// via the err field, not the function-level error.
func (fs *FileSystem) downloadFiles(files []fileDownloadRequest, timeout time.Duration) ([]fileDownloadResponse, error) {
	if len(files) == 0 {
		return nil, nil
	}

	client, err := fs.toolbox()
	if err != nil {
		return nil, err
	}

	paths := make([]string, 0, len(files))
	meta := make(map[string]fileDownloadResponse)
	for _, f := range files {
		paths = append(paths, f.source)
		meta[f.source] = fileDownloadResponse{source: f.source, path: f.destination}
	}

	payload, _ := json.Marshal(map[string]any{"paths": paths})
	ctx, cancel := withTimeout(context.Background(), timeout)
	defer cancel()

	resp, err := client.doRequest(ctx, httpMethodPost, "/files/bulk-download", nil, bytes.NewReader(payload), "application/json")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	contentType := resp.Header.Get("Content-Type")
	mediaType, params, err := mime.ParseMediaType(contentType)
	if err != nil || !strings.HasPrefix(mediaType, "multipart/") {
		return nil, fmt.Errorf("unexpected Content-Type: %s", contentType)
	}
	boundary := params["boundary"]
	if boundary == "" {
		return nil, fmt.Errorf("missing multipart boundary")
	}

	reader := multipart.NewReader(resp.Body, boundary)
	for {
		part, err := reader.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		// Parts are matched to requests by filename. If the name does not
		// match and exactly one file was requested, fall back to that single
		// pending entry — presumably for servers that rename the part;
		// TODO confirm against the toolbox API.
		source := part.FileName()
		entry, ok := meta[source]
		if !ok && len(meta) == 1 {
			for key, val := range meta {
				source = key
				entry = val
				break
			}
		}
		switch part.FormName() {
		case "error":
			// Server-side per-file failure: record the message verbatim.
			buf, _ := io.ReadAll(part)
			entry.err = strings.TrimSpace(string(buf))
		case "file":
			if entry.path != "" {
				// Stream straight to the requested local path.
				if err := os.MkdirAll(filepath.Dir(entry.path), 0o755); err != nil {
					entry.err = err.Error()
					break
				}
				out, err := os.Create(entry.path)
				if err != nil {
					entry.err = err.Error()
					break
				}
				_, err = io.Copy(out, part)
				_ = out.Close()
				if err != nil {
					entry.err = err.Error()
				}
			} else {
				// No destination requested: buffer the content in memory.
				buf, _ := io.ReadAll(part)
				entry.data = buf
			}
		}
		meta[source] = entry
	}

	// Results preserve request order; an entry with neither data, path,
	// nor error received no matching part in the response.
	results := make([]fileDownloadResponse, 0, len(files))
	for _, f := range files {
		entry := meta[f.source]
		if entry.err == "" && entry.path == "" && entry.data == nil {
			entry.err = "No data received for this file"
		}
		results = append(results, entry)
	}
	return results, nil
}

// uploadFiles sends all files in one multipart request to the toolbox
// bulk-upload endpoint. Sources may be []byte (in-memory content) or
// string (a local file path to stream).
func (fs *FileSystem) uploadFiles(files []fileUpload, timeout time.Duration) error {
	if len(files) == 0 {
		return nil
	}
	client, err := fs.toolbox()
	if err != nil {
		return err
	}

	fields := map[string]string{}
	fileFields := map[string]multipartFile{}
	for i, f := range files {
		fields[fmt.Sprintf("files[%d].path", i)] = f.destination
		var reader io.Reader
		var name string
		switch v := f.source.(type) {
		case []byte:
			reader = bytes.NewReader(v)
			name = filepath.Base(f.destination)
		case string:
			file, err := os.Open(v)
			if err != nil {
				return err
			}
			// Deliberately deferred inside the loop: every opened file must
			// stay readable until uploadMultipart below has consumed it.
			defer file.Close()
			reader = file
			name = filepath.Base(f.destination)
		default:
			return fmt.Errorf("unsupported source type")
		}
		fileFields[fmt.Sprintf("files[%d].file", i)] = multipartFile{Name: name, Reader: reader}
	}

	return client.uploadMultipart(context.Background(), "/files/bulk-upload", fields, fileFields, timeout)
}
// HTTP method aliases shared across the package.
const (
	httpMethodGet    = http.MethodGet
	httpMethodPost   = http.MethodPost
	httpMethodDelete = http.MethodDelete
)

// Image represents a simple build context for snapshots.
// It accumulates Dockerfile instructions in order.
type Image struct {
	dockerfile strings.Builder
}

// Dockerfile returns the generated Dockerfile.
func (i *Image) Dockerfile() string {
	return i.dockerfile.String()
}

// ImageBase creates an Image from an existing base image.
func ImageBase(image string) *Image {
	img := &Image{}
	img.dockerfile.WriteString("FROM ")
	img.dockerfile.WriteString(image)
	img.dockerfile.WriteString("\n")
	return img
}

// RunCommands appends RUN instructions to the Dockerfile.
// A string argument is emitted in shell form (RUN <command>); a []string
// argument is emitted in exec form (RUN ["cmd", "arg", ...]).
// Fixed: the []string case previously emitted space-joined quoted words
// (RUN "cmd" "arg"), a shell-form line whose single-quote escaping (\')
// is not valid POSIX-shell escaping inside double quotes. Exec form with
// JSON escaping (backslash and double quote only) is the Dockerfile
// mechanism for argv-style commands.
func (i *Image) RunCommands(commands ...any) *Image {
	for _, command := range commands {
		switch v := command.(type) {
		case string:
			i.dockerfile.WriteString("RUN ")
			i.dockerfile.WriteString(v)
			i.dockerfile.WriteString("\n")
		case []string:
			quoted := make([]string, 0, len(v))
			for _, part := range v {
				part = strings.ReplaceAll(part, "\\", "\\\\")
				part = strings.ReplaceAll(part, "\"", "\\\"")
				quoted = append(quoted, "\""+part+"\"")
			}
			i.dockerfile.WriteString("RUN [")
			i.dockerfile.WriteString(strings.Join(quoted, ", "))
			i.dockerfile.WriteString("]\n")
		}
	}
	return i
}
// ExecuteCommand executes a shell command in the sandbox.
// Returns the remote exit code and the command output with internal
// artifact marker lines stripped (see ParseArtifacts).
func (p *Process) ExecuteCommand(command string, opts ...ExecuteCommandOptions) (*ExecuteResponse, error) {
	client, err := p.toolbox()
	if err != nil {
		return nil, err
	}

	// Only the first options value is honored; the variadic form exists so
	// callers can omit options entirely.
	var options ExecuteCommandOptions
	if len(opts) > 0 {
		options = opts[0]
	}

	// Base64-encode the command so quoting and special characters survive
	// the trip through the remote shell unmodified.
	encoded := base64.StdEncoding.EncodeToString([]byte(command))
	cmd := fmt.Sprintf("echo '%s' | base64 -d | sh", encoded)
	if len(options.Env) > 0 {
		// Env values are likewise base64-encoded and decoded remotely via
		// $(...). NOTE(review): map iteration makes export order
		// nondeterministic — harmless unless variables reference each other.
		parts := make([]string, 0, len(options.Env))
		for k, v := range options.Env {
			encodedVal := base64.StdEncoding.EncodeToString([]byte(v))
			parts = append(parts, fmt.Sprintf("export %s=$(echo '%s' | base64 -d)", k, encodedVal))
		}
		cmd = fmt.Sprintf("%s; %s", strings.Join(parts, ";"), cmd)
	}
	// Outer wrapper so the exports and the piped decode run in one shell.
	cmd = fmt.Sprintf("sh -c \"%s\"", cmd)

	payload := map[string]any{"command": cmd}
	if options.Cwd != "" {
		payload["cwd"] = options.Cwd
	}
	if options.Timeout > 0 {
		// The toolbox API takes the timeout in whole seconds.
		payload["timeout"] = int(options.Timeout.Seconds())
	}

	var resp struct {
		ExitCode int32  `json:"exitCode"`
		Result   string `json:"result"`
	}
	if err := client.doJSON(context.Background(), http.MethodPost, "/process/execute", nil, payload, &resp); err != nil {
		return nil, err
	}
	artifacts := ParseArtifacts(resp.Result)
	return &ExecuteResponse{ExitCode: resp.ExitCode, Result: artifacts.Stdout, Artifacts: &artifacts}, nil
}
sandbox. +type Sandbox struct { + ID string + Name string + Snapshot string + Env map[string]string + Labels map[string]string + State string + ErrorReason string + CPU float32 + Memory float32 + + FS *FileSystem + Process *Process + + client *Daytona + + toolboxOnce sync.Once + toolbox *toolboxClient + toolboxErr error +} + +func newSandboxFromDTO(dto *sandboxDTO, client *Daytona) *Sandbox { + s := &Sandbox{client: client} + if dto != nil { + s.applyDTO(dto) + } + provider := func() (*toolboxClient, error) { + return s.toolboxClient() + } + s.FS = &FileSystem{toolbox: provider} + s.Process = &Process{toolbox: provider} + return s +} + +func (s *Sandbox) applyDTO(dto *sandboxDTO) { + s.ID = dto.ID + s.Name = dto.Name + s.Snapshot = "" + if dto.Snapshot != nil { + s.Snapshot = *dto.Snapshot + } + s.Env = dto.Env + s.Labels = dto.Labels + s.State = dto.State + s.ErrorReason = "" + if dto.ErrorReason != nil { + s.ErrorReason = *dto.ErrorReason + } + s.CPU = dto.CPU + s.Memory = dto.Memory +} + +func (s *Sandbox) toolboxClient() (*toolboxClient, error) { + s.toolboxOnce.Do(func() { + base, err := s.client.getProxyToolboxURL(context.Background()) + if err != nil { + s.toolboxErr = err + return + } + if !strings.HasSuffix(base, "/") { + base += "/" + } + s.toolbox = newToolboxClient(base+s.ID, s.client.headers, 24*time.Hour) + }) + if s.toolboxErr != nil { + return nil, s.toolboxErr + } + return s.toolbox, nil +} + +// Start starts the sandbox and waits for it to be ready. 
// Start starts the sandbox and waits for it to be ready.
// A timeout of 0 waits indefinitely; negative timeouts are rejected.
// NOTE(review): the same timeout bounds the start API call and is then
// passed afresh to WaitUntilStarted, so the worst-case total wait is
// roughly 2x timeout — confirm intended.
func (s *Sandbox) Start(timeout time.Duration) error {
	if timeout < 0 {
		return &APIError{StatusCode: 0, Message: "timeout must be non-negative"}
	}
	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	var dto sandboxDTO
	if err := s.client.doRequest(ctx, httpMethodPost, "/sandbox/"+url.PathEscape(s.ID)+"/start", nil, nil, &dto); err != nil {
		return err
	}
	s.applyDTO(&dto)
	return s.WaitUntilStarted(timeout)
}

// Stop stops the sandbox. A timeout of 0 leaves the request unbounded.
func (s *Sandbox) Stop(timeout time.Duration) error {
	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	return s.client.doRequest(ctx, httpMethodPost, "/sandbox/"+url.PathEscape(s.ID)+"/stop", nil, nil, nil)
}

// WaitUntilStarted waits until sandbox reaches started state.
// It polls the API every 100ms, refreshing this struct's fields on each
// iteration. A timeout <= 0 polls forever; the "error" state aborts with
// the sandbox's ErrorReason.
func (s *Sandbox) WaitUntilStarted(timeout time.Duration) error {
	// deadline is only honored when timeout > 0 (see the check below).
	deadline := time.Now().Add(timeout)
	for {
		if err := s.RefreshData(); err != nil {
			return err
		}
		if s.State == "started" {
			return nil
		}
		if s.State == "error" {
			return &APIError{StatusCode: 0, Message: "sandbox failed to start: " + s.ErrorReason}
		}
		if timeout > 0 && time.Now().After(deadline) {
			return &APIError{StatusCode: 0, Message: "sandbox failed to become ready within timeout"}
		}
		time.Sleep(100 * time.Millisecond)
	}
}

// RefreshData refreshes sandbox data from the API.
func (s *Sandbox) RefreshData() error {
	var dto sandboxDTO
	if err := s.client.doJSON(context.Background(), httpMethodGet, "/sandbox/"+url.PathEscape(s.ID), nil, &dto); err != nil {
		return err
	}
	s.applyDTO(&dto)
	return nil
}
+func (s *Sandbox) CreateSshAccess(expiresInMinutes int32) (*SshAccess, error) { + query := url.Values{} + if expiresInMinutes > 0 { + query.Set("expiresInMinutes", fmt.Sprintf("%d", expiresInMinutes)) + } + var dto SshAccess + if err := s.client.doRequest(context.Background(), httpMethodPost, "/sandbox/"+url.PathEscape(s.ID)+"/ssh-access", query, nil, &dto); err != nil { + return nil, err + } + return &dto, nil +} + +// RevokeSshAccess revokes SSH access. +func (s *Sandbox) RevokeSshAccess(token string) error { + query := url.Values{} + if token != "" { + query.Set("token", token) + } + return s.client.doRequest(context.Background(), httpMethodDelete, "/sandbox/"+url.PathEscape(s.ID)+"/ssh-access", query, nil, nil) +} + +// ValidateSshAccess validates SSH access token. +func (s *Sandbox) ValidateSshAccess(token string) (*SshAccessValidation, error) { + query := url.Values{} + query.Set("token", token) + var dto SshAccessValidation + if err := s.client.doRequest(context.Background(), httpMethodGet, "/sandbox/ssh-access/validate", query, nil, &dto); err != nil { + return nil, err + } + return &dto, nil +} + +// GetPreviewLink returns a preview URL and token for a sandbox port. +func (s *Sandbox) GetPreviewLink(port int) (*PortPreview, error) { + var resp struct { + URL string `json:"url"` + Token string `json:"token"` + } + path := fmt.Sprintf("/sandbox/%s/ports/%d/preview-url", url.PathEscape(s.ID), port) + if err := s.client.doJSON(context.Background(), httpMethodGet, path, nil, &resp); err != nil { + return nil, err + } + return &PortPreview{URL: resp.URL, Token: resp.Token}, nil +} + +// GetSandboxUseStatus returns the desktop service status. 
+func (s *Sandbox) GetSandboxUseStatus() (*ComputerUseStatus, error) { + client, err := s.toolboxClient() + if err != nil { + return nil, err + } + var resp ComputerUseStatus + if err := client.doJSON(context.Background(), httpMethodGet, "/toolbox/computeruse/status", nil, nil, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// StartComputerUse starts desktop services. +func (s *Sandbox) StartComputerUse() (*ComputerUseStartResponse, error) { + client, err := s.toolboxClient() + if err != nil { + return nil, err + } + var resp ComputerUseStartResponse + if err := client.doJSON(context.Background(), httpMethodPost, "/toolbox/computeruse/start", nil, nil, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// StopComputerUse stops desktop services. +func (s *Sandbox) StopComputerUse() (*ComputerUseStopResponse, error) { + client, err := s.toolboxClient() + if err != nil { + return nil, err + } + var resp ComputerUseStopResponse + if err := client.doJSON(context.Background(), httpMethodPost, "/toolbox/computeruse/stop", nil, nil, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/internal/daytona/snapshot.go b/internal/daytona/snapshot.go new file mode 100644 index 00000000..8aa72fc0 --- /dev/null +++ b/internal/daytona/snapshot.go @@ -0,0 +1,117 @@ +package daytona + +import ( + "bufio" + "context" + "fmt" + "net/http" + "net/url" + "strings" + "time" +) + +// SnapshotService manages snapshots. +type SnapshotService struct { + client *Daytona +} + +// List retrieves all snapshots. +func (s *SnapshotService) List() ([]*Snapshot, error) { + var resp []*Snapshot + if err := s.client.doJSON(context.Background(), httpMethodGet, "/snapshots", nil, &resp); err != nil { + return nil, err + } + return resp, nil +} + +// Get retrieves a snapshot by name or ID. 
+func (s *SnapshotService) Get(name string) (*Snapshot, error) { + var resp Snapshot + if err := s.client.doJSON(context.Background(), httpMethodGet, "/snapshots/"+url.PathEscape(name), nil, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// Create creates a snapshot and optionally streams build logs. +func (s *SnapshotService) Create(params CreateSnapshotParams, options *SnapshotCreateOptions) (*Snapshot, error) { + payload := map[string]any{"name": params.Name} + switch img := params.Image.(type) { + case string: + payload["imageName"] = img + case *Image: + payload["buildInfo"] = map[string]any{"dockerfileContent": img.Dockerfile()} + case nil: + return nil, fmt.Errorf("image is required") + default: + return nil, fmt.Errorf("image must be a string or *Image") + } + + var created Snapshot + if err := s.client.doJSON(context.Background(), httpMethodPost, "/snapshots", payload, &created); err != nil { + return nil, err + } + if created.ID == "" { + return nil, fmt.Errorf("failed to create snapshot") + } + + terminal := map[string]bool{ + "active": true, + "error": true, + "build_failed": true, + } + + if options != nil && options.OnLogs != nil { + options.OnLogs(fmt.Sprintf("Creating snapshot %s (%s)", created.Name, created.State)) + } + + if options != nil && options.OnLogs != nil && created.State != "pending" && !terminal[created.State] { + _ = s.streamLogs(created.ID, options.OnLogs, terminal) + } + + for !terminal[created.State] { + time.Sleep(1 * time.Second) + latest, err := s.Get(created.ID) + if err != nil { + return nil, err + } + created = *latest + } + + return &created, nil +} + +func (s *SnapshotService) streamLogs(id string, onLogs func(string), terminal map[string]bool) error { + url := fmt.Sprintf("%s/snapshots/%s/build-logs?follow=true", strings.TrimRight(s.client.apiURL, "/"), id) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil) + if err != nil { + return err + } + for k, vals := range 
s.client.headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + resp, err := s.client.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 400 { + return fmt.Errorf("failed to stream logs: %s", resp.Status) + } + + scanner := bufio.NewScanner(resp.Body) + for scanner.Scan() { + line := strings.TrimRight(scanner.Text(), "\n") + onLogs(line) + latest, err := s.Get(id) + if err != nil { + return err + } + if terminal[latest.State] { + return nil + } + } + return scanner.Err() +} diff --git a/internal/daytona/toolbox_client.go b/internal/daytona/toolbox_client.go new file mode 100644 index 00000000..50b92b91 --- /dev/null +++ b/internal/daytona/toolbox_client.go @@ -0,0 +1,155 @@ +package daytona + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +type toolboxClient struct { + baseURL string + headers http.Header + httpClient *http.Client +} + +func newToolboxClient(baseURL string, headers http.Header, timeout time.Duration) *toolboxClient { + client := &http.Client{} + if timeout > 0 { + client.Timeout = timeout + } + return &toolboxClient{ + baseURL: strings.TrimRight(baseURL, "/"), + headers: headers.Clone(), + httpClient: client, + } +} + +func (c *toolboxClient) urlFor(p string, query url.Values) string { + u, _ := url.Parse(c.baseURL) + u.Path = path.Join(u.Path, p) + if query != nil { + u.RawQuery = query.Encode() + } + return u.String() +} + +func (c *toolboxClient) doJSON(ctx context.Context, method, p string, query url.Values, body any, out any) error { + var bodyReader io.Reader + if body != nil { + payload, err := json.Marshal(body) + if err != nil { + return err + } + bodyReader = bytes.NewReader(payload) + } + + req, err := http.NewRequestWithContext(ctx, method, c.urlFor(p, query), bodyReader) + if err != nil { + return err + } + for k, vals := range c.headers { + for _, v := range vals { + 
req.Header.Add(k, v) + } + } + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 400 { + buf, _ := io.ReadAll(resp.Body) + msg := strings.TrimSpace(string(buf)) + if msg == "" { + msg = fmt.Sprintf("request failed with status %d", resp.StatusCode) + } + return &APIError{StatusCode: resp.StatusCode, Message: msg} + } + + if out != nil { + decoder := json.NewDecoder(resp.Body) + if err := decoder.Decode(out); err != nil && err != io.EOF { + return err + } + } + return nil +} + +func (c *toolboxClient) doRequest(ctx context.Context, method, p string, query url.Values, body io.Reader, contentType string) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, c.urlFor(p, query), body) + if err != nil { + return nil, err + } + for k, vals := range c.headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + buf, _ := io.ReadAll(resp.Body) + msg := strings.TrimSpace(string(buf)) + if msg == "" { + msg = fmt.Sprintf("request failed with status %d", resp.StatusCode) + } + resp.Body.Close() + return nil, &APIError{StatusCode: resp.StatusCode, Message: msg} + } + return resp, nil +} + +func (c *toolboxClient) uploadMultipart(ctx context.Context, p string, fields map[string]string, files map[string]multipartFile, timeout time.Duration) error { + pr, pw := io.Pipe() + writer := multipart.NewWriter(pw) + + go func() { + defer pw.Close() + for k, v := range fields { + _ = writer.WriteField(k, v) + } + for field, file := range files { + part, err := writer.CreateFormFile(field, file.Name) + if err != nil { + _ = pw.CloseWithError(err) + return + } + if _, err := io.Copy(part, file.Reader); err != nil { + _ = 
pw.CloseWithError(err) + return + } + } + _ = writer.Close() + }() + + ctxReq, cancel := withTimeout(ctx, timeout) + defer cancel() + resp, err := c.doRequest(ctxReq, httpMethodPost, p, nil, pr, writer.FormDataContentType()) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +type multipartFile struct { + Name string + Reader io.Reader +} diff --git a/internal/daytona/types.go b/internal/daytona/types.go new file mode 100644 index 00000000..04854c2d --- /dev/null +++ b/internal/daytona/types.go @@ -0,0 +1,121 @@ +package daytona + +import "time" + +// DaytonaConfig defines configuration for the API client. +type DaytonaConfig struct { + APIKey string + APIURL string + Target string +} + +// VolumeMount defines a volume mount for a sandbox. +type VolumeMount struct { + VolumeID string `json:"volumeId"` + MountPath string `json:"mountPath"` + Subpath string `json:"subpath,omitempty"` +} + +// CreateSandboxParams defines params for sandbox creation. +type CreateSandboxParams struct { + Name string + Language string + Snapshot string + EnvVars map[string]string + Labels map[string]string + AutoStopInterval int32 + AutoDeleteInterval int32 + AutoArchiveInterval int32 + Ephemeral bool + Volumes []VolumeMount +} + +// CreateOptions defines create options. +type CreateOptions struct { + Timeout time.Duration +} + +// ExecuteCommandOptions defines optional params for command execution. +type ExecuteCommandOptions struct { + Cwd string + Env map[string]string + Timeout time.Duration +} + +// ExecutionArtifacts contains parsed artifacts from execution output. +type ExecutionArtifacts struct { + Stdout string +} + +// ExecuteResponse is the result of command execution. +type ExecuteResponse struct { + ExitCode int32 + Result string + Artifacts *ExecutionArtifacts +} + +// CreateSnapshotParams defines snapshot creation parameters. 
+type CreateSnapshotParams struct { + Name string + Image any // string or *Image +} + +// SnapshotCreateOptions defines create options. +type SnapshotCreateOptions struct { + Timeout time.Duration + OnLogs func(string) +} + +// Snapshot represents a snapshot object. +type Snapshot struct { + ID string `json:"id"` + Name string `json:"name"` + State string `json:"state"` + ErrorReason string `json:"errorReason"` +} + +// Volume represents a volume object. +type Volume struct { + ID string `json:"id"` + Name string `json:"name"` + State string `json:"state"` + ErrorReason string `json:"errorReason"` +} + +// SshAccess represents SSH access data. +type SshAccess struct { + ID string `json:"id"` + SandboxID string `json:"sandboxId"` + Token string `json:"token"` + ExpiresAt time.Time `json:"expiresAt"` +} + +// SshAccessValidation represents SSH access validation results. +type SshAccessValidation struct { + Valid bool `json:"valid"` + SandboxID string `json:"sandboxId"` + RunnerDomain string `json:"runnerDomain"` +} + +// PortPreview contains preview URL data for a sandbox port. +type PortPreview struct { + URL string + Token string +} + +// ComputerUseStatus reports desktop status. +type ComputerUseStatus struct { + Status string `json:"status"` +} + +// ComputerUseStartResponse reports start results. +type ComputerUseStartResponse struct { + Message string `json:"message"` + Status map[string]interface{} `json:"status"` +} + +// ComputerUseStopResponse reports stop results. 
+type ComputerUseStopResponse struct { + Message string `json:"message"` + Status map[string]interface{} `json:"status"` +} diff --git a/internal/daytona/util.go b/internal/daytona/util.go new file mode 100644 index 00000000..df2a833d --- /dev/null +++ b/internal/daytona/util.go @@ -0,0 +1,13 @@ +package daytona + +import ( + "context" + "time" +) + +func withTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + if timeout <= 0 { + return context.WithCancel(ctx) + } + return context.WithTimeout(ctx, timeout) +} diff --git a/internal/daytona/volume.go b/internal/daytona/volume.go new file mode 100644 index 00000000..9e790008 --- /dev/null +++ b/internal/daytona/volume.go @@ -0,0 +1,77 @@ +package daytona + +import ( + "context" + "fmt" + "net/url" + "time" +) + +// VolumeService manages volumes. +type VolumeService struct { + client *Daytona +} + +// VolumeWaitOptions configures volume wait behavior. +type VolumeWaitOptions struct { + Timeout time.Duration + Interval time.Duration +} + +// Get gets a volume by name. If createIfMissing is true, creates if not found. +func (v *VolumeService) Get(name string, createIfMissing bool) (*Volume, error) { + var resp Volume + path := "/volumes/by-name/" + url.PathEscape(name) + if err := v.client.doJSON(context.Background(), httpMethodGet, path, nil, &resp); err != nil { + if isNotFound(err) && createIfMissing { + return v.create(name) + } + return nil, err + } + return &resp, nil +} + +// WaitForReady waits until a volume reaches the ready state. 
+func (v *VolumeService) WaitForReady(name string, options *VolumeWaitOptions) (*Volume, error) {
+	timeout := 60 * time.Second        // default overall deadline; options.Timeout overrides when > 0
+	interval := 1500 * time.Millisecond // default poll interval; options.Interval overrides when > 0
+	if options != nil {
+		if options.Timeout > 0 {
+			timeout = options.Timeout
+		}
+		if options.Interval > 0 {
+			interval = options.Interval
+		}
+	}
+
+	start := time.Now()
+	for {
+		volume, err := v.Get(name, true) // NOTE(review): createIfMissing=true on every poll re-creates a volume deleted mid-wait — confirm intended
+		if err != nil {
+			return nil, err
+		}
+		if volume.State == "ready" {
+			return volume, nil
+		}
+		if volume.State == "error" || volume.State == "deleted" || volume.State == "deleting" || volume.State == "pending_delete" { // failing/terminal states: stop polling immediately
+			reason := volume.ErrorReason
+			if reason != "" {
+				return nil, fmt.Errorf("volume '%s' is in state %s: %s", name, volume.State, reason)
+			}
+			return nil, fmt.Errorf("volume '%s' is in state %s", name, volume.State)
+		}
+		if timeout > 0 && time.Since(start) > timeout { // deadline checked after state so a ready volume at the deadline still succeeds
+			return nil, fmt.Errorf("volume '%s' not ready after %ds (state: %s)", name, int(timeout.Seconds()), volume.State)
+		}
+		time.Sleep(interval)
+	}
+}
+
+func (v *VolumeService) create(name string) (*Volume, error) {
+	payload := map[string]any{"name": name}
+	var resp Volume
+	if err := v.client.doJSON(context.Background(), httpMethodPost, "/volumes", payload, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
diff --git a/internal/git/operations.go b/internal/git/operations.go
index 38121d6a..e9d01457 100644
--- a/internal/git/operations.go
+++ b/internal/git/operations.go
@@ -76,6 +76,12 @@ func GetRepoRoot(path string) (string, error) {
 	return RunGitCtx(context.Background(), path, "rev-parse", "--show-toplevel")
 }
 
+// GetRepoCommonDir returns the common git directory for a repo/worktree.
+// For worktrees, this points at the main repo's .git directory.
+func GetRepoCommonDir(path string) (string, error) { + return RunGit(path, "rev-parse", "--git-common-dir") +} + // GetCurrentBranch returns the current branch name func GetCurrentBranch(path string) (string, error) { return RunGitCtx(context.Background(), path, "rev-parse", "--abbrev-ref", "HEAD") diff --git a/internal/git/status.go b/internal/git/status.go index d43088f2..30fea40c 100644 --- a/internal/git/status.go +++ b/internal/git/status.go @@ -55,6 +55,81 @@ type StatusResult struct { TotalDeleted int // Total lines deleted across all changes } +// ParseStatus parses git status --short output and returns a StatusResult. +// This is used for parsing remote git status output (e.g., from sandbox). +func ParseStatus(output string) *StatusResult { + result := &StatusResult{ + Staged: []Change{}, + Unstaged: []Change{}, + Untracked: []Change{}, + Clean: true, + } + + if output == "" { + return result + } + + lines := strings.Split(output, "\n") + for _, line := range lines { + if len(line) < 3 { + continue + } + indexStatus := line[0] + workTreeStatus := line[1] + path := strings.TrimSpace(line[3:]) + + if path == "" { + continue + } + + result.Clean = false + + // Untracked files + if indexStatus == '?' && workTreeStatus == '?' { + result.Untracked = append(result.Untracked, Change{Path: path, Kind: ChangeUntracked}) + continue + } + + // Staged changes (index has status) + if indexStatus != ' ' && indexStatus != '?' { + result.Staged = append(result.Staged, Change{ + Path: path, + Kind: parseStatusChar(indexStatus), + Staged: true, + }) + } + + // Unstaged changes (work tree has status) + if workTreeStatus != ' ' && workTreeStatus != '?' 
{ + result.Unstaged = append(result.Unstaged, Change{ + Path: path, + Kind: parseStatusChar(workTreeStatus), + Staged: false, + }) + } + } + + return result +} + +// parseStatusChar converts a git status character to a ChangeKind +func parseStatusChar(c byte) ChangeKind { + switch c { + case 'M': + return ChangeModified + case 'A': + return ChangeAdded + case 'D': + return ChangeDeleted + case 'R': + return ChangeRenamed + case 'C': + return ChangeCopied + default: + return ChangeModified + } +} + // GetStatus returns the git status for a repository using porcelain v1 -z format // This format handles spaces, unicode, and special characters in paths correctly func GetStatus(repoPath string) (*StatusResult, error) { diff --git a/internal/pty/terminal.go b/internal/pty/terminal.go index 0ed65445..4d434d81 100644 --- a/internal/pty/terminal.go +++ b/internal/pty/terminal.go @@ -15,10 +15,12 @@ import ( // Terminal wraps a PTY with an associated command type Terminal struct { - mu sync.Mutex - ptyFile *os.File - cmd *exec.Cmd - closed bool + mu sync.Mutex + ptyFile *os.File + cmd *exec.Cmd + closed bool + cleanup func() + cleanupOnce sync.Once } // New creates a new terminal with the given command. @@ -54,6 +56,26 @@ func NewWithSize(command string, dir string, env []string, rows, cols uint16) (* }, nil } +// NewWithCmd creates a new terminal from an exec.Cmd. +// Optional cleanup runs once on Close. 
+func NewWithCmd(cmd *exec.Cmd, cleanup func()) (*Terminal, error) { + if cmd.Env == nil { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, "TERM=xterm-256color") + + ptmx, err := pty.Start(cmd) + if err != nil { + return nil, err + } + + return &Terminal{ + ptyFile: ptmx, + cmd: cmd, + cleanup: cleanup, + }, nil +} + // SetSize sets the terminal size func (t *Terminal) SetSize(rows, cols uint16) error { t.mu.Lock() @@ -142,6 +164,9 @@ func (t *Terminal) Close() error { } _ = cmd.Wait() } + if t.cleanup != nil { + t.cleanupOnce.Do(t.cleanup) + } return nil } diff --git a/internal/sandbox/ARCHITECTURE.md b/internal/sandbox/ARCHITECTURE.md new file mode 100644 index 00000000..defb5b87 --- /dev/null +++ b/internal/sandbox/ARCHITECTURE.md @@ -0,0 +1,186 @@ +# amux Sandbox Architecture + +## Overview + +The amux sandbox system provides a unified interface for running coding agents +(Claude, Codex, OpenCode, Amp, Gemini, Droid) in Daytona sandboxes with +persistent credentials and settings. + +## Core Design Principles + +### 1. Fresh Sandbox Per Run + +amux creates a **new sandbox per run**. Workspace isolation is achieved via a +`worktreeID` - each project gets its own workspace directory inside the sandbox: + +``` +/workspace/{worktreeID}/repo # Each project's workspace +``` + +The `worktreeID` is a SHA256 hash of the absolute working directory path, +ensuring each project has a unique workspace without reusing sandboxes. + +**Benefits:** +- Clean environments for each session +- No per-project sandbox lifecycle management +- Easy cleanup with ephemeral sandboxes + +### 2. Daytona Provider (Today) + +The `Provider` interface (`provider.go`) abstracts sandbox backends. amux ships +with the Daytona provider only; provider selection flags/env have been removed. 
+ +```go +type Provider interface { + Name() string + CreateSandbox(ctx, config) (RemoteSandbox, error) + GetSandbox(ctx, id) (RemoteSandbox, error) + ListSandboxes(ctx) ([]RemoteSandbox, error) + DeleteSandbox(ctx, id) error + Volumes() VolumeManager + Snapshots() SnapshotManager + SupportsFeature(feature) bool +} +``` + +### 3. Persistent Credentials via Volume + +Credentials and CLI caches are stored on a persistent volume mounted at `/amux`. +On sandbox startup, amux symlinks credential/cache directories into the sandbox +home directory so they persist across sandboxes. + +``` +/amux/home/.claude +/amux/home/.codex +/amux/home/.config +/amux/home/.local +/amux/home/.npm +/amux/home/.factory +``` + +Deleting a sandbox does **not** delete credentials. The volume name is stored in +`~/.amux/config.json` as `persistenceVolumeName` (default `amux-persist`). + +To reset persistence, amux rotates to a new volume via `amux sandbox reset`. +Old volumes are retained for manual cleanup in Daytona. + +Recorded session logs (when `--record` is used) are stored under: + +``` +/amux/logs/{worktreeId}/YYYYMMDD-HHMMSS-agent.log +``` + +### 4. Opt-in Settings Sync + +Users can opt-in to sync local settings (not credentials) to sandboxes: + +```bash +amux settings sync --enable --claude --git +``` + +This copies configuration files like `~/.claude/settings.json` to the sandbox, +with sensitive keys automatically filtered out. See `settings.go` for the +filtering logic. + +### 5. TUI Integration Architecture + +The sandbox system is designed for TUI integration: + +- TUI (Bubble Tea) Agent Tab +- When "Cloud" is selected: + 1. TUI calls `sandbox.CreateSandboxSession()` with agent config + 2. New sandbox created (ephemeral) + 3. TUI gets `RemoteSandbox` handle + 4. PTY streams through `sandbox.RunAgentInteractive()` + +**Key integration points:** + +1. **Sandbox Creation**: `CreateSandboxSession()` always creates a new sandbox +2. 
**Credential Setup**: `SetupCredentials()` mounts persistence + home symlinks +3. **Agent Execution**: `RunAgentInteractive()` provides PTY integration +4. **Workspace Sync**: `UploadWorkspace()`/`DownloadWorkspace()` for file sync + +### 6. Credential Flow + +``` +First Run: +1. User runs `amux claude` +2. amux creates a new sandbox +3. amux mounts /amux and symlinks home directories +4. Agent prompts for login (OAuth in browser) +5. Credentials stored under /amux/home +6. User exits; sandbox is deleted + +Subsequent Runs: +1. User runs `amux claude` +2. amux creates a new sandbox +3. /amux is mounted again; credentials already present +4. Agent starts immediately (no login needed) +``` + +## File Structure + +``` +internal/ +|-- cli/ +| |-- aliases.go # Agent shortcuts (amux claude, etc.) +| |-- auth.go # Auth commands +| |-- cli.go # Root command +| |-- doctor.go # Health checks +| |-- sandbox.go # Sandbox subcommands +| |-- settings.go # Settings sync CLI +| |-- spinner.go # Progress indicators +| |-- status.go # Status, SSH, exec commands +| `-- ... +|-- daytona/ +| |-- client.go # Daytona API client +| |-- sandbox.go # Sandbox operations +| |-- volume.go # Volume management +| |-- snapshot.go # Snapshot management +| `-- ... +`-- sandbox/ + |-- provider.go # Provider interface (Daytona) + |-- providers.go # Provider registry + resolution + |-- config.go # Configuration + |-- credentials.go # Credential management + |-- settings.go # Settings sync + |-- agent.go # Agent installation/execution + |-- sync.go # Workspace sync + `-- ... +``` + +## Adding a New Provider (Optional) + +If additional providers are reintroduced in the future: + +1. Implement the `Provider` interface in a new package (e.g., `internal/e2b/`) +2. Register the provider in the registry +3. Add provider selection to CLI/TUI + +Example: + +```go +// internal/e2b/provider.go +type E2BProvider struct { ... 
} + +func (p *E2BProvider) Name() string { return "e2b" } +func (p *E2BProvider) CreateSandbox(...) (RemoteSandbox, error) { ... } +// ... implement remaining methods + +// Registration +registry, _ := sandbox.DefaultProviderRegistry(cfg) +registry.Register(newE2BProvider(cfg)) +``` + +## Security Considerations + +1. **Credential Isolation**: Each user has their own persistent volume +2. **Settings Filtering**: Sensitive keys stripped from synced settings +3. **No Credential Logging**: Credentials never appear in logs or output +4. **OAuth-first Authentication**: Agents authenticate via browser/OAuth inside the sandbox + +## Future Enhancements + +1. **Credential rotation**: Automatic refresh of expired tokens +2. **Backup/restore**: Export/import credentials from the persistent volume +3. **Audit logging**: Track credential access patterns diff --git a/internal/sandbox/agent.go b/internal/sandbox/agent.go new file mode 100644 index 00000000..d1809e1d --- /dev/null +++ b/internal/sandbox/agent.go @@ -0,0 +1,241 @@ +package sandbox + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "time" +) + +const ( + defaultSSHHost = "ssh.app.daytona.io" + sshReadyTimeout = 15 * time.Second + sshReadyInterval = 1 * time.Second +) + +// AgentConfig configures interactive agent sessions. +type AgentConfig struct { + Agent Agent + WorkspacePath string + Args []string + Env map[string]string + RawMode *bool + RecordPath string +} + +func getSSHHost() string { + host := envFirst("AMUX_SSH_HOST", "DAYTONA_SSH_HOST") + if host == "" { + return defaultSSHHost + } + return host +} + +// quoteForShell quotes a string for safe use in shell commands. +// Uses the central ShellQuote function for consistency. 
+func quoteForShell(value string) string { + return ShellQuote(value) +} + +func buildEnvExportsLocal(env map[string]string) []string { + return BuildEnvExports(env) +} + +func buildEnvAssignmentsLocal(env map[string]string) string { + return BuildEnvAssignments(env) +} + +func redactExports(input string) string { + return RedactSecrets(input) +} + +func getStdoutFromResponse(resp *ExecResult) string { + if resp == nil { + return "" + } + return resp.Stdout +} + +func getNodeBinDir(computer RemoteSandbox) string { + resp, err := execCommand(computer, "command -v node", nil) + if err == nil && resp.ExitCode == 0 { + path := strings.TrimSpace(getStdoutFromResponse(resp)) + if path != "" { + resp, err = execCommand(computer, fmt.Sprintf("dirname %s", quoteForShell(path)), nil) + if err == nil && resp.ExitCode == 0 { + dir := strings.TrimSpace(getStdoutFromResponse(resp)) + if dir != "" { + return dir + } + } + } + } + return "" +} + +func getHomeDir(computer RemoteSandbox) string { + resp, err := execCommand(computer, `sh -lc "USER_NAME=$(id -un 2>/dev/null || echo daytona); HOME_DIR=$(getent passwd \"$USER_NAME\" 2>/dev/null | cut -d: -f6 || true); if [ -z \"$HOME_DIR\" ]; then HOME_DIR=/home/$USER_NAME; fi; printf \"%s\" \"$HOME_DIR\""`, nil) + if err == nil { + stdout := strings.TrimSpace(getStdoutFromResponse(resp)) + if stdout != "" { + return stdout + } + } + return "/home/daytona" +} + +func resolveAgentCommandPath(computer RemoteSandbox, command string) string { + home := getHomeDir(computer) + + // Check native installation locations first (before PATH lookup) + if command == "claude" { + // Native installer puts claude at ~/.local/bin/claude + candidate := fmt.Sprintf("%s/.local/bin/claude", home) + resp, err := execCommand(computer, fmt.Sprintf("test -x %s", quoteForShell(candidate)), nil) + if err == nil && resp.ExitCode == 0 { + return candidate + } + } + if command == "amp" { + candidate := fmt.Sprintf("%s/.amp/bin/amp", home) + resp, err := 
execCommand(computer, fmt.Sprintf("test -x %s", quoteForShell(candidate)), nil) + if err == nil && resp.ExitCode == 0 { + return candidate + } + } + if command == "droid" { + candidate := fmt.Sprintf("%s/.factory/bin/droid", home) + resp, err := execCommand(computer, fmt.Sprintf("test -x %s", quoteForShell(candidate)), nil) + if err == nil && resp.ExitCode == 0 { + return candidate + } + } + + // Check PATH + resp, err := execCommand(computer, fmt.Sprintf("command -v %s", command), nil) + if err == nil && resp.ExitCode == 0 { + path := strings.TrimSpace(getStdoutFromResponse(resp)) + if path != "" { + return path + } + } + + // Check node bin directory (for npm-installed tools) + if nodeBin := getNodeBinDir(computer); nodeBin != "" { + candidate := fmt.Sprintf("%s/%s", nodeBin, command) + resp, err = execCommand(computer, fmt.Sprintf("test -x %s", quoteForShell(candidate)), nil) + if err == nil && resp.ExitCode == 0 { + return candidate + } + } + + return command +} + +func hasScript(computer RemoteSandbox) bool { + resp, err := execCommand(computer, "command -v script", nil) + return err == nil && resp.ExitCode == 0 && strings.TrimSpace(getStdoutFromResponse(resp)) != "" +} + +type agentInteractiveRunner interface { + RunAgentInteractive(cfg AgentConfig) (int, error) +} + +// RunAgentInteractive runs the agent in an interactive session. 
+func RunAgentInteractive(computer RemoteSandbox, cfg AgentConfig) (int, error) { + if runner, ok := computer.(agentInteractiveRunner); ok { + return runner.RunAgentInteractive(cfg) + } + return runAgentInteractiveGeneric(computer, cfg) +} + +func runAgentInteractiveGeneric(computer RemoteSandbox, cfg AgentConfig) (int, error) { + if computer == nil { + return 1, errors.New("sandbox is nil") + } + command := "bash" + switch cfg.Agent { + case AgentClaude: + command = "claude" + case AgentCodex: + command = "codex" + case AgentOpenCode: + command = "opencode" + case AgentAmp: + command = "amp" + case AgentGemini: + command = "gemini" + case AgentDroid: + command = "droid" + case AgentShell: + command = "bash" + } + args := cfg.Args + if args == nil { + args = []string{} + } + cmdLine := command + if len(args) > 0 { + cmdLine = fmt.Sprintf("%s %s", command, ShellJoin(args)) + } + if envAssignments := buildEnvAssignmentsLocal(cfg.Env); envAssignments != "" { + cmdLine = fmt.Sprintf("%s %s", envAssignments, cmdLine) + } + if cfg.WorkspacePath != "" { + cmdLine = fmt.Sprintf("cd %s && %s", ShellQuote(cfg.WorkspacePath), cmdLine) + } + if strings.TrimSpace(cfg.RecordPath) != "" { + if hasScript(computer) { + cmdLine = fmt.Sprintf("script -q -f %s -c %s", ShellQuote(cfg.RecordPath), ShellQuote(cmdLine)) + } else { + fmt.Fprintln(os.Stderr, "Warning: recording requested but `script` is unavailable; proceeding without recording.") + } + } + return computer.ExecInteractive(context.Background(), cmdLine, os.Stdin, os.Stdout, os.Stderr, nil) +} + +// RunAgentCommand executes a non-interactive command for an agent. 
+func RunAgentCommand(computer RemoteSandbox, cfg AgentConfig, args []string) (int32, string, error) { + command := "bash" + switch cfg.Agent { + case AgentClaude: + command = "claude" + case AgentCodex: + command = "codex" + case AgentOpenCode: + command = "opencode" + case AgentAmp: + command = "amp" + case AgentGemini: + command = "gemini" + case AgentDroid: + command = "droid" + } + resolved := resolveAgentCommandPath(computer, command) + allArgs := strings.Join(args, " ") + cmdLine := resolved + if allArgs != "" { + cmdLine = fmt.Sprintf("%s %s", resolved, allArgs) + } + envAssignments := buildEnvAssignmentsLocal(cfg.Env) + if envAssignments != "" { + cmdLine = fmt.Sprintf("%s %s", envAssignments, cmdLine) + } + resp, err := execCommand(computer, cmdLine, &ExecOptions{Cwd: cfg.WorkspacePath}) + if err != nil { + return 1, "", err + } + return int32(resp.ExitCode), getStdoutFromResponse(resp), nil +} + +func sortStrings(values []string) { + for i := 0; i < len(values)-1; i++ { + for j := i + 1; j < len(values); j++ { + if values[j] < values[i] { + values[i], values[j] = values[j], values[i] + } + } + } +} diff --git a/internal/sandbox/agent_daytona.go b/internal/sandbox/agent_daytona.go new file mode 100644 index 00000000..91a745d9 --- /dev/null +++ b/internal/sandbox/agent_daytona.go @@ -0,0 +1,349 @@ +package sandbox + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "strings" + "time" + + "golang.org/x/term" + + "github.com/andyrewlee/amux/internal/daytona" +) + +func waitForSshAccessDaytona(ds *daytona.Sandbox, token string) (string, error) { + deadline := time.Now().Add(sshReadyTimeout) + for time.Now().Before(deadline) { + validation, err := ds.ValidateSshAccess(token) + if err == nil && validation.Valid { + return validation.RunnerDomain, nil + } + time.Sleep(sshReadyInterval) + } + return "", errors.New("SSH access token not ready. 
Try again.") +} + +type agentCommandSpec struct { + Command string + CommandBlock string + RemoteCommand string + DebugEnabled bool +} + +func buildAgentCommandSpec(s RemoteSandbox, cfg AgentConfig) (agentCommandSpec, error) { + command := "bash" + switch cfg.Agent { + case AgentClaude: + command = "claude" + case AgentCodex: + command = "codex" + case AgentOpenCode: + command = "opencode" + case AgentAmp: + command = "amp" + case AgentGemini: + command = "gemini" + case AgentDroid: + command = "droid" + case AgentShell: + command = "bash" + } + + resolvedCommand := resolveAgentCommandPath(s, command) + args := cfg.Args + if args == nil { + args = []string{} + } + homeDir := getHomeDir(s) + + wrapPref := envFirst("AMUX_TTY_WRAP") + wrapTty := false + if wrapPref == "1" { + wrapTty = hasScript(s) + } else if wrapPref == "0" { + wrapTty = false + } else { + wrapTty = cfg.Agent == AgentClaude && hasScript(s) + } + + safeWorkspace := quoteForShell(cfg.WorkspacePath) + safeResolved := quoteForShell(resolvedCommand) + safeArgs := "" + if len(args) > 0 { + quotedArgs := make([]string, 0, len(args)) + for _, arg := range args { + quotedArgs = append(quotedArgs, quoteForShell(arg)) + } + safeArgs = strings.Join(quotedArgs, " ") + } + shellInteractiveFlag := "" + if cfg.Agent == AgentShell { + shellInteractiveFlag = " -i" + } + + exportHome := fmt.Sprintf("export HOME=%s", quoteForShell(homeDir)) + exportTerm := strings.Join([]string{ + `if [ -z "$TERM" ] || [ "$TERM" = "dumb" ]; then`, + ` export TERM=xterm-256color`, + `else`, + ` infocmp "$TERM" >/dev/null 2>&1 || export TERM=xterm-256color`, + `fi`, + }, "\n") + unsetCi := `if [ -n "$CI" ]; then unset CI; fi` + envExports := buildEnvExportsLocal(cfg.Env) + debugEnabled := envIsOne("AMUX_SSH_DEBUG") + debugLines := []string{} + if debugEnabled { + debugLines = append(debugLines, + `echo "AMUX_DEBUG: HOME=$HOME"`, + `echo "AMUX_DEBUG: PATH=$PATH"`, + `echo "AMUX_DEBUG: TERM=$TERM"`, + `echo "AMUX_DEBUG: CI=$CI"`, + `echo 
"AMUX_DEBUG: NODE_BIN=$NODE_BIN"`, + `echo "AMUX_DEBUG: NODE_DIR=$NODE_DIR"`, + `echo "AMUX_DEBUG: AMUX_RESOLVED=$AMUX_RESOLVED"`, + `echo "AMUX_DEBUG: AMUX_CMD=$AMUX_CMD"`, + `if [ -n "$ANTHROPIC_API_KEY" ]; then echo "AMUX_DEBUG: ANTHROPIC_API_KEY=SET"; else echo "AMUX_DEBUG: ANTHROPIC_API_KEY=UNSET"; fi`, + `if [ -n "$CLAUDE_API_KEY" ]; then echo "AMUX_DEBUG: CLAUDE_API_KEY=SET"; else echo "AMUX_DEBUG: CLAUDE_API_KEY=UNSET"; fi`, + `if [ -n "$ANTHROPIC_AUTH_TOKEN" ]; then echo "AMUX_DEBUG: ANTHROPIC_AUTH_TOKEN=SET"; else echo "AMUX_DEBUG: ANTHROPIC_AUTH_TOKEN=UNSET"; fi`, + `if [ -n "$OPENAI_API_KEY" ]; then echo "AMUX_DEBUG: OPENAI_API_KEY=SET"; else echo "AMUX_DEBUG: OPENAI_API_KEY=UNSET"; fi`, + `if [ -n "$GEMINI_API_KEY" ]; then echo "AMUX_DEBUG: GEMINI_API_KEY=SET"; else echo "AMUX_DEBUG: GEMINI_API_KEY=UNSET"; fi`, + `if [ -n "$GOOGLE_API_KEY" ]; then echo "AMUX_DEBUG: GOOGLE_API_KEY=SET"; else echo "AMUX_DEBUG: GOOGLE_API_KEY=UNSET"; fi`, + `if [ -n "$GOOGLE_APPLICATION_CREDENTIALS" ]; then echo "AMUX_DEBUG: GOOGLE_APPLICATION_CREDENTIALS=SET"; else echo "AMUX_DEBUG: GOOGLE_APPLICATION_CREDENTIALS=UNSET"; fi`, + `if [ -n "$FACTORY_API_KEY" ]; then echo "AMUX_DEBUG: FACTORY_API_KEY=SET"; else echo "AMUX_DEBUG: FACTORY_API_KEY=UNSET"; fi`, + fmt.Sprintf("command -v %s 2>/dev/null || true", command), + `command -v node 2>/dev/null || true`, + `command -v git 2>/dev/null || true`, + ) + } + + innerCommand := []string{ + exportHome, + exportTerm, + unsetCi, + } + innerCommand = append(innerCommand, envExports...) 
+ innerCommand = append(innerCommand, + `stty sane >/dev/null 2>&1 || true`, + `if [ -d /usr/local/share/nvm/current/bin ]; then export PATH="/usr/local/share/nvm/current/bin:$PATH"; fi`, + `if [ -d "$HOME/.local/bin" ]; then export PATH="$HOME/.local/bin:$PATH"; fi`, + `if [ -d "$HOME/.amp/bin" ]; then export PATH="$HOME/.amp/bin:$PATH"; fi`, + `if [ -d "$HOME/.factory/bin" ]; then export PATH="$HOME/.factory/bin:$PATH"; fi`, + `if [ -d /usr/local/share/nvm/versions/node ]; then`, + ` for p in /usr/local/share/nvm/versions/node/*/bin; do`, + ` if [ -d "$p" ]; then export PATH="$p:$PATH"; fi`, + ` done`, + `fi`, + fmt.Sprintf("cd %s", safeWorkspace), + fmt.Sprintf("AMUX_RESOLVED=%s", safeResolved), + `NODE_BIN=$(command -v node 2>/dev/null || true)`, + `if [ -z "$NODE_BIN" ]; then`, + ` for p in /usr/local/share/nvm/versions/node/*/bin/node /usr/local/share/nvm/current/bin/node /usr/local/bin/node /usr/bin/node; do`, + ` if [ -x "$p" ]; then NODE_BIN="$p"; break; fi`, + ` done`, + `fi`, + `if [ -n "$NODE_BIN" ]; then`, + ` NODE_DIR=$(dirname "$NODE_BIN")`, + ` export PATH="$NODE_DIR:$PATH"`, + `fi`, + `if command -v npm >/dev/null 2>&1; then`, + ` NPM_PREFIX=$(npm config get prefix 2>/dev/null || true)`, + ` if [ -n "$NPM_PREFIX" ] && [ -d "$NPM_PREFIX/bin" ]; then export PATH="$NPM_PREFIX/bin:$PATH"; fi`, + `fi`, + `AMUX_CMD=""`, + `if [ -n "$AMUX_RESOLVED" ] && [ -x "$AMUX_RESOLVED" ]; then AMUX_CMD="$AMUX_RESOLVED"; fi`, + fmt.Sprintf(`if [ -z "$AMUX_CMD" ]; then AMUX_CMD=$(command -v %s 2>/dev/null || true); fi`, command), + fmt.Sprintf(`if [ -z "$AMUX_CMD" ] && [ -n "$NODE_DIR" ] && [ -x "$NODE_DIR/%s" ]; then AMUX_CMD="$NODE_DIR/%s"; fi`, command, command), + `if [ -z "$AMUX_CMD" ]; then`, + fmt.Sprintf(` for p in $HOME/.local/bin/%s $HOME/.amp/bin/%s $HOME/.factory/bin/%s /usr/local/share/nvm/versions/node/*/bin/%s /usr/local/share/nvm/current/bin/%s /usr/local/bin/%s /usr/bin/%s /home/daytona/.local/bin/%s; do`, command, command, command, command, command, 
command, command, command), + ` if [ -x "$p" ]; then AMUX_CMD="$p"; break; fi`, + ` done`, + `fi`, + ) + innerCommand = append(innerCommand, debugLines...) + innerCommand = append(innerCommand, fmt.Sprintf("if [ -z \"$AMUX_CMD\" ]; then echo \"%s not found\" >&2; exit 127; fi", command)) + + execLines := []string{ + fmt.Sprintf(`AMUX_CMDLINE="$AMUX_CMD%s%s"`, shellInteractiveFlag, func() string { + if safeArgs != "" { + return " " + safeArgs + } + return "" + }()), + } + recordPath := strings.TrimSpace(cfg.RecordPath) + recordEnabled := recordPath != "" && hasScript(s) + if recordPath != "" && !recordEnabled { + fmt.Fprintln(os.Stderr, "Warning: recording requested but `script` is unavailable; proceeding without recording.") + } + if recordEnabled { + execLines = append(execLines, fmt.Sprintf(`exec script -q -f %s -c "$AMUX_CMDLINE"`, quoteForShell(recordPath))) + } else if wrapTty { + execLines = append(execLines, `exec script -q -c "$AMUX_CMDLINE" /dev/null`) + } + execLines = append(execLines, fmt.Sprintf(`exec "$AMUX_CMD"%s%s`, shellInteractiveFlag, func() string { + if safeArgs != "" { + return " " + safeArgs + } + return "" + }())) + + commandBlock := strings.Join(append(innerCommand, execLines...), "\n") + remoteCommand := fmt.Sprintf("bash -lc %s", quoteForShell(commandBlock)) + + return agentCommandSpec{ + Command: command, + CommandBlock: commandBlock, + RemoteCommand: remoteCommand, + DebugEnabled: debugEnabled, + }, nil +} + +// BuildAgentRemoteCommand returns the remote command string for an agent session. +func BuildAgentRemoteCommand(sb RemoteSandbox, cfg AgentConfig) (string, error) { + spec, err := buildAgentCommandSpec(sb, cfg) + if err != nil { + return "", err + } + return spec.RemoteCommand, nil +} + +// RunAgentInteractive runs the agent in an interactive SSH session (Daytona). 
+func (s *daytonaSandbox) RunAgentInteractive(cfg AgentConfig) (int, error) { + spec, err := buildAgentCommandSpec(s, cfg) + if err != nil { + return 1, err + } + + fmt.Printf("Starting %s in interactive mode...\n", cfg.Agent) + if !term.IsTerminal(int(os.Stdin.Fd())) { + return 1, errors.New("interactive mode requires a TTY") + } + + sshAccess, err := s.inner.CreateSshAccess(60) + if err != nil { + return 1, err + } + defer func() { + _ = s.inner.RevokeSshAccess(sshAccess.Token) + }() + + runnerDomain, err := waitForSshAccessDaytona(s.inner, sshAccess.Token) + if err != nil { + return 1, err + } + sshHost := runnerDomain + if sshHost == "" { + sshHost = getSSHHost() + } + target := fmt.Sprintf("%s@%s", sshAccess.Token, sshHost) + + rawShell := cfg.Agent == AgentShell && envDefaultTrue("AMUX_SHELL_RAW") + useShellBootstrap := !rawShell && envIsOne("AMUX_SSH_SHELL") + useRawMode := false + if cfg.RawMode != nil { + useRawMode = *cfg.RawMode + } else { + useRawMode = envIsOne("AMUX_SSH_RAW") || cfg.Agent == AgentCodex || cfg.Agent == AgentOpenCode || cfg.Agent == AgentAmp || cfg.Agent == AgentGemini || cfg.Agent == AgentDroid + } + + remoteCommand := spec.RemoteCommand + + sshArgs := []string{ + "-tt", + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "LogLevel=ERROR", + target, + } + if !rawShell && !useShellBootstrap { + sshArgs = append(sshArgs, remoteCommand) + } + + if spec.DebugEnabled { + sshArgs = append([]string{"-vvv"}, sshArgs...) + fmt.Printf("SSH target: %s\n", target) + if len(cfg.Env) > 0 { + keys := make([]string, 0, len(cfg.Env)) + for key := range cfg.Env { + keys = append(keys, key) + } + sortStrings(keys) + fmt.Printf("SSH env keys: %s\n", strings.Join(keys, ", ")) + } + if rawShell || useShellBootstrap { + fmt.Printf("SSH command: ssh %s\n", target) + } else { + fmt.Printf("SSH command: %s\n", redactExports(remoteCommand)) + } + } + + cmd := exec.Command("ssh", sshArgs...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + var stdinPipe io.WriteCloser + var pipeReader io.ReadCloser + if useShellBootstrap { + pr, pw := io.Pipe() + pipeReader = pr + stdinPipe = pw + cmd.Stdin = pr + } else { + cmd.Stdin = os.Stdin + } + + var restoreRaw func() + if useRawMode { + if term.IsTerminal(int(os.Stdin.Fd())) { + state, err := term.MakeRaw(int(os.Stdin.Fd())) + if err == nil { + restoreRaw = func() { _ = term.Restore(int(os.Stdin.Fd()), state) } + } + } + } + + if err := cmd.Start(); err != nil { + if restoreRaw != nil { + restoreRaw() + } + if pipeReader != nil { + _ = pipeReader.Close() + } + if errors.Is(err, exec.ErrNotFound) { + return 1, errors.New("ssh is required to run interactive sessions. Install OpenSSH and try again.") + } + return 1, err + } + + if useShellBootstrap && stdinPipe != nil { + go func() { + wrappedScript := strings.Join([]string{ + "set +o history", + "stty -echo", + spec.CommandBlock, + }, "\n") + _, _ = io.WriteString(stdinPipe, wrappedScript+"\n") + _, _ = io.Copy(stdinPipe, os.Stdin) + _ = stdinPipe.Close() + }() + } + + err = cmd.Wait() + if restoreRaw != nil { + restoreRaw() + } + if pipeReader != nil { + _ = pipeReader.Close() + } + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return exitErr.ExitCode(), nil + } + return 1, err + } + return 0, nil +} diff --git a/internal/sandbox/agent_install.go b/internal/sandbox/agent_install.go new file mode 100644 index 00000000..7516e04c --- /dev/null +++ b/internal/sandbox/agent_install.go @@ -0,0 +1,337 @@ +package sandbox + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +const ( + agentInstallBasePath = "/amux/.installed" + agentInstallTTL = 24 * time.Hour // Re-check for updates after 24 hours +) + +func agentInstallMarker(agent string) string { + return fmt.Sprintf("%s/%s", agentInstallBasePath, agent) +} + +// isAgentInstallFresh checks if the agent was installed recently (within TTL). 
+// Returns true if the marker exists and is fresh, false if missing or stale. +func isAgentInstallFresh(computer RemoteSandbox, agent string) bool { + marker := agentInstallMarker(agent) + // Check if marker exists and get its age in seconds + resp, err := execCommand(computer, fmt.Sprintf( + `if [ -f %s ]; then stat -c %%Y %s 2>/dev/null || stat -f %%m %s 2>/dev/null; else echo 0; fi`, + marker, marker, marker, + ), nil) + if err != nil { + return false + } + stdout := strings.TrimSpace(getStdoutFromResponse(resp)) + if stdout == "" || stdout == "0" { + return false + } + // Parse the modification timestamp + modTime, err := strconv.ParseInt(stdout, 10, 64) + if err != nil || modTime == 0 { + return false + } + // Check if within TTL + age := time.Since(time.Unix(modTime, 0)) + return age < agentInstallTTL +} + +// touchAgentMarker creates or updates the install marker timestamp. +func touchAgentMarker(computer RemoteSandbox, agent string) { + marker := agentInstallMarker(agent) + _, _ = execCommand(computer, SafeCommands.MkdirP(agentInstallBasePath), nil) + _, _ = execCommand(computer, SafeCommands.Touch(marker), nil) +} + +// isAgentInstalled checks if the agent has been installed (marker file exists). +// This is a simpler check than isAgentInstallFresh - it doesn't check the TTL. +// Used for agents that auto-update themselves. 
func isAgentInstalled(computer RemoteSandbox, agent string) bool {
	marker := agentInstallMarker(agent)
	// `test -f` exit code 0 means the marker file exists.
	resp, err := execCommand(computer, fmt.Sprintf("test -f %s", marker), nil)
	return err == nil && resp != nil && resp.ExitCode == 0
}

// installClaude installs or updates Claude Code in the sandbox via
// Anthropic's native installer script. It is skipped when a working
// installation is detected and forceUpdate is false.
func installClaude(computer RemoteSandbox, verbose bool, forceUpdate bool) error {
	if verbose {
		fmt.Println("Installing Claude Code...")
	}
	// Check for native installation first (~/.local/bin/claude), then fall back to PATH
	home := getHomeDir(computer)
	nativeCheck := fmt.Sprintf("test -x %s/.local/bin/claude", home)
	resp, _ := execCommand(computer, nativeCheck, nil)
	nativeInstalled := resp != nil && resp.ExitCode == 0

	pathResp, _ := execCommand(computer, "which claude", nil)
	pathInstalled := pathResp != nil && pathResp.ExitCode == 0

	alreadyInstalled := nativeInstalled || pathInstalled
	if alreadyInstalled && !forceUpdate {
		if verbose {
			fmt.Println("Claude Code already installed")
		}
	} else {
		action := "Installing"
		if alreadyInstalled {
			action = "Updating"
		}
		if verbose {
			fmt.Printf("%s Claude Code...\n", action)
		}
		// Use native installer (recommended by Anthropic)
		// Installs to ~/.local/bin/claude with binary at ~/.local/share/claude/versions/{version}
		resp, err := execCommand(computer, `bash -lc "curl -fsSL https://claude.ai/install.sh | bash"`, nil)
		if err != nil || resp.ExitCode != 0 {
			return errors.New("failed to install claude code via native installer")
		}
		if verbose {
			fmt.Println("Claude Code installed")
		}
	}
	// Record install time so TTL-based callers can skip re-checks.
	touchAgentMarker(computer, "claude")
	return nil
}

// installCodex installs or updates the Codex CLI via npm.
func installCodex(computer RemoteSandbox, verbose bool, forceUpdate bool) error {
	if verbose {
		fmt.Println("Installing Codex CLI...")
	}
	resp, _ := execCommand(computer, "which codex", nil)
	alreadyInstalled := resp != nil && resp.ExitCode == 0
	if alreadyInstalled && !forceUpdate {
		if verbose {
			fmt.Println("Codex CLI already installed")
		}
	} else {
		action := "Installing"
		if alreadyInstalled {
			action = "Updating"
		}
		if verbose {
			fmt.Printf("%s Codex CLI...\n", action)
		}
		resp, err := execCommand(computer, "npm install -g @openai/codex@latest", nil)
		if err != nil || resp.ExitCode != 0 {
			return errors.New("failed to install codex cli in sandbox")
		}
		if verbose {
			fmt.Println("Codex CLI installed")
		}
	}
	touchAgentMarker(computer, "codex")
	return nil
}

// installOpenCode installs or updates the OpenCode CLI. It prefers the
// official install script and falls back to npm if the script fails.
func installOpenCode(computer RemoteSandbox, verbose bool, forceUpdate bool) error {
	if verbose {
		fmt.Println("Installing OpenCode CLI...")
	}
	resp, _ := execCommand(computer, "which opencode", nil)
	alreadyInstalled := resp != nil && resp.ExitCode == 0
	if alreadyInstalled && !forceUpdate {
		if verbose {
			fmt.Println("OpenCode CLI already installed")
		}
	} else {
		action := "Installing"
		if alreadyInstalled {
			action = "Updating"
		}
		if verbose {
			fmt.Printf("%s OpenCode CLI...\n", action)
		}
		resp, err := execCommand(computer, `bash -lc "curl -fsSL https://opencode.ai/install | bash"`, nil)
		if err != nil || resp.ExitCode != 0 {
			if verbose {
				fmt.Println("OpenCode install script failed, trying npm...")
			}
			// Fall back to the npm distribution when the script fails.
			resp, err = execCommand(computer, "npm install -g opencode-ai@latest", nil)
			if err != nil || resp.ExitCode != 0 {
				return errors.New("failed to install opencode cli in sandbox")
			}
		}
		if verbose {
			fmt.Println("OpenCode CLI installed")
		}
	}
	touchAgentMarker(computer, "opencode")
	return nil
}

// installAmp installs or updates the Amp CLI. Detection checks both PATH and
// the default ~/.amp/bin install location.
func installAmp(computer RemoteSandbox, verbose bool, forceUpdate bool) error {
	if verbose {
		fmt.Println("Installing Amp CLI...")
	}
	home := getHomeDir(computer)
	ampBin := fmt.Sprintf("%s/.amp/bin/amp", home)
	resp, _ := execCommand(computer, fmt.Sprintf("sh -lc \"command -v amp >/dev/null 2>&1 || test -x %s\"", quoteForShell(ampBin)), nil)
	alreadyInstalled := resp != nil && resp.ExitCode == 0
	if alreadyInstalled && !forceUpdate {
		if verbose {
			fmt.Println("Amp CLI already installed")
		}
	} else {
		action := "Installing"
		if alreadyInstalled {
			action = "Updating"
		}
		if verbose {
			fmt.Printf("%s Amp CLI...\n", action)
		}
		resp, err := execCommand(computer, `bash -lc "curl -fsSL https://ampcode.com/install.sh | bash"`, nil)
		if err != nil || resp.ExitCode != 0 {
			if verbose {
				fmt.Println("Amp install script failed, trying npm...")
			}
			// Fall back to the npm distribution when the script fails.
			resp, err = execCommand(computer, "npm install -g @sourcegraph/amp@latest", nil)
			if err != nil || resp.ExitCode != 0 {
				return errors.New("failed to install amp cli in sandbox")
			}
		}
		if verbose {
			fmt.Println("Amp CLI installed")
		}
	}
	touchAgentMarker(computer, "amp")
	return nil
}

// installGemini installs or updates the Gemini CLI via npm.
func installGemini(computer RemoteSandbox, verbose bool, forceUpdate bool) error {
	if verbose {
		fmt.Println("Installing Gemini CLI...")
	}
	resp, _ := execCommand(computer, "which gemini", nil)
	alreadyInstalled := resp != nil && resp.ExitCode == 0
	if alreadyInstalled && !forceUpdate {
		if verbose {
			fmt.Println("Gemini CLI already installed")
		}
	} else {
		action := "Installing"
		if alreadyInstalled {
			action = "Updating"
		}
		if verbose {
			fmt.Printf("%s Gemini CLI...\n", action)
		}
		resp, err := execCommand(computer, "npm install -g @google/gemini-cli@latest", nil)
		if err != nil || resp.ExitCode != 0 {
			return errors.New("failed to install gemini cli in sandbox")
		}
		if verbose {
			fmt.Println("Gemini CLI installed")
		}
	}
	touchAgentMarker(computer, "gemini")
	return nil
}

// installDroid installs or updates the Droid CLI via Factory's install script.
func installDroid(computer RemoteSandbox, verbose bool, forceUpdate bool) error {
	if verbose {
		fmt.Println("Installing Droid CLI...")
	}
	resp, _ := execCommand(computer, "which droid", nil)
	alreadyInstalled := resp != nil && resp.ExitCode == 0
	if alreadyInstalled && !forceUpdate {
		if verbose {
			fmt.Println("Droid CLI already installed")
		}
	} else {
		action := "Installing"
		if alreadyInstalled {
			action = "Updating"
		}
		if verbose {
			fmt.Printf("%s Droid CLI...\n", action)
		}
		resp, err :=
			execCommand(computer, `bash -lc "curl -fsSL https://app.factory.ai/cli | sh"`, nil)
		if err != nil || resp.ExitCode != 0 {
			return errors.New("failed to install droid cli in sandbox")
		}
		if verbose {
			fmt.Println("Droid CLI installed")
		}
	}
	touchAgentMarker(computer, "droid")
	return nil
}

// EnsureAgentInstalled installs the requested agent if missing or stale.
// If forceUpdate is true, always reinstalls to get the latest version.
// For agents that auto-update (Claude, OpenCode, Amp, Gemini, Droid): just check if installed.
// For agents that don't auto-update (Codex): use TTL-based caching (24h).
func EnsureAgentInstalled(computer RemoteSandbox, agent Agent, verbose bool, forceUpdate bool) error {
	// The shell "agent" runs bash directly; nothing to install.
	if agent == AgentShell {
		return nil
	}

	// Check if we can skip installation based on agent's auto-update capability
	if !forceUpdate {
		if AgentAutoUpdates[agent] {
			// Agent handles its own updates - just check if installed
			if isAgentInstalled(computer, agent.String()) {
				if verbose {
					fmt.Printf("%s already installed (auto-updates itself)\n", agent)
				}
				return nil
			}
		} else {
			// Agent doesn't auto-update - use TTL-based checking
			if isAgentInstallFresh(computer, agent.String()) {
				if verbose {
					fmt.Printf("%s already installed (checked within 24h)\n", agent)
				}
				return nil
			}
		}
	}

	// Determine if this is an update (for messaging)
	needsUpdate := forceUpdate && isAgentInstalled(computer, agent.String())
	if needsUpdate && verbose {
		fmt.Printf("Checking for %s updates...\n", agent)
	}

	switch agent {
	case AgentClaude:
		return installClaude(computer, verbose, forceUpdate)
	case AgentCodex:
		return installCodex(computer, verbose, forceUpdate)
	case AgentOpenCode:
		return installOpenCode(computer, verbose, forceUpdate)
	case AgentAmp:
		return installAmp(computer, verbose, forceUpdate)
	case AgentGemini:
		return installGemini(computer, verbose, forceUpdate)
	case AgentDroid:
		return installDroid(computer, verbose, forceUpdate)
	default:
		// Unknown agents are a no-op rather than an error.
		return nil
	}
}

// UpdateAgent forces a reinstall of the agent to get the latest version.
func UpdateAgent(computer RemoteSandbox, agent Agent, verbose bool) error {
	return EnsureAgentInstalled(computer, agent, verbose, true)
}

// UpdateAllAgents updates all supported agents to their latest versions.
// Failures are reported (when verbose) but do not stop the remaining updates;
// it always returns nil (best effort by design).
func UpdateAllAgents(computer RemoteSandbox, verbose bool) error {
	agents := []Agent{AgentClaude, AgentCodex, AgentOpenCode, AgentAmp, AgentGemini, AgentDroid}
	for _, agent := range agents {
		if err := UpdateAgent(computer, agent, verbose); err != nil {
			if verbose {
				fmt.Printf("Warning: failed to update %s: %v\n", agent, err)
			}
			// Continue with other agents
		}
	}
	return nil
}
diff --git a/internal/sandbox/agent_install_test.go b/internal/sandbox/agent_install_test.go
new file mode 100644
index 00000000..3e786c19
--- /dev/null
+++ b/internal/sandbox/agent_install_test.go
@@ -0,0 +1,191 @@
package sandbox

import (
	"strings"
	"testing"
)

// TestInstallClaude verifies installClaude's skip-vs-install behavior
// against a mocked sandbox.
func TestInstallClaude(t *testing.T) {
	t.Run("skips when already installed (native)", func(t *testing.T) {
		mock := NewMockRemoteSandbox("test")
		// Mock getHomeDir
		mock.SetExecResult("sh -lc", "/home/user", 0)
		// Mock native installation check - succeeds
		mock.SetExecResult("test -x", "", 0)
		// Mock marker commands
		mock.SetExecResult("mkdir", "", 0)
		mock.SetExecResult("touch", "", 0)

		err := installClaude(mock, false, false)
		if err != nil {
			t.Errorf("installClaude() error = %v", err)
		}

		// Should not have called curl since it was already installed
		history := mock.GetExecHistory()
		for _, cmd := range history {
			if strings.Contains(cmd, "curl") {
				t.Error("installClaude() should not call curl when already installed")
			}
		}
	})

	t.Run("installs when not present", func(t *testing.T) {
		mock := NewMockRemoteSandbox("test")
		// Mock getHomeDir
		mock.SetExecResult("sh -lc", "/home/user", 0)
		// Mock
		// Mock native installation check - fails (not installed)
		mock.SetExecResult("test -x", "", 1)
		// Mock which - fails (not in PATH)
		mock.SetExecResult("which claude", "", 1)
		// Mock curl install - succeeds
		mock.SetExecResult("bash -lc", "", 0)
		// Mock marker commands
		mock.SetExecResult("mkdir", "", 0)
		mock.SetExecResult("touch", "", 0)

		err := installClaude(mock, false, false)
		if err != nil {
			t.Errorf("installClaude() error = %v", err)
		}

		// Should have called curl for installation
		history := mock.GetExecHistory()
		foundCurl := false
		for _, cmd := range history {
			if strings.Contains(cmd, "curl") && strings.Contains(cmd, "claude.ai/install.sh") {
				foundCurl = true
				break
			}
		}
		if !foundCurl {
			t.Error("installClaude() should use native installer (curl)")
		}
	})
}

// TestInstallCodex verifies the npm-based Codex install path.
func TestInstallCodex(t *testing.T) {
	mock := NewMockRemoteSandbox("test")
	mock.SetExecResult("which codex", "", 1) // Not installed
	mock.SetExecResult("npm install", "", 0) // Install succeeds
	mock.SetExecResult("mkdir", "", 0)
	mock.SetExecResult("touch", "", 0)

	err := installCodex(mock, false, false)
	if err != nil {
		t.Errorf("installCodex() error = %v", err)
	}

	history := mock.GetExecHistory()
	foundNpm := false
	for _, cmd := range history {
		if strings.Contains(cmd, "npm install -g @openai/codex") {
			foundNpm = true
			break
		}
	}
	if !foundNpm {
		t.Error("installCodex() should use npm install")
	}
}

// TestInstallGemini verifies the npm-based Gemini install path.
func TestInstallGemini(t *testing.T) {
	mock := NewMockRemoteSandbox("test")
	mock.SetExecResult("which gemini", "", 1) // Not installed
	mock.SetExecResult("npm install", "", 0)  // Install succeeds
	mock.SetExecResult("mkdir", "", 0)
	mock.SetExecResult("touch", "", 0)

	err := installGemini(mock, false, false)
	if err != nil {
		t.Errorf("installGemini() error = %v", err)
	}

	history := mock.GetExecHistory()
	foundNpm := false
	for _, cmd := range history {
		if strings.Contains(cmd, "npm install -g @google/gemini-cli") {
			foundNpm = true
			break
		}
	}
	if !foundNpm {
		t.Error("installGemini() should use npm install")
	}
}

// TestInstallAmp verifies the script-based Amp install path.
func TestInstallAmp(t *testing.T) {
	mock := NewMockRemoteSandbox("test")
	mock.SetHomeDir("/home/user")
	mock.SetExecResult("sh -lc", "", 1)   // command -v amp fails
	mock.SetExecResult("bash -lc", "", 0) // curl install succeeds
	mock.SetExecResult("mkdir", "", 0)
	mock.SetExecResult("touch", "", 0)

	err := installAmp(mock, false, false)
	if err != nil {
		t.Errorf("installAmp() error = %v", err)
	}

	history := mock.GetExecHistory()
	foundCurl := false
	for _, cmd := range history {
		if strings.Contains(cmd, "curl") && strings.Contains(cmd, "ampcode.com/install.sh") {
			foundCurl = true
			break
		}
	}
	if !foundCurl {
		t.Error("installAmp() should use curl installer")
	}
}

// TestInstallDroid verifies the script-based Droid install path.
func TestInstallDroid(t *testing.T) {
	mock := NewMockRemoteSandbox("test")
	mock.SetExecResult("which droid", "", 1) // Not installed
	mock.SetExecResult("bash -lc", "", 0)    // curl install succeeds
	mock.SetExecResult("mkdir", "", 0)
	mock.SetExecResult("touch", "", 0)

	err := installDroid(mock, false, false)
	if err != nil {
		t.Errorf("installDroid() error = %v", err)
	}

	history := mock.GetExecHistory()
	foundCurl := false
	for _, cmd := range history {
		if strings.Contains(cmd, "curl") && strings.Contains(cmd, "factory.ai/cli") {
			foundCurl = true
			break
		}
	}
	if !foundCurl {
		t.Error("installDroid() should use curl installer")
	}
}

// TestInstallOpenCode verifies the script-based OpenCode install path.
func TestInstallOpenCode(t *testing.T) {
	mock := NewMockRemoteSandbox("test")
	mock.SetExecResult("which opencode", "", 1) // Not installed
	mock.SetExecResult("bash -lc", "", 0)       // curl install succeeds
	mock.SetExecResult("mkdir", "", 0)
	mock.SetExecResult("touch", "", 0)

	err := installOpenCode(mock, false, false)
	if err != nil {
		t.Errorf("installOpenCode() error = %v", err)
	}

	history := mock.GetExecHistory()
	foundCurl := false
	for _, cmd := range history {
		if strings.Contains(cmd, "curl") && strings.Contains(cmd, "opencode.ai/install") {
			foundCurl = true
			break
		}
	}
	if !foundCurl {
		t.Error("installOpenCode() should use curl installer")
	}
}
diff --git a/internal/sandbox/agent_test.go b/internal/sandbox/agent_test.go
new file mode 100644
index 00000000..fbbcbca0
--- /dev/null
+++ b/internal/sandbox/agent_test.go
@@ -0,0 +1,323 @@
package sandbox

import (
	"strings"
	"testing"
)

// TestResolveAgentCommandPath is a table-driven test of command resolution
// against mocked exec results.
func TestResolveAgentCommandPath(t *testing.T) {
	tests := []struct {
		name      string
		command   string
		setupExec map[string]MockExecResult
		want      string
	}{
		{
			name:    "claude native installation found",
			command: "claude",
			setupExec: map[string]MockExecResult{
				// getHomeDir lookup
				"sh -lc": {Output: "/home/user", ExitCode: 0},
				// Native installation check succeeds
				"test -x": {Output: "", ExitCode: 0},
			},
			want: "/home/user/.local/bin/claude",
		},
		{
			name:    "codex found in PATH",
			command: "codex",
			setupExec: map[string]MockExecResult{
				"sh -lc":           {Output: "/home/user", ExitCode: 0},
				"command -v codex": {Output: "/usr/local/bin/codex\n", ExitCode: 0},
			},
			want: "/usr/local/bin/codex",
		},
		{
			name:    "gemini found in PATH",
			command: "gemini",
			setupExec: map[string]MockExecResult{
				"sh -lc":            {Output: "/home/user", ExitCode: 0},
				"command -v gemini": {Output: "/usr/local/bin/gemini\n", ExitCode: 0},
			},
			want: "/usr/local/bin/gemini",
		},
		{
			name:    "command not found returns original command",
			command: "unknown",
			setupExec: map[string]MockExecResult{
				"sh -lc":             {Output: "/home/user", ExitCode: 0},
				"command -v unknown": {Output: "", ExitCode: 1},
				"command -v node":    {Output: "", ExitCode: 1},
			},
			want: "unknown",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mock := NewMockRemoteSandbox("test")

			for prefix, result := range tt.setupExec {
				mock.SetExecResult(prefix, result.Output, result.ExitCode)
			}

			got :=
resolveAgentCommandPath(mock, tt.command) + if got != tt.want { + t.Errorf("resolveAgentCommandPath(%q) = %q, want %q", tt.command, got, tt.want) + } + }) + } +} + +func TestIsAgentInstallFresh(t *testing.T) { + tests := []struct { + name string + agent string + setupExec map[string]MockExecResult + want bool + }{ + { + name: "marker exists and is fresh", + agent: "claude", + setupExec: map[string]MockExecResult{ + // Return current timestamp (simulating fresh marker) + "if [ -f /amux/.installed/claude": {Output: "1704067200", ExitCode: 0}, // Jan 1, 2024 + }, + want: false, // Will be false because mock timestamp is old + }, + { + name: "marker does not exist", + agent: "claude", + setupExec: map[string]MockExecResult{ + "if [ -f /amux/.installed/claude": {Output: "0", ExitCode: 0}, + }, + want: false, + }, + { + name: "command fails", + agent: "claude", + setupExec: map[string]MockExecResult{ + "if [ -f /amux/.installed/claude": {Output: "", ExitCode: 1}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := NewMockRemoteSandbox("test") + + for prefix, result := range tt.setupExec { + mock.SetExecResult(prefix, result.Output, result.ExitCode) + } + + got := isAgentInstallFresh(mock, tt.agent) + if got != tt.want { + t.Errorf("isAgentInstallFresh(%q) = %v, want %v", tt.agent, got, tt.want) + } + }) + } +} + +func TestTouchAgentMarker(t *testing.T) { + mock := NewMockRemoteSandbox("test") + + touchAgentMarker(mock, "claude") + + history := mock.GetExecHistory() + + // Should have mkdir and touch commands + foundMkdir := false + foundTouch := false + for _, cmd := range history { + if strings.Contains(cmd, "mkdir") && strings.Contains(cmd, "/amux/.installed") { + foundMkdir = true + } + if strings.Contains(cmd, "touch") && strings.Contains(cmd, "/amux/.installed/claude") { + foundTouch = true + } + } + + if !foundMkdir { + t.Error("touchAgentMarker should create /amux/.installed directory") + } + if !foundTouch { 
+ t.Error("touchAgentMarker should touch /amux/.installed/claude marker") + } +} + +func TestAgentInstallMarker(t *testing.T) { + tests := []struct { + agent string + want string + }{ + {"claude", "/amux/.installed/claude"}, + {"codex", "/amux/.installed/codex"}, + {"amp", "/amux/.installed/amp"}, + {"droid", "/amux/.installed/droid"}, + } + + for _, tt := range tests { + t.Run(tt.agent, func(t *testing.T) { + got := agentInstallMarker(tt.agent) + if got != tt.want { + t.Errorf("agentInstallMarker(%q) = %q, want %q", tt.agent, got, tt.want) + } + }) + } +} + +func TestEnsureAgentInstalled_Shell(t *testing.T) { + mock := NewMockRemoteSandbox("test") + + // Shell agent should return nil without doing anything + err := EnsureAgentInstalled(mock, AgentShell, false, false) + if err != nil { + t.Errorf("EnsureAgentInstalled(AgentShell) error = %v", err) + } + + // Should not execute any commands + history := mock.GetExecHistory() + if len(history) > 0 { + t.Errorf("EnsureAgentInstalled(AgentShell) should not execute commands, got %d", len(history)) + } +} + +func TestEnsureAgentInstalled_SkipsIfFresh(t *testing.T) { + mock := NewMockRemoteSandbox("test") + + // Simulate fresh marker (within 24h) - use current timestamp + // The isAgentInstallFresh function checks if timestamp is within TTL + // We need to return "0" to indicate no marker exists, then it will install + mock.SetExecResult("if [ -f /amux/.installed/claude", "0", 0) + mock.SetExecResult("which claude", "", 0) // Already installed + mock.SetExecResult("touch", "", 0) + mock.SetExecResult("mkdir", "", 0) + + err := EnsureAgentInstalled(mock, AgentClaude, false, false) + if err != nil { + t.Errorf("EnsureAgentInstalled() error = %v", err) + } +} + +func TestGetHomeDir(t *testing.T) { + tests := []struct { + name string + setupExec map[string]MockExecResult + want string + }{ + { + name: "returns home dir from command", + setupExec: map[string]MockExecResult{ + "sh -lc": {Output: "/home/testuser", ExitCode: 0}, 
+ }, + want: "/home/testuser", + }, + { + name: "falls back to /home/daytona on failure", + setupExec: map[string]MockExecResult{ + "sh -lc": {Output: "", ExitCode: 1}, + }, + want: "/home/daytona", + }, + { + name: "falls back to /home/daytona on empty output", + setupExec: map[string]MockExecResult{ + "sh -lc": {Output: " ", ExitCode: 0}, + }, + want: "/home/daytona", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := NewMockRemoteSandbox("test") + + for prefix, result := range tt.setupExec { + mock.SetExecResult(prefix, result.Output, result.ExitCode) + } + + got := getHomeDir(mock) + if got != tt.want { + t.Errorf("getHomeDir() = %q, want %q", got, tt.want) + } + }) + } +} + +func TestHasScript(t *testing.T) { + tests := []struct { + name string + setupExec map[string]MockExecResult + want bool + }{ + { + name: "script command available", + setupExec: map[string]MockExecResult{ + "command -v script": {Output: "/usr/bin/script", ExitCode: 0}, + }, + want: true, + }, + { + name: "script command not available", + setupExec: map[string]MockExecResult{ + "command -v script": {Output: "", ExitCode: 1}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := NewMockRemoteSandbox("test") + + for prefix, result := range tt.setupExec { + mock.SetExecResult(prefix, result.Output, result.ExitCode) + } + + got := hasScript(mock) + if got != tt.want { + t.Errorf("hasScript() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetNodeBinDir(t *testing.T) { + tests := []struct { + name string + setupExec map[string]MockExecResult + want string + }{ + { + name: "returns node bin directory", + setupExec: map[string]MockExecResult{ + "command -v node": {Output: "/usr/local/bin/node\n", ExitCode: 0}, + "dirname '/usr/local/bin/node'": {Output: "/usr/local/bin\n", ExitCode: 0}, + }, + want: "/usr/local/bin", + }, + { + name: "returns empty when node not found", + setupExec: 
map[string]MockExecResult{ + "command -v node": {Output: "", ExitCode: 1}, + }, + want: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := NewMockRemoteSandbox("test") + + for prefix, result := range tt.setupExec { + mock.SetExecResult(prefix, result.Output, result.ExitCode) + } + + got := getNodeBinDir(mock) + if got != tt.want { + t.Errorf("getNodeBinDir() = %q, want %q", got, tt.want) + } + }) + } +} diff --git a/internal/sandbox/computer.go b/internal/sandbox/computer.go new file mode 100644 index 00000000..efc3e507 --- /dev/null +++ b/internal/sandbox/computer.go @@ -0,0 +1,253 @@ +package sandbox + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" +) + +const ( + persistMountPath = "/amux" +) + +// SandboxConfig configures sandbox creation. +type SandboxConfig struct { + Agent Agent + EnvVars map[string]string + Volumes []VolumeSpec + CredentialsMode string + AutoStopInterval int32 + AutoDeleteInterval int32 + Snapshot string + Ephemeral bool + PersistenceVolumeName string +} + +func resolveVolumeMounts(manager VolumeManager, specs []VolumeSpec) ([]VolumeMount, error) { + if len(specs) == 0 { + return nil, nil + } + if manager == nil { + return nil, fmt.Errorf("volume manager is not available") + } + mounts := make([]VolumeMount, 0, len(specs)) + for _, spec := range specs { + volume, err := manager.WaitReady(context.Background(), spec.Name, 0) + if err != nil { + return nil, err + } + mounts = append(mounts, VolumeMount{VolumeID: volume.ID, MountPath: spec.MountPath}) + } + return mounts, nil +} + +func resolvePersistentMount(provider Provider, userSpecs []VolumeSpec, volumeName string) (*VolumeMount, error) { + if provider == nil { + return nil, fmt.Errorf("provider is required") + } + for _, spec := range userSpecs { + if spec.MountPath == persistMountPath || strings.HasPrefix(spec.MountPath, persistMountPath+"/") { + return nil, fmt.Errorf("volume mount path %q conflicts with amux persistence 
mount", spec.MountPath) + } + } + if !provider.SupportsFeature(FeatureVolumes) { + return nil, fmt.Errorf("provider %q does not support volumes", provider.Name()) + } + manager := provider.Volumes() + if manager == nil { + return nil, fmt.Errorf("volume manager is not available") + } + if strings.TrimSpace(volumeName) == "" { + volumeName = defaultPersistenceVolumeName + } + volume, err := manager.GetOrCreate(context.Background(), volumeName) + if err != nil { + return nil, err + } + if _, err := manager.WaitReady(context.Background(), volumeName, 0); err != nil { + return nil, err + } + return &VolumeMount{VolumeID: volume.ID, MountPath: persistMountPath}, nil +} + +// CreateSandboxSession always creates a new sandbox for this run. +func CreateSandboxSession(provider Provider, cwd string, cfg SandboxConfig) (RemoteSandbox, *SandboxMeta, error) { + if provider == nil { + return nil, nil, fmt.Errorf("provider is required") + } + providerName := provider.Name() + worktreeID := ComputeWorktreeID(cwd) + project := filepath.Base(cwd) + if project == "" || project == "." 
|| project == "/" { + project = "unknown" + } + + if cfg.Agent == "" { + cfg.Agent = AgentShell + } + if len(cfg.Volumes) > 0 && !provider.SupportsFeature(FeatureVolumes) { + return nil, nil, fmt.Errorf("provider %q does not support volumes", providerName) + } + + createdAt := time.Now().UTC().Format(time.RFC3339) + labels := map[string]string{ + "amux.worktreeId": worktreeID, + "amux.agent": cfg.Agent.String(), + "amux.provider": providerName, + "amux.project": project, + "amux.createdAt": createdAt, + } + + userMounts, err := resolveVolumeMounts(provider.Volumes(), cfg.Volumes) + if err != nil { + return nil, nil, err + } + persistMount, err := resolvePersistentMount(provider, cfg.Volumes, cfg.PersistenceVolumeName) + if err != nil { + return nil, nil, err + } + mounts := make([]VolumeMount, 0, len(userMounts)+1) + if persistMount != nil { + mounts = append(mounts, *persistMount) + } + mounts = append(mounts, userMounts...) + + sb, err := provider.CreateSandbox(context.Background(), SandboxCreateConfig{ + Agent: cfg.Agent, + Snapshot: cfg.Snapshot, + EnvVars: cfg.EnvVars, + Labels: labels, + Volumes: mounts, + AutoStopMinutes: cfg.AutoStopInterval, + AutoDeleteMinutes: cfg.AutoDeleteInterval, + Ephemeral: cfg.Ephemeral, + }) + if err != nil { + return nil, nil, err + } + if err := sb.WaitReady(context.Background(), 60*time.Second); err != nil { + return nil, nil, err + } + applyEnvVars(sb, cfg.EnvVars) + + meta := &SandboxMeta{ + SandboxID: sb.ID(), + CreatedAt: createdAt, + Agent: cfg.Agent, + Provider: providerName, + WorktreeID: worktreeID, + Project: project, + } + if err := SaveSandboxMeta(cwd, providerName, *meta); err != nil { + return nil, nil, err + } + + return sb, meta, nil +} + +type envConfigurator interface { + setDefaultEnv(env map[string]string) +} + +func applyEnvVars(handle RemoteSandbox, env map[string]string) { + if handle == nil { + return + } + if len(env) == 0 { + return + } + if configurator, ok := handle.(envConfigurator); ok { + 
configurator.setDefaultEnv(env) + } +} + +// ListAmuxSandboxes returns sandboxes created by amux. +func ListAmuxSandboxes(provider Provider) ([]RemoteSandbox, error) { + if provider == nil { + return nil, fmt.Errorf("provider is required") + } + sandboxes, err := provider.ListSandboxes(context.Background()) + if err != nil { + return nil, err + } + filtered := make([]RemoteSandbox, 0, len(sandboxes)) + for _, sb := range sandboxes { + labels := sb.Labels() + if labels != nil { + if _, ok := labels["amux.provider"]; ok { + filtered = append(filtered, sb) + } + } + } + return filtered, nil +} + +// RemoveSandbox deletes a sandbox by ID or by current worktree meta. +func RemoveSandbox(provider Provider, cwd string, sandboxID string) error { + if provider == nil { + return fmt.Errorf("provider is required") + } + if sandboxID != "" { + if err := provider.DeleteSandbox(context.Background(), sandboxID); err != nil { + return err + } + return RemoveSandboxMetaByID(sandboxID) + } + meta, err := LoadSandboxMeta(cwd, provider.Name()) + if err != nil { + return err + } + if meta == nil { + return fmt.Errorf("no sandbox metadata found for this project") + } + if err := provider.DeleteSandbox(context.Background(), meta.SandboxID); err != nil { + return err + } + return RemoveSandboxMeta(cwd, provider.Name()) +} + +// Agent identifies the CLI agents supported by AMUX sandboxes. 
type Agent string

// Supported agent identifiers.
const (
	AgentClaude   Agent = "claude"
	AgentCodex    Agent = "codex"
	AgentOpenCode Agent = "opencode"
	AgentAmp      Agent = "amp"
	AgentGemini   Agent = "gemini"
	AgentDroid    Agent = "droid"
	AgentShell    Agent = "shell"
)

// String returns the agent identifier as a plain string.
func (a Agent) String() string { return string(a) }

// knownAgents enumerates every agent value accepted by IsValidAgent.
var knownAgents = []Agent{
	AgentClaude, AgentCodex, AgentOpenCode, AgentAmp, AgentGemini, AgentDroid, AgentShell,
}

// IsValidAgent reports whether value names one of the supported agents.
func IsValidAgent(value string) bool {
	for _, a := range knownAgents {
		if string(a) == value {
			return true
		}
	}
	return false
}

// AgentAutoUpdates indicates which agents handle their own updates automatically.
// Agents that auto-update don't need TTL-based version checking - we just verify
// they're installed. Agents that don't auto-update (only Codex currently) use
// TTL-based reinstalls to stay current.
var AgentAutoUpdates = map[Agent]bool{
	AgentClaude:   true,  // Native installer auto-updates on startup
	AgentOpenCode: true,  // Auto-downloads updates on startup
	AgentAmp:      true,  // Bun-based installer supports auto-update
	AgentGemini:   true,  // npm-triggered auto-update
	AgentDroid:    true,  // Bun-based auto-update
	AgentCodex:    false, // Only shows TUI notification, requires manual update
	AgentShell:    true,  // N/A - no installation needed
}

// VolumeSpec defines a named volume mount.
+type VolumeSpec struct { + Name string + MountPath string +} diff --git a/internal/sandbox/config.go b/internal/sandbox/config.go new file mode 100644 index 00000000..56d514af --- /dev/null +++ b/internal/sandbox/config.go @@ -0,0 +1,230 @@ +package sandbox + +import ( + "encoding/json" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/andyrewlee/amux/internal/daytona" + + "github.com/andyrewlee/amux/internal/config" +) + +const ( + envAmuxDaytonaAPIKey = "AMUX_DAYTONA_API_KEY" + envDaytonaAPIKey = "DAYTONA_API_KEY" + envAmuxDaytonaAPIURL = "AMUX_DAYTONA_API_URL" + envDaytonaAPIURL = "DAYTONA_API_URL" + envAmuxDaytonaTarget = "AMUX_DAYTONA_TARGET" + envDaytonaTarget = "DAYTONA_TARGET" + envAmuxProvider = "AMUX_PROVIDER" + defaultPersistenceVolumeName = "amux-persist" +) + +var configKeys = []string{ + "daytonaApiKey", + "daytonaApiUrl", + "daytonaTarget", + "defaultSnapshotName", + "snapshotAgents", + "snapshotBaseImage", + "persistenceVolumeName", + "settingsSync", + "firstRunComplete", +} + +// Config stores AMUX sandbox configuration. +// Note: Agent API keys (Anthropic, OpenAI, etc.) are NOT stored here. +// Agents authenticate via OAuth/browser login inside the sandbox. +// API keys can optionally be passed via --env flag when running agents. 
+type Config struct { + DaytonaAPIKey string `json:"daytonaApiKey,omitempty"` + DaytonaAPIURL string `json:"daytonaApiUrl,omitempty"` + DaytonaTarget string `json:"daytonaTarget,omitempty"` + DefaultSnapshotName string `json:"defaultSnapshotName,omitempty"` + SnapshotAgents []string `json:"snapshotAgents,omitempty"` + SnapshotBaseImage string `json:"snapshotBaseImage,omitempty"` + PersistenceVolumeName string `json:"persistenceVolumeName,omitempty"` + SettingsSync SettingsSyncConfig `json:"settingsSync,omitempty"` + FirstRunComplete bool `json:"firstRunComplete,omitempty"` +} + +func configPath() (string, error) { + paths, err := config.DefaultPaths() + if err != nil { + return "", err + } + return paths.ConfigPath, nil +} + +// LoadConfig reads AMUX sandbox config. +func LoadConfig() (Config, error) { + var cfg Config + path, err := configPath() + if err != nil { + return cfg, err + } + data, err := os.ReadFile(path) + if err == nil { + _ = json.Unmarshal(data, &cfg) + return cfg, nil + } + if !errors.Is(err, os.ErrNotExist) { + return cfg, err + } + return cfg, nil +} + +// SaveConfig writes AMUX sandbox config, preserving unrelated config keys (e.g. UI settings). +func SaveConfig(cfg Config) error { + path, err := configPath() + if err != nil { + return err + } + payload := map[string]any{} + if data, err := os.ReadFile(path); err == nil { + _ = json.Unmarshal(data, &payload) + } + for _, key := range configKeys { + delete(payload, key) + } + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return err + } + cfgMap := map[string]any{} + if err := json.Unmarshal(cfgBytes, &cfgMap); err != nil { + return err + } + for k, v := range cfgMap { + payload[k] = v + } + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return err + } + data, err := json.MarshalIndent(payload, "", " ") + if err != nil { + return err + } + return os.WriteFile(path, data, 0o644) +} + +// ClearConfigKeys removes AMUX sandbox config keys from the config file. 
+func ClearConfigKeys() error { + path, err := configPath() + if err != nil { + return err + } + payload := map[string]any{} + data, err := os.ReadFile(path) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + if err := json.Unmarshal(data, &payload); err != nil { + return err + } + for _, key := range configKeys { + delete(payload, key) + } + if len(payload) == 0 { + return os.Remove(path) + } + out, err := json.MarshalIndent(payload, "", " ") + if err != nil { + return err + } + return os.WriteFile(path, out, 0o644) +} + +// GetDaytonaClient returns a configured Daytona client. +func GetDaytonaClient() (*daytona.Daytona, error) { + cfg, err := LoadConfig() + if err != nil { + return nil, err + } + apiKey := cfg.DaytonaAPIKey + if apiKey == "" { + apiKey = envFirst(envAmuxDaytonaAPIKey, envDaytonaAPIKey) + } + if apiKey == "" { + return nil, errors.New("Daytona API key not found. Set AMUX_DAYTONA_API_KEY or run `amux auth login`.") + } + apiURL := cfg.DaytonaAPIURL + if apiURL == "" { + apiURL = envFirst(envAmuxDaytonaAPIURL, envDaytonaAPIURL) + } + target := cfg.DaytonaTarget + if target == "" { + target = envFirst(envAmuxDaytonaTarget, envDaytonaTarget) + } + return daytona.NewDaytona(&daytona.DaytonaConfig{ + APIKey: apiKey, + APIURL: apiURL, + Target: target, + }) +} + +// ResolveAPIKey returns API key from config or environment without creating a client. +func ResolveAPIKey(cfg Config) string { + if cfg.DaytonaAPIKey != "" { + return cfg.DaytonaAPIKey + } + return envFirst(envAmuxDaytonaAPIKey, envDaytonaAPIKey) +} + +// ResolveSnapshotID returns snapshot ID from config or environment. +func ResolveSnapshotID(cfg Config) string { + if cfg.DefaultSnapshotName != "" { + return cfg.DefaultSnapshotName + } + return envFirst("AMUX_SNAPSHOT_ID") +} + +// ResolvePersistenceVolumeName returns the name of the persistent volume to mount. 
+func ResolvePersistenceVolumeName(cfg Config) string { + if strings.TrimSpace(cfg.PersistenceVolumeName) != "" { + return strings.TrimSpace(cfg.PersistenceVolumeName) + } + return defaultPersistenceVolumeName +} + +// ResolveProviderName returns the selected provider name (override or env). +func ResolveProviderName(_ Config, override string) string { + if override != "" { + return strings.ToLower(strings.TrimSpace(override)) + } + value := envFirst(envAmuxProvider) + if value != "" { + return strings.ToLower(strings.TrimSpace(value)) + } + return DefaultProviderName +} + +// Environment variable helpers + +func envFirst(keys ...string) string { + for _, key := range keys { + if val, ok := os.LookupEnv(key); ok && val != "" { + return val + } + } + return "" +} + +func envIsOne(key string) bool { + return os.Getenv(key) == "1" +} + +func envDefaultTrue(keys ...string) bool { + for _, key := range keys { + if val, ok := os.LookupEnv(key); ok { + return val != "0" + } + } + return true +} diff --git a/internal/sandbox/credentials.go b/internal/sandbox/credentials.go new file mode 100644 index 00000000..1f9e34ac --- /dev/null +++ b/internal/sandbox/credentials.go @@ -0,0 +1,354 @@ +package sandbox + +import ( + "fmt" + "path" + "strings" +) + +// CredentialsConfig configures shared credentials. 
+type CredentialsConfig struct { + Mode string + Agent Agent + SettingsSyncMode string // "auto" (use global config), "force" (always sync), "skip" (never sync) +} + +func getSandboxHomeDir(sb RemoteSandbox) string { + resp, err := execCommand(sb, `sh -lc "USER_NAME=$(id -un 2>/dev/null || echo daytona); HOME_DIR=$(getent passwd \"$USER_NAME\" 2>/dev/null | cut -d: -f6 || true); if [ -z \"$HOME_DIR\" ]; then HOME_DIR=/home/$USER_NAME; fi; printf \"%s\" \"$HOME_DIR\""`, nil) + if err == nil && resp != nil { + stdout := strings.TrimSpace(resp.Stdout) + if stdout != "" { + return stdout + } + } + return "/home/daytona" +} + +func persistHomeDir() string { + return path.Join(persistMountPath, "home") +} + +func ensurePersistentDir(sb RemoteSandbox, target, persist string) error { + if _, err := execCommand(sb, SafeCommands.MkdirP(persist), nil); err != nil { + return err + } + cleanup := fmt.Sprintf("if [ -e %s ] && [ ! -L %s ]; then rm -rf %s; fi", ShellQuote(target), ShellQuote(target), ShellQuote(target)) + _, _ = execCommand(sb, cleanup, nil) + _, err := execCommand(sb, SafeCommands.LnForce(persist, target), nil) + return err +} + +func ensurePersistentFile(sb RemoteSandbox, target, persist string) error { + if _, err := execCommand(sb, SafeCommands.MkdirParent(persist), nil); err != nil { + return err + } + if _, err := execCommand(sb, SafeCommands.Touch(persist), nil); err != nil { + return err + } + cleanup := fmt.Sprintf("if [ -e %s ] && [ ! 
-L %s ]; then rm -f %s; fi", ShellQuote(target), ShellQuote(target), ShellQuote(target)) + _, _ = execCommand(sb, cleanup, nil) + _, err := execCommand(sb, SafeCommands.LnForce(persist, target), nil) + return err +} + +func ensureNpmConfig(sb RemoteSandbox, homeDir string) { + npmrc := path.Join(homeDir, ".npmrc") + prefix := path.Join(homeDir, ".local") + cache := path.Join(homeDir, ".npm") + script := fmt.Sprintf("cfg=%s; touch \"$cfg\"; grep -q '^prefix=' \"$cfg\" || echo %s >> \"$cfg\"; grep -q '^cache=' \"$cfg\" || echo %s >> \"$cfg\"", + ShellQuote(npmrc), + ShellQuote("prefix="+prefix), + ShellQuote("cache="+cache), + ) + _, _ = execCommand(sb, fmt.Sprintf("bash -lc %s", ShellQuote(script)), nil) +} + +func ensurePersistentHome(sb RemoteSandbox, homeDir string, verbose bool) { + if persistMountPath == "" { + return + } + if _, err := execCommand(sb, SafeCommands.MkdirP(persistMountPath), nil); err != nil { + if verbose { + fmt.Printf("Warning: persistence root unavailable: %v\n", err) + } + return + } + persistHome := persistHomeDir() + if _, err := execCommand(sb, SafeCommands.MkdirP(persistHome), nil); err != nil { + if verbose { + fmt.Printf("Warning: persistence home unavailable: %v\n", err) + } + return + } + + persistDirs := []string{ + ".config", + ".local", + ".npm", + ".claude", + ".codex", + ".gemini", + ".amp", + ".factory", + } + for _, rel := range persistDirs { + target := path.Join(homeDir, rel) + persist := path.Join(persistHome, rel) + if err := ensurePersistentDir(sb, target, persist); err != nil && verbose { + fmt.Printf("Warning: persistence setup failed for %s: %v\n", rel, err) + } + } + + persistFiles := []string{".npmrc"} + for _, rel := range persistFiles { + target := path.Join(homeDir, rel) + persist := path.Join(persistHome, rel) + if err := ensurePersistentFile(sb, target, persist); err != nil && verbose { + fmt.Printf("Warning: persistence setup failed for %s: %v\n", rel, err) + } + } + + ensureNpmConfig(sb, homeDir) +} + +func 
ensureCredentialDirs(sb RemoteSandbox) (string, error) { + homeDir := getSandboxHomeDir(sb) + dirs := []string{ + ".claude", + ".codex", + ".config/codex", + ".config/opencode", + ".local/share/opencode", + ".config/amp", + ".local/share/amp", + ".gemini", + ".factory", + ".config/gh", + } + var lastErr error + for _, dir := range dirs { + if _, err := execCommand(sb, SafeCommands.MkdirP(fmt.Sprintf("%s/%s", homeDir, dir)), nil); err != nil { + LogDebug("failed to create credential directory", "dir", dir, "error", err) + lastErr = err + } + } + if lastErr != nil { + LogDebug("some credential directories may not have been created", "lastError", lastErr) + } + return homeDir, nil +} + +func prepareClaudeHome(sb RemoteSandbox, homeDir string) { + claudeHome := fmt.Sprintf("%s/.claude", homeDir) + _, _ = execCommand(sb, SafeCommands.MkdirP(claudeHome), nil) + // Symlink cache and debug to /tmp for performance (these are ephemeral) + _, _ = execCommand(sb, SafeCommands.MkdirP("/tmp/amux-claude-cache"), nil) + _, _ = execCommand(sb, SafeCommands.MkdirP("/tmp/amux-claude-debug"), nil) + _, _ = execCommand(sb, SafeCommands.LnForce("/tmp/amux-claude-cache", fmt.Sprintf("%s/cache", claudeHome)), nil) + _, _ = execCommand(sb, SafeCommands.LnForce("/tmp/amux-claude-debug", fmt.Sprintf("%s/debug", claudeHome)), nil) +} + +func prepareCodexHome(sb RemoteSandbox, homeDir string) { + codexHome := fmt.Sprintf("%s/.codex", homeDir) + codexConfigHome := fmt.Sprintf("%s/.config/codex", homeDir) + _, _ = execCommand(sb, SafeCommands.MkdirP(codexHome), nil) + _, _ = execCommand(sb, SafeCommands.MkdirP(codexConfigHome), nil) + // Ensure file-based credential store for codex + ensureFileStore := func(path string) string { + return fmt.Sprintf(`if [ -f %s ]; then if grep -q '^cli_auth_credentials_store' %s; then sed -i 's/^cli_auth_credentials_store.*/cli_auth_credentials_store = "file"/' %s; else echo 'cli_auth_credentials_store = "file"' >> %s; fi; else mkdir -p $(dirname %s); echo 
'cli_auth_credentials_store = "file"' > %s; fi`, path, path, path, path, path, path) + } + _, _ = execCommand(sb, ensureFileStore(fmt.Sprintf("%s/config.toml", codexConfigHome)), nil) +} + +func prepareOpenCodeHome(sb RemoteSandbox, homeDir string) { + dataDir := fmt.Sprintf("%s/.local/share/opencode", homeDir) + configDir := fmt.Sprintf("%s/.config/opencode", homeDir) + _, _ = execCommand(sb, SafeCommands.MkdirP(dataDir), nil) + _, _ = execCommand(sb, SafeCommands.MkdirP(configDir), nil) +} + +func prepareAmpHome(sb RemoteSandbox, homeDir string) { + ampConfig := fmt.Sprintf("%s/.config/amp", homeDir) + ampData := fmt.Sprintf("%s/.local/share/amp", homeDir) + _, _ = execCommand(sb, SafeCommands.MkdirP(ampConfig), nil) + _, _ = execCommand(sb, SafeCommands.MkdirP(ampData), nil) +} + +func prepareGeminiHome(sb RemoteSandbox, homeDir string) { + geminiHome := fmt.Sprintf("%s/.gemini", homeDir) + _, _ = execCommand(sb, SafeCommands.MkdirP(geminiHome), nil) +} + +func prepareFactoryHome(sb RemoteSandbox, homeDir string) { + factoryHome := fmt.Sprintf("%s/.factory", homeDir) + _, _ = execCommand(sb, SafeCommands.MkdirP(factoryHome), nil) +} + +func prepareGhHome(sb RemoteSandbox, homeDir string) { + ghConfig := fmt.Sprintf("%s/.config/gh", homeDir) + _, _ = execCommand(sb, SafeCommands.MkdirP(ghConfig), nil) +} + +// SetupCredentials prepares credential directories on the sandbox. +// Credentials are stored in the home directory (symlinked to the persistent volume). 
func SetupCredentials(sb RemoteSandbox, cfg CredentialsConfig, verbose bool) error {
	// Normalize the requested mode; an unset mode means "auto".
	mode := strings.ToLower(strings.TrimSpace(cfg.Mode))
	if mode == "" {
		mode = "auto"
	}
	// Only "sandbox", "none" and "auto" are recognized; anything else is an error.
	switch mode {
	case "sandbox", "none", "auto":
	default:
		return fmt.Errorf("unsupported credentials mode: %s", mode)
	}
	// "none" opts out of all credential preparation.
	if mode == "none" {
		if verbose {
			fmt.Println("Credentials mode: none")
		}
		return nil
	}
	if verbose {
		if mode == "auto" {
			fmt.Println("Credentials mode: sandbox (auto)")
		} else {
			fmt.Printf("Credentials mode: %s\n", mode)
		}
	}
	// Link well-known dot-directories under the home dir to the persistent
	// volume so logins survive sandbox recreation.
	homeDir := getSandboxHomeDir(sb)
	ensurePersistentHome(sb, homeDir, verbose)

	// Create the per-agent credential directories, then apply each agent's
	// specific home-dir preparation.
	resolvedHome, err := ensureCredentialDirs(sb)
	if err != nil {
		return err
	}
	prepareClaudeHome(sb, resolvedHome)
	prepareCodexHome(sb, resolvedHome)
	prepareOpenCodeHome(sb, resolvedHome)
	prepareAmpHome(sb, resolvedHome)
	prepareGeminiHome(sb, resolvedHome)
	prepareFactoryHome(sb, resolvedHome)
	prepareGhHome(sb, resolvedHome)

	// Sync local settings based on mode and global config:
	// "force" always syncs, "skip" never does, and "auto"/empty defers to
	// the SettingsSync.Enabled flag in the global config.
	amuxCfg, _ := LoadConfig()
	shouldSync := false
	switch cfg.SettingsSyncMode {
	case "force":
		shouldSync = true
	case "skip":
		shouldSync = false
	default: // "auto" or empty - use global config
		shouldSync = amuxCfg.SettingsSync.Enabled
	}

	if shouldSync {
		if verbose {
			fmt.Println("Syncing local settings...")
		}
		if err := SyncSettingsToVolume(sb, amuxCfg.SettingsSync, verbose); err != nil {
			if verbose {
				fmt.Printf("Warning: settings sync failed: %v\n", err)
			}
			// Don't fail the whole setup for settings sync errors
		}
	}

	if verbose {
		fmt.Println("Credentials ready")
	}
	return nil
}

// AgentCredentialStatus represents whether an agent has credentials stored
// on the sandbox.
type AgentCredentialStatus struct {
	Agent         Agent
	HasCredential bool
	CredentialAge string // e.g., "2 days ago" or empty if unknown
}

// CheckAgentCredentials checks if credentials exist for an agent on the sandbox.
+func CheckAgentCredentials(sb RemoteSandbox, agent Agent) AgentCredentialStatus { + status := AgentCredentialStatus{Agent: agent, HasCredential: false} + homeDir := getSandboxHomeDir(sb) + + switch agent { + case AgentClaude: + resp, err := execCommand(sb, fmt.Sprintf( + "test -f %s/.claude/.credentials.json && echo exists", + homeDir, + ), nil) + if err == nil && resp != nil && resp.ExitCode == 0 { + status.HasCredential = true + } + + case AgentCodex: + resp, err := execCommand(sb, fmt.Sprintf( + "test -f %s/.codex/auth.json && echo exists", + homeDir, + ), nil) + if err == nil && resp != nil && resp.ExitCode == 0 { + status.HasCredential = true + } + + case AgentOpenCode: + resp, err := execCommand(sb, fmt.Sprintf( + "test -f %s/.local/share/opencode/auth.json && echo exists", + homeDir, + ), nil) + if err == nil && resp != nil && resp.ExitCode == 0 { + status.HasCredential = true + } + + case AgentAmp: + resp, err := execCommand(sb, fmt.Sprintf( + "test -f %s/.config/amp/secrets.json && echo exists", + homeDir, + ), nil) + if err == nil && resp != nil && resp.ExitCode == 0 { + status.HasCredential = true + } + + case AgentGemini: + resp, err := execCommand(sb, fmt.Sprintf( + "test -f %s/.gemini/oauth_creds.json && echo exists", + homeDir, + ), nil) + if err == nil && resp != nil && resp.ExitCode == 0 { + status.HasCredential = true + } + + case AgentDroid: + resp, err := execCommand(sb, fmt.Sprintf( + "test -f %s/.factory/config.json && echo exists", + homeDir, + ), nil) + if err == nil && resp != nil && resp.ExitCode == 0 { + status.HasCredential = true + } + } + + return status +} + +// CheckAllAgentCredentials returns credential status for all agents. 
+func CheckAllAgentCredentials(sb RemoteSandbox) []AgentCredentialStatus { + agents := []Agent{AgentClaude, AgentCodex, AgentOpenCode, AgentAmp, AgentGemini, AgentDroid} + results := make([]AgentCredentialStatus, 0, len(agents)) + + for _, agent := range agents { + results = append(results, CheckAgentCredentials(sb, agent)) + } + + return results +} + +// HasGitHubCredentials checks if GitHub CLI is authenticated. +func HasGitHubCredentials(sb RemoteSandbox) bool { + homeDir := getSandboxHomeDir(sb) + resp, err := execCommand(sb, fmt.Sprintf( + "test -f %s/.config/gh/hosts.yml && echo exists", + homeDir, + ), nil) + return err == nil && resp != nil && resp.ExitCode == 0 +} diff --git a/internal/sandbox/credentials_test.go b/internal/sandbox/credentials_test.go new file mode 100644 index 00000000..27aeaf77 --- /dev/null +++ b/internal/sandbox/credentials_test.go @@ -0,0 +1,156 @@ +package sandbox + +import ( + "strings" + "testing" +) + +func TestSetupCredentials_ModeNone(t *testing.T) { + mock := NewMockRemoteSandbox("test-123") + + cfg := CredentialsConfig{ + Mode: "none", + Agent: AgentClaude, + } + + err := SetupCredentials(mock, cfg, false) + if err != nil { + t.Errorf("SetupCredentials() with mode=none error = %v", err) + } + + // Should not have executed any commands + history := mock.GetExecHistory() + if len(history) > 0 { + t.Errorf("SetupCredentials() with mode=none should not execute commands, got %d", len(history)) + } +} + +func TestSetupCredentials_CreatesDirectories(t *testing.T) { + mock := NewMockRemoteSandbox("test-123") + mock.SetHomeDir("/home/testuser") + + cfg := CredentialsConfig{ + Mode: "sandbox", + Agent: AgentClaude, + } + + err := SetupCredentials(mock, cfg, false) + if err != nil { + t.Errorf("SetupCredentials() error = %v", err) + } + + // Should have executed mkdir commands + history := mock.GetExecHistory() + foundMkdir := false + for _, cmd := range history { + if strings.Contains(cmd, "mkdir") && strings.Contains(cmd, ".claude") { 
+ foundMkdir = true + break + } + } + + if !foundMkdir { + t.Error("SetupCredentials() should create .claude directory") + } +} + +func TestSetupCredentials_AllAgents(t *testing.T) { + agents := []Agent{ + AgentClaude, + AgentCodex, + AgentOpenCode, + AgentAmp, + AgentGemini, + AgentDroid, + } + + for _, agent := range agents { + t.Run(string(agent), func(t *testing.T) { + mock := NewMockRemoteSandbox("test-123") + mock.SetHomeDir("/home/testuser") + + cfg := CredentialsConfig{ + Mode: "sandbox", + Agent: agent, + } + + err := SetupCredentials(mock, cfg, false) + if err != nil { + t.Errorf("SetupCredentials() for %s error = %v", agent, err) + } + + // Should have executed some mkdir commands + history := mock.GetExecHistory() + if len(history) == 0 { + t.Errorf("SetupCredentials() for %s should execute commands", agent) + } + }) + } +} + +func TestCheckAgentCredentials(t *testing.T) { + tests := []struct { + name string + agent Agent + setupExec map[string]MockExecResult + wantHasCred bool + }{ + { + name: "claude with credentials", + agent: AgentClaude, + setupExec: map[string]MockExecResult{ + "test -f": {Output: "", ExitCode: 0}, + }, + wantHasCred: true, + }, + { + name: "claude without credentials", + agent: AgentClaude, + setupExec: map[string]MockExecResult{ + "test -f": {Output: "", ExitCode: 1}, + }, + wantHasCred: false, + }, + { + name: "codex with credentials", + agent: AgentCodex, + setupExec: map[string]MockExecResult{ + "test -f": {Output: "", ExitCode: 0}, + }, + wantHasCred: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := NewMockRemoteSandbox("test-123") + mock.SetHomeDir("/home/testuser") + + for prefix, result := range tt.setupExec { + mock.SetExecResult(prefix, result.Output, result.ExitCode) + } + + status := CheckAgentCredentials(mock, tt.agent) + + if status.HasCredential != tt.wantHasCred { + t.Errorf("CheckAgentCredentials() HasCredential = %v, want %v", status.HasCredential, tt.wantHasCred) + } 
+ + if status.Agent != tt.agent { + t.Errorf("CheckAgentCredentials() Agent = %v, want %v", status.Agent, tt.agent) + } + }) + } +} + +func TestHasGitHubCredentials(t *testing.T) { + // Test with authenticated user + t.Run("gh authenticated", func(t *testing.T) { + mock := NewMockRemoteSandbox("test-123") + // Default mock returns exit code 0 for all commands + got := HasGitHubCredentials(mock) + if !got { + t.Error("HasGitHubCredentials() should return true when gh auth succeeds") + } + }) +} diff --git a/internal/sandbox/errors.go b/internal/sandbox/errors.go new file mode 100644 index 00000000..32efc53e --- /dev/null +++ b/internal/sandbox/errors.go @@ -0,0 +1,307 @@ +package sandbox + +import ( + "errors" + "fmt" + "strings" +) + +// ErrorCode represents specific error categories for better handling. +type ErrorCode string + +const ( + ErrCodeUnknown ErrorCode = "unknown" + ErrCodeSandboxCreate ErrorCode = "sandbox_create" + ErrCodeSandboxStart ErrorCode = "sandbox_start" + ErrCodeSandboxMissing ErrorCode = "sandbox_not_found" + ErrCodeCredentials ErrorCode = "credentials" + ErrCodeSync ErrorCode = "sync" + ErrCodeAgent ErrorCode = "agent" + ErrCodeSSH ErrorCode = "ssh" + ErrCodeNetwork ErrorCode = "network" + ErrCodeConfig ErrorCode = "config" + ErrCodeVolume ErrorCode = "volume" + ErrCodeSnapshot ErrorCode = "snapshot" + ErrCodePreflight ErrorCode = "preflight" + ErrCodeTimeout ErrorCode = "timeout" + ErrCodePermission ErrorCode = "permission" +) + +// ErrNotFound is a sentinel error for resource-not-found cases. +var ErrNotFound = errors.New("not found") + +// SandboxError is a structured error type with context for debugging. 
+type SandboxError struct { + Code ErrorCode + Op string // Operation that failed (e.g., "create", "sync", "credentials.setup") + Agent Agent // Agent involved, if applicable + SandboxID string // Sandbox ID, if known + Cause error // Underlying error + Context map[string]string // Additional context + Suggestion string // User-friendly suggestion for resolution + Retryable bool // Whether the operation can be retried +} + +// Error implements the error interface. +func (e *SandboxError) Error() string { + var b strings.Builder + b.WriteString(fmt.Sprintf("[%s] %s failed", e.Code, e.Op)) + if e.Agent != "" { + b.WriteString(fmt.Sprintf(" (agent: %s)", e.Agent)) + } + if e.SandboxID != "" { + b.WriteString(fmt.Sprintf(" (sandbox: %s)", truncateID(e.SandboxID))) + } + if e.Cause != nil { + b.WriteString(fmt.Sprintf(": %v", e.Cause)) + } + return b.String() +} + +// Unwrap returns the underlying error. +func (e *SandboxError) Unwrap() error { + return e.Cause +} + +// UserMessage returns a human-friendly error message with suggestions. 
+func (e *SandboxError) UserMessage() string { + var b strings.Builder + + // Start with a clear description + switch e.Code { + case ErrCodeSandboxCreate: + b.WriteString("Failed to create sandbox") + case ErrCodeSandboxStart: + b.WriteString("Failed to start sandbox") + case ErrCodeSandboxMissing: + b.WriteString("Sandbox not found") + case ErrCodeCredentials: + b.WriteString("Credential setup failed") + case ErrCodeSync: + b.WriteString("Workspace sync failed") + case ErrCodeAgent: + b.WriteString("Agent operation failed") + case ErrCodeSSH: + b.WriteString("SSH connection failed") + case ErrCodeNetwork: + b.WriteString("Network error") + case ErrCodeVolume: + b.WriteString("Volume operation failed") + case ErrCodeSnapshot: + b.WriteString("Snapshot operation failed") + case ErrCodePreflight: + b.WriteString("Preflight check failed") + case ErrCodeTimeout: + b.WriteString("Operation timed out") + case ErrCodePermission: + b.WriteString("Permission denied") + default: + b.WriteString("An error occurred") + } + + if e.Agent != "" { + b.WriteString(fmt.Sprintf(" for %s", e.Agent)) + } + + // Add cause if available + if e.Cause != nil { + b.WriteString(fmt.Sprintf("\n\nDetails: %v", e.Cause)) + } + + // Add suggestion if available + if e.Suggestion != "" { + b.WriteString(fmt.Sprintf("\n\nSuggestion: %s", e.Suggestion)) + } else { + // Generate default suggestions based on error code + suggestion := e.defaultSuggestion() + if suggestion != "" { + b.WriteString(fmt.Sprintf("\n\nSuggestion: %s", suggestion)) + } + } + + return b.String() +} + +func (e *SandboxError) defaultSuggestion() string { + switch e.Code { + case ErrCodeSandboxCreate: + return "Check your provider credentials with `amux auth status`. Run `amux doctor` for diagnostics." + case ErrCodeSandboxMissing: + return "The sandbox may have been deleted. Run `amux sandbox ls` to see available sandboxes." + case ErrCodeCredentials: + return "Try running `amux doctor --deep` to diagnose credential issues." 
+ case ErrCodeSync: + return "Check available disk space and network connectivity. Try `amux sandbox run --no-sync` to skip sync." + case ErrCodeSSH: + return "Ensure SSH is installed and accessible. Check firewall settings. Run `amux doctor` for diagnostics." + case ErrCodeNetwork: + return "Check your internet connection and firewall settings." + case ErrCodeVolume: + return "Volume operation failed. Run `amux doctor --deep` to check." + case ErrCodeTimeout: + if e.Retryable { + return "The operation timed out but may succeed if retried." + } + return "The operation timed out. Check network connectivity and try again." + case ErrCodePreflight: + return "Run `amux doctor` to identify and fix the issue." + default: + return "" + } +} + +// Helper functions for creating common errors + +// NewSandboxError creates a new SandboxError with the given parameters. +func NewSandboxError(code ErrorCode, op string, cause error) *SandboxError { + return &SandboxError{ + Code: code, + Op: op, + Cause: cause, + Context: make(map[string]string), + } +} + +// WithAgent adds agent context to the error. +func (e *SandboxError) WithAgent(agent Agent) *SandboxError { + e.Agent = agent + return e +} + +// WithSandbox adds sandbox ID context to the error. +func (e *SandboxError) WithSandbox(id string) *SandboxError { + e.SandboxID = id + return e +} + +// WithContext adds additional context to the error. +func (e *SandboxError) WithContext(key, value string) *SandboxError { + if e.Context == nil { + e.Context = make(map[string]string) + } + e.Context[key] = value + return e +} + +// WithSuggestion sets a user-friendly suggestion. +func (e *SandboxError) WithSuggestion(suggestion string) *SandboxError { + e.Suggestion = suggestion + return e +} + +// WithRetryable marks the error as retryable. 
+func (e *SandboxError) WithRetryable(retryable bool) *SandboxError { + e.Retryable = retryable + return e +} + +// Convenience constructors for common errors + +func ErrCredentialSetup(op string, cause error) *SandboxError { + return NewSandboxError(ErrCodeCredentials, op, cause) +} + +func ErrSyncFailed(op string, cause error) *SandboxError { + return NewSandboxError(ErrCodeSync, op, cause) +} + +func ErrAgentInstall(agent Agent, cause error) *SandboxError { + return NewSandboxError(ErrCodeAgent, "install", cause).WithAgent(agent) +} + +func ErrSSHConnection(cause error) *SandboxError { + return NewSandboxError(ErrCodeSSH, "connect", cause).WithRetryable(true) +} + +func ErrTimeout(op string) *SandboxError { + return NewSandboxError(ErrCodeTimeout, op, errors.New("operation timed out")).WithRetryable(true) +} + +func ErrVolumeNotReady(volumeName string, cause error) *SandboxError { + return NewSandboxError(ErrCodeVolume, "wait_ready", cause). + WithContext("volume", volumeName) +} + +// IsSandboxError checks if an error is a SandboxError. +func IsSandboxError(err error) bool { + var se *SandboxError + return errors.As(err, &se) +} + +// IsRetryable checks if an error is retryable. +func IsRetryable(err error) bool { + if se := GetSandboxError(err); se != nil { + return se.Retryable + } + return false +} + +func truncateID(id string) string { + if len(id) <= 8 { + return id + } + return id[:8] +} + +// GetSandboxError extracts a SandboxError from an error chain if present. +func GetSandboxError(err error) *SandboxError { + if err == nil { + return nil + } + var se *SandboxError + if errors.As(err, &se) { + return se + } + return nil +} + +// IsNotFoundError checks if error represents a "not found" condition. 
+func IsNotFoundError(err error) bool { + if err == nil { + return false + } + if errors.Is(err, ErrNotFound) { + return true + } + if se := GetSandboxError(err); se != nil { + return se.Code == ErrCodeSandboxMissing + } + return false +} + +// MultiError collects multiple errors. +type MultiError struct { + Errors []error +} + +func (m *MultiError) Error() string { + if len(m.Errors) == 0 { + return "no errors" + } + if len(m.Errors) == 1 { + return m.Errors[0].Error() + } + var b strings.Builder + b.WriteString(fmt.Sprintf("%d errors occurred:\n", len(m.Errors))) + for i, err := range m.Errors { + b.WriteString(fmt.Sprintf(" %d. %v\n", i+1, err)) + } + return b.String() +} + +func (m *MultiError) Add(err error) { + if err != nil { + m.Errors = append(m.Errors, err) + } +} + +func (m *MultiError) HasErrors() bool { + return len(m.Errors) > 0 +} + +func (m *MultiError) ErrorOrNil() error { + if m.HasErrors() { + return m + } + return nil +} diff --git a/internal/sandbox/errors_test.go b/internal/sandbox/errors_test.go new file mode 100644 index 00000000..29626b53 --- /dev/null +++ b/internal/sandbox/errors_test.go @@ -0,0 +1,173 @@ +package sandbox + +import ( + "errors" + "testing" +) + +func TestSandboxError(t *testing.T) { + t.Run("basic error creation", func(t *testing.T) { + err := NewSandboxError(ErrCodeSSH, "connect", nil) + if err.Code != ErrCodeSSH { + t.Errorf("Expected code %s, got %s", ErrCodeSSH, err.Code) + } + if err.Op != "connect" { + t.Errorf("Expected op 'connect', got %s", err.Op) + } + }) + + t.Run("error with cause", func(t *testing.T) { + cause := errors.New("connection refused") + err := NewSandboxError(ErrCodeNetwork, "dial", cause) + if err.Cause != cause { + t.Error("Expected cause to be set") + } + if !errors.Is(err, cause) { + t.Error("errors.Is should return true for the cause") + } + }) + + t.Run("error with suggestion", func(t *testing.T) { + err := NewSandboxError(ErrCodeCredentials, "setup", nil). 
+ WithSuggestion("Run `amux setup` to configure credentials") + if err.Suggestion == "" { + t.Error("Expected suggestion to be set") + } + }) + + t.Run("retryable error", func(t *testing.T) { + err := NewSandboxError(ErrCodeSSH, "connect", nil).WithRetryable(true) + if !err.Retryable { + t.Error("Expected error to be retryable") + } + }) + + t.Run("error message formatting", func(t *testing.T) { + err := NewSandboxError(ErrCodeSync, "upload", errors.New("timeout")) + msg := err.Error() + if msg == "" { + t.Error("Expected non-empty error message") + } + }) + + t.Run("user-friendly message", func(t *testing.T) { + err := NewSandboxError(ErrCodeCredentials, "setup", nil) + msg := err.UserMessage() + if msg == "" { + t.Error("Expected non-empty user message") + } + }) +} + +func TestErrorCodes(t *testing.T) { + codes := []ErrorCode{ + ErrCodeCredentials, + ErrCodeSync, + ErrCodeSSH, + ErrCodeNetwork, + ErrCodeAgent, + ErrCodePreflight, + ErrCodeConfig, + ErrCodeSnapshot, + } + + for _, code := range codes { + err := NewSandboxError(code, "test", nil) + if err.Code != code { + t.Errorf("Expected code %s, got %s", code, err.Code) + } + } +} + +func TestConvenienceFunctions(t *testing.T) { + t.Run("ErrCredentialSetup", func(t *testing.T) { + err := ErrCredentialSetup("claude", errors.New("not found")) + if err.Code != ErrCodeCredentials { + t.Errorf("Expected code %s, got %s", ErrCodeCredentials, err.Code) + } + }) + + t.Run("ErrSyncFailed", func(t *testing.T) { + err := ErrSyncFailed("upload", errors.New("timeout")) + if err.Code != ErrCodeSync { + t.Errorf("Expected code %s, got %s", ErrCodeSync, err.Code) + } + }) + + t.Run("ErrSSHConnection", func(t *testing.T) { + err := ErrSSHConnection(errors.New("refused")) + if err.Code != ErrCodeSSH { + t.Errorf("Expected code %s, got %s", ErrCodeSSH, err.Code) + } + // SSH connection errors should be retryable by default + if !err.Retryable { + t.Error("Expected SSH connection error to be retryable") + } + }) +} + +func 
TestIsRetryable(t *testing.T) { + t.Run("retryable sandbox error", func(t *testing.T) { + err := NewSandboxError(ErrCodeSSH, "connect", nil).WithRetryable(true) + if !IsRetryable(err) { + t.Error("Expected IsRetryable to return true") + } + }) + + t.Run("non-retryable sandbox error", func(t *testing.T) { + err := NewSandboxError(ErrCodeConfig, "load", nil) + if IsRetryable(err) { + t.Error("Expected IsRetryable to return false") + } + }) + + t.Run("non-sandbox error", func(t *testing.T) { + err := errors.New("random error") + if IsRetryable(err) { + t.Error("Expected IsRetryable to return false for non-sandbox errors") + } + }) + + t.Run("nil error", func(t *testing.T) { + if IsRetryable(nil) { + t.Error("Expected IsRetryable to return false for nil") + } + }) +} + +func TestMultiError(t *testing.T) { + t.Run("empty multi error", func(t *testing.T) { + me := &MultiError{} + if me.HasErrors() { + t.Error("Expected HasErrors to return false for empty MultiError") + } + if me.ErrorOrNil() != nil { + t.Error("Expected ErrorOrNil to return nil for empty MultiError") + } + }) + + t.Run("multi error with errors", func(t *testing.T) { + me := &MultiError{} + me.Add(errors.New("error 1")) + me.Add(errors.New("error 2")) + if !me.HasErrors() { + t.Error("Expected HasErrors to return true") + } + if me.ErrorOrNil() == nil { + t.Error("Expected ErrorOrNil to return error") + } + if len(me.Errors) != 2 { + t.Errorf("Expected 2 errors, got %d", len(me.Errors)) + } + }) + + t.Run("multi error ignores nil", func(t *testing.T) { + me := &MultiError{} + me.Add(nil) + me.Add(errors.New("error")) + me.Add(nil) + if len(me.Errors) != 1 { + t.Errorf("Expected 1 error, got %d", len(me.Errors)) + } + }) +} diff --git a/internal/sandbox/exec_helpers.go b/internal/sandbox/exec_helpers.go new file mode 100644 index 00000000..4db216b1 --- /dev/null +++ b/internal/sandbox/exec_helpers.go @@ -0,0 +1,25 @@ +package sandbox + +import ( + "context" + "fmt" +) + +// execCommand executes a command, 
respecting opts.Timeout if provided. +// If opts is nil or opts.Timeout is 0, no context timeout is applied +// and the provider is expected to enforce any default timeout. +func execCommand(computer RemoteSandbox, cmd string, opts *ExecOptions) (*ExecResult, error) { + if computer == nil { + return nil, fmt.Errorf("sandbox is nil") + } + + // If opts specifies a timeout, apply it to the context + if opts != nil && opts.Timeout > 0 { + ctx, cancel := context.WithTimeout(context.Background(), opts.Timeout) + defer cancel() + return computer.Exec(ctx, cmd, opts) + } + + // Otherwise, let the provider handle timeout (via opts.Timeout or its own defaults) + return computer.Exec(context.Background(), cmd, opts) +} diff --git a/internal/sandbox/file_transfer.go b/internal/sandbox/file_transfer.go new file mode 100644 index 00000000..ba4eebb7 --- /dev/null +++ b/internal/sandbox/file_transfer.go @@ -0,0 +1,41 @@ +package sandbox + +import ( + "context" + "os" +) + +func uploadBytes(ctx context.Context, computer RemoteSandbox, data []byte, remotePath string) error { + tmp, err := os.CreateTemp("", "amux-upload-*") + if err != nil { + return err + } + defer func() { + _ = os.Remove(tmp.Name()) + }() + if _, err := tmp.Write(data); err != nil { + _ = tmp.Close() + return err + } + if err := tmp.Close(); err != nil { + return err + } + return computer.UploadFile(ctx, tmp.Name(), remotePath) +} + +func downloadBytes(ctx context.Context, computer RemoteSandbox, remotePath string) ([]byte, error) { + tmp, err := os.CreateTemp("", "amux-download-*") + if err != nil { + return nil, err + } + defer func() { + _ = os.Remove(tmp.Name()) + }() + if err := tmp.Close(); err != nil { + return nil, err + } + if err := computer.DownloadFile(ctx, remotePath, tmp.Name()); err != nil { + return nil, err + } + return os.ReadFile(tmp.Name()) +} diff --git a/internal/sandbox/health.go b/internal/sandbox/health.go new file mode 100644 index 00000000..85b3d3c6 --- /dev/null +++ 
b/internal/sandbox/health.go @@ -0,0 +1,448 @@ +package sandbox + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/andyrewlee/amux/internal/daytona" +) + +// HealthStatus represents the overall health of a sandbox. +type HealthStatus string + +const ( + HealthStatusHealthy HealthStatus = "healthy" + HealthStatusDegraded HealthStatus = "degraded" + HealthStatusUnhealthy HealthStatus = "unhealthy" + HealthStatusUnknown HealthStatus = "unknown" +) + +// HealthCheck represents a single health check. +type HealthCheck struct { + Name string + Status HealthStatus + Message string + Duration time.Duration + Recoverable bool + Details map[string]string +} + +// HealthReport contains all health check results. +type HealthReport struct { + Overall HealthStatus + Checks []HealthCheck + Timestamp time.Time + Duration time.Duration +} + +// SandboxHealth provides health checking and self-healing capabilities. +type SandboxHealth struct { + inner *daytona.Sandbox + client *daytona.Daytona + agent Agent + verbose bool +} + +// NewSandboxHealth creates a new health checker for a sandbox. +func NewSandboxHealth(client *daytona.Daytona, sandboxHandle RemoteSandbox, agent Agent) (*SandboxHealth, error) { + dc, ok := sandboxHandle.(*daytonaSandbox) + if !ok { + return nil, fmt.Errorf("sandbox provider does not support Daytona health checks") + } + return &SandboxHealth{ + inner: dc.inner, + client: client, + agent: agent, + }, nil +} + +// SetVerbose enables verbose output. +func (h *SandboxHealth) SetVerbose(verbose bool) { + h.verbose = verbose +} + +func (h *SandboxHealth) sandboxHandle() RemoteSandbox { + return &daytonaSandbox{inner: h.inner} +} + +// Check performs all health checks and returns a report. 
+func (h *SandboxHealth) Check(ctx context.Context) *HealthReport { + start := time.Now() + report := &HealthReport{ + Overall: HealthStatusHealthy, + Checks: make([]HealthCheck, 0), + Timestamp: start, + } + + // Run all checks + checks := []func(context.Context) HealthCheck{ + h.checkSandboxState, + h.checkCredentialDirs, + h.checkAgentInstalled, + h.checkNetworkConnectivity, + h.checkDiskSpace, + h.checkProcesses, + } + + for _, check := range checks { + if ctx.Err() != nil { + break + } + result := check(ctx) + report.Checks = append(report.Checks, result) + + // Update overall status + if result.Status == HealthStatusUnhealthy && report.Overall != HealthStatusUnhealthy { + report.Overall = HealthStatusUnhealthy + } else if result.Status == HealthStatusDegraded && report.Overall == HealthStatusHealthy { + report.Overall = HealthStatusDegraded + } + } + + report.Duration = time.Since(start) + return report +} + +// checkSandboxState verifies the sandbox is running. +func (h *SandboxHealth) checkSandboxState(ctx context.Context) HealthCheck { + start := time.Now() + check := HealthCheck{ + Name: "sandbox_state", + Details: make(map[string]string), + } + + // Simple echo test + resp, err := execCommand(h.sandboxHandle(), "echo healthy", nil) + check.Duration = time.Since(start) + + if err != nil { + check.Status = HealthStatusUnhealthy + check.Message = fmt.Sprintf("Cannot execute commands: %v", err) + check.Recoverable = true + return check + } + + if resp.ExitCode != 0 { + check.Status = HealthStatusUnhealthy + check.Message = "Command execution failed" + check.Recoverable = true + return check + } + + check.Status = HealthStatusHealthy + check.Message = "Sandbox is responsive" + check.Details["response_time"] = check.Duration.String() + return check +} + +// checkCredentialDirs verifies credential directories exist in home. 
+func (h *SandboxHealth) checkCredentialDirs(ctx context.Context) HealthCheck { + start := time.Now() + check := HealthCheck{ + Name: "credential_dirs", + Details: make(map[string]string), + } + + if h.agent == AgentShell || h.agent == "" { + check.Status = HealthStatusHealthy + check.Message = "No credentials required for shell" + check.Duration = time.Since(start) + return check + } + + plugin, ok := GetAgentPlugin(string(h.agent)) + if !ok { + check.Status = HealthStatusUnknown + check.Message = fmt.Sprintf("Unknown agent: %s", h.agent) + check.Duration = time.Since(start) + return check + } + + credPaths := plugin.CredentialPaths() + if len(credPaths) == 0 { + check.Status = HealthStatusHealthy + check.Message = "No credential paths configured" + check.Duration = time.Since(start) + return check + } + + homeDir := getSandboxHomeDir(h.sandboxHandle()) + issues := []string{} + for _, cred := range credPaths { + dirPath := fmt.Sprintf("%s/%s", homeDir, cred.HomePath) + + // Check if directory exists + cmd := SafeCommands.Test("-d", dirPath) + resp, _ := execCommand(h.sandboxHandle(), cmd, nil) + if resp == nil || resp.ExitCode != 0 { + issues = append(issues, fmt.Sprintf("%s: directory missing", cred.HomePath)) + } + } + + check.Duration = time.Since(start) + + if len(issues) > 0 { + check.Status = HealthStatusDegraded + check.Message = fmt.Sprintf("Credential directory issues: %s", strings.Join(issues, "; ")) + check.Recoverable = true + return check + } + + check.Status = HealthStatusHealthy + check.Message = "All credential directories exist" + return check +} + +// checkAgentInstalled verifies the agent is installed. 
+func (h *SandboxHealth) checkAgentInstalled(ctx context.Context) HealthCheck { + start := time.Now() + check := HealthCheck{ + Name: "agent_installed", + Details: make(map[string]string), + } + + if h.agent == AgentShell || h.agent == "" { + check.Status = HealthStatusHealthy + check.Message = "Shell is always available" + check.Duration = time.Since(start) + return check + } + + plugin, ok := GetAgentPlugin(string(h.agent)) + if !ok { + check.Status = HealthStatusUnknown + check.Message = fmt.Sprintf("Unknown agent: %s", h.agent) + check.Duration = time.Since(start) + return check + } + + err := plugin.Validate(h.sandboxHandle()) + check.Duration = time.Since(start) + + if err != nil { + check.Status = HealthStatusUnhealthy + check.Message = fmt.Sprintf("Agent not installed: %v", err) + check.Recoverable = true + return check + } + + // Get version if possible + versionCmd := plugin.VersionCommand() + if versionCmd != "" { + resp, err := execCommand(h.sandboxHandle(), versionCmd, nil) + if err == nil && resp.ExitCode == 0 { + version := strings.TrimSpace(getStdoutFromResp(resp)) + if version != "" { + check.Details["version"] = version + } + } + } + + check.Status = HealthStatusHealthy + check.Message = fmt.Sprintf("%s is installed", plugin.DisplayName()) + return check +} + +// checkNetworkConnectivity verifies network access. 
+func (h *SandboxHealth) checkNetworkConnectivity(ctx context.Context) HealthCheck { + start := time.Now() + check := HealthCheck{ + Name: "network", + Details: make(map[string]string), + } + + // Try to reach a reliable endpoint + resp, err := execCommand(h.sandboxHandle(), "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 https://api.anthropic.com", nil) + check.Duration = time.Since(start) + + if err != nil { + check.Status = HealthStatusDegraded + check.Message = "Network check failed (curl may not be available)" + return check + } + + statusCode := strings.TrimSpace(getStdoutFromResp(resp)) + check.Details["status_code"] = statusCode + + if resp.ExitCode != 0 { + check.Status = HealthStatusDegraded + check.Message = "Cannot reach external services" + return check + } + + check.Status = HealthStatusHealthy + check.Message = "Network connectivity is working" + return check +} + +// checkDiskSpace verifies sufficient disk space. +func (h *SandboxHealth) checkDiskSpace(ctx context.Context) HealthCheck { + start := time.Now() + check := HealthCheck{ + Name: "disk_space", + Details: make(map[string]string), + } + + resp, err := execCommand(h.sandboxHandle(), "df -h / | tail -1 | awk '{print $5}'", nil) + check.Duration = time.Since(start) + + if err != nil { + check.Status = HealthStatusUnknown + check.Message = "Could not check disk space" + return check + } + + usage := strings.TrimSpace(strings.TrimSuffix(getStdoutFromResp(resp), "%")) + check.Details["usage"] = usage + "%" + + var usageInt int + _, _ = fmt.Sscanf(usage, "%d", &usageInt) + + if usageInt >= 95 { + check.Status = HealthStatusUnhealthy + check.Message = fmt.Sprintf("Disk nearly full: %s%% used", usage) + return check + } + + if usageInt >= 80 { + check.Status = HealthStatusDegraded + check.Message = fmt.Sprintf("Disk usage high: %s%% used", usage) + return check + } + + check.Status = HealthStatusHealthy + check.Message = fmt.Sprintf("Disk usage normal: %s%% used", usage) + return check +} 
+ +// checkProcesses verifies no zombie processes or resource issues. +func (h *SandboxHealth) checkProcesses(ctx context.Context) HealthCheck { + start := time.Now() + check := HealthCheck{ + Name: "processes", + Details: make(map[string]string), + } + + // Check for zombie processes + resp, _ := execCommand(h.sandboxHandle(), "ps aux | grep -c ' Z '", nil) + check.Duration = time.Since(start) + + if resp != nil { + zombies := strings.TrimSpace(getStdoutFromResp(resp)) + check.Details["zombie_count"] = zombies + + var zombieCount int + _, _ = fmt.Sscanf(zombies, "%d", &zombieCount) + + if zombieCount > 10 { + check.Status = HealthStatusDegraded + check.Message = fmt.Sprintf("Many zombie processes detected: %d", zombieCount) + return check + } + } + + check.Status = HealthStatusHealthy + check.Message = "Process state is healthy" + return check +} + +// Repair attempts to fix common issues. +func (h *SandboxHealth) Repair(ctx context.Context) error { + report := h.Check(ctx) + + var errors MultiError + + for _, check := range report.Checks { + if check.Status != HealthStatusHealthy && check.Recoverable { + if err := h.repairCheck(ctx, check); err != nil { + errors.Add(fmt.Errorf("repair %s: %w", check.Name, err)) + } + } + } + + return errors.ErrorOrNil() +} + +func (h *SandboxHealth) repairCheck(ctx context.Context, check HealthCheck) error { + switch check.Name { + case "sandbox_state": + // Try to restart the sandbox + if h.verbose { + fmt.Println("Attempting to restart sandbox...") + } + if err := h.inner.Start(60 * time.Second); err != nil { + return err + } + return nil + + case "credential_dirs": + // Re-create credential directories + if h.verbose { + fmt.Println("Re-creating credential directories...") + } + return SetupCredentials(h.sandboxHandle(), CredentialsConfig{ + Mode: "sandbox", + Agent: h.agent, + }, h.verbose) + + case "agent_installed": + // Re-install the agent + if h.verbose { + fmt.Printf("Re-installing %s...\n", h.agent) + } + return 
EnsureAgentInstalled(h.sandboxHandle(), h.agent, h.verbose, true) + + default: + return fmt.Errorf("no repair strategy for %s", check.Name) + } +} + +// FormatReport returns a human-readable health report. +func FormatReport(report *HealthReport) string { + var b strings.Builder + + // Overall status + statusIcon := "?" + switch report.Overall { + case HealthStatusHealthy: + statusIcon = "\033[32m✓\033[0m" + case HealthStatusDegraded: + statusIcon = "\033[33m!\033[0m" + case HealthStatusUnhealthy: + statusIcon = "\033[31m✗\033[0m" + } + + b.WriteString(fmt.Sprintf("%s Overall: %s (checked in %s)\n\n", statusIcon, report.Overall, report.Duration.Round(time.Millisecond))) + + // Individual checks + for _, check := range report.Checks { + icon := "?" + switch check.Status { + case HealthStatusHealthy: + icon = "\033[32m✓\033[0m" + case HealthStatusDegraded: + icon = "\033[33m!\033[0m" + case HealthStatusUnhealthy: + icon = "\033[31m✗\033[0m" + case HealthStatusUnknown: + icon = "\033[90m?\033[0m" + } + + b.WriteString(fmt.Sprintf(" %s %s: %s", icon, check.Name, check.Message)) + if check.Duration > 0 { + b.WriteString(fmt.Sprintf(" (%s)", check.Duration.Round(time.Millisecond))) + } + b.WriteString("\n") + + // Show details for non-healthy checks + if check.Status != HealthStatusHealthy && len(check.Details) > 0 { + for k, v := range check.Details { + b.WriteString(fmt.Sprintf(" %s: %s\n", k, v)) + } + } + } + + return b.String() +} diff --git a/internal/sandbox/logger.go b/internal/sandbox/logger.go new file mode 100644 index 00000000..b88da082 --- /dev/null +++ b/internal/sandbox/logger.go @@ -0,0 +1,333 @@ +package sandbox + +import ( + "fmt" + "io" + "os" + "strings" + "sync" + "time" +) + +// LogLevel represents the severity of a log message. 
+type LogLevel int + +const ( + LogLevelDebug LogLevel = iota + LogLevelInfo + LogLevelWarn + LogLevelError + LogLevelNone // Disables all logging +) + +func (l LogLevel) String() string { + switch l { + case LogLevelDebug: + return "DEBUG" + case LogLevelInfo: + return "INFO" + case LogLevelWarn: + return "WARN" + case LogLevelError: + return "ERROR" + default: + return "UNKNOWN" + } +} + +// ParseLogLevel parses a string log level. +func ParseLogLevel(s string) LogLevel { + switch strings.ToLower(s) { + case "debug": + return LogLevelDebug + case "info": + return LogLevelInfo + case "warn", "warning": + return LogLevelWarn + case "error": + return LogLevelError + case "none", "off": + return LogLevelNone + default: + return LogLevelInfo + } +} + +// Logger provides structured logging with levels. +type Logger struct { + mu sync.Mutex + level LogLevel + output io.Writer + prefix string + fields map[string]interface{} + colorize bool +} + +// LoggerOption configures a Logger. +type LoggerOption func(*Logger) + +// WithLevel sets the log level. +func WithLevel(level LogLevel) LoggerOption { + return func(l *Logger) { + l.level = level + } +} + +// WithOutput sets the output writer. +func WithOutput(w io.Writer) LoggerOption { + return func(l *Logger) { + l.output = w + } +} + +// WithPrefix sets a prefix for all log messages. +func WithPrefix(prefix string) LoggerOption { + return func(l *Logger) { + l.prefix = prefix + } +} + +// WithColor enables/disables colorized output. +func WithColor(enabled bool) LoggerOption { + return func(l *Logger) { + l.colorize = enabled + } +} + +// NewLogger creates a new Logger with the given options. +func NewLogger(opts ...LoggerOption) *Logger { + l := &Logger{ + level: LogLevelInfo, + output: os.Stderr, + fields: make(map[string]interface{}), + colorize: true, + } + for _, opt := range opts { + opt(l) + } + return l +} + +// With creates a child logger with additional fields. 
+func (l *Logger) With(key string, value interface{}) *Logger { + l.mu.Lock() + defer l.mu.Unlock() + + newFields := make(map[string]interface{}, len(l.fields)+1) + for k, v := range l.fields { + newFields[k] = v + } + newFields[key] = value + + return &Logger{ + level: l.level, + output: l.output, + prefix: l.prefix, + fields: newFields, + colorize: l.colorize, + } +} + +// WithFields creates a child logger with multiple additional fields. +func (l *Logger) WithFields(fields map[string]interface{}) *Logger { + l.mu.Lock() + defer l.mu.Unlock() + + newFields := make(map[string]interface{}, len(l.fields)+len(fields)) + for k, v := range l.fields { + newFields[k] = v + } + for k, v := range fields { + newFields[k] = v + } + + return &Logger{ + level: l.level, + output: l.output, + prefix: l.prefix, + fields: newFields, + colorize: l.colorize, + } +} + +// SetLevel changes the log level. +func (l *Logger) SetLevel(level LogLevel) { + l.mu.Lock() + defer l.mu.Unlock() + l.level = level +} + +// Debug logs a debug message. +func (l *Logger) Debug(msg string, args ...interface{}) { + l.log(LogLevelDebug, msg, args...) +} + +// Info logs an info message. +func (l *Logger) Info(msg string, args ...interface{}) { + l.log(LogLevelInfo, msg, args...) +} + +// Warn logs a warning message. +func (l *Logger) Warn(msg string, args ...interface{}) { + l.log(LogLevelWarn, msg, args...) +} + +// Error logs an error message. +func (l *Logger) Error(msg string, args ...interface{}) { + l.log(LogLevelError, msg, args...) +} + +// Debugf logs a formatted debug message. +func (l *Logger) Debugf(format string, args ...interface{}) { + l.log(LogLevelDebug, fmt.Sprintf(format, args...)) +} + +// Infof logs a formatted info message. +func (l *Logger) Infof(format string, args ...interface{}) { + l.log(LogLevelInfo, fmt.Sprintf(format, args...)) +} + +// Warnf logs a formatted warning message. 
+func (l *Logger) Warnf(format string, args ...interface{}) { + l.log(LogLevelWarn, fmt.Sprintf(format, args...)) +} + +// Errorf logs a formatted error message. +func (l *Logger) Errorf(format string, args ...interface{}) { + l.log(LogLevelError, fmt.Sprintf(format, args...)) +} + +func (l *Logger) log(level LogLevel, msg string, args ...interface{}) { + l.mu.Lock() + defer l.mu.Unlock() + + if level < l.level { + return + } + + var b strings.Builder + + // Timestamp + b.WriteString(time.Now().Format("15:04:05")) + b.WriteString(" ") + + // Level with optional color + levelStr := level.String() + if l.colorize { + levelStr = l.colorizeLevel(level) + } + b.WriteString(fmt.Sprintf("%-5s", levelStr)) + b.WriteString(" ") + + // Prefix + if l.prefix != "" { + b.WriteString("[") + b.WriteString(l.prefix) + b.WriteString("] ") + } + + // Message + b.WriteString(msg) + + // Key-value pairs from args (key1, val1, key2, val2, ...) + fields := make(map[string]interface{}) + for k, v := range l.fields { + fields[k] = v + } + for i := 0; i+1 < len(args); i += 2 { + if key, ok := args[i].(string); ok { + fields[key] = args[i+1] + } + } + + // Append fields + if len(fields) > 0 { + b.WriteString(" ") + first := true + for k, v := range fields { + if !first { + b.WriteString(" ") + } + first = false + b.WriteString(k) + b.WriteString("=") + b.WriteString(fmt.Sprintf("%v", v)) + } + } + + b.WriteString("\n") + fmt.Fprint(l.output, b.String()) +} + +func (l *Logger) colorizeLevel(level LogLevel) string { + switch level { + case LogLevelDebug: + return "\033[36mDEBUG\033[0m" // Cyan + case LogLevelInfo: + return "\033[32mINFO\033[0m" // Green + case LogLevelWarn: + return "\033[33mWARN\033[0m" // Yellow + case LogLevelError: + return "\033[31mERROR\033[0m" // Red + default: + return level.String() + } +} + +// Global logger instance +var defaultLogger = NewLogger() + +// SetDefaultLogger sets the global default logger. 
+func SetDefaultLogger(l *Logger) { + defaultLogger = l +} + +// GetLogger returns the global default logger. +func GetLogger() *Logger { + return defaultLogger +} + +// Package-level logging functions that use the default logger + +func LogDebug(msg string, args ...interface{}) { + defaultLogger.Debug(msg, args...) +} + +func LogInfo(msg string, args ...interface{}) { + defaultLogger.Info(msg, args...) +} + +func LogWarn(msg string, args ...interface{}) { + defaultLogger.Warn(msg, args...) +} + +func LogError(msg string, args ...interface{}) { + defaultLogger.Error(msg, args...) +} + +// InitLogger initializes the default logger based on environment variables. +func InitLogger() { + level := LogLevelInfo + if lvl := os.Getenv("AMUX_LOG_LEVEL"); lvl != "" { + level = ParseLogLevel(lvl) + } + + colorize := true + if os.Getenv("NO_COLOR") != "" || os.Getenv("AMUX_NO_COLOR") != "" { + colorize = false + } + + var output io.Writer = os.Stderr + if logFile := os.Getenv("AMUX_LOG_FILE"); logFile != "" { + if f, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644); err == nil { + output = io.MultiWriter(os.Stderr, f) + } + } + + defaultLogger = NewLogger( + WithLevel(level), + WithOutput(output), + WithColor(colorize), + WithPrefix("amux"), + ) +} diff --git a/internal/sandbox/metadata.go b/internal/sandbox/metadata.go new file mode 100644 index 00000000..e630720c --- /dev/null +++ b/internal/sandbox/metadata.go @@ -0,0 +1,171 @@ +package sandbox + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "os" + "path/filepath" + "strings" + "time" + + "github.com/andyrewlee/amux/internal/config" +) + +// SandboxMeta tracks the most recent sandbox for a worktree. 
+type SandboxMeta struct { + SandboxID string `json:"sandboxId"` + CreatedAt string `json:"createdAt"` + Agent Agent `json:"agent"` + Provider string `json:"provider,omitempty"` + WorktreeID string `json:"worktreeId,omitempty"` + Project string `json:"project,omitempty"` +} + +// SandboxStore stores sandbox metadata per worktree. +// Stored globally at ~/.amux/sandbox.json +// Keys are worktree IDs. +type SandboxStore struct { + Sandboxes map[string]SandboxMeta `json:"sandboxes"` +} + +// ComputeWorktreeID returns a stable ID based on the working directory path. +// This is used to isolate workspaces for different projects within a sandbox. +func ComputeWorktreeID(cwd string) string { + abs, err := filepath.Abs(cwd) + if err != nil { + abs = cwd + } + hash := sha256.Sum256([]byte(abs)) + return hex.EncodeToString(hash[:])[:16] +} + +// globalMetaPath returns the path to the global sandbox metadata file. +func globalMetaPath() (string, error) { + paths, err := config.DefaultPaths() + if err != nil { + return "", err + } + return filepath.Join(paths.Home, "sandbox.json"), nil +} + +// LoadSandboxMeta loads sandbox metadata for the current worktree. +func LoadSandboxMeta(cwd string, provider string) (*SandboxMeta, error) { + state, err := LoadSandboxStore() + if err != nil || state == nil { + return nil, err + } + worktreeID := ComputeWorktreeID(cwd) + meta, ok := state.Sandboxes[worktreeID] + if !ok { + return nil, nil + } + if provider != "" && meta.Provider != "" && meta.Provider != provider { + return nil, nil + } + return &meta, nil +} + +// SaveSandboxMeta saves sandbox metadata for the current worktree. 
+func SaveSandboxMeta(cwd string, provider string, meta SandboxMeta) error { + state, err := LoadSandboxStore() + if err != nil { + return err + } + if state == nil { + state = &SandboxStore{Sandboxes: map[string]SandboxMeta{}} + } + if state.Sandboxes == nil { + state.Sandboxes = map[string]SandboxMeta{} + } + metaPath, err := globalMetaPath() + if err != nil { + return err + } + metaDir := filepath.Dir(metaPath) + if err := os.MkdirAll(metaDir, 0o755); err != nil { + return err + } + meta.CreatedAt = strings.TrimSpace(meta.CreatedAt) + if meta.CreatedAt == "" { + meta.CreatedAt = time.Now().UTC().Format(time.RFC3339) + } + meta.Provider = provider + meta.WorktreeID = ComputeWorktreeID(cwd) + state.Sandboxes[meta.WorktreeID] = meta + data, err := json.MarshalIndent(state, "", " ") + if err != nil { + return err + } + return os.WriteFile(metaPath, data, 0o644) +} + +// LoadSandboxStore loads the global sandbox store from ~/.amux/sandbox.json +func LoadSandboxStore() (*SandboxStore, error) { + metaPath, err := globalMetaPath() + if err != nil { + return nil, err + } + data, err := os.ReadFile(metaPath) + if err != nil { + return nil, nil + } + var state SandboxStore + if err := json.Unmarshal(data, &state); err != nil { + return nil, nil + } + if state.Sandboxes == nil { + state.Sandboxes = map[string]SandboxMeta{} + } + return &state, nil +} + +// RemoveSandboxMeta removes sandbox metadata for the current worktree. 
+func RemoveSandboxMeta(cwd string, _ string) error { + state, err := LoadSandboxStore() + if err != nil || state == nil { + return err + } + worktreeID := ComputeWorktreeID(cwd) + delete(state.Sandboxes, worktreeID) + metaPath, err := globalMetaPath() + if err != nil { + return err + } + if len(state.Sandboxes) == 0 { + return os.Remove(metaPath) + } + data, err := json.MarshalIndent(state, "", " ") + if err != nil { + return err + } + return os.WriteFile(metaPath, data, 0o644) +} + +// RemoveSandboxMetaByID removes sandbox metadata that matches the given sandbox ID. +func RemoveSandboxMetaByID(id string) error { + state, err := LoadSandboxStore() + if err != nil || state == nil { + return err + } + if id == "" { + return nil + } + for key, meta := range state.Sandboxes { + if meta.SandboxID == id { + delete(state.Sandboxes, key) + } + } + metaPath, err := globalMetaPath() + if err != nil { + return err + } + if len(state.Sandboxes) == 0 { + return os.Remove(metaPath) + } + data, err := json.MarshalIndent(state, "", " ") + if err != nil { + return err + } + return os.WriteFile(metaPath, data, 0o644) +} diff --git a/internal/sandbox/metadata_test.go b/internal/sandbox/metadata_test.go new file mode 100644 index 00000000..91c7a96d --- /dev/null +++ b/internal/sandbox/metadata_test.go @@ -0,0 +1,83 @@ +package sandbox + +import ( + "testing" +) + +func TestComputeWorktreeID(t *testing.T) { + tests := []struct { + name string + cwd string + wantLen int + wantSame bool // if true, compare with previous test + }{ + { + name: "absolute path", + cwd: "/home/user/project", + wantLen: 16, + }, + { + name: "different path gives different ID", + cwd: "/home/user/other-project", + wantLen: 16, + }, + { + name: "same path gives same ID", + cwd: "/home/user/project", + wantLen: 16, + }, + } + + var prevID string + var firstID string + + for i, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ComputeWorktreeID(tt.cwd) + + if len(got) != tt.wantLen { + 
t.Errorf("ComputeWorktreeID() length = %d, want %d", len(got), tt.wantLen) + } + + // Verify it's a valid hex string + for _, c := range got { + if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) { + t.Errorf("ComputeWorktreeID() contains invalid hex char: %c", c) + } + } + + // Track IDs for comparison + if i == 0 { + firstID = got + } else if i == 1 { + // Second test should be different from first + if got == firstID { + t.Error("Different paths should produce different IDs") + } + prevID = got + } else if i == 2 { + // Third test (same path as first) should match first + if got != firstID { + t.Error("Same path should produce same ID") + } + // And be different from second + if got == prevID { + t.Error("Same path should not match different path's ID") + } + } + }) + } +} + +func TestComputeWorktreeID_Deterministic(t *testing.T) { + cwd := "/home/user/my-project" + + // Call multiple times, should always return same value + id1 := ComputeWorktreeID(cwd) + id2 := ComputeWorktreeID(cwd) + id3 := ComputeWorktreeID(cwd) + + if id1 != id2 || id2 != id3 { + t.Errorf("ComputeWorktreeID should be deterministic: got %s, %s, %s", id1, id2, id3) + } +} diff --git a/internal/sandbox/plugins.go b/internal/sandbox/plugins.go new file mode 100644 index 00000000..000787d7 --- /dev/null +++ b/internal/sandbox/plugins.go @@ -0,0 +1,140 @@ +package sandbox + +// AgentPlugin defines the interface for agent implementations. +// This allows adding new agents without modifying core code. +type AgentPlugin interface { + // Name returns the agent's identifier (e.g., "claude", "codex"). + Name() string + + // DisplayName returns a human-friendly name (e.g., "Claude Code"). + DisplayName() string + + // Description returns a short description of the agent. + Description() string + + // InstallMethods returns the installation methods in priority order. + InstallMethods() []InstallMethod + + // CredentialPaths returns paths where credentials are stored. 
+ // Paths are relative to the home directory. + CredentialPaths() []CredentialPath + + // SettingsPaths returns paths where settings are stored locally. + // Paths are relative to the user's home directory. + SettingsPaths() []SettingsPath + + // ContextFiles returns project-level context files the agent reads. + // (e.g., CLAUDE.md, AGENT.md) + ContextFiles() []string + + // EnvVars returns environment variables that can configure the agent. + EnvVars() []EnvVarSpec + + // LoginCommands returns the commands to run for authentication. + // Empty if the agent doesn't require explicit login. + LoginCommands() []string + + // VersionCommand returns the command to check the installed version. + VersionCommand() string + + // Validate checks if the agent is properly installed and configured. + Validate(computer RemoteSandbox) error +} + +// InstallMethod describes how to install an agent. +type InstallMethod struct { + Type InstallType // npm, curl, binary, etc. + Command string // The installation command + Package string // Package name (for npm/pip) + URL string // URL (for curl installers) +} + +type InstallType string + +const ( + InstallTypeNPM InstallType = "npm" + InstallTypeCurl InstallType = "curl" + InstallTypeBinary InstallType = "binary" + InstallTypePip InstallType = "pip" + InstallTypeGo InstallType = "go" +) + +// CredentialPath describes where an agent stores credentials. +type CredentialPath struct { + HomePath string // Path in home directory (e.g., ".claude") + IsDir bool // Whether this is a directory or file +} + +// SettingsPath describes where an agent stores settings locally. +type SettingsPath struct { + LocalPath string // Path relative to home (e.g., ".claude/settings.json") + Description string // Human-readable description + SafeKeys []string // If JSON, only sync these keys (empty = all safe keys) +} + +// EnvVarSpec describes an environment variable an agent uses. 
+type EnvVarSpec struct { + Name string // Variable name (e.g., "ANTHROPIC_API_KEY") + Description string // What it's used for + Required bool // Whether it's required + Secret bool // Whether it contains sensitive data +} + +// AgentRegistry manages registered agent plugins. +type AgentRegistry struct { + plugins map[string]AgentPlugin +} + +// NewAgentRegistry creates a new agent registry. +func NewAgentRegistry() *AgentRegistry { + return &AgentRegistry{ + plugins: make(map[string]AgentPlugin), + } +} + +// Register adds a plugin to the registry. +func (r *AgentRegistry) Register(plugin AgentPlugin) { + r.plugins[plugin.Name()] = plugin +} + +// Get returns a plugin by name. +func (r *AgentRegistry) Get(name string) (AgentPlugin, bool) { + plugin, ok := r.plugins[name] + return plugin, ok +} + +// All returns all registered plugins. +func (r *AgentRegistry) All() []AgentPlugin { + plugins := make([]AgentPlugin, 0, len(r.plugins)) + for _, p := range r.plugins { + plugins = append(plugins, p) + } + return plugins +} + +// Names returns all registered plugin names. +func (r *AgentRegistry) Names() []string { + names := make([]string, 0, len(r.plugins)) + for name := range r.plugins { + names = append(names, name) + } + return names +} + +// Global agent registry +var defaultRegistry = NewAgentRegistry() + +// RegisterAgent registers an agent plugin globally. +func RegisterAgent(plugin AgentPlugin) { + defaultRegistry.Register(plugin) +} + +// GetAgentPlugin returns a registered agent plugin. +func GetAgentPlugin(name string) (AgentPlugin, bool) { + return defaultRegistry.Get(name) +} + +// AllAgentPlugins returns all registered agent plugins. 
+func AllAgentPlugins() []AgentPlugin {
+	return defaultRegistry.All()
+}
diff --git a/internal/sandbox/plugins_impl.go b/internal/sandbox/plugins_impl.go
new file mode 100644
index 00000000..6a01b7a0
--- /dev/null
+++ b/internal/sandbox/plugins_impl.go
@@ -0,0 +1,407 @@
+package sandbox
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ========== Built-in Agent Implementations ==========
+
+// ClaudePlugin implements AgentPlugin for Claude Code.
+type ClaudePlugin struct{}
+
+// Name returns the CLI identifier for the agent.
+func (p *ClaudePlugin) Name() string { return "claude" }
+
+// DisplayName returns the human-friendly agent name.
+func (p *ClaudePlugin) DisplayName() string { return "Claude Code" }
+
+// Description returns a one-line summary of the agent.
+func (p *ClaudePlugin) Description() string { return "Anthropic's AI coding assistant" }
+
+// InstallMethods returns installation options in priority order.
+func (p *ClaudePlugin) InstallMethods() []InstallMethod {
+	return []InstallMethod{{Type: InstallTypeNPM, Package: "@anthropic-ai/claude-code@latest"}}
+}
+
+// CredentialPaths lists the home-relative locations of stored credentials.
+func (p *ClaudePlugin) CredentialPaths() []CredentialPath {
+	return []CredentialPath{{HomePath: ".claude", IsDir: true}}
+}
+
+// SettingsPaths lists the home-relative locations of local settings.
+func (p *ClaudePlugin) SettingsPaths() []SettingsPath {
+	return []SettingsPath{{
+		LocalPath:   ".claude/settings.json",
+		Description: "Claude Code settings (model preferences, features, permissions)",
+	}}
+}
+
+// ContextFiles lists project-level files the agent reads for context.
+func (p *ClaudePlugin) ContextFiles() []string {
+	return []string{"CLAUDE.md", ".claude/settings.local.json"}
+}
+
+// EnvVars lists the environment variables the agent understands.
+func (p *ClaudePlugin) EnvVars() []EnvVarSpec {
+	return []EnvVarSpec{
+		{Name: "ANTHROPIC_API_KEY", Description: "Anthropic API key", Secret: true},
+		{Name: "CLAUDE_API_KEY", Description: "Alternative API key name", Secret: true},
+		{Name: "ANTHROPIC_AUTH_TOKEN", Description: "OAuth token", Secret: true},
+	}
+}
+
+// LoginCommands returns nil; Claude handles login interactively.
+func (p *ClaudePlugin) LoginCommands() []string { return nil }
+
+// VersionCommand returns the command used to probe the installed version.
+func (p *ClaudePlugin) VersionCommand() string { return "claude --version" }
+
+// Validate confirms the claude binary is reachable inside the sandbox.
+func (p *ClaudePlugin) Validate(computer RemoteSandbox) error {
+	if resp, err := execCommand(computer, "command -v claude", nil); err == nil && resp.ExitCode == 0 {
+		return nil
+	}
+	return fmt.Errorf("claude not found in PATH")
+}
+
+// CodexPlugin implements AgentPlugin for OpenAI Codex.
+type CodexPlugin struct{}
+
+// Name returns the CLI identifier for the agent.
+func (p *CodexPlugin) Name() string { return "codex" }
+
+// DisplayName returns the human-friendly agent name.
+func (p *CodexPlugin) DisplayName() string { return "Codex CLI" }
+
+// Description returns a one-line summary of the agent.
+func (p *CodexPlugin) Description() string { return "OpenAI's Codex coding agent" }
+
+// InstallMethods returns installation options in priority order.
+func (p *CodexPlugin) InstallMethods() []InstallMethod {
+	return []InstallMethod{{Type: InstallTypeNPM, Package: "@openai/codex@latest"}}
+}
+
+// CredentialPaths lists the home-relative locations of stored credentials.
+func (p *CodexPlugin) CredentialPaths() []CredentialPath {
+	return []CredentialPath{
+		{HomePath: ".codex", IsDir: true},
+		{HomePath: ".config/codex", IsDir: true},
+	}
+}
+
+// SettingsPaths lists the home-relative locations of local settings.
+func (p *CodexPlugin) SettingsPaths() []SettingsPath {
+	return []SettingsPath{{
+		LocalPath:   ".codex/config.toml",
+		Description: "Codex CLI settings (model preferences, editor config)",
+	}}
+}
+
+// ContextFiles lists project-level files the agent reads for context.
+func (p *CodexPlugin) ContextFiles() []string { return []string{"AGENTS.md", "codex.md"} }
+
+// EnvVars lists the environment variables the agent understands.
+func (p *CodexPlugin) EnvVars() []EnvVarSpec {
+	return []EnvVarSpec{{Name: "OPENAI_API_KEY", Description: "OpenAI API key", Secret: true}}
+}
+
+// LoginCommands returns the arguments used for device-code authentication.
+func (p *CodexPlugin) LoginCommands() []string { return []string{"login", "--device-auth"} }
+
+// VersionCommand returns the command used to probe the installed version.
+func (p *CodexPlugin) VersionCommand() string { return "codex --version" }
+
+// Validate confirms the codex binary is reachable inside the sandbox.
+func (p *CodexPlugin) Validate(computer RemoteSandbox) error {
+	if resp, err := execCommand(computer, "command -v codex", nil); err == nil && resp.ExitCode == 0 {
+		return nil
+	}
+	return fmt.Errorf("codex not found in PATH")
+}
+
+// OpenCodePlugin implements AgentPlugin for OpenCode.
+type OpenCodePlugin struct{} + +func (p *OpenCodePlugin) Name() string { return "opencode" } +func (p *OpenCodePlugin) DisplayName() string { return "OpenCode" } +func (p *OpenCodePlugin) Description() string { + return "Open source AI coding agent" +} + +func (p *OpenCodePlugin) InstallMethods() []InstallMethod { + return []InstallMethod{ + {Type: InstallTypeCurl, URL: "https://opencode.ai/install"}, + {Type: InstallTypeNPM, Package: "opencode-ai@latest"}, + } +} + +func (p *OpenCodePlugin) CredentialPaths() []CredentialPath { + return []CredentialPath{ + {HomePath: ".local/share/opencode", IsDir: true}, + {HomePath: ".config/opencode", IsDir: true}, + } +} + +func (p *OpenCodePlugin) SettingsPaths() []SettingsPath { + return []SettingsPath{ + { + LocalPath: ".config/opencode/config.json", + Description: "OpenCode settings (model preferences, keybindings)", + }, + } +} + +func (p *OpenCodePlugin) ContextFiles() []string { + return []string{} +} + +func (p *OpenCodePlugin) EnvVars() []EnvVarSpec { + return []EnvVarSpec{ + {Name: "ANTHROPIC_API_KEY", Description: "Anthropic API key for Claude models", Secret: true}, + {Name: "OPENAI_API_KEY", Description: "OpenAI API key", Secret: true}, + {Name: "GEMINI_API_KEY", Description: "Google Gemini API key", Secret: true}, + } +} + +func (p *OpenCodePlugin) LoginCommands() []string { + return []string{"auth", "login"} +} + +func (p *OpenCodePlugin) VersionCommand() string { + return "opencode --version" +} + +func (p *OpenCodePlugin) Validate(computer RemoteSandbox) error { + resp, err := execCommand(computer, "command -v opencode", nil) + if err != nil || resp.ExitCode != 0 { + return fmt.Errorf("opencode not found in PATH") + } + return nil +} + +// AmpPlugin implements AgentPlugin for Sourcegraph Amp. 
+type AmpPlugin struct{} + +func (p *AmpPlugin) Name() string { return "amp" } +func (p *AmpPlugin) DisplayName() string { return "Amp" } +func (p *AmpPlugin) Description() string { + return "Sourcegraph's AI coding agent" +} + +func (p *AmpPlugin) InstallMethods() []InstallMethod { + return []InstallMethod{ + {Type: InstallTypeCurl, URL: "https://ampcode.com/install.sh"}, + {Type: InstallTypeNPM, Package: "@sourcegraph/amp@latest"}, + } +} + +func (p *AmpPlugin) CredentialPaths() []CredentialPath { + return []CredentialPath{ + {HomePath: ".config/amp", IsDir: true}, + {HomePath: ".local/share/amp", IsDir: true}, + } +} + +func (p *AmpPlugin) SettingsPaths() []SettingsPath { + return []SettingsPath{ + { + LocalPath: ".config/amp/config.json", + Description: "Amp settings (model preferences, workspace config)", + }, + } +} + +func (p *AmpPlugin) ContextFiles() []string { + return []string{"AGENT.md"} +} + +func (p *AmpPlugin) EnvVars() []EnvVarSpec { + return []EnvVarSpec{ + {Name: "AMP_API_KEY", Description: "Amp API key", Secret: true}, + } +} + +func (p *AmpPlugin) LoginCommands() []string { + return []string{"login"} +} + +func (p *AmpPlugin) VersionCommand() string { + return "amp --version" +} + +func (p *AmpPlugin) Validate(computer RemoteSandbox) error { + resp, err := execCommand(computer, "command -v amp || test -x $HOME/.amp/bin/amp", nil) + if err != nil || resp.ExitCode != 0 { + return fmt.Errorf("amp not found in PATH") + } + return nil +} + +// GeminiPlugin implements AgentPlugin for Google Gemini CLI. 
+type GeminiPlugin struct{} + +func (p *GeminiPlugin) Name() string { return "gemini" } +func (p *GeminiPlugin) DisplayName() string { return "Gemini CLI" } +func (p *GeminiPlugin) Description() string { + return "Google's Gemini AI coding agent" +} + +func (p *GeminiPlugin) InstallMethods() []InstallMethod { + return []InstallMethod{ + {Type: InstallTypeNPM, Package: "@google/gemini-cli@latest"}, + } +} + +func (p *GeminiPlugin) CredentialPaths() []CredentialPath { + return []CredentialPath{ + {HomePath: ".gemini", IsDir: true}, + } +} + +func (p *GeminiPlugin) SettingsPaths() []SettingsPath { + return []SettingsPath{ + { + LocalPath: ".gemini/settings.json", + Description: "Gemini CLI settings (model preferences)", + }, + } +} + +func (p *GeminiPlugin) ContextFiles() []string { + return []string{"GEMINI.md"} +} + +func (p *GeminiPlugin) EnvVars() []EnvVarSpec { + return []EnvVarSpec{ + {Name: "GEMINI_API_KEY", Description: "Gemini API key", Secret: true}, + {Name: "GOOGLE_API_KEY", Description: "Google API key", Secret: true}, + {Name: "GOOGLE_APPLICATION_CREDENTIALS", Description: "Service account credentials", Secret: true}, + } +} + +func (p *GeminiPlugin) LoginCommands() []string { + return nil // Gemini handles login interactively +} + +func (p *GeminiPlugin) VersionCommand() string { + return "gemini --version" +} + +func (p *GeminiPlugin) Validate(computer RemoteSandbox) error { + resp, err := execCommand(computer, "command -v gemini", nil) + if err != nil || resp.ExitCode != 0 { + return fmt.Errorf("gemini not found in PATH") + } + return nil +} + +// DroidPlugin implements AgentPlugin for Factory Droid. 
+type DroidPlugin struct{} + +func (p *DroidPlugin) Name() string { return "droid" } +func (p *DroidPlugin) DisplayName() string { return "Droid" } +func (p *DroidPlugin) Description() string { + return "Factory's AI coding agent" +} + +func (p *DroidPlugin) InstallMethods() []InstallMethod { + return []InstallMethod{ + {Type: InstallTypeCurl, URL: "https://app.factory.ai/cli"}, + } +} + +func (p *DroidPlugin) CredentialPaths() []CredentialPath { + return []CredentialPath{ + {HomePath: ".factory", IsDir: true}, + } +} + +func (p *DroidPlugin) SettingsPaths() []SettingsPath { + return []SettingsPath{} +} + +func (p *DroidPlugin) ContextFiles() []string { + return []string{} +} + +func (p *DroidPlugin) EnvVars() []EnvVarSpec { + return []EnvVarSpec{ + {Name: "FACTORY_API_KEY", Description: "Factory API key", Secret: true}, + } +} + +func (p *DroidPlugin) LoginCommands() []string { + return nil // Droid handles login via /login command +} + +func (p *DroidPlugin) VersionCommand() string { + return "droid --version" +} + +func (p *DroidPlugin) Validate(computer RemoteSandbox) error { + resp, err := execCommand(computer, "command -v droid || test -x $HOME/.factory/bin/droid", nil) + if err != nil || resp.ExitCode != 0 { + return fmt.Errorf("droid not found in PATH") + } + return nil +} + +// ShellPlugin implements AgentPlugin for a basic shell. 
+type ShellPlugin struct{} + +func (p *ShellPlugin) Name() string { return "shell" } +func (p *ShellPlugin) DisplayName() string { return "Shell" } +func (p *ShellPlugin) Description() string { return "Interactive bash shell" } +func (p *ShellPlugin) InstallMethods() []InstallMethod { return nil } +func (p *ShellPlugin) CredentialPaths() []CredentialPath { return nil } +func (p *ShellPlugin) SettingsPaths() []SettingsPath { return nil } +func (p *ShellPlugin) ContextFiles() []string { return nil } +func (p *ShellPlugin) EnvVars() []EnvVarSpec { return nil } +func (p *ShellPlugin) LoginCommands() []string { return nil } +func (p *ShellPlugin) VersionCommand() string { return "bash --version" } +func (p *ShellPlugin) Validate(computer RemoteSandbox) error { + return nil // bash is always available +} + +// Initialize registers all built-in plugins. +func init() { + RegisterAgent(&ClaudePlugin{}) + RegisterAgent(&CodexPlugin{}) + RegisterAgent(&OpenCodePlugin{}) + RegisterAgent(&AmpPlugin{}) + RegisterAgent(&GeminiPlugin{}) + RegisterAgent(&DroidPlugin{}) + RegisterAgent(&ShellPlugin{}) +} + +// HasCredentials checks if the agent has credentials in the sandbox. 
+func HasCredentials(plugin AgentPlugin, computer RemoteSandbox) bool {
+	homeDir := getSandboxHomeDir(computer)
+	for _, cred := range plugin.CredentialPaths() {
+		// Credential paths are home-relative; build the absolute sandbox path.
+		checkPath := fmt.Sprintf("%s/%s", homeDir, cred.HomePath)
+		var cmd string
+		if cred.IsDir {
+			// A directory only counts if it exists AND is non-empty.
+			cmd = fmt.Sprintf("test -d %s && ls -A %s | head -1", ShellQuote(checkPath), ShellQuote(checkPath))
+		} else {
+			cmd = SafeCommands.Test("-f", checkPath)
+		}
+		resp, err := execCommand(computer, cmd, nil)
+		if err == nil && resp.ExitCode == 0 {
+			stdout := strings.TrimSpace(getStdoutFromResp(resp))
+			// For directories, require at least one listed entry.
+			if !cred.IsDir || stdout != "" {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// getStdoutFromResp safely extracts stdout from a possibly-nil exec result.
+func getStdoutFromResp(resp *ExecResult) string {
+	if resp == nil {
+		return ""
+	}
+	return resp.Stdout
+}
diff --git a/internal/sandbox/plugins_test.go b/internal/sandbox/plugins_test.go
new file mode 100644
index 00000000..c4dbd487
--- /dev/null
+++ b/internal/sandbox/plugins_test.go
@@ -0,0 +1,236 @@
+package sandbox
+
+import (
+	"testing"
+)
+
+// TestAgentRegistry_GetPlugin verifies every built-in agent is registered
+// under its expected name.
+func TestAgentRegistry_GetPlugin(t *testing.T) {
+	agents := []string{
+		"claude",
+		"codex",
+		"opencode",
+		"amp",
+		"gemini",
+		"droid",
+	}
+
+	for _, agent := range agents {
+		t.Run(agent, func(t *testing.T) {
+			plugin, ok := GetAgentPlugin(agent)
+			if !ok {
+				t.Errorf("GetAgentPlugin(%s) returned false", agent)
+				return
+			}
+			if plugin == nil {
+				t.Errorf("GetAgentPlugin(%s) returned nil", agent)
+				return
+			}
+
+			// NOTE(review): Name() already returns string; the conversion is a no-op.
+			if string(plugin.Name()) != agent {
+				t.Errorf("GetAgentPlugin(%s).Name() = %s, want %s", agent, plugin.Name(), agent)
+			}
+		})
+	}
+}
+
+// TestAgentRegistry_UnknownAgent verifies lookups of unregistered names fail cleanly.
+func TestAgentRegistry_UnknownAgent(t *testing.T) {
+	plugin, ok := GetAgentPlugin("unknown")
+	if ok {
+		t.Error("GetAgentPlugin(unknown) should return false")
+	}
+	if plugin != nil {
+		t.Error("GetAgentPlugin(unknown) should return nil")
+	}
+}
+
+func TestAgentRegistry_ShellAgent(t *testing.T) {
+	// Shell IS a valid plugin (provides bash shell access)
+	plugin, ok := GetAgentPlugin("shell")
+	if !ok {
+		t.Error("GetAgentPlugin(shell) should return true")
+	}
+	if plugin == nil {
+		t.Error("GetAgentPlugin(shell) should not return nil")
+	}
+	if plugin != nil && plugin.Name() != "shell" {
+		t.Errorf("GetAgentPlugin(shell).Name() = %s, want shell", plugin.Name())
+	}
+}
+
+// TestAgentPlugin_CredentialPaths verifies each real agent declares at least
+// one non-empty credential path.
+func TestAgentPlugin_CredentialPaths(t *testing.T) {
+	agents := []string{
+		"claude",
+		"codex",
+		"opencode",
+		"amp",
+		"gemini",
+		"droid",
+	}
+
+	for _, agent := range agents {
+		t.Run(agent, func(t *testing.T) {
+			plugin, ok := GetAgentPlugin(agent)
+			if !ok {
+				t.Fatalf("GetAgentPlugin(%s) returned false", agent)
+			}
+
+			paths := plugin.CredentialPaths()
+			if len(paths) == 0 {
+				t.Errorf("Plugin %s has no credential paths", agent)
+			}
+
+			for _, path := range paths {
+				if path.HomePath == "" {
+					t.Errorf("Plugin %s has empty HomePath", agent)
+				}
+			}
+		})
+	}
+}
+
+// TestAgentPlugin_InstallMethods verifies each real agent declares at least
+// one install method with a non-empty type.
+func TestAgentPlugin_InstallMethods(t *testing.T) {
+	agents := []string{
+		"claude",
+		"codex",
+		"opencode",
+		"amp",
+		"gemini",
+		"droid",
+	}
+
+	for _, agent := range agents {
+		t.Run(agent, func(t *testing.T) {
+			plugin, ok := GetAgentPlugin(agent)
+			if !ok {
+				t.Fatalf("GetAgentPlugin(%s) returned false", agent)
+			}
+
+			methods := plugin.InstallMethods()
+			if len(methods) == 0 {
+				t.Errorf("Plugin %s has no install methods", agent)
+			}
+
+			for _, method := range methods {
+				if method.Type == "" {
+					t.Errorf("Plugin %s has empty install type", agent)
+				}
+			}
+		})
+	}
+}
+
+func TestAgentPlugin_Name(t *testing.T) {
+	tests := []struct {
+		agent    string
+		wantName string
+	}{
+		{"claude", "claude"},
+		{"codex", "codex"},
+		{"opencode", "opencode"},
+		{"amp", "amp"},
+		{"gemini", "gemini"},
+		{"droid", "droid"},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.agent, func(t *testing.T) {
+			plugin, ok := GetAgentPlugin(tt.agent)
+			if !ok {
+				t.Fatalf("GetAgentPlugin(%s) returned false", tt.agent)
+			}
+
+			if plugin.Name() != tt.wantName {
+				t.Errorf("Plugin %s Name() = %s, want %s", tt.agent, plugin.Name(), tt.wantName)
+			}
+		})
+	}
+}
+
+func TestAgentPlugin_EnvVars(t *testing.T) {
+	// Test that plugins have expected env vars
+	tests := []struct {
+		agent       string
+		expectedEnv string
+	}{
+		{"claude", "ANTHROPIC_API_KEY"},
+		{"codex", "OPENAI_API_KEY"},
+		{"gemini", "GEMINI_API_KEY"},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.agent, func(t *testing.T) {
+			plugin, ok := GetAgentPlugin(tt.agent)
+			if !ok {
+				t.Fatalf("GetAgentPlugin(%s) returned false", tt.agent)
+			}
+
+			envVars := plugin.EnvVars()
+			found := false
+			for _, env := range envVars {
+				if env.Name == tt.expectedEnv {
+					found = true
+					if !env.Secret {
+						t.Errorf("Plugin %s env var %s should be marked as secret", tt.agent, tt.expectedEnv)
+					}
+					break
+				}
+			}
+
+			if !found {
+				t.Errorf("Plugin %s should have env var %s", tt.agent, tt.expectedEnv)
+			}
+		})
+	}
+}
+
+func TestHasCredentials(t *testing.T) {
+	// Note: The HasCredentials function checks directories with complex commands
+	// like "test -d ... && ls -A ... | head -1"
+	// The mock by default returns empty output, which means "no files in dir"
+	// so HasCredentials returns false for directory checks
+
+	t.Run("claude without credentials", func(t *testing.T) {
+		// NOTE(review): NewMockRemoteSandbox/SetHomeDir are test helpers
+		// defined elsewhere in this package (not in this hunk).
+		mock := NewMockRemoteSandbox("test-123")
+		mock.SetHomeDir("/home/testuser")
+		// Default mock returns empty output, so dir appears empty
+
+		plugin, ok := GetAgentPlugin("claude")
+		if !ok {
+			t.Fatal("GetAgentPlugin(claude) returned false")
+		}
+
+		got := HasCredentials(plugin, mock)
+		// With default mock (empty output), credentials are not found
+		if got {
+			t.Error("HasCredentials() should return false when dir is empty")
+		}
+	})
+}
+
+// TestIsValidAgent checks agent-name validation is exact and case-sensitive.
+func TestIsValidAgent(t *testing.T) {
+	validAgents := []string{
+		"claude", "codex", "opencode", "amp", "gemini", "droid", "shell",
+	}
+
+	for _, agent := range validAgents {
+		if !IsValidAgent(agent) {
+			t.Errorf("IsValidAgent(%s) = false, want true", agent)
+		}
+	}
+
+	invalidAgents := []string{
+		"", "unknown", "Claude", "CLAUDE", "foo",
+	}
+
+	for _, agent := range invalidAgents {
+		if IsValidAgent(agent) {
+			t.Errorf("IsValidAgent(%s) = true, want false", agent)
+		}
+	}
+}
+
+func TestAgent_String(t *testing.T) {
+	if AgentClaude.String() != "claude" {
+		t.Errorf("AgentClaude.String() = %s, want claude", AgentClaude.String())
+	}
+}
diff --git a/internal/sandbox/preflight.go b/internal/sandbox/preflight.go
new file mode 100644
index 00000000..d708b7fb
--- /dev/null
+++ b/internal/sandbox/preflight.go
@@ -0,0 +1,34 @@
+package sandbox
+
+import (
+	"errors"
+	"os"
+	"os/exec"
+
+	"golang.org/x/term"
+)
+
+const (
+	envAmuxSkipPreflight = "AMUX_SKIP_PREFLIGHT"
+)
+
+// RunPreflight validates required local dependencies for interactive sessions.
+// Setting AMUX_SKIP_PREFLIGHT=1 bypasses all checks (envIsOne is defined
+// elsewhere in this package).
+func RunPreflight() error {
+	if envIsOne(envAmuxSkipPreflight) {
+		return nil
+	}
+	cfg, err := LoadConfig()
+	if err != nil {
+		return err
+	}
+	// ResolveAPIKey consults config plus environment; empty means unauthenticated.
+	if ResolveAPIKey(cfg) == "" {
+		return errors.New("Daytona API key not found. Set AMUX_DAYTONA_API_KEY or run `amux auth login`.")
+	}
+	if _, err := exec.LookPath("ssh"); err != nil {
+		return errors.New("ssh is required for interactive sessions. Install OpenSSH and try again.")
+	}
+	if !term.IsTerminal(int(os.Stdin.Fd())) {
+		return errors.New("Interactive mode requires a TTY.")
+	}
+	return nil
+}
diff --git a/internal/sandbox/preflight_enhanced.go b/internal/sandbox/preflight_enhanced.go
new file mode 100644
index 00000000..60b747fe
--- /dev/null
+++ b/internal/sandbox/preflight_enhanced.go
@@ -0,0 +1,490 @@
+package sandbox
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// PreflightCheck represents a single preflight check.
+type PreflightCheck struct {
+	Name        string
+	Description string
+	Required    bool // If true, failure blocks sandbox creation
+	Check       func(ctx context.Context) PreflightResult
+}
+
+// PreflightResult contains the result of a preflight check.
+type PreflightResult struct {
+	Passed     bool              // whether the check succeeded
+	Message    string            // human-readable outcome
+	Suggestion string            // remediation hint, set on failure
+	Details    map[string]string // free-form key/value diagnostics
+	Duration   time.Duration     // how long the check took
+}
+
+// PreflightReport contains all preflight check results.
+type PreflightReport struct {
+	Passed   bool // false only when a Required check failed
+	Checks   map[string]PreflightResult
+	Duration time.Duration
+	Errors   []string // failures of required checks
+	Warnings []string // failures of optional checks
+}
+
+// RunEnhancedPreflight performs comprehensive preflight checks.
+// Required-check failures flip report.Passed; optional failures only warn.
+// Checks are run sequentially and abort early if ctx is cancelled.
+func RunEnhancedPreflight(ctx context.Context, verbose bool) (*PreflightReport, error) {
+	start := time.Now()
+	report := &PreflightReport{
+		Passed: true,
+		Checks: make(map[string]PreflightResult),
+	}
+
+	checks := []PreflightCheck{
+		{
+			Name:        "api_key",
+			Description: "Daytona API key configured",
+			Required:    true,
+			Check:       checkAPIKey,
+		},
+		{
+			Name:        "ssh_available",
+			Description: "SSH client available",
+			Required:    true,
+			Check:       checkSSHAvailable,
+		},
+		{
+			Name:        "network_connectivity",
+			Description: "Network connectivity to Daytona",
+			Required:    true,
+			Check:       checkNetworkConnectivity,
+		},
+		{
+			Name:        "disk_space",
+			Description: "Sufficient disk space",
+			Required:    false,
+			Check:       checkLocalDiskSpace,
+		},
+		{
+			Name:        "git_available",
+			Description: "Git available for workspace detection",
+			Required:    false,
+			Check:       checkGitAvailable,
+		},
+		{
+			Name:        "node_available",
+			Description: "Node.js available (for npm agents)",
+			Required:    false,
+			Check:       checkNodeAvailable,
+		},
+		{
+			Name:        "terminal",
+			Description: "Terminal is interactive",
+			Required:    false,
+			Check:       checkTerminal,
+		},
+		{
+			Name:        "config_valid",
+			Description: "Configuration file valid",
+			Required:    false,
+			Check:       checkConfigValid,
+		},
+	}
+
+	for _, check := range checks {
+		if ctx.Err() != nil {
+			break
+		}
+
+		if verbose {
+			fmt.Printf("Checking %s... ", check.Description)
+		}
+
+		result := check.Check(ctx)
+		report.Checks[check.Name] = result
+
+		// Verbose mode prints colored status markers (green check, red
+		// cross for required failures, yellow bang for warnings).
+		if verbose {
+			if result.Passed {
+				fmt.Println("\033[32m✓\033[0m")
+			} else if check.Required {
+				fmt.Println("\033[31m✗\033[0m")
+			} else {
+				fmt.Println("\033[33m!\033[0m")
+			}
+		}
+
+		if !result.Passed {
+			if check.Required {
+				report.Passed = false
+				report.Errors = append(report.Errors, fmt.Sprintf("%s: %s", check.Name, result.Message))
+			} else {
+				report.Warnings = append(report.Warnings, fmt.Sprintf("%s: %s", check.Name, result.Message))
+			}
+		}
+	}
+
+	report.Duration = time.Since(start)
+	return report, nil
+}
+
+// checkAPIKey verifies an API key is present and superficially well-formed.
+// NOTE(review): this reads DAYTONA_API_KEY directly, while RunPreflight in
+// preflight.go uses ResolveAPIKey(cfg) / AMUX_DAYTONA_API_KEY — confirm
+// which environment variable is canonical; a key set only via config or
+// AMUX_DAYTONA_API_KEY would fail this check.
+func checkAPIKey(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	apiKey := os.Getenv("DAYTONA_API_KEY")
+	if apiKey == "" {
+		result.Passed = false
+		result.Message = "DAYTONA_API_KEY environment variable not set"
+		result.Suggestion = "Run `amux setup` to configure your API key"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	// Basic format validation
+	if len(apiKey) < 20 {
+		result.Passed = false
+		result.Message = "API key appears to be invalid (too short)"
+		result.Suggestion = "Check your API key at https://app.daytona.io/settings"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	result.Passed = true
+	result.Message = "API key configured"
+	// Only a redacted prefix is recorded; never the full secret.
+	result.Details["key_prefix"] = apiKey[:8] + "..."
+	result.Duration = time.Since(start)
+	return result
+}
+
+// checkSSHAvailable confirms an ssh binary is on PATH and records its version.
+func checkSSHAvailable(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	path, err := exec.LookPath("ssh")
+	if err != nil {
+		result.Passed = false
+		result.Message = "SSH client not found"
+		result.Suggestion = "Install OpenSSH client"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	// Get SSH version (ssh -V writes to stderr, hence CombinedOutput)
+	cmd := exec.CommandContext(ctx, "ssh", "-V")
+	output, _ := cmd.CombinedOutput()
+	version := strings.TrimSpace(string(output))
+
+	result.Passed = true
+	result.Message = "SSH client available"
+	result.Details["path"] = path
+	if version != "" {
+		result.Details["version"] = version
+	}
+	result.Duration = time.Since(start)
+	return result
+}
+
+// checkNetworkConnectivity probes DNS resolution and HTTPS reachability of
+// the Daytona API endpoint.
+func checkNetworkConnectivity(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	// Check DNS resolution
+	_, err := net.LookupHost("api.daytona.io")
+	if err != nil {
+		result.Passed = false
+		result.Message = "Cannot resolve api.daytona.io"
+		result.Suggestion = "Check your DNS settings and internet connection"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	// Check HTTP connectivity. The request-creation error is ignored:
+	// the method and URL are constants, so it cannot fail here.
+	client := &http.Client{Timeout: 10 * time.Second}
+	req, _ := http.NewRequestWithContext(ctx, "HEAD", "https://api.daytona.io", nil)
+	resp, err := client.Do(req)
+	if err != nil {
+		result.Passed = false
+		result.Message = fmt.Sprintf("Cannot connect to Daytona API: %v", err)
+		result.Suggestion = "Check your firewall settings and internet connection"
+		result.Duration = time.Since(start)
+		return result
+	}
+	defer resp.Body.Close()
+
+	result.Passed = true
+	result.Message = "Network connectivity to Daytona working"
+	result.Details["status"] = resp.Status
+	result.Duration = time.Since(start)
+	return result
+}
+
+// checkLocalDiskSpace estimates free space in the temp directory via df.
+// Best-effort: unparseable or unavailable output passes the check.
+func checkLocalDiskSpace(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	// Get temp directory
+	tmpDir := os.TempDir()
+
+	// Check available space (platform-specific)
+	var availableGB float64
+	var err error
+
+	if runtime.GOOS == "windows" {
+		// Windows - just skip this check
+		result.Passed = true
+		result.Message = "Disk space check skipped on Windows"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	// Unix-like systems. GNU df takes -BG; BSD/macOS df uses -g instead.
+	cmd := exec.CommandContext(ctx, "df", "-BG", tmpDir)
+	output, err := cmd.Output()
+	if err != nil {
+		// Try alternative format
+		cmd = exec.CommandContext(ctx, "df", "-g", tmpDir)
+		output, err = cmd.Output()
+	}
+
+	if err != nil {
+		result.Passed = true // Don't fail on check errors
+		result.Message = "Could not determine disk space"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	// Parse output. NOTE(review): assumes column 4 (index 3) is the
+	// "Available" field — true for common df layouts, but confirm for
+	// filesystems whose df output wraps long device names onto two lines.
+	lines := strings.Split(string(output), "\n")
+	if len(lines) >= 2 {
+		fields := strings.Fields(lines[1])
+		if len(fields) >= 4 {
+			_, _ = fmt.Sscanf(strings.TrimSuffix(fields[3], "G"), "%f", &availableGB)
+		}
+	}
+
+	result.Details["temp_dir"] = tmpDir
+	result.Details["available_gb"] = fmt.Sprintf("%.1f", availableGB)
+
+	if availableGB < 1 {
+		result.Passed = false
+		result.Message = fmt.Sprintf("Low disk space: %.1fGB available", availableGB)
+		result.Suggestion = "Free up disk space in your temp directory"
+	} else if availableGB < 5 {
+		result.Passed = true
+		result.Message = fmt.Sprintf("Disk space is low: %.1fGB available", availableGB)
+	} else {
+		result.Passed = true
+		result.Message = fmt.Sprintf("Sufficient disk space: %.1fGB available", availableGB)
+	}
+
+	result.Duration = time.Since(start)
+	return result
+}
+
+// checkGitAvailable confirms a git binary is on PATH and records its version.
+func checkGitAvailable(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	path, err := exec.LookPath("git")
+	if err != nil {
+		result.Passed = false
+		result.Message = "Git not found"
+		result.Suggestion = "Install Git for better workspace detection"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	// Get git version
+	cmd := exec.CommandContext(ctx, "git", "--version")
+	output, _ := cmd.Output()
+	version := strings.TrimSpace(string(output))
+
+	result.Passed = true
+	result.Message = "Git available"
+	result.Details["path"] = path
+	if version != "" {
+		result.Details["version"] = version
+	}
+	result.Duration = time.Since(start)
+	return result
+}
+
+// checkNodeAvailable looks for local node/npm binaries. A miss is only a
+// warning: agents are installed inside the sandbox, not locally.
+func checkNodeAvailable(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	nodePath, nodeErr := exec.LookPath("node")
+	npmPath, npmErr := exec.LookPath("npm")
+
+	if nodeErr != nil && npmErr != nil {
+		result.Passed = false
+		result.Message = "Node.js not found locally"
+		result.Suggestion = "Node.js is optional - agents will be installed in the sandbox"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	if nodePath != "" {
+		result.Details["node_path"] = nodePath
+		cmd := exec.CommandContext(ctx, "node", "--version")
+		if output, err := cmd.Output(); err == nil {
+			result.Details["node_version"] = strings.TrimSpace(string(output))
+		}
+	}
+
+	if npmPath != "" {
+		result.Details["npm_path"] = npmPath
+		cmd := exec.CommandContext(ctx, "npm", "--version")
+		if output, err := cmd.Output(); err == nil {
+			result.Details["npm_version"] = strings.TrimSpace(string(output))
+		}
+	}
+
+	result.Passed = true
+	result.Message = "Node.js available"
+	result.Duration = time.Since(start)
+	return result
+}
+
+// checkTerminal reports whether stdin looks like an interactive terminal,
+// using the character-device heuristic on stdin's file mode.
+func checkTerminal(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	// Check if stdin is a terminal
+	fi, err := os.Stdin.Stat()
+	if err != nil {
+		result.Passed = true
+		result.Message = "Could not determine terminal status"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	isTerminal := (fi.Mode() & os.ModeCharDevice) != 0
+	result.Details["is_terminal"] = fmt.Sprintf("%v", isTerminal)
+
+	if term := os.Getenv("TERM"); term != "" {
+		result.Details["TERM"] = term
+	}
+
+	if !isTerminal {
+		result.Passed = false
+		result.Message = "Not running in an interactive terminal"
+		result.Suggestion = "Some features may not work correctly in non-interactive mode"
+	} else {
+		result.Passed = true
+		result.Message = "Running in interactive terminal"
+	}
+
+	result.Duration = time.Since(start)
+	return result
+}
+
+// checkConfigValid loads the amux config and records where it lives.
+func checkConfigValid(ctx context.Context) PreflightResult {
+	start := time.Now()
+	result := PreflightResult{Details: make(map[string]string)}
+
+	cfg, err := LoadConfig()
+	if err != nil {
+		result.Passed = false
+		result.Message = fmt.Sprintf("Config error: %v", err)
+		result.Suggestion = "Run `amux setup` to fix configuration"
+		result.Duration = time.Since(start)
+		return result
+	}
+
+	// Get config path. NOTE(review): path is hardcoded here; presumably it
+	// matches config.DefaultPaths() used elsewhere — verify they agree.
+	home, _ := os.UserHomeDir()
+	configPath := filepath.Join(home, ".amux", "config.json")
+	result.Details["config_path"] = configPath
+
+	if cfg.DefaultSnapshotName != "" {
+		result.Details["default_snapshot"] = cfg.DefaultSnapshotName
+	}
+
+	result.Passed = true
+	result.Message = "Configuration valid"
+	result.Duration = time.Since(start)
+	return result
+}
+
+// FormatPreflightReport returns a human-readable preflight report.
+func FormatPreflightReport(report *PreflightReport) string { + var b strings.Builder + + // Overall status + if report.Passed { + b.WriteString("\033[32m✓ Preflight checks passed\033[0m") + } else { + b.WriteString("\033[31m✗ Preflight checks failed\033[0m") + } + b.WriteString(fmt.Sprintf(" (completed in %s)\n\n", report.Duration.Round(time.Millisecond))) + + // Errors + if len(report.Errors) > 0 { + b.WriteString("\033[31mErrors:\033[0m\n") + for _, err := range report.Errors { + b.WriteString(fmt.Sprintf(" ✗ %s\n", err)) + } + b.WriteString("\n") + } + + // Warnings + if len(report.Warnings) > 0 { + b.WriteString("\033[33mWarnings:\033[0m\n") + for _, warn := range report.Warnings { + b.WriteString(fmt.Sprintf(" ! %s\n", warn)) + } + b.WriteString("\n") + } + + // Detailed results + b.WriteString("Details:\n") + for name, result := range report.Checks { + icon := "\033[32m✓\033[0m" + if !result.Passed { + icon = "\033[31m✗\033[0m" + } + b.WriteString(fmt.Sprintf(" %s %s: %s\n", icon, name, result.Message)) + + if result.Suggestion != "" && !result.Passed { + b.WriteString(fmt.Sprintf(" Suggestion: %s\n", result.Suggestion)) + } + } + + return b.String() +} + +// QuickPreflight runs only the required checks for fast startup. +func QuickPreflight() error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Check API key + if os.Getenv("DAYTONA_API_KEY") == "" { + return NewSandboxError(ErrCodePreflight, "api_key", nil). + WithSuggestion("Run `amux setup` to configure your Daytona API key") + } + + // Check SSH + if _, err := exec.LookPath("ssh"); err != nil { + return NewSandboxError(ErrCodePreflight, "ssh", err). 
+ WithSuggestion("Install OpenSSH client") + } + + // Quick network check (with short timeout) + client := &http.Client{Timeout: 5 * time.Second} + req, _ := http.NewRequestWithContext(ctx, "HEAD", "https://api.daytona.io", nil) + if _, err := client.Do(req); err != nil { + return NewSandboxError(ErrCodeNetwork, "connectivity", err). + WithSuggestion("Check your internet connection") + } + + return nil +} diff --git a/internal/sandbox/provider.go b/internal/sandbox/provider.go new file mode 100644 index 00000000..5d2bdfdc --- /dev/null +++ b/internal/sandbox/provider.go @@ -0,0 +1,345 @@ +package sandbox + +import ( + "context" + "io" + "time" +) + +// Provider defines the interface for sandbox providers. +// This abstraction allows amux to support multiple backends while maintaining a consistent API. +type Provider interface { + // Name returns the provider identifier (e.g., "daytona", "e2b", "modal") + Name() string + + // CreateSandbox creates a new sandbox with the given configuration + CreateSandbox(ctx context.Context, config SandboxCreateConfig) (RemoteSandbox, error) + + // GetSandbox retrieves an existing sandbox by ID + GetSandbox(ctx context.Context, id string) (RemoteSandbox, error) + + // ListSandboxes returns all sandboxes for this provider + ListSandboxes(ctx context.Context) ([]RemoteSandbox, error) + + // DeleteSandbox removes a sandbox + DeleteSandbox(ctx context.Context, id string) error + + // Volumes returns the volume manager for persistent storage + Volumes() VolumeManager + + // Snapshots returns the snapshot manager for pre-built images + Snapshots() SnapshotManager + + // SupportsFeature checks if provider supports a specific feature + SupportsFeature(feature ProviderFeature) bool +} + +// ProviderFeature represents optional provider capabilities +type ProviderFeature string + +const ( + ProviderDaytona = "daytona" + // DefaultProviderName is the provider used when none is specified. 
+ DefaultProviderName = ProviderDaytona + // FeatureVolumes indicates persistent volume support + FeatureVolumes ProviderFeature = "volumes" + // FeatureSnapshots indicates snapshot/image support + FeatureSnapshots ProviderFeature = "snapshots" + // FeatureDesktop indicates remote desktop (VNC) support + FeatureDesktop ProviderFeature = "desktop" + // FeaturePreviewURLs indicates public URL preview support + FeaturePreviewURLs ProviderFeature = "preview_urls" + // FeatureSSHAccess indicates SSH access support + FeatureSSHAccess ProviderFeature = "ssh_access" + // FeatureExecSessions indicates exec session listing/attach support + FeatureExecSessions ProviderFeature = "exec_sessions" + // FeatureCheckpoints indicates checkpoint/restore support + FeatureCheckpoints ProviderFeature = "checkpoints" + // FeatureNetworkPolicy indicates network policy support + FeatureNetworkPolicy ProviderFeature = "network_policy" + // FeatureTCPProxy indicates raw TCP proxy support + FeatureTCPProxy ProviderFeature = "tcp_proxy" +) + +// SandboxCreateConfig defines configuration for creating a sandbox +type SandboxCreateConfig struct { + // Name is an optional provider-specific identifier. + Name string + // Agent is the coding agent to run (claude, codex, etc.) 
+ Agent Agent + // Snapshot is the pre-built image to use (optional) + Snapshot string + // EnvVars are environment variables to inject + EnvVars map[string]string + // Labels are metadata labels for the sandbox + Labels map[string]string + // Volumes are persistent volumes to mount + Volumes []VolumeMount + // AutoStopMinutes is the idle timeout before auto-stop (0 = disabled) + AutoStopMinutes int32 + // AutoDeleteMinutes is the idle timeout before auto-delete (0 = delete immediately after stop) + AutoDeleteMinutes int32 + // Ephemeral deletes the sandbox after it stops + Ephemeral bool + // Resources specifies CPU/memory requirements (provider-specific) + Resources *ResourceConfig +} + +// ResourceConfig specifies compute resources +type ResourceConfig struct { + CPUCores float32 + MemoryGB float32 +} + +// VolumeMount defines how a volume is mounted in a sandbox +type VolumeMount struct { + VolumeID string + MountPath string + Subpath string + ReadOnly bool +} + +// RemoteSandbox represents a running or stopped sandbox instance +type RemoteSandbox interface { + // ID returns the unique sandbox identifier + ID() string + + // State returns current state (pending, started, stopped, error) + State() SandboxState + + // Labels returns the sandbox metadata labels + Labels() map[string]string + + // Start starts a stopped sandbox + Start(ctx context.Context) error + + // Stop stops a running sandbox + Stop(ctx context.Context) error + + // WaitReady waits until sandbox is ready for commands + WaitReady(ctx context.Context, timeout time.Duration) error + + // Exec executes a command and returns the result + Exec(ctx context.Context, cmd string, opts *ExecOptions) (*ExecResult, error) + + // ExecInteractive runs an interactive session with PTY + ExecInteractive(ctx context.Context, cmd string, stdin io.Reader, stdout, stderr io.Writer, opts *ExecOptions) (int, error) + + // UploadFile uploads a file to the sandbox + UploadFile(ctx context.Context, localPath, remotePath 
string) error + + // DownloadFile downloads a file from the sandbox + DownloadFile(ctx context.Context, remotePath, localPath string) error + + // GetPreviewURL returns a public URL for a port (if supported) + GetPreviewURL(ctx context.Context, port int) (string, error) + + // Refresh updates sandbox state from the provider + Refresh(ctx context.Context) error +} + +// SandboxState represents the lifecycle state of a sandbox +type SandboxState string + +const ( + StatePending SandboxState = "pending" + StateStarted SandboxState = "started" + StateStopped SandboxState = "stopped" + StateError SandboxState = "error" +) + +// ExecOptions configures command execution +type ExecOptions struct { + Cwd string + Env map[string]string + Timeout time.Duration + User string +} + +// ExecResult contains command execution results +type ExecResult struct { + ExitCode int + Stdout string + Stderr string +} + +// ExecSession describes a running or historical exec session. +type ExecSession struct { + ID string + Command string + TTY bool + Active bool + Workdir string + CreatedAt time.Time + LastActivity time.Time +} + +// ExecSessionManager provides optional exec session management. +type ExecSessionManager interface { + ListExecSessions(ctx context.Context) ([]ExecSession, error) + AttachExecSession(ctx context.Context, id string, stdin io.Reader, stdout, stderr io.Writer, opts *ExecOptions) (int, error) + KillExecSession(ctx context.Context, id string, signal string, timeout time.Duration) error +} + +// CheckpointInfo describes a sandbox checkpoint. +type CheckpointInfo struct { + ID string + CreatedAt time.Time + SourceID string + Comment string +} + +// CheckpointEvent represents a streaming checkpoint event. +type CheckpointEvent struct { + Type string + Message string + Timestamp time.Time +} + +// CheckpointManager provides optional checkpoint/restore operations. 
+type CheckpointManager interface { + CreateCheckpoint(ctx context.Context, comment string, onEvent func(CheckpointEvent)) (*CheckpointInfo, error) + ListCheckpoints(ctx context.Context) ([]CheckpointInfo, error) + GetCheckpoint(ctx context.Context, id string) (*CheckpointInfo, error) + RestoreCheckpoint(ctx context.Context, id string, onEvent func(CheckpointEvent)) error +} + +// NetworkPolicyRule describes a network policy rule. +type NetworkPolicyRule struct { + Domain string + Action string // allow|deny + Include string // optional preset include +} + +// NetworkPolicy describes outbound network policy rules. +type NetworkPolicy struct { + Rules []NetworkPolicyRule +} + +// NetworkPolicyManager provides optional network policy support. +type NetworkPolicyManager interface { + GetNetworkPolicy(ctx context.Context) (*NetworkPolicy, error) + SetNetworkPolicy(ctx context.Context, policy NetworkPolicy) error +} + +// TCPProxyManager provides optional raw TCP proxy support. +type TCPProxyManager interface { + OpenTCPProxy(ctx context.Context, host string, port int) (io.ReadWriteCloser, error) +} + +// VolumeManager manages persistent volumes +type VolumeManager interface { + // Create creates a new volume + Create(ctx context.Context, name string) (*VolumeInfo, error) + + // Get retrieves a volume by name + Get(ctx context.Context, name string) (*VolumeInfo, error) + + // GetOrCreate gets an existing volume or creates it + GetOrCreate(ctx context.Context, name string) (*VolumeInfo, error) + + // Delete removes a volume + Delete(ctx context.Context, name string) error + + // List returns all volumes + List(ctx context.Context) ([]*VolumeInfo, error) + + // WaitReady waits for volume to be ready + WaitReady(ctx context.Context, name string, timeout time.Duration) (*VolumeInfo, error) +} + +// VolumeInfo contains volume metadata +type VolumeInfo struct { + ID string + Name string + State string + Size int64 +} + +// SnapshotManager manages pre-built sandbox images 
+type SnapshotManager interface { + // Create creates a new snapshot + Create(ctx context.Context, name string, baseImage string, onLogs func(string)) (*SnapshotInfo, error) + + // Get retrieves a snapshot by name + Get(ctx context.Context, name string) (*SnapshotInfo, error) + + // Delete removes a snapshot + Delete(ctx context.Context, name string) error + + // List returns all snapshots + List(ctx context.Context) ([]*SnapshotInfo, error) +} + +// SnapshotInfo contains snapshot metadata +type SnapshotInfo struct { + ID string + Name string + State string +} + +// ProviderRegistry manages available sandbox providers +type ProviderRegistry struct { + providers map[string]Provider + defaultID string +} + +// NewProviderRegistry creates a new provider registry +func NewProviderRegistry() *ProviderRegistry { + return &ProviderRegistry{ + providers: make(map[string]Provider), + } +} + +// Register adds a provider to the registry +func (r *ProviderRegistry) Register(p Provider) { + r.providers[p.Name()] = p +} + +// SetDefault sets the default provider +func (r *ProviderRegistry) SetDefault(name string) { + r.defaultID = name +} + +// Get returns a provider by name +func (r *ProviderRegistry) Get(name string) (Provider, bool) { + p, ok := r.providers[name] + return p, ok +} + +// Default returns the default provider +func (r *ProviderRegistry) Default() (Provider, bool) { + if r.defaultID == "" { + return nil, false + } + return r.Get(r.defaultID) +} + +// List returns all registered provider names +func (r *ProviderRegistry) List() []string { + names := make([]string, 0, len(r.providers)) + for name := range r.providers { + names = append(names, name) + } + return names +} + +// Optional provider interfaces + +// DesktopStatus reports desktop/VNC availability. +type DesktopStatus struct { + Status string +} + +// DesktopAccess is an optional interface for providers that support remote desktops. 
+type DesktopAccess interface { + DesktopStatus(ctx context.Context) (*DesktopStatus, error) + StartDesktop(ctx context.Context) error + StopDesktop(ctx context.Context) error +} + +// SandboxResources is an optional interface for providers that expose resource details. +type SandboxResources interface { + CPUCores() float32 + MemoryGB() float32 +} diff --git a/internal/sandbox/provider_daytona.go b/internal/sandbox/provider_daytona.go new file mode 100644 index 00000000..2ed2003c --- /dev/null +++ b/internal/sandbox/provider_daytona.go @@ -0,0 +1,384 @@ +package sandbox + +import ( + "context" + "errors" + "fmt" + "io" + "os/exec" + "strings" + "time" + + "github.com/andyrewlee/amux/internal/daytona" +) + +type daytonaProvider struct { + client *daytona.Daytona +} + +func newDaytonaProvider(client *daytona.Daytona) Provider { + return &daytonaProvider{client: client} +} + +func (p *daytonaProvider) Name() string { return ProviderDaytona } + +func (p *daytonaProvider) CreateSandbox(ctx context.Context, config SandboxCreateConfig) (RemoteSandbox, error) { + params := &daytona.CreateSandboxParams{ + Language: "typescript", + Snapshot: config.Snapshot, + EnvVars: config.EnvVars, + Labels: config.Labels, + AutoStopInterval: config.AutoStopMinutes, + AutoDeleteInterval: config.AutoDeleteMinutes, + Ephemeral: config.Ephemeral, + } + if len(config.Volumes) > 0 { + volumes := make([]daytona.VolumeMount, 0, len(config.Volumes)) + for _, mount := range config.Volumes { + volumes = append(volumes, daytona.VolumeMount{ + VolumeID: mount.VolumeID, + MountPath: mount.MountPath, + Subpath: mount.Subpath, + }) + } + params.Volumes = volumes + } + + sb, err := p.client.Create(params, nil) + if err != nil { + return nil, err + } + return &daytonaSandbox{inner: sb}, nil +} + +func (p *daytonaProvider) GetSandbox(ctx context.Context, id string) (RemoteSandbox, error) { + sb, err := p.client.Get(id) + if err != nil { + return nil, err + } + return &daytonaSandbox{inner: sb}, nil +} + 
+func (p *daytonaProvider) ListSandboxes(ctx context.Context) ([]RemoteSandbox, error) { + sandboxes, err := p.client.List() + if err != nil { + return nil, err + } + out := make([]RemoteSandbox, 0, len(sandboxes)) + for _, sb := range sandboxes { + out = append(out, &daytonaSandbox{inner: sb}) + } + return out, nil +} + +func (p *daytonaProvider) DeleteSandbox(ctx context.Context, id string) error { + sb, err := p.client.Get(id) + if err != nil { + return err + } + return p.client.Delete(sb) +} + +func (p *daytonaProvider) Volumes() VolumeManager { + return &daytonaVolumeManager{client: p.client} +} + +func (p *daytonaProvider) Snapshots() SnapshotManager { + return &daytonaSnapshotManager{client: p.client} +} + +func (p *daytonaProvider) SupportsFeature(feature ProviderFeature) bool { + switch feature { + case FeatureVolumes, FeatureSnapshots, FeaturePreviewURLs, FeatureSSHAccess, FeatureDesktop: + return true + default: + return false + } +} + +type daytonaSandbox struct { + inner *daytona.Sandbox +} + +func (s *daytonaSandbox) ID() string { return s.inner.ID } + +func (s *daytonaSandbox) State() SandboxState { + switch s.inner.State { + case "pending": + return StatePending + case "started": + return StateStarted + case "stopped": + return StateStopped + case "error": + return StateError + default: + return SandboxState(s.inner.State) + } +} + +func (s *daytonaSandbox) Labels() map[string]string { return s.inner.Labels } + +func (s *daytonaSandbox) Start(ctx context.Context) error { + timeout := timeoutFromContext(ctx, 60*time.Second) + return s.inner.Start(timeout) +} + +func (s *daytonaSandbox) Stop(ctx context.Context) error { + timeout := timeoutFromContext(ctx, 60*time.Second) + return s.inner.Stop(timeout) +} + +func (s *daytonaSandbox) WaitReady(ctx context.Context, timeout time.Duration) error { + if deadline, ok := ctx.Deadline(); ok { + remaining := time.Until(deadline) + if remaining < timeout || timeout == 0 { + timeout = remaining + } + } + return 
s.inner.WaitUntilStarted(timeout) +} + +func (s *daytonaSandbox) Exec(ctx context.Context, cmd string, opts *ExecOptions) (*ExecResult, error) { + var options daytona.ExecuteCommandOptions + if opts != nil { + options.Cwd = opts.Cwd + options.Env = opts.Env + options.Timeout = opts.Timeout + } + resp, err := s.inner.Process.ExecuteCommand(cmd, options) + if err != nil { + return nil, err + } + result := &ExecResult{ExitCode: int(resp.ExitCode)} + if resp.Artifacts != nil && resp.Artifacts.Stdout != "" { + result.Stdout = resp.Artifacts.Stdout + } else { + result.Stdout = resp.Result + } + return result, nil +} + +func (s *daytonaSandbox) ExecInteractive(ctx context.Context, cmd string, stdin io.Reader, stdout, stderr io.Writer, opts *ExecOptions) (int, error) { + sshAccess, err := s.inner.CreateSshAccess(60) + if err != nil { + return 1, err + } + defer func() { _ = s.inner.RevokeSshAccess(sshAccess.Token) }() + + runnerDomain, err := waitForSshAccessDaytona(s.inner, sshAccess.Token) + if err != nil { + return 1, err + } + sshHost := runnerDomain + if sshHost == "" { + sshHost = getSSHHost() + } + target := fmt.Sprintf("%s@%s", sshAccess.Token, sshHost) + + remoteCommand := cmd + if opts != nil { + if len(opts.Env) > 0 { + exports := BuildEnvExports(opts.Env) + if len(exports) > 0 { + remoteCommand = strings.Join(exports, "; ") + "; " + remoteCommand + } + } + if opts.Cwd != "" { + remoteCommand = fmt.Sprintf("cd %s && %s", ShellQuote(opts.Cwd), remoteCommand) + } + } + remoteCommand = fmt.Sprintf("bash -lc %s", ShellQuote(remoteCommand)) + + sshArgs := []string{ + "-tt", + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "LogLevel=ERROR", + target, + remoteCommand, + } + + cmdExec := exec.Command("ssh", sshArgs...) 
+ cmdExec.Stdin = stdin + cmdExec.Stdout = stdout + cmdExec.Stderr = stderr + if err := cmdExec.Start(); err != nil { + if errors.Is(err, exec.ErrNotFound) { + return 1, errors.New("ssh is required to run interactive sessions. Install OpenSSH and try again.") + } + return 1, err + } + if err := cmdExec.Wait(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return exitErr.ExitCode(), nil + } + return 1, err + } + return 0, nil +} + +func (s *daytonaSandbox) UploadFile(ctx context.Context, localPath, remotePath string) error { + return s.inner.FS.UploadFile(localPath, remotePath, timeoutFromContext(ctx, 0)) +} + +func (s *daytonaSandbox) DownloadFile(ctx context.Context, remotePath, localPath string) error { + return s.inner.FS.DownloadFileTo(remotePath, localPath, timeoutFromContext(ctx, 0)) +} + +func (s *daytonaSandbox) GetPreviewURL(ctx context.Context, port int) (string, error) { + preview, err := s.inner.GetPreviewLink(port) + if err != nil { + return "", err + } + if preview == nil || preview.URL == "" { + return "", nil + } + if preview.Token == "" || strings.Contains(preview.URL, "DAYTONA_SANDBOX_AUTH_KEY=") { + return preview.URL, nil + } + separator := "?" + if strings.Contains(preview.URL, "?") { + separator = "&" + } + return preview.URL + separator + "DAYTONA_SANDBOX_AUTH_KEY=" + preview.Token, nil +} + +func (s *daytonaSandbox) Refresh(ctx context.Context) error { + return s.inner.RefreshData() +} + +// Optional interfaces for richer CLI output. +func (s *daytonaSandbox) CPUCores() float32 { return s.inner.CPU } +func (s *daytonaSandbox) MemoryGB() float32 { return s.inner.Memory } + +// Desktop support. 
+func (s *daytonaSandbox) DesktopStatus(ctx context.Context) (*DesktopStatus, error) { + status, err := s.inner.GetSandboxUseStatus() + if err != nil { + return nil, err + } + if status == nil { + return nil, nil + } + return &DesktopStatus{Status: status.Status}, nil +} + +func (s *daytonaSandbox) StartDesktop(ctx context.Context) error { + _, err := s.inner.StartComputerUse() + return err +} + +func (s *daytonaSandbox) StopDesktop(ctx context.Context) error { + _, err := s.inner.StopComputerUse() + return err +} + +func timeoutFromContext(ctx context.Context, fallback time.Duration) time.Duration { + if ctx == nil { + return fallback + } + if deadline, ok := ctx.Deadline(); ok { + remaining := time.Until(deadline) + if remaining <= 0 { + return 0 + } + if fallback == 0 || remaining < fallback { + return remaining + } + } + return fallback +} + +type daytonaVolumeManager struct { + client *daytona.Daytona +} + +func (v *daytonaVolumeManager) Create(ctx context.Context, name string) (*VolumeInfo, error) { + volume, err := v.client.Volume.Get(name, true) + if err != nil { + return nil, err + } + return &VolumeInfo{ID: volume.ID, Name: volume.Name, State: volume.State}, nil +} + +func (v *daytonaVolumeManager) Get(ctx context.Context, name string) (*VolumeInfo, error) { + volume, err := v.client.Volume.Get(name, false) + if err != nil { + return nil, err + } + return &VolumeInfo{ID: volume.ID, Name: volume.Name, State: volume.State}, nil +} + +func (v *daytonaVolumeManager) GetOrCreate(ctx context.Context, name string) (*VolumeInfo, error) { + volume, err := v.client.Volume.Get(name, true) + if err != nil { + return nil, err + } + return &VolumeInfo{ID: volume.ID, Name: volume.Name, State: volume.State}, nil +} + +func (v *daytonaVolumeManager) Delete(ctx context.Context, name string) error { + return fmt.Errorf("volume delete is not supported by the Daytona API") +} + +func (v *daytonaVolumeManager) List(ctx context.Context) ([]*VolumeInfo, error) { + return nil, 
fmt.Errorf("volume listing is not supported by the Daytona API") +} + +func (v *daytonaVolumeManager) WaitReady(ctx context.Context, name string, timeout time.Duration) (*VolumeInfo, error) { + options := &daytona.VolumeWaitOptions{} + if timeout > 0 { + options.Timeout = timeout + } + volume, err := v.client.Volume.WaitForReady(name, options) + if err != nil { + return nil, err + } + return &VolumeInfo{ID: volume.ID, Name: volume.Name, State: volume.State}, nil +} + +type daytonaSnapshotManager struct { + client *daytona.Daytona +} + +func (s *daytonaSnapshotManager) Create(ctx context.Context, name string, baseImage string, onLogs func(string)) (*SnapshotInfo, error) { + image := BuildSnapshotImage(DefaultSnapshotAgents, baseImage) + snap, err := s.client.Snapshot.Create(daytona.CreateSnapshotParams{ + Name: name, + Image: image, + }, &daytona.SnapshotCreateOptions{OnLogs: onLogs}) + if err != nil { + return nil, err + } + return &SnapshotInfo{ID: snap.ID, Name: snap.Name, State: snap.State}, nil +} + +func (s *daytonaSnapshotManager) Get(ctx context.Context, name string) (*SnapshotInfo, error) { + snapshots, err := s.client.Snapshot.List() + if err != nil { + return nil, err + } + for _, snap := range snapshots { + if snap.Name == name { + return &SnapshotInfo{ID: snap.ID, Name: snap.Name, State: snap.State}, nil + } + } + return nil, fmt.Errorf("snapshot %q not found", name) +} + +func (s *daytonaSnapshotManager) Delete(ctx context.Context, name string) error { + return fmt.Errorf("snapshot delete is not supported by the Daytona API") +} + +func (s *daytonaSnapshotManager) List(ctx context.Context) ([]*SnapshotInfo, error) { + snapshots, err := s.client.Snapshot.List() + if err != nil { + return nil, err + } + out := make([]*SnapshotInfo, 0, len(snapshots)) + for _, snap := range snapshots { + out = append(out, &SnapshotInfo{ID: snap.ID, Name: snap.Name, State: snap.State}) + } + return out, nil +} diff --git a/internal/sandbox/providers.go 
b/internal/sandbox/providers.go new file mode 100644 index 00000000..cf9c142c --- /dev/null +++ b/internal/sandbox/providers.go @@ -0,0 +1,45 @@ +package sandbox + +import ( + "fmt" + "strings" +) + +// DefaultProviderRegistry builds the provider registry from config. +func DefaultProviderRegistry(cfg Config) (*ProviderRegistry, map[string]error) { + registry := NewProviderRegistry() + errs := map[string]error{} + + if client, err := GetDaytonaClient(); err == nil { + registry.Register(newDaytonaProvider(client)) + } else { + errs[ProviderDaytona] = err + } + + return registry, errs +} + +// ResolveProvider returns the provider instance and resolved name. +func ResolveProvider(cfg Config, cwd string, override string) (Provider, string, error) { + name := ResolveProviderName(cfg, override) + if name == "" { + name = ProviderDaytona + } + + registry, errs := DefaultProviderRegistry(cfg) + if registry == nil { + return nil, name, fmt.Errorf("no providers registered") + } + provider, ok := registry.Get(name) + if !ok { + if err, ok := errs[name]; ok { + return nil, name, err + } + available := registry.List() + if len(available) == 0 { + return nil, name, fmt.Errorf("provider %q is unavailable (no providers registered)", name) + } + return nil, name, fmt.Errorf("provider %q is unavailable. Available: %s", name, strings.Join(available, ", ")) + } + return provider, name, nil +} diff --git a/internal/sandbox/retry.go b/internal/sandbox/retry.go new file mode 100644 index 00000000..ea117716 --- /dev/null +++ b/internal/sandbox/retry.go @@ -0,0 +1,329 @@ +package sandbox + +import ( + "context" + "errors" + "fmt" + "math" + "math/rand" + "sync" + "time" +) + +// RetryConfig configures retry behavior. 
+type RetryConfig struct { + MaxAttempts int // Maximum number of attempts (0 = infinite) + InitialDelay time.Duration // Initial delay between retries + MaxDelay time.Duration // Maximum delay between retries + Multiplier float64 // Multiplier for exponential backoff + Jitter float64 // Jitter factor (0-1) to add randomness + RetryableErrors []error // Specific errors that should be retried (nil = retry all) +} + +// DefaultRetryConfig returns sensible defaults for retry behavior. +func DefaultRetryConfig() RetryConfig { + return RetryConfig{ + MaxAttempts: 5, + InitialDelay: 1 * time.Second, + MaxDelay: 30 * time.Second, + Multiplier: 2.0, + Jitter: 0.1, + } +} + +// SSHRetryConfig returns retry config optimized for SSH connections. +func SSHRetryConfig() RetryConfig { + return RetryConfig{ + MaxAttempts: 10, + InitialDelay: 500 * time.Millisecond, + MaxDelay: 15 * time.Second, + Multiplier: 1.5, + Jitter: 0.2, + } +} + +// NetworkRetryConfig returns retry config for network operations. +func NetworkRetryConfig() RetryConfig { + return RetryConfig{ + MaxAttempts: 3, + InitialDelay: 2 * time.Second, + MaxDelay: 10 * time.Second, + Multiplier: 2.0, + Jitter: 0.1, + } +} + +// RetryResult contains the result of a retry operation. +type RetryResult struct { + Attempts int + Duration time.Duration + Error error +} + +// RetryFunc is a function that can be retried. +type RetryFunc func(ctx context.Context, attempt int) error + +// Retry executes a function with exponential backoff retry. 
+func Retry(ctx context.Context, cfg RetryConfig, fn RetryFunc) RetryResult { + start := time.Now() + result := RetryResult{} + + for attempt := 1; cfg.MaxAttempts == 0 || attempt <= cfg.MaxAttempts; attempt++ { + result.Attempts = attempt + + // Check context before attempting + if ctx.Err() != nil { + result.Error = ctx.Err() + result.Duration = time.Since(start) + return result + } + + // Execute the function + err := fn(ctx, attempt) + if err == nil { + result.Duration = time.Since(start) + return result + } + + // Check if error is retryable + if !isRetryableError(err, cfg.RetryableErrors) { + result.Error = err + result.Duration = time.Since(start) + return result + } + + // Check if we've exhausted attempts + if cfg.MaxAttempts > 0 && attempt >= cfg.MaxAttempts { + result.Error = fmt.Errorf("max retries (%d) exceeded: %w", cfg.MaxAttempts, err) + result.Duration = time.Since(start) + return result + } + + // Calculate delay with exponential backoff and jitter + delay := calculateDelay(cfg, attempt) + + // Log retry attempt + LogDebug("retrying operation", + "attempt", attempt, + "maxAttempts", cfg.MaxAttempts, + "delay", delay, + "error", err, + ) + + // Wait before next attempt + select { + case <-ctx.Done(): + result.Error = ctx.Err() + result.Duration = time.Since(start) + return result + case <-time.After(delay): + // Continue to next attempt + } + } + + result.Duration = time.Since(start) + return result +} + +// RetryWithResult is like Retry but returns a value. 
+func RetryWithResult[T any](ctx context.Context, cfg RetryConfig, fn func(ctx context.Context, attempt int) (T, error)) (T, RetryResult) { + var result T + start := time.Now() + retryResult := RetryResult{} + + for attempt := 1; cfg.MaxAttempts == 0 || attempt <= cfg.MaxAttempts; attempt++ { + retryResult.Attempts = attempt + + if ctx.Err() != nil { + retryResult.Error = ctx.Err() + retryResult.Duration = time.Since(start) + return result, retryResult + } + + var err error + result, err = fn(ctx, attempt) + if err == nil { + retryResult.Duration = time.Since(start) + return result, retryResult + } + + if !isRetryableError(err, cfg.RetryableErrors) { + retryResult.Error = err + retryResult.Duration = time.Since(start) + return result, retryResult + } + + if cfg.MaxAttempts > 0 && attempt >= cfg.MaxAttempts { + retryResult.Error = fmt.Errorf("max retries (%d) exceeded: %w", cfg.MaxAttempts, err) + retryResult.Duration = time.Since(start) + return result, retryResult + } + + delay := calculateDelay(cfg, attempt) + + select { + case <-ctx.Done(): + retryResult.Error = ctx.Err() + retryResult.Duration = time.Since(start) + return result, retryResult + case <-time.After(delay): + } + } + + retryResult.Duration = time.Since(start) + return result, retryResult +} + +func calculateDelay(cfg RetryConfig, attempt int) time.Duration { + // Exponential backoff: initialDelay * multiplier^(attempt-1) + delay := float64(cfg.InitialDelay) * math.Pow(cfg.Multiplier, float64(attempt-1)) + + // Apply jitter + if cfg.Jitter > 0 { + jitterRange := delay * cfg.Jitter + delay += (rand.Float64()*2 - 1) * jitterRange + } + + // Cap at max delay + if delay > float64(cfg.MaxDelay) { + delay = float64(cfg.MaxDelay) + } + + // Ensure non-negative + if delay < 0 { + delay = float64(cfg.InitialDelay) + } + + return time.Duration(delay) +} + +func isRetryableError(err error, retryableErrors []error) bool { + // If no specific errors defined, check if it's a SandboxError with Retryable flag + if 
len(retryableErrors) == 0 { + if se := GetSandboxError(err); se != nil { + return se.Retryable + } + // Default: retry all errors + return true + } + + // Check against specific retryable errors + for _, retryable := range retryableErrors { + if errors.Is(err, retryable) { + return true + } + } + return false +} + +// CircuitBreaker prevents repeated failures from overwhelming a service. +// It is safe for concurrent use. +type CircuitBreaker struct { + mu sync.Mutex + maxFailures int + resetTimeout time.Duration + failures int + lastFailure time.Time + state circuitState + halfOpenLimit int + halfOpenCount int +} + +type circuitState int + +const ( + circuitClosed circuitState = iota + circuitOpen + circuitHalfOpen +) + +// NewCircuitBreaker creates a new circuit breaker. +func NewCircuitBreaker(maxFailures int, resetTimeout time.Duration) *CircuitBreaker { + return &CircuitBreaker{ + maxFailures: maxFailures, + resetTimeout: resetTimeout, + state: circuitClosed, + halfOpenLimit: 1, + } +} + +// Execute runs a function through the circuit breaker. +func (cb *CircuitBreaker) Execute(fn func() error) error { + if !cb.canExecute() { + return errors.New("circuit breaker is open") + } + + err := fn() + cb.recordResult(err) + return err +} + +func (cb *CircuitBreaker) canExecute() bool { + cb.mu.Lock() + defer cb.mu.Unlock() + + switch cb.state { + case circuitClosed: + return true + case circuitOpen: + if time.Since(cb.lastFailure) > cb.resetTimeout { + cb.state = circuitHalfOpen + cb.halfOpenCount = 0 + return true + } + return false + case circuitHalfOpen: + return cb.halfOpenCount < cb.halfOpenLimit + } + return false +} + +func (cb *CircuitBreaker) recordResult(err error) { + cb.mu.Lock() + defer cb.mu.Unlock() + + if err == nil { + cb.onSuccessLocked() + } else { + cb.onFailureLocked() + } +} + +// onSuccessLocked must be called with cb.mu held. 
+func (cb *CircuitBreaker) onSuccessLocked() { + cb.failures = 0 + if cb.state == circuitHalfOpen { + cb.state = circuitClosed + } +} + +// onFailureLocked must be called with cb.mu held. +func (cb *CircuitBreaker) onFailureLocked() { + cb.failures++ + cb.lastFailure = time.Now() + + if cb.state == circuitHalfOpen { + cb.state = circuitOpen + return + } + + if cb.failures >= cb.maxFailures { + cb.state = circuitOpen + } +} + +// IsOpen returns true if the circuit breaker is open. +func (cb *CircuitBreaker) IsOpen() bool { + cb.mu.Lock() + defer cb.mu.Unlock() + return cb.state == circuitOpen +} + +// Reset resets the circuit breaker to closed state. +func (cb *CircuitBreaker) Reset() { + cb.mu.Lock() + defer cb.mu.Unlock() + cb.state = circuitClosed + cb.failures = 0 + cb.halfOpenCount = 0 +} diff --git a/internal/sandbox/retry_test.go b/internal/sandbox/retry_test.go new file mode 100644 index 00000000..a59838fc --- /dev/null +++ b/internal/sandbox/retry_test.go @@ -0,0 +1,289 @@ +package sandbox + +import ( + "context" + "errors" + "testing" + "time" +) + +func TestRetry(t *testing.T) { + t.Run("successful on first attempt", func(t *testing.T) { + attempts := 0 + fn := func(ctx context.Context, attempt int) error { + attempts++ + return nil + } + + result := Retry(context.Background(), DefaultRetryConfig(), fn) + if result.Error != nil { + t.Errorf("Expected no error, got %v", result.Error) + } + if attempts != 1 { + t.Errorf("Expected 1 attempt, got %d", attempts) + } + if result.Attempts != 1 { + t.Errorf("Expected 1 recorded attempt, got %d", result.Attempts) + } + }) + + t.Run("successful after retries", func(t *testing.T) { + attempts := 0 + fn := func(ctx context.Context, attempt int) error { + attempts++ + if attempts < 3 { + return errors.New("temporary error") + } + return nil + } + + cfg := RetryConfig{ + MaxAttempts: 5, + InitialDelay: 1 * time.Millisecond, + MaxDelay: 10 * time.Millisecond, + Multiplier: 2.0, + } + + result := 
Retry(context.Background(), cfg, fn) + if result.Error != nil { + t.Errorf("Expected no error, got %v", result.Error) + } + if attempts != 3 { + t.Errorf("Expected 3 attempts, got %d", attempts) + } + }) + + t.Run("failure after max attempts", func(t *testing.T) { + attempts := 0 + fn := func(ctx context.Context, attempt int) error { + attempts++ + return errors.New("persistent error") + } + + cfg := RetryConfig{ + MaxAttempts: 3, + InitialDelay: 1 * time.Millisecond, + MaxDelay: 10 * time.Millisecond, + Multiplier: 2.0, + } + + result := Retry(context.Background(), cfg, fn) + if result.Error == nil { + t.Error("Expected error") + } + if attempts != 3 { + t.Errorf("Expected 3 attempts, got %d", attempts) + } + }) + + t.Run("context cancellation", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + attempts := 0 + fn := func(ctx context.Context, attempt int) error { + attempts++ + if attempts == 2 { + cancel() + } + return errors.New("error") + } + + cfg := RetryConfig{ + MaxAttempts: 10, + InitialDelay: 1 * time.Millisecond, + MaxDelay: 10 * time.Millisecond, + Multiplier: 2.0, + } + + result := Retry(ctx, cfg, fn) + if result.Error == nil { + t.Error("Expected error due to cancellation") + } + // Should stop after context is cancelled + if attempts > 3 { + t.Errorf("Expected to stop soon after cancellation, got %d attempts", attempts) + } + }) +} + +func TestDefaultConfigs(t *testing.T) { + t.Run("default config", func(t *testing.T) { + cfg := DefaultRetryConfig() + if cfg.MaxAttempts <= 0 { + t.Error("Expected positive max attempts") + } + if cfg.InitialDelay <= 0 { + t.Error("Expected positive initial delay") + } + }) + + t.Run("SSH config", func(t *testing.T) { + cfg := SSHRetryConfig() + if cfg.MaxAttempts <= 0 { + t.Error("Expected positive max attempts") + } + // SSH should have more retries than default + if cfg.MaxAttempts < 3 { + t.Error("Expected SSH config to have at least 3 retries") + } + }) + + t.Run("network config", 
func(t *testing.T) { + cfg := NetworkRetryConfig() + if cfg.MaxAttempts <= 0 { + t.Error("Expected positive max attempts") + } + }) +} + +func TestRetryResult(t *testing.T) { + t.Run("duration tracking", func(t *testing.T) { + fn := func(ctx context.Context, attempt int) error { + time.Sleep(5 * time.Millisecond) + return nil + } + + cfg := RetryConfig{ + MaxAttempts: 1, + InitialDelay: 1 * time.Millisecond, + MaxDelay: 10 * time.Millisecond, + Multiplier: 2.0, + } + + result := Retry(context.Background(), cfg, fn) + if result.Duration < 5*time.Millisecond { + t.Errorf("Expected duration >= 5ms, got %v", result.Duration) + } + }) +} + +func TestCircuitBreaker(t *testing.T) { + t.Run("basic circuit breaker", func(t *testing.T) { + cb := NewCircuitBreaker(3, 100*time.Millisecond) + + // Should start closed + if cb.IsOpen() { + t.Error("Expected circuit to start closed") + } + + // Record failures + for i := 0; i < 3; i++ { + _ = cb.Execute(func() error { return errors.New("fail") }) + } + + // Should be open now + if !cb.IsOpen() { + t.Error("Expected circuit to be open after failures") + } + + // Should not allow execution when open + err := cb.Execute(func() error { return nil }) + if err == nil { + t.Error("Expected error when circuit is open") + } + }) + + t.Run("circuit breaker recovery", func(t *testing.T) { + cb := NewCircuitBreaker(2, 10*time.Millisecond) + + // Trigger circuit to open + _ = cb.Execute(func() error { return errors.New("fail") }) + _ = cb.Execute(func() error { return errors.New("fail") }) + + if !cb.IsOpen() { + t.Error("Expected circuit to be open") + } + + // Wait for reset timeout + time.Sleep(15 * time.Millisecond) + + // Execute a successful request + err := cb.Execute(func() error { return nil }) + if err != nil { + t.Errorf("Expected no error after timeout, got %v", err) + } + + // Should be closed now + if cb.IsOpen() { + t.Error("Expected circuit to be closed after success") + } + }) + + t.Run("circuit breaker reset", func(t 
*testing.T) { + cb := NewCircuitBreaker(2, 100*time.Millisecond) + + // Trigger circuit to open + _ = cb.Execute(func() error { return errors.New("fail") }) + _ = cb.Execute(func() error { return errors.New("fail") }) + + if !cb.IsOpen() { + t.Error("Expected circuit to be open") + } + + // Reset + cb.Reset() + + if cb.IsOpen() { + t.Error("Expected circuit to be closed after reset") + } + }) + + t.Run("circuit breaker concurrent access", func(t *testing.T) { + cb := NewCircuitBreaker(100, 100*time.Millisecond) + + // Run concurrent operations - this tests the mutex protection + done := make(chan bool) + for i := 0; i < 50; i++ { + go func() { + for j := 0; j < 20; j++ { + _ = cb.Execute(func() error { + if j%2 == 0 { + return errors.New("fail") + } + return nil + }) + _ = cb.IsOpen() + } + done <- true + }() + } + + // Wait for all goroutines + for i := 0; i < 50; i++ { + <-done + } + + // Just verify it didn't panic or deadlock + }) + + t.Run("circuit breaker concurrent reset", func(t *testing.T) { + cb := NewCircuitBreaker(5, 10*time.Millisecond) + + done := make(chan bool) + + // Concurrent failures + for i := 0; i < 10; i++ { + go func() { + for j := 0; j < 10; j++ { + _ = cb.Execute(func() error { return errors.New("fail") }) + } + done <- true + }() + } + + // Concurrent resets + for i := 0; i < 5; i++ { + go func() { + for j := 0; j < 5; j++ { + cb.Reset() + time.Sleep(time.Millisecond) + } + done <- true + }() + } + + // Wait for all + for i := 0; i < 15; i++ { + <-done + } + }) +} diff --git a/internal/sandbox/settings.go b/internal/sandbox/settings.go new file mode 100644 index 00000000..2a45c75c --- /dev/null +++ b/internal/sandbox/settings.go @@ -0,0 +1,209 @@ +package sandbox + +import ( + "fmt" + "os" + "path/filepath" +) + +// SettingsSyncConfig configures which settings to sync to sandboxes +type SettingsSyncConfig struct { + // Enabled indicates if settings sync is enabled (requires explicit opt-in) + Enabled bool `json:"enabled"` + // Files is 
the explicit list of settings files to sync (e.g., ["~/.claude/settings.json"]) + // When non-empty, only these files are synced (provides transparency) + Files []string `json:"files,omitempty"` + // Legacy per-agent flags (still supported for backwards compatibility) + // Claude syncs ~/.claude/settings.json and related config + Claude bool `json:"claude,omitempty"` + // Codex syncs ~/.codex/config.toml + Codex bool `json:"codex,omitempty"` + // Git syncs ~/.gitconfig (name, email, aliases - NOT credentials) + Git bool `json:"git,omitempty"` + // Shell syncs shell preferences (prompt, aliases from a safe subset) + Shell bool `json:"shell,omitempty"` +} + +// AgentSettingsPath describes where an agent stores its settings locally +type AgentSettingsPath struct { + Agent Agent + LocalPath string // Path relative to home directory + Description string // Human-readable description for consent UI + SafeKeys []string // If non-empty, only sync these keys (for partial sync) +} + +// KnownAgentSettings returns the settings paths for all supported agents +func KnownAgentSettings() []AgentSettingsPath { + return []AgentSettingsPath{ + { + Agent: AgentClaude, + LocalPath: ".claude/settings.json", + Description: "Claude Code settings (model preferences, features, permissions)", + }, + { + Agent: AgentCodex, + LocalPath: ".codex/config.toml", + Description: "Codex CLI settings (model preferences, editor config)", + }, + { + Agent: AgentOpenCode, + LocalPath: ".config/opencode/config.json", + Description: "OpenCode settings (model preferences, keybindings)", + }, + { + Agent: AgentAmp, + LocalPath: ".config/amp/config.json", + Description: "Amp settings (model preferences, workspace config)", + }, + { + Agent: AgentGemini, + LocalPath: ".gemini/settings.json", + Description: "Gemini CLI settings (model preferences)", + }, + } +} + +// GitConfigSafeKeys are the .gitconfig keys that are safe to sync (no credentials) +var GitConfigSafeKeys = []string{ + "user.name", + 
"user.email", + "core.editor", + "core.autocrlf", + "init.defaultBranch", + "pull.rebase", + "push.default", + "alias.*", +} + +// DetectedSetting represents a settings file found on the local system +type DetectedSetting struct { + Agent string // Agent name (e.g., "claude", "codex") or "git" for gitconfig + LocalPath string // Full path (e.g., "/home/user/.claude/settings.json") + HomePath string // Path relative to home (e.g., ".claude/settings.json") + Description string // Human-readable description + Exists bool // Whether the file exists locally + Size int64 // File size in bytes (0 if not exists) +} + +// DetectLocalSettings finds all settings files that exist on the local machine. +// It iterates through all registered agent plugins and checks for their settings files. +func DetectLocalSettings() []DetectedSetting { + homeDir, err := os.UserHomeDir() + if err != nil { + return nil + } + + var detected []DetectedSetting + + // Check all registered agent plugins + for _, plugin := range AllAgentPlugins() { + for _, sp := range plugin.SettingsPaths() { + fullPath := filepath.Join(homeDir, sp.LocalPath) + setting := DetectedSetting{ + Agent: plugin.Name(), + LocalPath: fullPath, + HomePath: sp.LocalPath, + Description: sp.Description, + } + + if info, err := os.Stat(fullPath); err == nil { + setting.Exists = true + setting.Size = info.Size() + } + + detected = append(detected, setting) + } + } + + // Check git config separately (not an agent plugin) + gitConfigPath := filepath.Join(homeDir, ".gitconfig") + gitSetting := DetectedSetting{ + Agent: "git", + LocalPath: gitConfigPath, + HomePath: ".gitconfig", + Description: "Git config (user identity, aliases)", + } + if info, err := os.Stat(gitConfigPath); err == nil { + gitSetting.Exists = true + gitSetting.Size = info.Size() + } + detected = append(detected, gitSetting) + + return detected +} + +// DetectExistingSettings returns only settings files that actually exist locally +func DetectExistingSettings() 
[]DetectedSetting { + all := DetectLocalSettings() + var existing []DetectedSetting + for _, s := range all { + if s.Exists { + existing = append(existing, s) + } + } + return existing +} + +// PrintSettingsManifest prints the list of settings files being synced +func PrintSettingsManifest(settings []DetectedSetting) { + if len(settings) == 0 { + fmt.Println("Settings sync: no settings files found locally") + return + } + + fmt.Println("Settings sync:") + for _, s := range settings { + if s.Exists { + note := "" + if s.Agent == "git" { + note = " (safe keys only)" + } + fmt.Printf(" ~/%s → sandbox%s\n", s.HomePath, note) + } + } +} + +// LoadSettingsSyncConfig loads the settings sync configuration +func LoadSettingsSyncConfig() (SettingsSyncConfig, error) { + cfg, err := LoadConfig() + if err != nil { + return SettingsSyncConfig{}, err + } + + // Settings sync config is stored in the main config file + return cfg.SettingsSync, nil +} + +// SaveSettingsSyncConfig saves the settings sync configuration +func SaveSettingsSyncConfig(syncCfg SettingsSyncConfig) error { + cfg, err := LoadConfig() + if err != nil { + return err + } + cfg.SettingsSync = syncCfg + return SaveConfig(cfg) +} + +// GetLocalSettingsStatus returns which settings files exist locally +func GetLocalSettingsStatus() map[Agent]bool { + homeDir, err := os.UserHomeDir() + if err != nil { + return nil + } + + status := make(map[Agent]bool) + for _, setting := range KnownAgentSettings() { + path := filepath.Join(homeDir, setting.LocalPath) + if _, err := os.Stat(path); err == nil { + status[setting.Agent] = true + } + } + + // Check git config + gitConfigPath := filepath.Join(homeDir, ".gitconfig") + if _, err := os.Stat(gitConfigPath); err == nil { + status["git"] = true + } + + return status +} diff --git a/internal/sandbox/settings_sync.go b/internal/sandbox/settings_sync.go new file mode 100644 index 00000000..5c95868a --- /dev/null +++ b/internal/sandbox/settings_sync.go @@ -0,0 +1,391 @@ +package 
sandbox + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "time" +) + +// settingsUploadTimeout is the timeout for uploading settings files. +const settingsUploadTimeout = 30 * time.Second + +// SyncSettingsToVolume copies enabled local settings to the sandbox home directory. +// This is called during sandbox setup if settings sync is enabled. +// It always displays a manifest of files being synced for transparency. +func SyncSettingsToVolume(computer RemoteSandbox, syncCfg SettingsSyncConfig, verbose bool) error { + if !syncCfg.Enabled { + return nil + } + + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("cannot determine home directory: %w", err) + } + + computerHome := getSandboxHomeDir(computer) + + // Determine which settings to sync + settingsToSync := getSettingsToSync(syncCfg, homeDir) + + // Always show manifest for transparency + PrintSettingsManifest(settingsToSync) + + if len(settingsToSync) == 0 { + return nil + } + + var syncedCount int + + for _, setting := range settingsToSync { + if !setting.Exists { + continue + } + + var syncErr error + if setting.Agent == "git" { + syncErr = syncGitConfig(computer, homeDir, computerHome, verbose) + } else { + agent := Agent(setting.Agent) + syncErr = syncAgentSettings(computer, homeDir, computerHome, agent, verbose) + } + + if syncErr != nil { + if verbose { + fmt.Printf(" Warning: could not sync %s settings: %v\n", setting.Agent, syncErr) + } + } else { + syncedCount++ + } + } + + if verbose && syncedCount > 0 { + fmt.Printf(" Synced %d settings configuration(s)\n", syncedCount) + } + + return nil +} + +// getSettingsToSync determines which settings files should be synced based on config +func getSettingsToSync(syncCfg SettingsSyncConfig, homeDir string) []DetectedSetting { + // If explicit Files list is set, use only those + if len(syncCfg.Files) > 0 { + return getSettingsFromFileList(syncCfg.Files, homeDir) + } + + // Fall back to 
legacy per-agent flags + return getSettingsFromLegacyFlags(syncCfg, homeDir) +} + +// getSettingsFromFileList returns DetectedSettings for an explicit file list +func getSettingsFromFileList(files []string, homeDir string) []DetectedSetting { + var settings []DetectedSetting + + for _, file := range files { + // Expand ~ to home directory + path := file + if strings.HasPrefix(path, "~/") { + path = filepath.Join(homeDir, path[2:]) + } else if strings.HasPrefix(path, ".") { + path = filepath.Join(homeDir, path) + } + + // Determine agent from path + agent := agentFromPath(file) + homePath := strings.TrimPrefix(file, "~/") + if strings.HasPrefix(homePath, homeDir) { + homePath = strings.TrimPrefix(homePath, homeDir+"/") + } + + setting := DetectedSetting{ + Agent: agent, + LocalPath: path, + HomePath: homePath, + } + + if info, err := os.Stat(path); err == nil { + setting.Exists = true + setting.Size = info.Size() + } + + settings = append(settings, setting) + } + + return settings +} + +// getSettingsFromLegacyFlags returns settings based on legacy per-agent boolean flags +func getSettingsFromLegacyFlags(syncCfg SettingsSyncConfig, homeDir string) []DetectedSetting { + var settings []DetectedSetting + + if syncCfg.Claude { + path := filepath.Join(homeDir, ".claude", "settings.json") + s := DetectedSetting{Agent: "claude", LocalPath: path, HomePath: ".claude/settings.json"} + if info, err := os.Stat(path); err == nil { + s.Exists = true + s.Size = info.Size() + } + settings = append(settings, s) + } + + if syncCfg.Codex { + path := filepath.Join(homeDir, ".codex", "config.toml") + s := DetectedSetting{Agent: "codex", LocalPath: path, HomePath: ".codex/config.toml"} + if info, err := os.Stat(path); err == nil { + s.Exists = true + s.Size = info.Size() + } + settings = append(settings, s) + } + + if syncCfg.Git { + path := filepath.Join(homeDir, ".gitconfig") + s := DetectedSetting{Agent: "git", LocalPath: path, HomePath: ".gitconfig"} + if info, err := os.Stat(path); 
err == nil { + s.Exists = true + s.Size = info.Size() + } + settings = append(settings, s) + } + + return settings +} + +// agentFromPath determines the agent name from a settings file path +func agentFromPath(path string) string { + if strings.Contains(path, ".claude") { + return "claude" + } + if strings.Contains(path, ".codex") || strings.Contains(path, "codex") { + return "codex" + } + if strings.Contains(path, "opencode") { + return "opencode" + } + if strings.Contains(path, "amp") { + return "amp" + } + if strings.Contains(path, ".gemini") { + return "gemini" + } + if strings.Contains(path, ".gitconfig") { + return "git" + } + return "unknown" +} + +// syncAgentSettings syncs settings for a specific agent +func syncAgentSettings(computer RemoteSandbox, homeDir, computerHome string, agent Agent, verbose bool) error { + var localPath, remotePath string + + switch agent { + case AgentClaude: + localPath = filepath.Join(homeDir, ".claude", "settings.json") + remotePath = fmt.Sprintf("%s/.claude/settings.json", computerHome) + case AgentCodex: + localPath = filepath.Join(homeDir, ".codex", "config.toml") + remotePath = fmt.Sprintf("%s/.config/codex/config.toml", computerHome) + case AgentOpenCode: + localPath = filepath.Join(homeDir, ".config", "opencode", "config.json") + remotePath = fmt.Sprintf("%s/.config/opencode/config.json", computerHome) + case AgentAmp: + localPath = filepath.Join(homeDir, ".config", "amp", "config.json") + remotePath = fmt.Sprintf("%s/.config/amp/config.json", computerHome) + case AgentGemini: + localPath = filepath.Join(homeDir, ".gemini", "settings.json") + remotePath = fmt.Sprintf("%s/.gemini/settings.json", computerHome) + default: + return nil + } + + // Check if local settings file exists + data, err := os.ReadFile(localPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil // No local settings, skip + } + return err + } + + // For JSON files, filter out any sensitive keys + if strings.HasSuffix(localPath, 
".json") { + data, err = filterSensitiveJSON(data) + if err != nil { + return err + } + } + + ctx, cancel := context.WithTimeout(context.Background(), settingsUploadTimeout) + defer cancel() + if err := uploadBytes(ctx, computer, data, remotePath); err != nil { + return fmt.Errorf("failed to upload settings: %w", err) + } + + if verbose { + fmt.Printf(" Synced %s settings\n", agent) + } + + return nil +} + +// syncGitConfig syncs safe git configuration (no credentials) +func syncGitConfig(computer RemoteSandbox, homeDir, computerHome string, verbose bool) error { + gitConfigPath := filepath.Join(homeDir, ".gitconfig") + + data, err := os.ReadFile(gitConfigPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil // No gitconfig, skip + } + return err + } + + // Filter to only safe keys (very basic INI parsing for safety) + safeConfig := filterGitConfig(string(data)) + if safeConfig == "" { + return nil + } + + remotePath := fmt.Sprintf("%s/.gitconfig", computerHome) + ctx, cancel := context.WithTimeout(context.Background(), settingsUploadTimeout) + defer cancel() + if err := uploadBytes(ctx, computer, []byte(safeConfig), remotePath); err != nil { + return fmt.Errorf("failed to upload git config: %w", err) + } + + if verbose { + fmt.Println(" Synced git config (name, email, aliases)") + } + + return nil +} + +// filterSensitiveJSON removes potentially sensitive keys from JSON config +func filterSensitiveJSON(data []byte) ([]byte, error) { + var obj map[string]interface{} + if err := json.Unmarshal(data, &obj); err != nil { + return data, nil // Not valid JSON, return as-is + } + + // Remove keys that might contain sensitive data + sensitiveKeys := []string{ + "apiKey", "api_key", "apikey", + "token", "auth_token", "authToken", + "secret", "password", "credential", + "key", "private", + } + + filtered := filterMapKeys(obj, sensitiveKeys) + return json.MarshalIndent(filtered, "", " ") +} + +// filterMapKeys recursively removes sensitive keys from a 
map +func filterMapKeys(obj map[string]interface{}, sensitiveKeys []string) map[string]interface{} { + result := make(map[string]interface{}) + + for k, v := range obj { + // Check if key contains sensitive words + isSensitive := false + lowerKey := strings.ToLower(k) + for _, sensitive := range sensitiveKeys { + if strings.Contains(lowerKey, strings.ToLower(sensitive)) { + isSensitive = true + break + } + } + + if isSensitive { + continue + } + + // Recursively filter nested maps + if nested, ok := v.(map[string]interface{}); ok { + result[k] = filterMapKeys(nested, sensitiveKeys) + } else { + result[k] = v + } + } + + return result +} + +// filterGitConfig extracts only safe configuration from gitconfig +func filterGitConfig(content string) string { + lines := strings.Split(content, "\n") + var result []string + inSafeSection := false + + safeSections := map[string]bool{ + "[user]": true, + "[core]": true, + "[init]": true, + "[pull]": true, + "[push]": true, + "[alias]": true, + "[color]": true, + "[diff]": true, + "[merge]": true, + "[branch]": true, + } + + unsafeSections := map[string]bool{ + "[credential]": true, + "[http]": true, + "[url": true, // Catches [url "..."] + } + + for _, line := range lines { + trimmed := strings.TrimSpace(line) + + // Check if this is a section header + if strings.HasPrefix(trimmed, "[") { + // Check if section is unsafe + isUnsafe := false + for unsafe := range unsafeSections { + if strings.HasPrefix(trimmed, unsafe) { + isUnsafe = true + break + } + } + + if isUnsafe { + inSafeSection = false + continue + } + + // Check if section is explicitly safe + isSafe := false + for safe := range safeSections { + if strings.HasPrefix(trimmed, safe) { + isSafe = true + break + } + } + + inSafeSection = isSafe + if isSafe { + result = append(result, line) + } + continue + } + + // Include line if we're in a safe section + if inSafeSection && trimmed != "" { + // Extra safety: skip any line that looks like it contains credentials + 
lowerLine := strings.ToLower(trimmed) + if strings.Contains(lowerLine, "token") || + strings.Contains(lowerLine, "password") || + strings.Contains(lowerLine, "credential") || + strings.Contains(lowerLine, "oauth") { + continue + } + result = append(result, line) + } + } + + return strings.Join(result, "\n") +} diff --git a/internal/sandbox/shell.go b/internal/sandbox/shell.go new file mode 100644 index 00000000..3ebdc85a --- /dev/null +++ b/internal/sandbox/shell.go @@ -0,0 +1,297 @@ +package sandbox + +import ( + "fmt" + "regexp" + "strings" +) + +// ShellQuote safely quotes a string for use in shell commands. +// It uses single quotes and escapes any embedded single quotes. +// This is the safest method for POSIX shells. +func ShellQuote(s string) string { + // Replace single quotes with '\'' (end quote, escaped quote, start quote) + escaped := strings.ReplaceAll(s, "'", "'\\''") + return "'" + escaped + "'" +} + +// ShellQuoteAll quotes multiple strings for shell usage. +func ShellQuoteAll(args []string) []string { + quoted := make([]string, len(args)) + for i, arg := range args { + quoted[i] = ShellQuote(arg) + } + return quoted +} + +// ShellJoin joins arguments into a single shell-safe command string. +func ShellJoin(args []string) string { + return strings.Join(ShellQuoteAll(args), " ") +} + +// ShellCommand builds a shell command safely. +type ShellCommand struct { + parts []string +} + +// NewShellCommand creates a new ShellCommand builder. +func NewShellCommand(cmd string) *ShellCommand { + return &ShellCommand{parts: []string{cmd}} +} + +// Arg adds a single argument, properly quoted. +func (c *ShellCommand) Arg(arg string) *ShellCommand { + c.parts = append(c.parts, ShellQuote(arg)) + return c +} + +// Args adds multiple arguments, properly quoted. 
+func (c *ShellCommand) Args(args ...string) *ShellCommand { + for _, arg := range args { + c.parts = append(c.parts, ShellQuote(arg)) + } + return c +} + +// RawArg adds an argument without quoting (use with caution). +func (c *ShellCommand) RawArg(arg string) *ShellCommand { + c.parts = append(c.parts, arg) + return c +} + +// Flag adds a flag (e.g., "-v", "--verbose"). +func (c *ShellCommand) Flag(flag string) *ShellCommand { + if isValidFlag(flag) { + c.parts = append(c.parts, flag) + } + return c +} + +// FlagWithValue adds a flag with a value (e.g., "-o", "output.txt"). +func (c *ShellCommand) FlagWithValue(flag, value string) *ShellCommand { + if isValidFlag(flag) { + c.parts = append(c.parts, flag, ShellQuote(value)) + } + return c +} + +// Pipe adds a pipe to another command. +func (c *ShellCommand) Pipe(cmd string) *ShellCommand { + c.parts = append(c.parts, "|", cmd) + return c +} + +// And adds && to chain commands. +func (c *ShellCommand) And(cmd string) *ShellCommand { + c.parts = append(c.parts, "&&", cmd) + return c +} + +// Or adds || to chain commands. +func (c *ShellCommand) Or(cmd string) *ShellCommand { + c.parts = append(c.parts, "||", cmd) + return c +} + +// String returns the complete command string. +func (c *ShellCommand) String() string { + return strings.Join(c.parts, " ") +} + +// isValidFlag checks if a string looks like a valid flag. +var flagPattern = regexp.MustCompile(`^-{1,2}[a-zA-Z][a-zA-Z0-9_-]*$`) + +func isValidFlag(s string) bool { + return flagPattern.MatchString(s) +} + +// EnvVar represents a shell environment variable assignment. +type EnvVar struct { + Key string + Value string +} + +// EnvVarPattern validates environment variable names. +var envVarPattern = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_]*$`) + +// IsValidEnvKey checks if a key is a valid environment variable name. +func IsValidEnvKey(key string) bool { + return envVarPattern.MatchString(key) +} + +// BuildEnvExport builds a safe export statement. 
+func BuildEnvExport(key, value string) string { + if !IsValidEnvKey(key) { + return "" + } + return fmt.Sprintf("export %s=%s", key, ShellQuote(value)) +} + +// BuildEnvExports builds multiple export statements. +func BuildEnvExports(env map[string]string) []string { + exports := make([]string, 0, len(env)) + for key, value := range env { + if export := BuildEnvExport(key, value); export != "" { + exports = append(exports, export) + } + } + return exports +} + +// BuildEnvAssignment builds an inline environment assignment (VAR=value cmd). +func BuildEnvAssignment(key, value string) string { + if !IsValidEnvKey(key) { + return "" + } + return fmt.Sprintf("%s=%s", key, ShellQuote(value)) +} + +// BuildEnvAssignments builds inline environment assignments. +func BuildEnvAssignments(env map[string]string) string { + assignments := make([]string, 0, len(env)) + for key, value := range env { + if assignment := BuildEnvAssignment(key, value); assignment != "" { + assignments = append(assignments, assignment) + } + } + return strings.Join(assignments, " ") +} + +// SafeCommands provides pre-built safe command templates. 
+var SafeCommands = struct { + // File operations + MkdirP func(path string) string + RmRf func(path string) string + RmF func(path string) string + Ln func(target, link string) string + LnForce func(target, link string) string + Touch func(path string) string + Cat func(path string) string + Test func(flag, path string) string + Chmod func(mode, path string) string + Chown func(owner, path string) string + Cp func(src, dst string) string + Mv func(src, dst string) string + Stat func(path string) string + MkdirParent func(path string) string + + // Archive operations + TarCreate func(archive, dir string) string + TarExtract func(archive, dir string) string + + // Command lookup + CommandV func(cmd string) string + Which func(cmd string) string +}{ + MkdirP: func(path string) string { + return fmt.Sprintf("mkdir -p %s", ShellQuote(path)) + }, + RmRf: func(path string) string { + return fmt.Sprintf("rm -rf %s", ShellQuote(path)) + }, + RmF: func(path string) string { + return fmt.Sprintf("rm -f %s", ShellQuote(path)) + }, + Ln: func(target, link string) string { + return fmt.Sprintf("ln -s %s %s", ShellQuote(target), ShellQuote(link)) + }, + LnForce: func(target, link string) string { + return fmt.Sprintf("ln -sfn %s %s", ShellQuote(target), ShellQuote(link)) + }, + Touch: func(path string) string { + return fmt.Sprintf("touch %s", ShellQuote(path)) + }, + Cat: func(path string) string { + return fmt.Sprintf("cat %s", ShellQuote(path)) + }, + Test: func(flag, path string) string { + if !strings.HasPrefix(flag, "-") || len(flag) != 2 { + flag = "-e" + } + return fmt.Sprintf("test %s %s", flag, ShellQuote(path)) + }, + Chmod: func(mode, path string) string { + return fmt.Sprintf("chmod %s %s", mode, ShellQuote(path)) + }, + Chown: func(owner, path string) string { + return fmt.Sprintf("chown %s %s", owner, ShellQuote(path)) + }, + Cp: func(src, dst string) string { + return fmt.Sprintf("cp %s %s", ShellQuote(src), ShellQuote(dst)) + }, + Mv: func(src, dst string) 
string { + return fmt.Sprintf("mv %s %s", ShellQuote(src), ShellQuote(dst)) + }, + Stat: func(path string) string { + // Use portable stat flags (works on both Linux and macOS) + return fmt.Sprintf("stat -c %%Y %s 2>/dev/null || stat -f %%m %s 2>/dev/null", ShellQuote(path), ShellQuote(path)) + }, + MkdirParent: func(path string) string { + return fmt.Sprintf("mkdir -p $(dirname %s)", ShellQuote(path)) + }, + TarCreate: func(archive, dir string) string { + return fmt.Sprintf("tar -czf %s -C %s .", ShellQuote(archive), ShellQuote(dir)) + }, + TarExtract: func(archive, dir string) string { + return fmt.Sprintf("tar -xzf %s -C %s", ShellQuote(archive), ShellQuote(dir)) + }, + CommandV: func(cmd string) string { + return fmt.Sprintf("command -v %s", ShellQuote(cmd)) + }, + Which: func(cmd string) string { + return fmt.Sprintf("which %s", ShellQuote(cmd)) + }, +} + +// RedactSecrets redacts sensitive values from a string for logging. +func RedactSecrets(s string) string { + // Redact export statements + exportRe := regexp.MustCompile(`export [A-Za-z_][A-Za-z0-9_]*=('[^']*'|"[^"]*"|[^\s]+)`) + s = exportRe.ReplaceAllStringFunc(s, func(match string) string { + parts := strings.SplitN(match, "=", 2) + if len(parts) == 2 { + key := strings.TrimPrefix(parts[0], "export ") + // Check if this looks like a secret + lowerKey := strings.ToLower(key) + if strings.Contains(lowerKey, "key") || + strings.Contains(lowerKey, "token") || + strings.Contains(lowerKey, "secret") || + strings.Contains(lowerKey, "password") || + strings.Contains(lowerKey, "credential") { + return fmt.Sprintf("export %s=", key) + } + } + return match + }) + + // Redact API keys that look like they're inline + apiKeyRe := regexp.MustCompile(`(sk-[a-zA-Z0-9]{20,}|ghp_[a-zA-Z0-9]{36}|gho_[a-zA-Z0-9]{36}|ANTHROPIC_[a-zA-Z0-9_]+=)`) + s = apiKeyRe.ReplaceAllString(s, "") + + return s +} + +// ValidatePath checks if a path is safe for use in commands. 
+// Returns an error if the path contains potentially dangerous characters. +func ValidatePath(path string) error { + // Check for null bytes + if strings.Contains(path, "\x00") { + return fmt.Errorf("path contains null byte") + } + + // Check for path traversal attempts that are suspicious + if strings.Contains(path, "..") && strings.Contains(path, "/") { + // Allow .. in the middle of paths, but warn about potential traversal + LogDebug("path contains potential traversal", "path", path) + } + + // Check for shell metacharacters that shouldn't be in paths + dangerous := []string{";", "|", "&", "$", "`", "(", ")", "{", "}", "<", ">", "\n", "\r"} + for _, char := range dangerous { + if strings.Contains(path, char) { + return fmt.Errorf("path contains dangerous character: %q", char) + } + } + + return nil +} diff --git a/internal/sandbox/shell_test.go b/internal/sandbox/shell_test.go new file mode 100644 index 00000000..698cd2e9 --- /dev/null +++ b/internal/sandbox/shell_test.go @@ -0,0 +1,339 @@ +package sandbox + +import ( + "testing" +) + +func TestShellQuote(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "simple string", + input: "hello", + expected: "'hello'", + }, + { + name: "string with spaces", + input: "hello world", + expected: "'hello world'", + }, + { + name: "string with single quote", + input: "it's", + expected: "'it'\\''s'", + }, + { + name: "string with multiple single quotes", + input: "it's a 'test'", + expected: "'it'\\''s a '\\''test'\\'''", + }, + { + name: "empty string", + input: "", + expected: "''", + }, + { + name: "string with special chars", + input: "hello; rm -rf /", + expected: "'hello; rm -rf /'", + }, + { + name: "string with newline", + input: "hello\nworld", + expected: "'hello\nworld'", + }, + { + name: "string with dollar sign", + input: "$HOME", + expected: "'$HOME'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := 
ShellQuote(tt.input) + if result != tt.expected { + t.Errorf("ShellQuote(%q) = %q, want %q", tt.input, result, tt.expected) + } + }) + } +} + +func TestShellQuoteAll(t *testing.T) { + input := []string{"hello", "world", "it's"} + expected := []string{"'hello'", "'world'", "'it'\\''s'"} + + result := ShellQuoteAll(input) + if len(result) != len(expected) { + t.Errorf("ShellQuoteAll length = %d, want %d", len(result), len(expected)) + return + } + + for i, v := range result { + if v != expected[i] { + t.Errorf("ShellQuoteAll[%d] = %q, want %q", i, v, expected[i]) + } + } +} + +func TestShellJoin(t *testing.T) { + tests := []struct { + name string + input []string + expected string + }{ + { + name: "simple args", + input: []string{"echo", "hello", "world"}, + expected: "'echo' 'hello' 'world'", + }, + { + name: "args with spaces", + input: []string{"echo", "hello world"}, + expected: "'echo' 'hello world'", + }, + { + name: "empty args", + input: []string{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ShellJoin(tt.input) + if result != tt.expected { + t.Errorf("ShellJoin(%v) = %q, want %q", tt.input, result, tt.expected) + } + }) + } +} + +func TestIsValidEnvKey(t *testing.T) { + tests := []struct { + key string + valid bool + }{ + {"HOME", true}, + {"PATH", true}, + {"MY_VAR", true}, + {"_PRIVATE", true}, + {"VAR123", true}, + {"123VAR", false}, + {"VAR-NAME", false}, + {"VAR.NAME", false}, + {"", false}, + {"$VAR", false}, + } + + for _, tt := range tests { + t.Run(tt.key, func(t *testing.T) { + result := IsValidEnvKey(tt.key) + if result != tt.valid { + t.Errorf("IsValidEnvKey(%q) = %v, want %v", tt.key, result, tt.valid) + } + }) + } +} + +func TestBuildEnvExport(t *testing.T) { + tests := []struct { + name string + key string + value string + expected string + }{ + { + name: "simple value", + key: "HOME", + value: "/home/user", + expected: "export HOME='/home/user'", + }, + { + name: "value with 
spaces", + key: "MY_VAR", + value: "hello world", + expected: "export MY_VAR='hello world'", + }, + { + name: "invalid key", + key: "123INVALID", + value: "test", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildEnvExport(tt.key, tt.value) + if result != tt.expected { + t.Errorf("BuildEnvExport(%q, %q) = %q, want %q", tt.key, tt.value, result, tt.expected) + } + }) + } +} + +func TestSafeCommands(t *testing.T) { + tests := []struct { + name string + fn func() string + contains string + }{ + { + name: "MkdirP", + fn: func() string { return SafeCommands.MkdirP("/test/path") }, + contains: "mkdir -p", + }, + { + name: "RmRf", + fn: func() string { return SafeCommands.RmRf("/test/path") }, + contains: "rm -rf", + }, + { + name: "RmF", + fn: func() string { return SafeCommands.RmF("/test/file") }, + contains: "rm -f", + }, + { + name: "Touch", + fn: func() string { return SafeCommands.Touch("/test/file") }, + contains: "touch", + }, + { + name: "Cat", + fn: func() string { return SafeCommands.Cat("/test/file") }, + contains: "cat", + }, + { + name: "TarCreate", + fn: func() string { return SafeCommands.TarCreate("/archive.tgz", "/source") }, + contains: "tar -czf", + }, + { + name: "TarExtract", + fn: func() string { return SafeCommands.TarExtract("/archive.tgz", "/dest") }, + contains: "tar -xzf", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.fn() + if result == "" { + t.Error("Expected non-empty command") + } + if !contains(result, tt.contains) { + t.Errorf("Command %q should contain %q", result, tt.contains) + } + }) + } +} + +func TestSafeCommandsInjectionPrevention(t *testing.T) { + // Test that shell metacharacters are properly quoted + malicious := "/test; rm -rf /" + + cmd := SafeCommands.Cat(malicious) + // The semicolon should be inside quotes, not a command separator + if contains(cmd, "rm -rf") && !contains(cmd, "'") { + t.Errorf("SafeCommands.Cat did 
not properly quote malicious input: %s", cmd) + } +} + +func TestRedactSecrets(t *testing.T) { + tests := []struct { + name string + input string + shouldMatch bool // check if the key is redacted, not exact match + }{ + { + name: "API key export is redacted", + input: "export ANTHROPIC_API_KEY='sk-ant-12345'", + shouldMatch: true, + }, + { + name: "no secrets unchanged", + input: "export HOME='/home/user'", + shouldMatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := RedactSecrets(tt.input) + hasRedacted := contains(result, "") + if tt.shouldMatch && !hasRedacted { + t.Errorf("RedactSecrets(%q) = %q, expected to contain ", tt.input, result) + } + if !tt.shouldMatch && hasRedacted { + t.Errorf("RedactSecrets(%q) = %q, did not expect redaction", tt.input, result) + } + }) + } +} + +func TestValidatePath(t *testing.T) { + tests := []struct { + name string + path string + wantErr bool + }{ + { + name: "valid simple path", + path: "/home/user/file.txt", + wantErr: false, + }, + { + name: "valid path with spaces", + path: "/home/user/my file.txt", + wantErr: false, + }, + { + name: "null byte", + path: "/home/user/\x00file", + wantErr: true, + }, + { + name: "semicolon", + path: "/home/user;rm -rf /", + wantErr: true, + }, + { + name: "pipe", + path: "/home/user|cat /etc/passwd", + wantErr: true, + }, + { + name: "backtick", + path: "/home/`whoami`", + wantErr: true, + }, + { + name: "dollar sign", + path: "/home/$USER", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidatePath(tt.path) + if (err != nil) != tt.wantErr { + t.Errorf("ValidatePath(%q) error = %v, wantErr %v", tt.path, err, tt.wantErr) + } + }) + } +} + +// contains checks if s contains substr +func contains(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/internal/sandbox/snapshot.go 
b/internal/sandbox/snapshot.go new file mode 100644 index 00000000..a90ab236 --- /dev/null +++ b/internal/sandbox/snapshot.go @@ -0,0 +1,104 @@ +package sandbox + +import ( + "fmt" + "strings" + "time" + + "github.com/andyrewlee/amux/internal/daytona" +) + +const DefaultSnapshotBaseImage = "node:20-bullseye" + +var DefaultSnapshotAgents = []Agent{ + AgentClaude, + AgentCodex, + AgentOpenCode, + AgentAmp, + AgentGemini, + AgentDroid, +} + +var agentInstalls = map[Agent][]string{ + AgentClaude: { + // Native installer (recommended by Anthropic) - installs to ~/.local/bin/claude + "curl -fsSL https://claude.ai/install.sh | bash", + // Symlink to /usr/local/bin for reliable PATH resolution in all shells + "ln -sf /root/.local/bin/claude /usr/local/bin/claude || true", + }, + AgentCodex: {"npm install -g @openai/codex"}, + AgentOpenCode: {"npm install -g opencode-ai"}, + AgentAmp: { + "curl -fsSL https://ampcode.com/install.sh | bash", + "ln -sf /root/.amp/bin/amp /usr/local/bin/amp || true", + }, + AgentGemini: {"npm install -g @google/gemini-cli"}, + AgentDroid: { + "curl -fsSL https://app.factory.ai/cli | sh", + "ln -sf /root/.local/bin/droid /usr/local/bin/droid || true", + }, + AgentShell: {}, +} + +// ParseAgentList parses a comma-separated list of agents. +func ParseAgentList(value string) ([]Agent, error) { + if strings.TrimSpace(value) == "" { + return append([]Agent{}, DefaultSnapshotAgents...), nil + } + items := strings.Split(value, ",") + agents := make([]Agent, 0, len(items)) + seen := map[Agent]bool{} + for _, item := range items { + trimmed := strings.TrimSpace(item) + if trimmed == "" { + continue + } + agent := Agent(trimmed) + switch agent { + case AgentClaude, AgentCodex, AgentOpenCode, AgentAmp, AgentGemini, AgentDroid: + if !seen[agent] { + agents = append(agents, agent) + seen[agent] = true + } + default: + return nil, fmt.Errorf("unknown agent %q. 
Use: claude,codex,opencode,amp,gemini,droid", trimmed) + } + } + if len(agents) == 0 { + return append([]Agent{}, DefaultSnapshotAgents...), nil + } + return agents, nil +} + +// BuildSnapshotName returns a timestamped snapshot name. +func BuildSnapshotName(prefix string) string { + stamp := time.Now().UTC().Format("20060102-150405") + return fmt.Sprintf("%s-%s", prefix, stamp) +} + +// BuildSnapshotImage builds a Dockerfile image with agent installs. +func BuildSnapshotImage(agents []Agent, baseImage string) *daytona.Image { + if baseImage == "" { + baseImage = DefaultSnapshotBaseImage + } + image := daytona.ImageBase(baseImage) + installCommands := []string{ + "apt-get update -y && apt-get install -y --no-install-recommends curl git ca-certificates && rm -rf /var/lib/apt/lists/*", + } + for _, agent := range agents { + installCommands = append(installCommands, agentInstalls[agent]...) + } + for _, cmd := range installCommands { + image.RunCommands([]string{"bash", "-lc", cmd}) + } + return image +} + +// CreateSnapshot builds and creates a snapshot. +func CreateSnapshot(client *daytona.Daytona, name string, agents []Agent, baseImage string, onLogs func(string)) (*daytona.Snapshot, error) { + image := BuildSnapshotImage(agents, baseImage) + return client.Snapshot.Create(daytona.CreateSnapshotParams{ + Name: name, + Image: image, + }, &daytona.SnapshotCreateOptions{OnLogs: onLogs}) +} diff --git a/internal/sandbox/ssh.go b/internal/sandbox/ssh.go new file mode 100644 index 00000000..7999ba3d --- /dev/null +++ b/internal/sandbox/ssh.go @@ -0,0 +1,49 @@ +package sandbox + +import ( + "errors" + "fmt" + "os/exec" +) + +// BuildSSHCommand returns an ssh exec.Cmd for the sandbox plus a cleanup func to revoke access. +// Only Dayt0na sandboxes currently support SSH access. 
+func BuildSSHCommand(sb RemoteSandbox, remoteCommand string) (*exec.Cmd, func(), error) { + ds, ok := sb.(*daytonaSandbox) + if !ok || ds == nil || ds.inner == nil { + return nil, nil, errors.New("SSH access is only supported for Daytona sandboxes") + } + + sshAccess, err := ds.inner.CreateSshAccess(60) + if err != nil { + return nil, nil, err + } + cleanup := func() { + _ = ds.inner.RevokeSshAccess(sshAccess.Token) + } + + runnerDomain, err := waitForSshAccessDaytona(ds.inner, sshAccess.Token) + if err != nil { + cleanup() + return nil, nil, err + } + sshHost := runnerDomain + if sshHost == "" { + sshHost = getSSHHost() + } + target := fmt.Sprintf("%s@%s", sshAccess.Token, sshHost) + + sshArgs := []string{ + "-tt", + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "LogLevel=ERROR", + target, + } + if remoteCommand != "" { + sshArgs = append(sshArgs, remoteCommand) + } + + cmd := exec.Command("ssh", sshArgs...) + return cmd, cleanup, nil +} diff --git a/internal/sandbox/sync.go b/internal/sandbox/sync.go new file mode 100644 index 00000000..4203ed94 --- /dev/null +++ b/internal/sandbox/sync.go @@ -0,0 +1,434 @@ +package sandbox + +import ( + "archive/tar" + "compress/gzip" + "context" + "fmt" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "strings" +) + +var defaultIgnorePatterns = []string{ + "node_modules", + ".next", + "dist", + "build", + ".turbo", + ".amux", +} + +const ( + uploadTarPath = "/tmp/amux-upload.tgz" + downloadTarPath = "/tmp/amux-download.tgz" + workspaceBaseDir = ".amux/workspaces" + maxBufferDownloadSize = 100 * 1024 * 1024 + timeoutSeconds = 300 +) + +// SyncOptions configures workspace sync. +type SyncOptions struct { + Cwd string + WorktreeID string + IncludeGit bool + IgnorePatterns []string +} + +func shouldIgnoreFile(filePath string, ignorePatterns []string) bool { + if filePath == "" || filePath == "." 
{ + return false + } + parts := strings.Split(filepath.ToSlash(filePath), "/") + for _, part := range parts { + for _, pattern := range ignorePatterns { + if part == pattern { + return true + } + } + } + return false +} + +func getStdout(resp *ExecResult) string { + if resp == nil { + return "" + } + return resp.Stdout +} + +func assertCommandSuccess(resp *ExecResult, context string) error { + if resp == nil { + return fmt.Errorf("%s (no response)", context) + } + if resp.ExitCode != 0 { + stdout := strings.TrimSpace(getStdout(resp)) + details := "" + if stdout != "" { + details = ": " + stdout + } + return fmt.Errorf("%s (exit %d)%s", context, resp.ExitCode, details) + } + return nil +} + +func getSandboxHomeDirForSync(computer RemoteSandbox) string { + resp, err := execCommand(computer, "echo $HOME", nil) + if err == nil { + stdout := strings.TrimSpace(getStdout(resp)) + if stdout != "" { + return stdout + } + } + return "/home/daytona" +} + +func resolveWorkspaceRepoPath(computer RemoteSandbox, worktreeID string) string { + homeDir := getSandboxHomeDirForSync(computer) + return path.Join(homeDir, workspaceBaseDir, worktreeID, "repo") +} + +func parseSizeFromOutput(output string) int64 { + fields := strings.Fields(output) + if len(fields) == 0 { + return -1 + } + var size int64 + _, err := fmt.Sscanf(fields[0], "%d", &size) + if err != nil { + return -1 + } + return size +} + +func getRemoteFileSize(computer RemoteSandbox, remotePath string) int64 { + resp, err := execCommand(computer, fmt.Sprintf("stat -c %%s %s 2>/dev/null || wc -c < %s", remotePath, remotePath), nil) + if err != nil || resp.ExitCode != 0 { + return -1 + } + return parseSizeFromOutput(getStdout(resp)) +} + +func ensureGzipFile(localPath string) error { + file, err := os.Open(localPath) + if err != nil { + return err + } + defer file.Close() + buf := make([]byte, 4) + if _, err := io.ReadFull(file, buf); err != nil { + return err + } + if buf[0] == 0x1f && buf[1] == 0x8b { + return nil + } + 
preview, _ := os.ReadFile(localPath) + snippet := strings.TrimSpace(string(preview)) + if len(snippet) > 200 { + snippet = snippet[:200] + } + if snippet != "" { + return fmt.Errorf("downloaded archive is invalid. First bytes: %s", snippet) + } + return fmt.Errorf("downloaded archive is invalid") +} + +func isArchiveError(err error) bool { + if err == nil { + return false + } + msg := err.Error() + return strings.Contains(msg, "gzip") || strings.Contains(msg, "tar") || strings.Contains(msg, "archive") +} + +func getIgnorePatterns(opts SyncOptions) ([]string, error) { + patterns := append([]string{}, defaultIgnorePatterns...) + if len(opts.IgnorePatterns) > 0 { + patterns = append(patterns, opts.IgnorePatterns...) + } + ignoreFiles := []string{".amuxignore"} + for _, name := range ignoreFiles { + ignorePath := filepath.Join(opts.Cwd, name) + if data, err := os.ReadFile(ignorePath); err == nil { + for _, line := range strings.Split(string(data), "\n") { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + patterns = append(patterns, line) + } + } + } + if !opts.IncludeGit { + patterns = append(patterns, ".git") + } + return patterns, nil +} + +func createTarball(opts SyncOptions) (string, error) { + patterns, err := getIgnorePatterns(opts) + if err != nil { + return "", err + } + file, err := os.CreateTemp("", "amux-upload-*.tgz") + if err != nil { + return "", err + } + defer file.Close() + gzipWriter := gzip.NewWriter(file) + defer gzipWriter.Close() + tarWriter := tar.NewWriter(gzipWriter) + defer tarWriter.Close() + + walkFn := func(pathname string, entry fs.DirEntry, err error) error { + if err != nil { + return err + } + rel, err := filepath.Rel(opts.Cwd, pathname) + if err != nil { + return err + } + if rel == "." 
{ + return nil + } + if shouldIgnoreFile(rel, patterns) { + if entry.IsDir() { + return filepath.SkipDir + } + return nil + } + info, err := entry.Info() + if err != nil { + return err + } + linkTarget := "" + if info.Mode()&os.ModeSymlink != 0 { + linkTarget, _ = os.Readlink(pathname) + } + hdr, err := tar.FileInfoHeader(info, linkTarget) + if err != nil { + return err + } + hdr.Name = filepath.ToSlash(rel) + if entry.IsDir() { + hdr.Name += "/" + } + if err := tarWriter.WriteHeader(hdr); err != nil { + return err + } + if info.Mode().IsRegular() { + file, err := os.Open(pathname) + if err != nil { + return err + } + _, err = io.Copy(tarWriter, file) + _ = file.Close() + if err != nil { + return err + } + } + return nil + } + + if err := filepath.WalkDir(opts.Cwd, walkFn); err != nil { + return "", err + } + return file.Name(), nil +} + +// UploadWorkspace syncs local workspace to a sandbox. +func UploadWorkspace(computer RemoteSandbox, opts SyncOptions, verbose bool) error { + if verbose { + fmt.Println("Creating tarball of local workspace...") + } + tarPath, err := createTarball(opts) + if err != nil { + return err + } + defer os.Remove(tarPath) + if verbose { + fmt.Printf("Uploading to %s in sandbox...\n", uploadTarPath) + } + if err := computer.UploadFile(context.Background(), tarPath, uploadTarPath); err != nil { + return err + } + if verbose { + fmt.Println("Upload complete") + } + + repoPath := resolveWorkspaceRepoPath(computer, opts.WorktreeID) + _, _ = execCommand(computer, SafeCommands.RmRf(repoPath), nil) + _, _ = execCommand(computer, SafeCommands.MkdirP(repoPath), nil) + resp, err := execCommand(computer, SafeCommands.TarExtract(uploadTarPath, repoPath), nil) + if err != nil { + return err + } + if err := assertCommandSuccess(resp, "failed to extract workspace tarball in sandbox"); err != nil { + return err + } + if verbose { + fmt.Printf("Workspace location: %s\n", repoPath) + } + _, _ = execCommand(computer, SafeCommands.RmF(uploadTarPath), nil) + 
return nil +} + +func extractTarball(tarPath, dest string) error { + file, err := os.Open(tarPath) + if err != nil { + return err + } + defer file.Close() + gzipReader, err := gzip.NewReader(file) + if err != nil { + return err + } + defer gzipReader.Close() + tarReader := tar.NewReader(gzipReader) + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + name := filepath.Clean(hdr.Name) + if strings.HasPrefix(name, "../") || strings.HasPrefix(name, "..\\") { + return fmt.Errorf("tar entry outside destination: %s", hdr.Name) + } + target := filepath.Join(dest, name) + if !strings.HasPrefix(filepath.Clean(target), filepath.Clean(dest)) { + return fmt.Errorf("tar entry outside destination: %s", hdr.Name) + } + switch hdr.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(target, 0o755); err != nil { + return err + } + case tar.TypeSymlink: + if err := os.MkdirAll(filepath.Dir(target), 0o755); err != nil { + return err + } + if err := os.Symlink(hdr.Linkname, target); err != nil && !os.IsExist(err) { + return err + } + case tar.TypeReg: + if err := os.MkdirAll(filepath.Dir(target), 0o755); err != nil { + return err + } + file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(hdr.Mode)) + if err != nil { + return err + } + if _, err := io.Copy(file, tarReader); err != nil { + _ = file.Close() + return err + } + _ = file.Close() + default: + // Skip unsupported types + } + } + return nil +} + +// DownloadWorkspace syncs workspace from sandbox to local. 
func DownloadWorkspace(computer RemoteSandbox, opts SyncOptions, verbose bool) error {
	if verbose {
		fmt.Println("Creating tarball in sandbox...")
	}
	// Pack the remote repo directory into a fixed temp archive in the sandbox.
	repoPath := resolveWorkspaceRepoPath(computer, opts.WorktreeID)
	resp, err := execCommand(computer, SafeCommands.TarCreate(downloadTarPath, repoPath), nil)
	if err != nil {
		return err
	}
	if err := assertCommandSuccess(resp, "failed to create tarball in sandbox"); err != nil {
		return err
	}
	// Remote size is advisory: -1 when the probe fails, in which case the
	// size-mismatch check below is skipped.
	remoteSize := getRemoteFileSize(computer, downloadTarPath)
	if verbose {
		fmt.Println("Tarball created in sandbox")
	}

	// Reserve a unique local temp path; only the name is needed, so the
	// handle is closed immediately.
	tmpFile, err := os.CreateTemp("", "amux-download-*.tgz")
	if err != nil {
		return err
	}
	_ = tmpFile.Close()
	localPath := tmpFile.Name()
	defer os.Remove(localPath)

	if verbose {
		fmt.Printf("Downloading to %s...\n", opts.Cwd)
	}
	if err := computer.DownloadFile(context.Background(), downloadTarPath, localPath); err != nil {
		return err
	}
	if verbose {
		fmt.Println("Download complete")
	}

	// Detect truncated transfers early, before attempting extraction.
	if remoteSize > 0 {
		if stat, err := os.Stat(localPath); err == nil {
			if stat.Size() != remoteSize {
				return fmt.Errorf("downloaded archive size mismatch (remote %d bytes, local %d bytes)", remoteSize, stat.Size())
			}
		}
	}

	// One-shot fallback: re-fetch the archive into memory (presumably via a
	// different transport in downloadBytes — confirm) when the streamed file
	// looks corrupt. Skipped for archives above maxBufferDownloadSize and
	// never attempted twice.
	retried := false
	retryWithBuffer := func(original error) error {
		if retried {
			return original
		}
		if remoteSize > maxBufferDownloadSize {
			return original
		}
		if verbose {
			fmt.Println("Retrying download using buffer...")
		}
		data, err := downloadBytes(context.Background(), computer, downloadTarPath)
		if err != nil {
			// Keep the original failure; the retry path itself is best-effort.
			return original
		}
		if err := os.WriteFile(localPath, data, 0o644); err != nil {
			return err
		}
		retried = true
		return ensureGzipFile(localPath)
	}

	// Validate the gzip magic bytes; retry once on failure.
	if err := ensureGzipFile(localPath); err != nil {
		if err := retryWithBuffer(err); err != nil {
			return err
		}
	}

	if verbose {
		fmt.Println("Extracting tarball locally...")
	}
	// Extraction failures that look like archive corruption get the same
	// buffered retry, then one more extraction attempt.
	if err := extractTarball(localPath, opts.Cwd); err != nil {
		if !isArchiveError(err) {
			return err
		}
		if err := retryWithBuffer(err); err != nil {
			return err
		}
		if err := extractTarball(localPath, opts.Cwd); err != nil {
			return err
		}
	}

	// Best-effort cleanup of the remote temp archive.
	_, _ = execCommand(computer, SafeCommands.RmF(downloadTarPath), nil)
	return nil
}

// GetWorktreeRepoPath returns the repo path inside sandbox.
func GetWorktreeRepoPath(computer RemoteSandbox, opts SyncOptions) string {
	return resolveWorkspaceRepoPath(computer, opts.WorktreeID)
}

package sandbox

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strings"
	"time"
)

// FileManifest represents a file in the workspace.
type FileManifest struct {
	Path    string `json:"path"`
	Size    int64  `json:"size"`
	ModTime int64  `json:"mod_time"` // UnixNano of the local mtime
	Hash    string `json:"hash,omitempty"` // SHA256 hash for content comparison
	IsDir   bool   `json:"is_dir"`
	Mode    uint32 `json:"mode"`
}

// WorkspaceManifest contains information about all files in a workspace.
type WorkspaceManifest struct {
	Version   int                      `json:"version"`
	Generated time.Time                `json:"generated"`
	RootPath  string                   `json:"root_path"`
	Files     map[string]*FileManifest `json:"files"` // keyed by relative path
	TotalSize int64                    `json:"total_size"`
}

// SyncDiff represents the differences between local and remote workspaces.
type SyncDiff struct {
	Added    []string // Files to upload
	Modified []string // Files to re-upload
	Deleted  []string // Files to delete remotely
	Stats    SyncStats
}

// SyncStats provides statistics about sync operations.
type SyncStats struct {
	FilesAdded     int
	FilesModified  int
	FilesDeleted   int
	FilesUnchanged int
	BytesToUpload  int64
	BytesToDelete  int64
}

const (
	manifestFileName = ".amux-manifest.json"
	maxHashFileSize  = 10 * 1024 * 1024 // 10MB - only hash files smaller than this
	manifestVersion  = 1
)

// compileIgnorePatterns converts glob-style patterns to regex matchers.
// Patterns that fail to compile are silently dropped (best-effort filtering).
func compileIgnorePatterns(patterns []string) []*regexp.Regexp {
	matchers := make([]*regexp.Regexp, 0, len(patterns))
	for _, pattern := range patterns {
		// Convert glob pattern to regex
		regexPattern := globToRegex(pattern)
		if re, err := regexp.Compile(regexPattern); err == nil {
			matchers = append(matchers, re)
		}
	}
	return matchers
}

// globToRegex converts a simple glob pattern to an anchored regex pattern.
// NOTE(review): '*' becomes '.*', which also matches '/' — unlike gitignore
// semantics, a pattern like "a*b" can match across directory levels; confirm
// this is intended.
func globToRegex(glob string) string {
	var result strings.Builder
	result.WriteString("^")
	for _, ch := range glob {
		switch ch {
		case '*':
			result.WriteString(".*")
		case '?':
			result.WriteString(".")
		case '.', '+', '^', '$', '(', ')', '[', ']', '{', '}', '|', '\\':
			// Escape regex metacharacters so they match literally.
			result.WriteRune('\\')
			result.WriteRune(ch)
		default:
			result.WriteRune(ch)
		}
	}
	result.WriteString("$")
	return result.String()
}

// GenerateLocalManifest creates a manifest of the local workspace.
// Per-file errors (unreadable entries, races) are deliberately skipped so a
// single bad file cannot abort manifest generation; only a fatal walk error
// is returned.
func GenerateLocalManifest(rootPath string, ignorePatterns []string) (*WorkspaceManifest, error) {
	manifest := &WorkspaceManifest{
		Version:   manifestVersion,
		Generated: time.Now(),
		RootPath:  rootPath,
		Files:     make(map[string]*FileManifest),
	}

	// Compile ignore patterns
	matchers := compileIgnorePatterns(ignorePatterns)

	err := filepath.WalkDir(rootPath, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			return nil // Skip files we can't access
		}

		// Get relative path
		relPath, err := filepath.Rel(rootPath, path)
		if err != nil {
			return nil
		}

		// Skip root
		if relPath == "." {
			return nil
		}

		// Check ignore patterns against the full relative path and the base name.
		for _, matcher := range matchers {
			if matcher.MatchString(relPath) || matcher.MatchString(filepath.Base(relPath)) {
				if d.IsDir() {
					return filepath.SkipDir
				}
				return nil
			}
		}

		info, err := d.Info()
		if err != nil {
			return nil
		}

		fm := &FileManifest{
			Path:    relPath,
			Size:    info.Size(),
			ModTime: info.ModTime().UnixNano(),
			IsDir:   d.IsDir(),
			Mode:    uint32(info.Mode()),
		}

		// Compute hash for small files; larger files fall back to
		// size/mtime comparison in the diff.
		if !d.IsDir() && info.Size() > 0 && info.Size() < maxHashFileSize {
			if hash, err := hashFile(path); err == nil {
				fm.Hash = hash
			}
		}

		manifest.Files[relPath] = fm
		manifest.TotalSize += info.Size()

		return nil
	})

	return manifest, err
}

// GetRemoteManifest retrieves the manifest from the remote sandbox.
// A missing, empty, or corrupted manifest yields an empty manifest (forcing
// a full sync) rather than an error.
func GetRemoteManifest(computer RemoteSandbox, remotePath string) (*WorkspaceManifest, error) {
	// NOTE(review): filepath.Join on a remote POSIX path produces
	// backslashes on Windows hosts; sync.go uses path.Join for remote
	// paths — consider the same here.
	manifestPath := filepath.Join(remotePath, manifestFileName)

	// Try to read the manifest file
	resp, err := execCommand(computer, SafeCommands.Cat(manifestPath), nil)
	if err != nil || resp.ExitCode != 0 {
		// No manifest exists - return empty
		return &WorkspaceManifest{
			Version: manifestVersion,
			Files:   make(map[string]*FileManifest),
		}, nil
	}

	content := getStdout(resp)
	if content == "" {
		return &WorkspaceManifest{
			Version: manifestVersion,
			Files:   make(map[string]*FileManifest),
		}, nil
	}

	var manifest WorkspaceManifest
	if err := json.Unmarshal([]byte(content), &manifest); err != nil {
		// Corrupted manifest - return empty
		LogWarn("corrupted remote manifest, will do full sync", "error", err)
		return &WorkspaceManifest{
			Version: manifestVersion,
			Files:   make(map[string]*FileManifest),
		}, nil
	}

	return &manifest, nil
}

// uploadTimeout is the default timeout for file upload operations.
const uploadTimeout = 5 * time.Minute

// SaveRemoteManifest saves the manifest to the remote sandbox.
+func SaveRemoteManifest(computer RemoteSandbox, remotePath string, manifest *WorkspaceManifest) error { + data, err := json.Marshal(manifest) + if err != nil { + return err + } + + ctx, cancel := context.WithTimeout(context.Background(), uploadTimeout) + defer cancel() + + manifestPath := filepath.Join(remotePath, manifestFileName) + return uploadBytes(ctx, computer, data, manifestPath) +} + +// ComputeDiff calculates the differences between local and remote manifests. +func ComputeDiff(local, remote *WorkspaceManifest) *SyncDiff { + diff := &SyncDiff{ + Added: make([]string, 0), + Modified: make([]string, 0), + Deleted: make([]string, 0), + } + + // Find added and modified files + for path, localFile := range local.Files { + if localFile.IsDir { + continue // Skip directories in diff + } + + remoteFile, exists := remote.Files[path] + if !exists { + diff.Added = append(diff.Added, path) + diff.Stats.FilesAdded++ + diff.Stats.BytesToUpload += localFile.Size + } else if isFileModified(localFile, remoteFile) { + diff.Modified = append(diff.Modified, path) + diff.Stats.FilesModified++ + diff.Stats.BytesToUpload += localFile.Size + } else { + diff.Stats.FilesUnchanged++ + } + } + + // Find deleted files + for path, remoteFile := range remote.Files { + if remoteFile.IsDir { + continue + } + if _, exists := local.Files[path]; !exists { + diff.Deleted = append(diff.Deleted, path) + diff.Stats.FilesDeleted++ + diff.Stats.BytesToDelete += remoteFile.Size + } + } + + // Sort for deterministic ordering + sort.Strings(diff.Added) + sort.Strings(diff.Modified) + sort.Strings(diff.Deleted) + + return diff +} + +// isFileModified checks if a file has been modified. 
+func isFileModified(local, remote *FileManifest) bool { + // If we have hashes, compare them (most accurate) + if local.Hash != "" && remote.Hash != "" { + return local.Hash != remote.Hash + } + + // Fall back to size and modification time + if local.Size != remote.Size { + return true + } + + // If mod time is significantly different (> 1 second), consider modified + if local.ModTime > 0 && remote.ModTime > 0 { + timeDiff := local.ModTime - remote.ModTime + if timeDiff < 0 { + timeDiff = -timeDiff + } + return timeDiff > int64(time.Second) + } + + return false +} + +// IncrementalSync performs an incremental sync of the workspace. +func IncrementalSync(computer RemoteSandbox, opts SyncOptions, verbose bool) error { + logger := GetLogger().With("op", "incremental_sync") + + // Get ignore patterns + patterns, err := getIgnorePatterns(opts) + if err != nil { + return ErrSyncFailed("get_ignore_patterns", err) + } + + // Generate local manifest + logger.Debug("generating local manifest") + localManifest, err := GenerateLocalManifest(opts.Cwd, patterns) + if err != nil { + return ErrSyncFailed("generate_local_manifest", err) + } + + remotePath := GetWorktreeRepoPath(computer, opts) + + // Get remote manifest + logger.Debug("fetching remote manifest") + remoteManifest, err := GetRemoteManifest(computer, remotePath) + if err != nil { + return ErrSyncFailed("get_remote_manifest", err) + } + + // Compute diff + diff := ComputeDiff(localManifest, remoteManifest) + + if verbose { + fmt.Printf("Sync: %d added, %d modified, %d deleted, %d unchanged\n", + diff.Stats.FilesAdded, diff.Stats.FilesModified, + diff.Stats.FilesDeleted, diff.Stats.FilesUnchanged) + } + + // If many files changed, fall back to full sync + totalChanges := diff.Stats.FilesAdded + diff.Stats.FilesModified + diff.Stats.FilesDeleted + totalFiles := totalChanges + diff.Stats.FilesUnchanged + if totalFiles > 0 && float64(totalChanges)/float64(totalFiles) > 0.5 { + logger.Debug("too many changes, falling back 
to full sync", + "changes", totalChanges, "total", totalFiles) + return UploadWorkspace(computer, opts, verbose) + } + + // No changes + if totalChanges == 0 { + if verbose { + fmt.Println("Workspace is up to date") + } + return nil + } + + // Ensure remote directory exists + _, _ = execCommand(computer, SafeCommands.MkdirP(remotePath), nil) + + // Delete removed files + for _, path := range diff.Deleted { + fullPath := filepath.Join(remotePath, path) + logger.Debug("deleting", "path", path) + _, _ = execCommand(computer, SafeCommands.RmRf(fullPath), nil) + } + + // Upload added and modified files + filesToUpload := append(diff.Added, diff.Modified...) + for _, path := range filesToUpload { + localPath := filepath.Join(opts.Cwd, path) + remoteFull := filepath.Join(remotePath, path) + + // Ensure parent directory exists + parentDir := filepath.Dir(remoteFull) + _, _ = execCommand(computer, SafeCommands.MkdirP(parentDir), nil) + + // Read and upload file + data, err := os.ReadFile(localPath) + if err != nil { + logger.Warn("failed to read file", "path", path, "error", err) + continue + } + + logger.Debug("uploading", "path", path, "size", len(data)) + uploadCtx, uploadCancel := context.WithTimeout(context.Background(), uploadTimeout) + uploadErr := uploadBytes(uploadCtx, computer, data, remoteFull) + uploadCancel() + if uploadErr != nil { + logger.Warn("failed to upload file", "path", path, "error", uploadErr) + continue + } + } + + // Save updated manifest + localManifest.Generated = time.Now() + if err := SaveRemoteManifest(computer, remotePath, localManifest); err != nil { + logger.Warn("failed to save manifest", "error", err) + } + + if verbose { + fmt.Printf("Synced %d files (%.2f KB)\n", + len(filesToUpload), + float64(diff.Stats.BytesToUpload)/1024) + } + + return nil +} + +// hashFile computes the SHA256 hash of a file. 
+func hashFile(path string) (string, error) { + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return "", err + } + + return hex.EncodeToString(h.Sum(nil)), nil +} + +// ShouldUseIncrementalSync determines if incremental sync is appropriate. +func ShouldUseIncrementalSync(computer RemoteSandbox, opts SyncOptions) bool { + remotePath := GetWorktreeRepoPath(computer, opts) + manifestPath := filepath.Join(remotePath, manifestFileName) + + // Check if manifest exists + resp, err := execCommand(computer, SafeCommands.Test("-f", manifestPath), nil) + if err != nil || resp.ExitCode != 0 { + return false // No manifest, do full sync + } + + // Check manifest age (if older than 7 days, do full sync for safety) + cmd := SafeCommands.Stat(manifestPath) + resp, err = execCommand(computer, cmd, nil) + if err != nil || resp.ExitCode != 0 { + return true // Can't get age, try incremental anyway + } + + modTimeStr := strings.TrimSpace(getStdout(resp)) + var modTime int64 + _, _ = fmt.Sscanf(modTimeStr, "%d", &modTime) + + if modTime > 0 { + age := time.Since(time.Unix(modTime, 0)) + if age > 7*24*time.Hour { + LogDebug("manifest too old, will do full sync", "age", age) + return false + } + } + + return true +} + +// SmartSync chooses between incremental and full sync automatically. 
+func SmartSync(computer RemoteSandbox, opts SyncOptions, verbose bool) error { + if ShouldUseIncrementalSync(computer, opts) { + err := IncrementalSync(computer, opts, verbose) + if err == nil { + return nil + } + LogWarn("incremental sync failed, falling back to full sync", "error", err) + } + + return UploadWorkspace(computer, opts, verbose) +} diff --git a/internal/sandbox/testing.go b/internal/sandbox/testing.go new file mode 100644 index 00000000..d337c030 --- /dev/null +++ b/internal/sandbox/testing.go @@ -0,0 +1,252 @@ +package sandbox + +import ( + "context" + "fmt" + "io" + "strings" + "sync" + "time" +) + +// MockRemoteSandbox provides a test double for RemoteSandbox interface. +type MockRemoteSandbox struct { + mu sync.RWMutex + + id string + name string + state SandboxState + labels map[string]string + homeDir string + username string + + // Files simulates the filesystem + Files map[string]string + + // ExecResults maps command prefixes to (output, error) pairs + ExecResults map[string]MockExecResult + + // ExecHistory records all executed commands + ExecHistory []string + + // UploadHistory records all uploaded files + UploadHistory []MockUpload +} + +// MockExecResult represents the result of a command execution. +type MockExecResult struct { + Output string + ExitCode int + Error error +} + +// MockUpload represents a file upload operation. +type MockUpload struct { + Source string + Dest string +} + +// NewMockRemoteSandbox creates a new mock sandbox with default settings. 
+func NewMockRemoteSandbox(id string) *MockRemoteSandbox { + return &MockRemoteSandbox{ + id: id, + name: "mock-sandbox", + state: StateStarted, + labels: map[string]string{"amux.provider": "mock"}, + homeDir: "/home/user", + username: "user", + Files: make(map[string]string), + ExecResults: make(map[string]MockExecResult), + } +} + +func (m *MockRemoteSandbox) ID() string { + m.mu.RLock() + defer m.mu.RUnlock() + return m.id +} + +func (m *MockRemoteSandbox) Name() string { + m.mu.RLock() + defer m.mu.RUnlock() + return m.name +} + +func (m *MockRemoteSandbox) State() SandboxState { + m.mu.RLock() + defer m.mu.RUnlock() + return m.state +} + +func (m *MockRemoteSandbox) Labels() map[string]string { + m.mu.RLock() + defer m.mu.RUnlock() + result := make(map[string]string) + for k, v := range m.labels { + result[k] = v + } + return result +} + +func (m *MockRemoteSandbox) Start(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + m.state = StateStarted + return nil +} + +func (m *MockRemoteSandbox) Stop(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + m.state = StateStopped + return nil +} + +func (m *MockRemoteSandbox) WaitReady(ctx context.Context, timeout time.Duration) error { + return nil +} + +func (m *MockRemoteSandbox) Exec(ctx context.Context, cmd string, opts *ExecOptions) (*ExecResult, error) { + m.mu.Lock() + defer m.mu.Unlock() + + m.ExecHistory = append(m.ExecHistory, cmd) + + // Check for matching exec results + for prefix, result := range m.ExecResults { + if strings.HasPrefix(cmd, prefix) { + if result.Error != nil { + return nil, result.Error + } + return &ExecResult{ + Stdout: result.Output, + ExitCode: result.ExitCode, + }, nil + } + } + + // Default: command succeeded with empty output + return &ExecResult{Stdout: "", ExitCode: 0}, nil +} + +func (m *MockRemoteSandbox) ExecInteractive(ctx context.Context, cmd string, stdin io.Reader, stdout, stderr io.Writer, opts *ExecOptions) (int, error) { + m.mu.Lock() + 
m.ExecHistory = append(m.ExecHistory, cmd) + m.mu.Unlock() + + // Check for matching exec results + m.mu.RLock() + for prefix, result := range m.ExecResults { + if strings.HasPrefix(cmd, prefix) { + if result.Error != nil { + m.mu.RUnlock() + return 1, result.Error + } + if stdout != nil && result.Output != "" { + stdout.Write([]byte(result.Output)) + } + m.mu.RUnlock() + return result.ExitCode, nil + } + } + m.mu.RUnlock() + + return 0, nil +} + +func (m *MockRemoteSandbox) UploadFile(ctx context.Context, src, dst string) error { + m.mu.Lock() + defer m.mu.Unlock() + + m.UploadHistory = append(m.UploadHistory, MockUpload{Source: src, Dest: dst}) + return nil +} + +func (m *MockRemoteSandbox) DownloadFile(ctx context.Context, src, dst string) error { + return nil +} + +func (m *MockRemoteSandbox) GetPreviewURL(ctx context.Context, port int) (string, error) { + return fmt.Sprintf("http://localhost:%d", port), nil +} + +func (m *MockRemoteSandbox) Refresh(ctx context.Context) error { + return nil +} + +// Additional helper methods not in interface + +func (m *MockRemoteSandbox) Username() string { + m.mu.RLock() + defer m.mu.RUnlock() + return m.username +} + +func (m *MockRemoteSandbox) HomeDir() string { + m.mu.RLock() + defer m.mu.RUnlock() + return m.homeDir +} + +// Test helper methods + +// SetState sets the sandbox state for testing. +func (m *MockRemoteSandbox) SetState(state SandboxState) { + m.mu.Lock() + defer m.mu.Unlock() + m.state = state +} + +// SetHomeDir sets the home directory for testing. +func (m *MockRemoteSandbox) SetHomeDir(dir string) { + m.mu.Lock() + defer m.mu.Unlock() + m.homeDir = dir +} + +// SetExecResult sets the result for commands starting with the given prefix. 
+func (m *MockRemoteSandbox) SetExecResult(cmdPrefix string, output string, exitCode int) { + m.mu.Lock() + defer m.mu.Unlock() + m.ExecResults[cmdPrefix] = MockExecResult{Output: output, ExitCode: exitCode} +} + +// SetExecError sets an error for commands starting with the given prefix. +func (m *MockRemoteSandbox) SetExecError(cmdPrefix string, err error) { + m.mu.Lock() + defer m.mu.Unlock() + m.ExecResults[cmdPrefix] = MockExecResult{Error: err} +} + +// SetFile sets a file in the mock filesystem. +func (m *MockRemoteSandbox) SetFile(path, content string) { + m.mu.Lock() + defer m.mu.Unlock() + m.Files[path] = content +} + +// GetExecHistory returns all executed commands. +func (m *MockRemoteSandbox) GetExecHistory() []string { + m.mu.RLock() + defer m.mu.RUnlock() + result := make([]string, len(m.ExecHistory)) + copy(result, m.ExecHistory) + return result +} + +// GetUploadHistory returns all uploaded files. +func (m *MockRemoteSandbox) GetUploadHistory() []MockUpload { + m.mu.RLock() + defer m.mu.RUnlock() + result := make([]MockUpload, len(m.UploadHistory)) + copy(result, m.UploadHistory) + return result +} + +// ClearHistory clears the exec and upload history. +func (m *MockRemoteSandbox) ClearHistory() { + m.mu.Lock() + defer m.mu.Unlock() + m.ExecHistory = nil + m.UploadHistory = nil +} diff --git a/internal/ui/center/model.go b/internal/ui/center/model.go index 7a4346e6..099d0c11 100644 --- a/internal/ui/center/model.go +++ b/internal/ui/center/model.go @@ -117,7 +117,7 @@ type Model struct { focused bool canFocusRight bool tabsRevision uint64 - agentManager *appPty.AgentManager + agentProvider AgentProvider msgSink func(tea.Msg) tabEvents chan tabEvent tabActorReady uint32 @@ -168,6 +168,16 @@ func (m *Model) SetTmuxConfig(serverName, configPath string) { m.tmuxConfig.ConfigPath = configPath } +// AgentProvider abstracts agent creation for different runtimes. 
+type AgentProvider interface { + CreateAgent(ws *data.Workspace, agentType appPty.AgentType, rows, cols uint16) (*appPty.Agent, error) + CreateAgentWithTags(ws *data.Workspace, agentType appPty.AgentType, sessionName string, rows, cols uint16, tags tmux.SessionTags) (*appPty.Agent, error) + CreateViewer(ws *data.Workspace, command string, rows, cols uint16) (*appPty.Agent, error) + CreateViewerWithTags(ws *data.Workspace, command string, sessionName string, rows, cols uint16, tags tmux.SessionTags) (*appPty.Agent, error) + CloseAgent(agent *appPty.Agent) error + CloseAll() +} + type tabHitKind int const ( @@ -253,6 +263,36 @@ func (m *Model) terminalMetrics() TerminalMetrics { } } +// localAgentProvider wraps *pty.AgentManager to satisfy the AgentProvider interface. +type localAgentProvider struct { + mgr *appPty.AgentManager +} + +func (l *localAgentProvider) CreateAgent(ws *data.Workspace, agentType appPty.AgentType, rows, cols uint16) (*appPty.Agent, error) { + return l.mgr.CreateAgent(ws, agentType, "", rows, cols) +} + +func (l *localAgentProvider) CreateAgentWithTags(ws *data.Workspace, agentType appPty.AgentType, sessionName string, rows, cols uint16, tags tmux.SessionTags) (*appPty.Agent, error) { + return l.mgr.CreateAgentWithTags(ws, agentType, sessionName, rows, cols, tags) +} + +func (l *localAgentProvider) CreateViewer(ws *data.Workspace, command string, rows, cols uint16) (*appPty.Agent, error) { + return l.mgr.CreateViewer(ws, command, "", rows, cols) +} + +func (l *localAgentProvider) CreateViewerWithTags(ws *data.Workspace, command string, sessionName string, rows, cols uint16, tags tmux.SessionTags) (*appPty.Agent, error) { + return l.mgr.CreateViewerWithTags(ws, command, sessionName, rows, cols, tags) +} + +func (l *localAgentProvider) CloseAgent(agent *appPty.Agent) error { + return l.mgr.CloseAgent(agent) +} + +func (l *localAgentProvider) CloseAll() { + l.mgr.CloseAll() +} + + func (m *Model) isTabActorReady() bool { return 
atomic.LoadUint32(&m.tabActorReady) == 1 } @@ -376,9 +416,4 @@ func (m *Model) CleanupWorkspace(ws *data.Workspace) { delete(m.tabsByWorkspace, wsID) delete(m.activeTabByWorkspace, wsID) m.noteTabsChanged() - - // Also cleanup agents for this workspace - if m.agentManager != nil { - m.agentManager.CloseWorkspaceAgents(ws) - } } diff --git a/internal/ui/center/model_input_lifecycle.go b/internal/ui/center/model_input_lifecycle.go index 7e67bdfc..db53c81a 100644 --- a/internal/ui/center/model_input_lifecycle.go +++ b/internal/ui/center/model_input_lifecycle.go @@ -134,7 +134,7 @@ func (m *Model) updateTabSessionStatus(msg messages.TabSessionStatus) (*Model, t tab.Agent = nil tab.mu.Unlock() if agent != nil { - _ = m.agentManager.CloseAgent(agent) + _ = m.agentProvider.CloseAgent(agent) } tab.mu.Lock() tab.Running = false diff --git a/internal/ui/center/model_lifecycle.go b/internal/ui/center/model_lifecycle.go index 115d16f0..64f141f7 100644 --- a/internal/ui/center/model_lifecycle.go +++ b/internal/ui/center/model_lifecycle.go @@ -10,12 +10,15 @@ import ( ) // New creates a new center pane model. 
-func New(cfg *config.Config) *Model { +func New(cfg *config.Config, provider AgentProvider) *Model { + if provider == nil { + provider = &localAgentProvider{mgr: appPty.NewAgentManager(cfg)} + } return &Model{ tabsByWorkspace: make(map[string][]*Tab), activeTabByWorkspace: make(map[string]int), config: cfg, - agentManager: appPty.NewAgentManager(cfg), + agentProvider: provider, styles: common.DefaultStyles(), tabEvents: make(chan tabEvent, 4096), } @@ -145,8 +148,8 @@ func (m *Model) Close() { tab.markClosed() } } - if m.agentManager != nil { - m.agentManager.CloseAll() + if m.agentProvider != nil { + m.agentProvider.CloseAll() } } diff --git a/internal/ui/center/model_tabs.go b/internal/ui/center/model_tabs.go index bc52d3f8..fcdc8cfc 100644 --- a/internal/ui/center/model_tabs.go +++ b/internal/ui/center/model_tabs.go @@ -113,7 +113,7 @@ func (m *Model) createAgentTabWithSession(assistant string, ws *data.Workspace, CreatedAt: time.Now().Unix(), InstanceID: m.instanceID, } - agent, err := m.agentManager.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) + agent, err := m.agentProvider.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) if err != nil { logging.Error("Failed to create agent: %v", err) return messages.Error{Err: err, Context: "creating agent"} @@ -230,7 +230,7 @@ func (m *Model) handlePtyTabCreated(msg ptyTabCreateResult) tea.Cmd { tab.cachedShowCursor = false tab.mu.Unlock() if oldAgent != nil && oldAgent != msg.Agent { - _ = m.agentManager.CloseAgent(oldAgent) + _ = m.agentProvider.CloseAgent(oldAgent) } // Set up response writer for terminal queries (DSR, DA, etc.) 
diff --git a/internal/ui/center/model_tabs_actions.go b/internal/ui/center/model_tabs_actions.go index 93f62a40..ce622fff 100644 --- a/internal/ui/center/model_tabs_actions.go +++ b/internal/ui/center/model_tabs_actions.go @@ -38,7 +38,7 @@ func (m *Model) closeTabAt(index int) tea.Cmd { // Close agent if tab.Agent != nil { - _ = m.agentManager.CloseAgent(tab.Agent) + _ = m.agentProvider.CloseAgent(tab.Agent) } tab.mu.Lock() diff --git a/internal/ui/center/model_tabs_restore.go b/internal/ui/center/model_tabs_restore.go index 5b5b4e07..e846e1cc 100644 --- a/internal/ui/center/model_tabs_restore.go +++ b/internal/ui/center/model_tabs_restore.go @@ -132,7 +132,7 @@ func (m *Model) reattachToSession(ws *data.Workspace, tabID TabID, assistant str Assistant: assistant, InstanceID: m.instanceID, } - agent, err := m.agentManager.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) + agent, err := m.agentProvider.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) if err != nil { return ptyTabReattachFailed{ WorkspaceID: string(ws.ID()), diff --git a/internal/ui/center/model_tabs_session.go b/internal/ui/center/model_tabs_session.go index 0ff3769b..ed11679e 100644 --- a/internal/ui/center/model_tabs_session.go +++ b/internal/ui/center/model_tabs_session.go @@ -60,7 +60,7 @@ func (m *Model) detachTab(tab *Tab, index int) tea.Cmd { tab.Agent = nil tab.mu.Unlock() if agent != nil { - _ = m.agentManager.CloseAgent(agent) + _ = m.agentProvider.CloseAgent(agent) } return func() tea.Msg { return messages.TabDetached{Index: index} @@ -165,7 +165,7 @@ func (m *Model) ReattachActiveTab() tea.Cmd { Assistant: assistant, CreatedAt: time.Now().Unix(), } - agent, err := m.agentManager.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) + agent, err := m.agentProvider.CreateAgentWithTags(ws, 
appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) if err != nil { return ptyTabReattachFailed{ WorkspaceID: string(ws.ID()), @@ -192,7 +192,7 @@ func (m *Model) ReattachActiveTab() tea.Cmd { Assistant: assistant, InstanceID: m.instanceID, } - agent, err := m.agentManager.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) + agent, err := m.agentProvider.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) if err != nil { return ptyTabReattachFailed{ WorkspaceID: string(ws.ID()), @@ -258,7 +258,7 @@ func (m *Model) RestartActiveTab() tea.Cmd { tab.Agent = nil tab.mu.Unlock() if existingAgent != nil { - _ = m.agentManager.CloseAgent(existingAgent) + _ = m.agentProvider.CloseAgent(existingAgent) } tmuxOpts := m.getTmuxOptions() @@ -284,7 +284,7 @@ func (m *Model) RestartActiveTab() tea.Cmd { CreatedAt: time.Now().Unix(), InstanceID: m.instanceID, } - agent, err := m.agentManager.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) + agent, err := m.agentProvider.CreateAgentWithTags(ws, appPty.AgentType(assistant), sessionName, uint16(termHeight), uint16(termWidth), tags) if err != nil { return ptyTabReattachFailed{ WorkspaceID: string(ws.ID()), diff --git a/internal/ui/center/model_tabs_viewer.go b/internal/ui/center/model_tabs_viewer.go index d015e2be..e13732e4 100644 --- a/internal/ui/center/model_tabs_viewer.go +++ b/internal/ui/center/model_tabs_viewer.go @@ -44,7 +44,7 @@ func (m *Model) createVimTab(filePath string, ws *data.Workspace) tea.Cmd { CreatedAt: time.Now().Unix(), InstanceID: m.instanceID, } - agent, err := m.agentManager.CreateViewerWithTags(ws, cmd, sessionName, uint16(termHeight), uint16(termWidth), tags) + agent, err := m.agentProvider.CreateViewerWithTags(ws, cmd, sessionName, uint16(termHeight), uint16(termWidth), tags) if err != nil { 
logging.Error("Failed to create vim viewer: %v", err) return messages.Error{Err: err, Context: "creating vim viewer"} @@ -150,7 +150,7 @@ func (m *Model) createViewerTabLegacy(file string, statusCode string, ws *data.W CreatedAt: time.Now().Unix(), InstanceID: m.instanceID, } - agent, err := m.agentManager.CreateViewerWithTags(ws, cmd, sessionName, uint16(termHeight), uint16(termWidth), tags) + agent, err := m.agentProvider.CreateViewerWithTags(ws, cmd, sessionName, uint16(termHeight), uint16(termWidth), tags) if err != nil { logging.Error("Failed to create viewer: %v", err) return messages.Error{Err: err, Context: "creating viewer"} diff --git a/internal/ui/center/perf_test.go b/internal/ui/center/perf_test.go index 790fc773..2bd63a4e 100644 --- a/internal/ui/center/perf_test.go +++ b/internal/ui/center/perf_test.go @@ -36,7 +36,7 @@ func TestPerfScenario(t *testing.T) { t.Fatalf("default config: %v", err) } - m := New(cfg) + m := New(cfg, nil) wt := &data.Workspace{ Name: "perf", Repo: "/tmp/perf-repo", diff --git a/internal/ui/center/selection_test.go b/internal/ui/center/selection_test.go index 07041bfa..42730358 100644 --- a/internal/ui/center/selection_test.go +++ b/internal/ui/center/selection_test.go @@ -16,7 +16,7 @@ func setupSelectionModel(t *testing.T) (*Model, *Tab) { if err != nil { t.Fatalf("default config: %v", err) } - m := New(cfg) + m := New(cfg, nil) wt := &data.Workspace{ Name: "wt", Repo: "/tmp/repo", @@ -126,7 +126,7 @@ func TestTabBarClickPlusButton(t *testing.T) { if err != nil { t.Fatalf("default config: %v", err) } - m := New(cfg) + m := New(cfg, nil) wt := &data.Workspace{ Name: "wt", Repo: "/tmp/repo", diff --git a/internal/ui/common/icons.go b/internal/ui/common/icons.go index b1cf852b..f629f1cd 100644 --- a/internal/ui/common/icons.go +++ b/internal/ui/common/icons.go @@ -101,7 +101,7 @@ func FileStatusIcon(status string) (icon string, desc string) { case "U": return "U", "unmerged" case "??": - return "A", "new file" + return "?", 
"untracked" case "!!": return "!", "ignored" default: diff --git a/internal/ui/sidebar/terminal.go b/internal/ui/sidebar/terminal.go index 3bdfecd4..5d26a08e 100644 --- a/internal/ui/sidebar/terminal.go +++ b/internal/ui/sidebar/terminal.go @@ -135,6 +135,8 @@ type TerminalModel struct { tmuxServerName string tmuxConfigPath string instanceID string + + terminalFactory func(*data.Workspace) (*pty.Terminal, error) } // NewTerminalModel creates a new sidebar terminal model @@ -169,6 +171,11 @@ func (m *TerminalModel) getTmuxOptions() tmux.Options { return opts } +// SetTerminalFactory provides a custom terminal creator (e.g. sandbox shell). +func (m *TerminalModel) SetTerminalFactory(factory func(*data.Workspace) (*pty.Terminal, error)) { + m.terminalFactory = factory +} + // SetShowKeymapHints controls whether helper text is rendered. func (m *TerminalModel) SetShowKeymapHints(show bool) { if m.showKeymapHints == show {